Mirror of https://github.com/yihong0618/bilingual_book_maker.git (synced 2025-06-02 09:30:24 +00:00)

Merge pull request #448 from cangming/support_openai_o_series_model

feat(model): support openai o series model

Commit 81f9b5280b
.github/workflows/docs.yaml (vendored, 4 changes)
```diff
@@ -11,6 +11,6 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
         with:
-          python-version: '3.9'
+          python-version: '3.10'
       - run: pip install mkdocs mkdocs-material
       - run: mkdocs gh-deploy --force
```
.github/workflows/make_test_ebook.yaml (vendored, 6 changes)
```diff
@@ -27,10 +27,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - name: install python 3.9
+      - name: install python 3.10
        uses: actions/setup-python@v4
        with:
-          python-version: '3.9'
+          python-version: '3.10'
          cache: 'pip' # caching pip dependencies
      - name: Check formatting (black)
        run: |
```
```diff
@@ -71,7 +71,7 @@ jobs:

      - name: Rename and Upload ePub
        if: env.OPENAI_API_KEY != null
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
        with:
          name: epub_output
          path: "test_books/lemo_bilingual.epub"
```
```diff
@@ -17,40 +17,54 @@ def parse_prompt_arg(prompt_arg):
     if prompt_arg.endswith(".md") and os.path.exists(prompt_arg):
         try:
             from promptdown import StructuredPrompt

             structured_prompt = StructuredPrompt.from_promptdown_file(prompt_arg)

             # Initialize our prompt structure
             prompt = {}

             # Handle developer_message or system_message
             # Developer message takes precedence if both are present
-            if hasattr(structured_prompt, 'developer_message') and structured_prompt.developer_message:
-                prompt['system'] = structured_prompt.developer_message
-            elif hasattr(structured_prompt, 'system_message') and structured_prompt.system_message:
-                prompt['system'] = structured_prompt.system_message
+            if (
+                hasattr(structured_prompt, "developer_message")
+                and structured_prompt.developer_message
+            ):
+                prompt["system"] = structured_prompt.developer_message
+            elif (
+                hasattr(structured_prompt, "system_message")
+                and structured_prompt.system_message
+            ):
+                prompt["system"] = structured_prompt.system_message

             # Extract user message from conversation
-            if hasattr(structured_prompt, 'conversation') and structured_prompt.conversation:
+            if (
+                hasattr(structured_prompt, "conversation")
+                and structured_prompt.conversation
+            ):
                 for message in structured_prompt.conversation:
-                    if message.role.lower() == 'user':
-                        prompt['user'] = message.content
+                    if message.role.lower() == "user":
+                        prompt["user"] = message.content
                         break

             # Ensure we found a user message
-            if 'user' not in prompt or not prompt['user']:
-                raise ValueError("PromptDown file must contain at least one user message")
+            if "user" not in prompt or not prompt["user"]:
+                raise ValueError(
+                    "PromptDown file must contain at least one user message"
+                )

             print(f"Successfully loaded PromptDown file: {prompt_arg}")

             # Validate required placeholders
             if any(c not in prompt["user"] for c in ["{text}"]):
-                raise ValueError("User message in PromptDown must contain `{text}` placeholder")
+                raise ValueError(
+                    "User message in PromptDown must contain `{text}` placeholder"
+                )

             return prompt
         except Exception as e:
             print(f"Error parsing PromptDown file: {e}")
             # Fall through to other parsing methods

     # Existing parsing logic for JSON strings and other formats
     if not any(prompt_arg.endswith(ext) for ext in [".json", ".txt", ".md"]):
         try:
```
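The reflowed branches above implement a simple precedence rule: `developer_message` wins over `system_message`, and the first `user` turn in the conversation supplies the template that must contain `{text}`. Here is a minimal runnable sketch of that rule, using a `SimpleNamespace` stand-in rather than promptdown's real `StructuredPrompt`:

```python
# Sketch only: SimpleNamespace stands in for promptdown's StructuredPrompt.
from types import SimpleNamespace

structured_prompt = SimpleNamespace(
    developer_message="You are a translator.",
    system_message="(ignored: developer_message takes precedence)",
    conversation=[SimpleNamespace(role="User", content="Translate: {text}")],
)

prompt = {}
# developer_message takes precedence over system_message
if getattr(structured_prompt, "developer_message", None):
    prompt["system"] = structured_prompt.developer_message
elif getattr(structured_prompt, "system_message", None):
    prompt["system"] = structured_prompt.system_message

# the first user turn becomes the user template
for message in structured_prompt.conversation:
    if message.role.lower() == "user":
        prompt["user"] = message.content
        break

assert "{text}" in prompt["user"]  # the placeholder the CLI validates
print(prompt)
```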
```diff
@@ -388,7 +402,17 @@ So you are close to reaching the limit. You have to choose your own value, there
     translate_model = MODEL_DICT.get(options.model)
     assert translate_model is not None, "unsupported model"
     API_KEY = ""
-    if options.model in ["openai", "chatgptapi", "gpt4", "gpt4omini", "gpt4o"]:
+    if options.model in [
+        "openai",
+        "chatgptapi",
+        "gpt4",
+        "gpt4omini",
+        "gpt4o",
+        "o1preview",
+        "o1",
+        "o1mini",
+        "o3mini",
+    ]:
         if OPENAI_API_KEY := (
             options.openai_key
             or env.get(
```
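The `env.get(...)` call is truncated in the hunk above; it falls back to an environment variable when `--openai_key` is absent. A sketch of that fallback order (the `BBM_OPENAI_API_KEY` variable name is taken from the project's README and should be treated as an assumption here):

```python
import os

# Sketch: an explicit CLI key wins; otherwise fall back to the environment.
cli_key = None  # stand-in for options.openai_key
if OPENAI_API_KEY := (cli_key or os.environ.get("BBM_OPENAI_API_KEY", "")):
    print("API key configured")
else:
    print("no API key configured")
```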
```diff
@@ -510,6 +534,10 @@ So you are close to reaching the limit. You have to choose your own value, there
             "gpt4",
             "gpt4omini",
             "gpt4o",
+            "o1",
+            "o1preview",
+            "o1mini",
+            "o3mini",
         ], "only support chatgptapi for deployment_id"
         if not options.api_base:
             raise ValueError("`api_base` must be provided when using `deployment_id`")
```
```diff
@@ -534,6 +562,14 @@ So you are close to reaching the limit. You have to choose your own value, there
             e.translate_model.set_gpt4omini_models()
         if options.model == "gpt4o":
             e.translate_model.set_gpt4o_models()
+        if options.model == "o1preview":
+            e.translate_model.set_o1preview_models()
+        if options.model == "o1":
+            e.translate_model.set_o1_models()
+        if options.model == "o1mini":
+            e.translate_model.set_o1mini_models()
+        if options.model == "o3mini":
+            e.translate_model.set_o3mini_models()
         if options.model.startswith("claude-"):
             e.translate_model.set_claude_model(options.model)
         if options.block_size > 0:
```
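Each branch above calls a per-alias setter on the translator. A compact sketch of the same dispatch as a lookup table, with a stub translator that implements only the method exercised (method names come from this diff; the stub is not the project's class):

```python
from itertools import cycle


class StubTranslator:
    # stub exposing only the one setter we call below
    def set_o3mini_models(self):
        self.model_list = cycle(["o3-mini"])


SETTERS = {
    "o1preview": "set_o1preview_models",
    "o1": "set_o1_models",
    "o1mini": "set_o1mini_models",
    "o3mini": "set_o3mini_models",
}

translator = StubTranslator()
getattr(translator, SETTERS["o3mini"])()  # same effect as the o3mini branch
print(next(translator.model_list))  # -> o3-mini
```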
```diff
@@ -16,6 +16,10 @@ MODEL_DICT = {
     "gpt4": ChatGPTAPI,
     "gpt4omini": ChatGPTAPI,
     "gpt4o": ChatGPTAPI,
+    "o1preview": ChatGPTAPI,
+    "o1": ChatGPTAPI,
+    "o1mini": ChatGPTAPI,
+    "o3mini": ChatGPTAPI,
     "google": Google,
     "caiyun": Caiyun,
     "deepl": DeepL,
```
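`MODEL_DICT` is the registry the CLI consults (`MODEL_DICT.get(options.model)` in the hunk further up), so adding these entries is what makes the new aliases resolve to the OpenAI translator. A sketch of that lookup with a stub class:

```python
class ChatGPTAPI:  # stub standing in for book_maker's translator class
    pass


MODEL_DICT = {
    "o1preview": ChatGPTAPI,
    "o1": ChatGPTAPI,
    "o1mini": ChatGPTAPI,
    "o3mini": ChatGPTAPI,
}

translate_model = MODEL_DICT.get("o3mini")
assert translate_model is not None, "unsupported model"
```

With the registry entry plus the setter wiring above, an invocation along the lines of `python3 make_book.py --book_name test_books/animal_farm.epub --openai_key $KEY -m o3mini` (paths and key placeholder assumed from the repo's README) should route to `ChatGPTAPI`.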
```diff
@@ -48,6 +48,21 @@ GPT4o_MODEL_LIST = [
     "gpt-4o-2024-08-06",
     "chatgpt-4o-latest",
 ]
+O1PREVIEW_MODEL_LIST = [
+    "o1-preview",
+    "o1-preview-2024-09-12",
+]
+O1_MODEL_LIST = [
+    "o1",
+    "o1-2024-12-17",
+]
+O1MINI_MODEL_LIST = [
+    "o1-mini",
+    "o1-mini-2024-09-12",
+]
+O3MINI_MODEL_LIST = [
+    "o3-mini",
+]


 class ChatGPTAPI(Base):
```
```diff
@@ -422,6 +437,54 @@ class ChatGPTAPI(Base):
         print(f"Using model list {model_list}")
         self.model_list = cycle(model_list)

+    def set_o1preview_models(self):
+        # for issue #375 azure can not use model list
+        if self.deployment_id:
+            self.model_list = cycle(["o1-preview"])
+        else:
+            my_model_list = [
+                i["id"] for i in self.openai_client.models.list().model_dump()["data"]
+            ]
+            model_list = list(set(my_model_list) & set(O1PREVIEW_MODEL_LIST))
+            print(f"Using model list {model_list}")
+            self.model_list = cycle(model_list)
+
+    def set_o1_models(self):
+        # for issue #375 azure can not use model list
+        if self.deployment_id:
+            self.model_list = cycle(["o1"])
+        else:
+            my_model_list = [
+                i["id"] for i in self.openai_client.models.list().model_dump()["data"]
+            ]
+            model_list = list(set(my_model_list) & set(O1_MODEL_LIST))
+            print(f"Using model list {model_list}")
+            self.model_list = cycle(model_list)
+
+    def set_o1mini_models(self):
+        # for issue #375 azure can not use model list
+        if self.deployment_id:
+            self.model_list = cycle(["o1-mini"])
+        else:
+            my_model_list = [
+                i["id"] for i in self.openai_client.models.list().model_dump()["data"]
+            ]
+            model_list = list(set(my_model_list) & set(O1MINI_MODEL_LIST))
+            print(f"Using model list {model_list}")
+            self.model_list = cycle(model_list)
+
+    def set_o3mini_models(self):
+        # for issue #375 azure can not use model list
+        if self.deployment_id:
+            self.model_list = cycle(["o3-mini"])
+        else:
+            my_model_list = [
+                i["id"] for i in self.openai_client.models.list().model_dump()["data"]
+            ]
+            model_list = list(set(my_model_list) & set(O3MINI_MODEL_LIST))
+            print(f"Using model list {model_list}")
+            self.model_list = cycle(model_list)
+
     def set_model_list(self, model_list):
         model_list = list(set(model_list))
         print(f"Using model list {model_list}")
```
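All four setters repeat one pattern: intersect the account's available model ids with a known allow-list, then round-robin over the survivors with `itertools.cycle`. A self-contained sketch of that pattern (the `available` list is a made-up stand-in for the ids returned by `models.list()`):

```python
from itertools import cycle

O3MINI_MODEL_LIST = ["o3-mini"]
available = ["gpt-4o", "o1-mini", "o3-mini"]  # made-up stand-in for models.list() ids

# keep only the models this account actually exposes
model_list = list(set(available) & set(O3MINI_MODEL_LIST))
print(f"Using model list {model_list}")  # -> ['o3-mini']

rotation = cycle(model_list)
print(next(rotation), next(rotation))  # cycles back around: o3-mini o3-mini
```

The four near-identical methods could share one helper parameterized by the allow-list; the diff instead keeps them separate, matching the existing `set_gpt4o_models`-style layout of the class.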
```diff
@@ -2,7 +2,7 @@
 ## Models
 `-m, --model <Model>` <br>

-Currently `bbook_maker` supports these models: `chatgptapi` , `gpt3` , `google` , `caiyun` , `deepl` , `deeplfree` , `gpt4` , `gpt4omini` , `claude` , `customapi`.
+Currently `bbook_maker` supports these models: `chatgptapi` , `gpt3` , `google` , `caiyun` , `deepl` , `deeplfree` , `gpt4` , `gpt4omini` , `o1-preview` , `o1` , `o1-mini` , `o3-mini` , `claude` , `customapi`.
 Default model is `chatgptapi` .

 ### OPENAI models
```