Mirror of https://github.com/yihong0618/bilingual_book_maker.git (synced 2025-06-06 11:35:49 +00:00)

Merge pull request #448 from cangming/support_openai_o_series_model
feat(model): support openai o series model

This commit is contained in: commit 81f9b5280b
.github/workflows/docs.yaml (vendored, 2 changed lines)

@@ -11,6 +11,6 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
         with:
-          python-version: '3.9'
+          python-version: '3.10'
       - run: pip install mkdocs mkdocs-material
       - run: mkdocs gh-deploy --force
.github/workflows/make_test_ebook.yaml (vendored, 6 changed lines)

@@ -27,10 +27,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - name: install python 3.9
+      - name: install python 3.10
         uses: actions/setup-python@v4
         with:
-          python-version: '3.9'
+          python-version: '3.10'
           cache: 'pip' # caching pip dependencies
       - name: Check formatting (black)
         run: |
@@ -71,7 +71,7 @@ jobs:

       - name: Rename and Upload ePub
         if: env.OPENAI_API_KEY != null
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: epub_output
           path: "test_books/lemo_bilingual.epub"

@@ -17,6 +17,7 @@ def parse_prompt_arg(prompt_arg):
     if prompt_arg.endswith(".md") and os.path.exists(prompt_arg):
         try:
             from promptdown import StructuredPrompt
+
             structured_prompt = StructuredPrompt.from_promptdown_file(prompt_arg)

             # Initialize our prompt structure
@@ -24,27 +25,40 @@ def parse_prompt_arg(prompt_arg):

             # Handle developer_message or system_message
             # Developer message takes precedence if both are present
-            if hasattr(structured_prompt, 'developer_message') and structured_prompt.developer_message:
-                prompt['system'] = structured_prompt.developer_message
-            elif hasattr(structured_prompt, 'system_message') and structured_prompt.system_message:
-                prompt['system'] = structured_prompt.system_message
+            if (
+                hasattr(structured_prompt, "developer_message")
+                and structured_prompt.developer_message
+            ):
+                prompt["system"] = structured_prompt.developer_message
+            elif (
+                hasattr(structured_prompt, "system_message")
+                and structured_prompt.system_message
+            ):
+                prompt["system"] = structured_prompt.system_message

             # Extract user message from conversation
-            if hasattr(structured_prompt, 'conversation') and structured_prompt.conversation:
+            if (
+                hasattr(structured_prompt, "conversation")
+                and structured_prompt.conversation
+            ):
                 for message in structured_prompt.conversation:
-                    if message.role.lower() == 'user':
-                        prompt['user'] = message.content
+                    if message.role.lower() == "user":
+                        prompt["user"] = message.content
                         break

             # Ensure we found a user message
-            if 'user' not in prompt or not prompt['user']:
-                raise ValueError("PromptDown file must contain at least one user message")
+            if "user" not in prompt or not prompt["user"]:
+                raise ValueError(
+                    "PromptDown file must contain at least one user message"
+                )

             print(f"Successfully loaded PromptDown file: {prompt_arg}")

             # Validate required placeholders
             if any(c not in prompt["user"] for c in ["{text}"]):
-                raise ValueError("User message in PromptDown must contain `{text}` placeholder")
+                raise ValueError(
+                    "User message in PromptDown must contain `{text}` placeholder"
+                )

             return prompt
         except Exception as e:
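
The precedence rule above is easy to misread in diff form: `developer_message` wins over `system_message` whenever both are present and non-empty. A minimal standalone sketch of that rule (the `FakePrompt` class is a hypothetical stand-in for promptdown's `StructuredPrompt`; `getattr` with a default collapses the hasattr-and-truthy pair into one call):

class FakePrompt:
    # Hypothetical stand-in for promptdown's StructuredPrompt.
    developer_message = "You are a professional translator."
    system_message = "Ignored because developer_message is set."

prompt = {}
sp = FakePrompt()
if getattr(sp, "developer_message", None):
    prompt["system"] = sp.developer_message
elif getattr(sp, "system_message", None):
    prompt["system"] = sp.system_message

assert prompt["system"] == "You are a professional translator."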
@@ -388,7 +402,17 @@ So you are close to reaching the limit. You have to choose your own value, there
    translate_model = MODEL_DICT.get(options.model)
    assert translate_model is not None, "unsupported model"
    API_KEY = ""
-    if options.model in ["openai", "chatgptapi", "gpt4", "gpt4omini", "gpt4o"]:
+    if options.model in [
+        "openai",
+        "chatgptapi",
+        "gpt4",
+        "gpt4omini",
+        "gpt4o",
+        "o1preview",
+        "o1",
+        "o1mini",
+        "o3mini",
+    ]:
         if OPENAI_API_KEY := (
             options.openai_key
             or env.get(
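
The context lines around this change resolve the API key with an assignment expression. A minimal sketch of that pattern, assuming (hypothetically, for this sketch) a `BBM_OPENAI_API_KEY` environment variable as the fallback source:

import os

class Options:
    # Hypothetical stand-in for the parsed CLI options.
    openai_key = ""  # empty string: fall through to the environment

options = Options()
# The walrus operator binds and truth-tests the resolved key in one expression.
if OPENAI_API_KEY := (options.openai_key or os.environ.get("BBM_OPENAI_API_KEY", "")):
    print("OpenAI key resolved")
else:
    print("no key available; OpenAI-family models cannot run")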
@@ -510,6 +534,10 @@ So you are close to reaching the limit. You have to choose your own value, there
            "gpt4",
            "gpt4omini",
            "gpt4o",
+            "o1",
+            "o1preview",
+            "o1mini",
+            "o3mini",
        ], "only support chatgptapi for deployment_id"
        if not options.api_base:
            raise ValueError("`api_base` must be provided when using `deployment_id`")
@@ -534,6 +562,14 @@ So you are close to reaching the limit. You have to choose your own value, there
        e.translate_model.set_gpt4omini_models()
    if options.model == "gpt4o":
        e.translate_model.set_gpt4o_models()
+    if options.model == "o1preview":
+        e.translate_model.set_o1preview_models()
+    if options.model == "o1":
+        e.translate_model.set_o1_models()
+    if options.model == "o1mini":
+        e.translate_model.set_o1mini_models()
+    if options.model == "o3mini":
+        e.translate_model.set_o3mini_models()
    if options.model.startswith("claude-"):
        e.translate_model.set_claude_model(options.model)
    if options.block_size > 0:
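
Each new model family adds one more `if options.model == ...` branch. A table-driven sketch (not the project's code; it only mirrors the setter names added above) shows how the chain could be collapsed if it keeps growing:

# Map CLI model keys to the setter method names introduced in this hunk.
O_SERIES_SETTERS = {
    "o1preview": "set_o1preview_models",
    "o1": "set_o1_models",
    "o1mini": "set_o1mini_models",
    "o3mini": "set_o3mini_models",
}

def apply_o_series_setter(translate_model, model_key):
    # Silently no-op for keys without a dedicated setter.
    name = O_SERIES_SETTERS.get(model_key)
    if name:
        getattr(translate_model, name)()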
@@ -16,6 +16,10 @@ MODEL_DICT = {
    "gpt4": ChatGPTAPI,
    "gpt4omini": ChatGPTAPI,
    "gpt4o": ChatGPTAPI,
+    "o1preview": ChatGPTAPI,
+    "o1": ChatGPTAPI,
+    "o1mini": ChatGPTAPI,
+    "o3mini": ChatGPTAPI,
    "google": Google,
    "caiyun": Caiyun,
    "deepl": DeepL,
@@ -48,6 +48,21 @@ GPT4o_MODEL_LIST = [
    "gpt-4o-2024-08-06",
    "chatgpt-4o-latest",
 ]
+O1PREVIEW_MODEL_LIST = [
+    "o1-preview",
+    "o1-preview-2024-09-12",
+]
+O1_MODEL_LIST = [
+    "o1",
+    "o1-2024-12-17",
+]
+O1MINI_MODEL_LIST = [
+    "o1-mini",
+    "o1-mini-2024-09-12",
+]
+O3MINI_MODEL_LIST = [
+    "o3-mini",
+]


 class ChatGPTAPI(Base):
@@ -422,6 +437,54 @@ class ChatGPTAPI(Base):
         print(f"Using model list {model_list}")
         self.model_list = cycle(model_list)

+    def set_o1preview_models(self):
+        # for issue #375 azure can not use model list
+        if self.deployment_id:
+            self.model_list = cycle(["o1-preview"])
+        else:
+            my_model_list = [
+                i["id"] for i in self.openai_client.models.list().model_dump()["data"]
+            ]
+            model_list = list(set(my_model_list) & set(O1PREVIEW_MODEL_LIST))
+            print(f"Using model list {model_list}")
+            self.model_list = cycle(model_list)
+
+    def set_o1_models(self):
+        # for issue #375 azure can not use model list
+        if self.deployment_id:
+            self.model_list = cycle(["o1"])
+        else:
+            my_model_list = [
+                i["id"] for i in self.openai_client.models.list().model_dump()["data"]
+            ]
+            model_list = list(set(my_model_list) & set(O1_MODEL_LIST))
+            print(f"Using model list {model_list}")
+            self.model_list = cycle(model_list)
+
+    def set_o1mini_models(self):
+        # for issue #375 azure can not use model list
+        if self.deployment_id:
+            self.model_list = cycle(["o1-mini"])
+        else:
+            my_model_list = [
+                i["id"] for i in self.openai_client.models.list().model_dump()["data"]
+            ]
+            model_list = list(set(my_model_list) & set(O1MINI_MODEL_LIST))
+            print(f"Using model list {model_list}")
+            self.model_list = cycle(model_list)
+
+    def set_o3mini_models(self):
+        # for issue #375 azure can not use model list
+        if self.deployment_id:
+            self.model_list = cycle(["o3-mini"])
+        else:
+            my_model_list = [
+                i["id"] for i in self.openai_client.models.list().model_dump()["data"]
+            ]
+            model_list = list(set(my_model_list) & set(O3MINI_MODEL_LIST))
+            print(f"Using model list {model_list}")
+            self.model_list = cycle(model_list)
+
     def set_model_list(self, model_list):
         model_list = list(set(model_list))
         print(f"Using model list {model_list}")
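
All four setters repeat a single pattern: intersect the account's available model ids with a known-good list, then `cycle` through the survivors. A self-contained sketch of that pattern (the `available` list stands in for the ids returned by `openai_client.models.list()`):

from itertools import cycle

O1_MODEL_LIST = ["o1", "o1-2024-12-17"]
available = ["gpt-4o", "o1", "o3-mini"]  # stand-in for the live model listing

model_list = list(set(available) & set(O1_MODEL_LIST))
print(f"Using model list {model_list}")  # -> ['o1']
models = cycle(model_list)  # successive next() calls rotate through the ids
print(next(models))

One caveat: if the intersection is empty, `cycle([])` is exhausted immediately and the first `next()` raises `StopIteration`, so callers may want to check that `model_list` is non-empty before cycling.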
@@ -2,7 +2,7 @@
 ## Models
 `-m, --model <Model>` <br>

-Currently `bbook_maker` supports these models: `chatgptapi` , `gpt3` , `google` , `caiyun` , `deepl` , `deeplfree` , `gpt4` , `gpt4omini` , `claude` , `customapi`.
+Currently `bbook_maker` supports these models: `chatgptapi` , `gpt3` , `google` , `caiyun` , `deepl` , `deeplfree` , `gpt4` , `gpt4omini` , `o1-preview` , `o1` , `o1-mini` , `o3-mini` , `claude` , `customapi`.
 Default model is `chatgptapi` .

 ### OPENAI models
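
For reference, a hypothetical invocation with one of the new keys might look like the line below (note that the CLI hunks above match the un-hyphenated spellings `o1preview`, `o1mini`, `o3mini`, while this doc line lists the hyphenated API model ids):

python3 make_book.py --book_name test_books/animal_farm.epub -m o3mini --openai_key ${OPENAI_API_KEY}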