diff --git a/book_maker/cli.py b/book_maker/cli.py
index 9689949..3de908a 100644
--- a/book_maker/cli.py
+++ b/book_maker/cli.py
@@ -17,40 +17,54 @@ def parse_prompt_arg(prompt_arg):
     if prompt_arg.endswith(".md") and os.path.exists(prompt_arg):
         try:
             from promptdown import StructuredPrompt
+
             structured_prompt = StructuredPrompt.from_promptdown_file(prompt_arg)
-
+
             # Initialize our prompt structure
             prompt = {}
-
+
             # Handle developer_message or system_message
             # Developer message takes precedence if both are present
-            if hasattr(structured_prompt, 'developer_message') and structured_prompt.developer_message:
-                prompt['system'] = structured_prompt.developer_message
-            elif hasattr(structured_prompt, 'system_message') and structured_prompt.system_message:
-                prompt['system'] = structured_prompt.system_message
-
+            if (
+                hasattr(structured_prompt, "developer_message")
+                and structured_prompt.developer_message
+            ):
+                prompt["system"] = structured_prompt.developer_message
+            elif (
+                hasattr(structured_prompt, "system_message")
+                and structured_prompt.system_message
+            ):
+                prompt["system"] = structured_prompt.system_message
+
             # Extract user message from conversation
-            if hasattr(structured_prompt, 'conversation') and structured_prompt.conversation:
+            if (
+                hasattr(structured_prompt, "conversation")
+                and structured_prompt.conversation
+            ):
                 for message in structured_prompt.conversation:
-                    if message.role.lower() == 'user':
-                        prompt['user'] = message.content
+                    if message.role.lower() == "user":
+                        prompt["user"] = message.content
                         break
-
+
             # Ensure we found a user message
-            if 'user' not in prompt or not prompt['user']:
-                raise ValueError("PromptDown file must contain at least one user message")
-
+            if "user" not in prompt or not prompt["user"]:
+                raise ValueError(
+                    "PromptDown file must contain at least one user message"
+                )
+
             print(f"Successfully loaded PromptDown file: {prompt_arg}")
-
+
             # Validate required placeholders
             if any(c not in prompt["user"] for c in ["{text}"]):
-                raise ValueError("User message in PromptDown must contain `{text}` placeholder")
-
+                raise ValueError(
+                    "User message in PromptDown must contain `{text}` placeholder"
+                )
+
             return prompt
         except Exception as e:
             print(f"Error parsing PromptDown file: {e}")
             # Fall through to other parsing methods
-
+
     # Existing parsing logic for JSON strings and other formats
     if not any(prompt_arg.endswith(ext) for ext in [".json", ".txt", ".md"]):
         try:
@@ -388,7 +402,17 @@ So you are close to reaching the limit. You have to choose your own value, there
     translate_model = MODEL_DICT.get(options.model)
     assert translate_model is not None, "unsupported model"
     API_KEY = ""
-    if options.model in ["openai", "chatgptapi", "gpt4", "gpt4omini", "gpt4o", "o1preview", "o1", "o1mini", "o3mini"]:
+    if options.model in [
+        "openai",
+        "chatgptapi",
+        "gpt4",
+        "gpt4omini",
+        "gpt4o",
+        "o1preview",
+        "o1",
+        "o1mini",
+        "o3mini",
+    ]:
         if OPENAI_API_KEY := (
             options.openai_key
             or env.get(
diff --git a/book_maker/translator/chatgptapi_translator.py b/book_maker/translator/chatgptapi_translator.py
index e97966f..47fbba6 100644
--- a/book_maker/translator/chatgptapi_translator.py
+++ b/book_maker/translator/chatgptapi_translator.py
@@ -64,6 +64,7 @@ O3MINI_MODEL_LIST = [
     "o3-mini",
 ]
 
+
 class ChatGPTAPI(Base):
     DEFAULT_PROMPT = "Please help me to translate,`{text}` to {language}, please return only translated content not include the origin text"