diff --git a/book_maker/cli.py b/book_maker/cli.py
index 4ad0c2e..76ecbe1 100644
--- a/book_maker/cli.py
+++ b/book_maker/cli.py
@@ -247,6 +247,12 @@ So you are close to reaching the limit. You have to choose your own value, there
         action="store_true",
         help="adds an additional paragraph for global, updating historical context of the story to the model's input, improving the narrative consistency for the AI model (this uses ~200 more tokens each time)",
     )
+    parser.add_argument(
+        "--temperature",
+        type=float,
+        default=1.0,
+        help="temperature parameter for `gpt3`/`chatgptapi`/`gpt4`/`claude`",
+    )
 
     options = parser.parse_args()
 
@@ -331,6 +337,7 @@ So you are close to reaching the limit. You have to choose your own value, there
         prompt_config=parse_prompt_arg(options.prompt_arg),
         single_translate=options.single_translate,
         context_flag=options.context_flag,
+        temperature=options.temperature,
     )
     # other options
     if options.allow_navigable_strings:
diff --git a/book_maker/loader/epub_loader.py b/book_maker/loader/epub_loader.py
index b50af50..56f8dae 100644
--- a/book_maker/loader/epub_loader.py
+++ b/book_maker/loader/epub_loader.py
@@ -31,6 +31,7 @@ class EPUBBookLoader(BaseBookLoader):
         prompt_config=None,
         single_translate=False,
         context_flag=False,
+        temperature=1.0,
     ):
         self.epub_name = epub_name
         self.new_epub = epub.EpubBook()
@@ -39,6 +40,7 @@ class EPUBBookLoader(BaseBookLoader):
             language,
             api_base=model_api_base,
             context_flag=context_flag,
+            temperature=temperature,
             **prompt_config_to_kwargs(prompt_config),
         )
         self.is_test = is_test
diff --git a/book_maker/loader/srt_loader.py b/book_maker/loader/srt_loader.py
index 99d8c87..e30c660 100644
--- a/book_maker/loader/srt_loader.py
+++ b/book_maker/loader/srt_loader.py
@@ -24,12 +24,14 @@ class SRTBookLoader(BaseBookLoader):
         prompt_config=None,
         single_translate=False,
         context_flag=False,
+        temperature=1.0,
     ) -> None:
         self.srt_name = srt_name
         self.translate_model = model(
             key,
             language,
             api_base=model_api_base,
+            temperature=temperature,
             **prompt_config_to_kwargs(
                 {
                     "system": "You are a srt subtitle file translator.",
diff --git a/book_maker/loader/txt_loader.py b/book_maker/loader/txt_loader.py
index 3737950..cc1899c 100644
--- a/book_maker/loader/txt_loader.py
+++ b/book_maker/loader/txt_loader.py
@@ -20,12 +20,14 @@ class TXTBookLoader(BaseBookLoader):
         prompt_config=None,
         single_translate=False,
         context_flag=False,
+        temperature=1.0,
     ) -> None:
         self.txt_name = txt_name
         self.translate_model = model(
             key,
             language,
             api_base=model_api_base,
+            temperature=temperature,
             **prompt_config_to_kwargs(prompt_config),
         )
         self.is_test = is_test
diff --git a/book_maker/translator/chatgptapi_translator.py b/book_maker/translator/chatgptapi_translator.py
index 781c4d8..9c4420d 100644
--- a/book_maker/translator/chatgptapi_translator.py
+++ b/book_maker/translator/chatgptapi_translator.py
@@ -24,6 +24,7 @@ class ChatGPTAPI(Base):
         api_base=None,
         prompt_template=None,
         prompt_sys_msg=None,
+        temperature=1.0,
         **kwargs,
     ) -> None:
         super().__init__(key, language)
@@ -46,6 +47,7 @@ class ChatGPTAPI(Base):
         )
         self.system_content = environ.get("OPENAI_API_SYS_MSG") or ""
         self.deployment_id = None
+        self.temperature = temperature
 
     def rotate_key(self):
         openai.api_key = next(self.keys)
@@ -64,11 +66,13 @@ class ChatGPTAPI(Base):
             return openai.ChatCompletion.create(
                 engine=self.deployment_id,
                 messages=messages,
+                temperature=self.temperature,
             )
 
         return openai.ChatCompletion.create(
             model="gpt-3.5-turbo",
             messages=messages,
+            temperature=self.temperature,
         )
 
     def get_translation(self, text):
diff --git a/book_maker/translator/claude_translator.py b/book_maker/translator/claude_translator.py
index 958536c..5ca99e1 100644
--- a/book_maker/translator/claude_translator.py
+++ b/book_maker/translator/claude_translator.py
@@ -7,7 +7,13 @@ from .base_translator import Base
 
 class Claude(Base):
     def __init__(
-        self, key, language, api_base=None, prompt_template=None, **kwargs
+        self,
+        key,
+        language,
+        api_base=None,
+        prompt_template=None,
+        temperature=1.0,
+        **kwargs,
     ) -> None:
         super().__init__(key, language)
         self.api_url = (
@@ -23,7 +29,7 @@ class Claude(Base):
             "prompt": "",
             "model": "claude-v1.3",
             "max_tokens_to_sample": 1024,
-            "temperature": 1,
+            "temperature": temperature,
             "stop_sequences": ["\n\nHuman:"],
         }
         self.session = requests.session()
diff --git a/book_maker/translator/gpt3_translator.py b/book_maker/translator/gpt3_translator.py
index ae1bb8c..61f0dcf 100644
--- a/book_maker/translator/gpt3_translator.py
+++ b/book_maker/translator/gpt3_translator.py
@@ -7,7 +7,13 @@ from .base_translator import Base
 
 class GPT3(Base):
     def __init__(
-        self, key, language, api_base=None, prompt_template=None, **kwargs
+        self,
+        key,
+        language,
+        api_base=None,
+        prompt_template=None,
+        temperature=1.0,
+        **kwargs,
     ) -> None:
         super().__init__(key, language)
         self.api_url = (
@@ -23,7 +29,7 @@ class GPT3(Base):
             "prompt": "",
             "model": "text-davinci-003",
             "max_tokens": 1024,
-            "temperature": 1,
+            "temperature": temperature,
             "top_p": 1,
         }
         self.session = requests.session()
diff --git a/book_maker/translator/gpt4_translator.py b/book_maker/translator/gpt4_translator.py
index d0036b8..1573eca 100644
--- a/book_maker/translator/gpt4_translator.py
+++ b/book_maker/translator/gpt4_translator.py
@@ -25,6 +25,7 @@ class GPT4(Base):
         prompt_template=None,
         prompt_sys_msg=None,
         context_flag=False,
+        temperature=1.0,
         **kwargs,
     ) -> None:
         super().__init__(key, language)
@@ -49,6 +50,7 @@ class GPT4(Base):
         )
         self.system_content = environ.get("OPENAI_API_SYS_MSG") or ""
         self.deployment_id = None
+        self.temperature = temperature
 
     def rotate_key(self):
         openai.api_key = next(self.keys)
@@ -75,11 +77,13 @@ class GPT4(Base):
             return openai.ChatCompletion.create(
                 engine=self.deployment_id,
                 messages=messages,
+                temperature=self.temperature,
             )
 
         return openai.ChatCompletion.create(
             model="gpt-4",
             messages=messages,
+            temperature=self.temperature,
         )
 
     def get_translation(self, text):