feat: add support for --interval option to gemini

Author: Risin
Date: 2024-10-15 18:40:44 +09:00
Parent: 696ea341b5
Commit: 5ad87bca4f
3 changed files with 13 additions and 2 deletions
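
For context, a hypothetical command-line invocation of the new flag (the entry-point script name, --book_name, and --model are illustrative assumptions; only --interval is introduced by this commit):

python3 make_book.py --book_name my_book.epub --model gemini --interval 0.1

With Gemini, the translator then sleeps 0.1 seconds after each request instead of the previous hard-coded 0.5 seconds.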


@@ -316,6 +316,12 @@ So you are close to reaching the limit. You have to choose your own value, there
action="store_true",
help="Use pre-generated batch translations to create files. Run with --batch first before using this option",
)
+parser.add_argument(
+"--interval",
+type=float,
+default=0.01,
+help="Request interval in seconds (e.g., 0.1 for 100ms). Currently only supported for Gemini models. Default: 0.01",
+)
options = parser.parse_args()
@@ -422,6 +428,7 @@ So you are close to reaching the limit. You have to choose your own value, there
single_translate=options.single_translate,
context_flag=options.context_flag,
temperature=options.temperature,
+interval=options.interval,
)
# other options
if options.allow_navigable_strings:
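
A minimal, self-contained sketch of how the new option behaves in isolation, using only the argparse definition added above (the real parser defines many more options):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--interval",
    type=float,
    default=0.01,
    help="Request interval in seconds (e.g., 0.1 for 100ms). Currently only supported for Gemini models. Default: 0.01",
)

options = parser.parse_args(["--interval", "0.1"])
print(options.interval)  # 0.1, parsed as a float
options = parser.parse_args([])
print(options.interval)  # 0.01, the default

The parsed value is then handed on via interval=options.interval, as the second hunk above shows.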


@@ -35,6 +35,7 @@ class EPUBBookLoader(BaseBookLoader):
context_flag=False,
temperature=1.0,
context_paragraph_limit=0,
+interval=0.01,
):
self.epub_name = epub_name
self.new_epub = epub.EpubBook()
@@ -45,6 +46,7 @@ class EPUBBookLoader(BaseBookLoader):
context_flag=context_flag,
context_paragraph_limit=context_paragraph_limit,
temperature=temperature,
+interval=interval,
**prompt_config_to_kwargs(prompt_config),
)
self.is_test = is_test
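
The loader does not use the interval itself; it only forwards the keyword into the translate model's constructor. A stripped-down sketch of that plumbing (class names and argument lists are simplified stand-ins, not the real signatures):

class DummyTranslator:
    def __init__(self, key, language, temperature=1.0, interval=0.01, **kwargs):
        # the translator keeps the interval and sleeps on it between requests
        self.interval = interval

class DummyLoader:
    def __init__(self, model, key, language, temperature=1.0, interval=0.01):
        # the loader just passes the kwarg along, as in the diff above
        self.translate_model = model(key, language, temperature=temperature, interval=interval)

loader = DummyLoader(DummyTranslator, "api-key", "zh-cn", interval=0.1)
print(loader.translate_model.interval)  # 0.1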


@@ -43,9 +43,11 @@ class Gemini(Base):
key,
language,
temperature=1.0,
+interval=0.01,
**kwargs,
) -> None:
super().__init__(key, language)
+self.interval = interval
generation_config["temperature"] = temperature
model = genai.GenerativeModel(
model_name="gemini-pro",
@@ -90,8 +92,8 @@
self.convo.history = self.convo.history[2:]
print("[bold green]" + re.sub("\n{3,}", "\n\n", t_text) + "[/bold green]")
-# for limit
-time.sleep(0.5)
+# for rate limit (RPM)
+time.sleep(self.interval)
if num:
t_text = str(num) + "\n" + t_text
return t_text
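
Because time.sleep(self.interval) now runs once after every Gemini request, the interval sets an upper bound on the request rate. A rough sketch of that arithmetic (it ignores per-request latency, so the real rate is lower):

def max_requests_per_minute(interval_seconds: float) -> float:
    # Upper bound imposed by sleeping interval_seconds between consecutive requests.
    return 60.0 / interval_seconds

print(max_requests_per_minute(0.01))  # 6000.0 -- the default barely throttles anything
print(max_requests_per_minute(0.1))   # 600.0
print(max_requests_per_minute(1.0))   # 60.0, e.g. to stay under a 60 RPM quota

Users hitting requests-per-minute limits can raise --interval until this bound drops below their quota.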