Add support for Llama2, Palm, Cohere, Replicate Models - using litellm (#317)

update readme
This commit is contained in:
Ishaan Jaff 2023-08-14 18:37:08 -07:00 committed by GitHub
parent 8035c18223
commit d8ad734888
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 57 additions and 0 deletions

View File

@ -1,10 +1,15 @@
**[中文](./README-CN.md) | English**
[![litellm](https://img.shields.io/badge/%20%F0%9F%9A%85%20liteLLM-OpenAI%7CAzure%7CAnthropic%7CPalm%7CCohere%7CReplicate%7CHugging%20Face-blue?color=green)](https://github.com/BerriAI/litellm)
# bilingual_book_maker
The bilingual_book_maker is an AI translation tool that uses ChatGPT to assist users in creating multi-language versions of epub/txt/srt files and books. This tool is exclusively designed for translating epub books that have entered the public domain and is not intended for copyrighted works. Before using this tool, please review the project's **[disclaimer](./disclaimer.md)**.
![image](https://user-images.githubusercontent.com/15976103/222317531-a05317c5-4eee-49de-95cd-04063d9539d9.png)
## Supported Models
gpt-4, gpt-3.5-turbo, claude-2, palm, llama-2, azure-openai, command-nightly
To use non-OpenAI models, use the `liteLLM()` class — liteLLM supports all of the models listed above.
Find more info on using liteLLM here: https://github.com/BerriAI/litellm/blob/main/setup.py
## Preparation

View File

@ -0,0 +1,51 @@
import re
import time
from copy import copy
from os import environ, linesep
from rich import print
import openai
from litellm import completion
from book_maker.translator.chatgptapi_translator import ChatGPTAPI
from .base_translator import Base
# Maps prompt roles to the environment variables that can override them.
# NOTE(review): not referenced within this module's visible code — presumably
# consumed by shared prompt-loading logic (same map exists in the ChatGPTAPI
# translator); confirm before removing.
PROMPT_ENV_MAP = {
    "user": "BBM_CHATGPTAPI_USER_MSG_TEMPLATE",
    "system": "BBM_CHATGPTAPI_SYS_MSG",
}
class liteLLM(ChatGPTAPI):
    """Translator backed by litellm's unified ``completion`` API.

    Inherits all prompt/state handling from :class:`ChatGPTAPI`; only the
    request construction differs, which lets non-OpenAI providers supported
    by litellm (Claude, PaLM, Llama 2, Cohere, ...) be driven through the
    same interface.
    """

    def create_chat_completion(self, text):
        """Build the chat messages for *text* and dispatch them via litellm.

        Returns the raw completion response object from ``litellm.completion``.
        """
        # User message: optional running-summary context, then the filled
        # translation prompt template.
        content = f"{self.context if self.context_flag else ''} {self.prompt_template.format(text=text, language=self.language, crlf=linesep)}"
        context_sys_str = "For each passage given, you may be provided a summary of the story up until this point (wrapped in tags '<summary>' and '</summary>') for context within the query, to provide background context of the story up until this point. If it's provided, use the context summary to aid you in translation with deeper comprehension, and write a new summary above the returned translation, wrapped in '<summary>' HTML-like tags, including important details (if relevant) from the new passage, retaining the most important key details from the existing summary, and dropping out less important details. If the summary is blank, assume it is the start of the story and write a summary from scratch. Do not make the summary longer than a paragraph, and smaller details can be replaced based on the relative importance of new details. The summary should be formatted in straightforward, inornate text, briefly summarising the entire story (from the start, including information before the given passage, leading up to the given passage) to act as an instructional payload for a Large-Language AI Model to fully understand the context of the passage."
        # System message: user-supplied system content (or the default prompt),
        # plus the summary-protocol instructions when context tracking is on.
        # (A dead earlier assignment of sys_content was removed: it was
        # immediately overwritten and inconsistently used "\n" over linesep.)
        sys_content = f"{self.system_content or self.prompt_sys_msg.format(crlf=linesep)} {context_sys_str if self.context_flag else ''} "
        messages = [
            {"role": "system", "content": sys_content},
            {"role": "user", "content": content},
        ]
        if self.deployment_id:
            # Azure OpenAI deployments are addressed by engine/deployment id.
            return completion(
                engine=self.deployment_id,
                messages=messages,
                temperature=self.temperature,
                azure=True,
            )
        # NOTE(review): model is hard-coded here; presumably it should honor a
        # user-selected model (e.g. claude-2, command-nightly) — confirm
        # against the CLI model wiring before generalizing.
        return completion(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=self.temperature,
        )

View File

@ -5,6 +5,7 @@ from setuptools import find_packages, setup
packages = [
"bs4",
"openai",
"litellm",
"requests",
"ebooklib",
"rich",