From 746845064814ab0a3fe51d9853586afb636800d6 Mon Sep 17 00:00:00 2001
From: tcsenpai
Date: Wed, 23 Oct 2024 18:39:53 +0200
Subject: [PATCH] first commit

---
 .gitignore            |   2 +
 README.md             |  30 ++++++++++
 env.example           |   2 +
 requirements.txt      |   3 +
 src/config.py         |  10 ++++
 src/main.py           |  16 ++++++
 src/ollama_service.py |  28 +++++++++
 src/ui.py             | 128 ++++++++++++++++++++++++++++++++++++++++++
 system_prompt.txt     |   8 +++
 9 files changed, 227 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 README.md
 create mode 100644 env.example
 create mode 100644 requirements.txt
 create mode 100644 src/config.py
 create mode 100644 src/main.py
 create mode 100644 src/ollama_service.py
 create mode 100644 src/ui.py
 create mode 100644 system_prompt.txt

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..dc12cb7
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+.env
+__pycache__
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..c84d815
--- /dev/null
+++ b/README.md
@@ -0,0 +1,30 @@
+# OLLAMA Wikipedia-like Interface
+
+This Streamlit app provides a Wikipedia-like interface for querying an OLLAMA language model. Users can input their queries, and the app will generate informative responses using the configured OLLAMA model.
+
+## Setup
+
+1. Install the required dependencies:
+   ```
+   pip install -r requirements.txt
+   ```
+
+2. Configure the `.env` file with your OLLAMA API URL and model name.
+
+3. Run the Streamlit app:
+   ```
+   streamlit run src/main.py
+   ```
+
+4. Open your web browser and navigate to the URL provided by Streamlit (usually `http://localhost:8501`).
+
+## Usage
+
+1. Enter your query in the search bar provided.
+2. Click the "Search" button to generate a response.
+3. The app will display the OLLAMA-generated response in a Wikipedia-like format.
+
+## Configuration
+
+- Modify the `system_prompt.txt` file to change the system prompt sent to the OLLAMA model.
+- Update the `.env` file to change the OLLAMA API URL or model name.
\ No newline at end of file
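
Before going file by file, it is worth confirming that an Ollama server actually answers at the URL the README asks you to configure. A minimal standalone check, assuming the `env.example` defaults introduced below (the script name and prompt are illustrative; adjust the URL and model to your setup):

```
# check_ollama.py: sanity-check the Ollama chat endpoint before launching the app.
import requests

OLLAMA_API_URL = "http://localhost:11434/api/chat"  # assumed env.example default
OLLAMA_MODEL = "llama3.1:8b"  # assumed env.example default

response = requests.post(
    OLLAMA_API_URL,
    json={
        "model": OLLAMA_MODEL,
        "messages": [{"role": "user", "content": "Say hello in one word."}],
        "stream": False,  # ask for one JSON object instead of a stream of chunks
    },
    timeout=120,
)
response.raise_for_status()
# With stream=False, /api/chat returns a single object whose "message"
# field carries the assistant reply.
print(response.json()["message"]["content"])
```
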
diff --git a/env.example b/env.example
new file mode 100644
index 0000000..ad52295
--- /dev/null
+++ b/env.example
@@ -0,0 +1,2 @@
+OLLAMA_API_URL=http://localhost:11434/api/chat
+OLLAMA_MODEL=llama3.1:8b
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..59104c9
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,3 @@
+streamlit==1.22.0
+python-dotenv==1.0.0
+requests==2.31.0
\ No newline at end of file
diff --git a/src/config.py b/src/config.py
new file mode 100644
index 0000000..ef2b1bf
--- /dev/null
+++ b/src/config.py
@@ -0,0 +1,10 @@
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+OLLAMA_API_URL = os.getenv("OLLAMA_API_URL")
+OLLAMA_MODEL = os.getenv("OLLAMA_MODEL")
+
+with open("system_prompt.txt", "r") as f:
+    SYSTEM_PROMPT = f.read().strip()
\ No newline at end of file
diff --git a/src/main.py b/src/main.py
new file mode 100644
index 0000000..6733389
--- /dev/null
+++ b/src/main.py
@@ -0,0 +1,16 @@
+import streamlit as st
+from ui import render_wiki_interface, display_response
+from ollama_service import chat_with_ollama
+
+def main():
+    st.set_page_config(page_title="OLLAMA Wikipedia", page_icon="📚", layout="wide")
+
+    user_query = render_wiki_interface()
+
+    if user_query:
+        with st.spinner("Generating response..."):
+            response = chat_with_ollama(user_query)
+            display_response(user_query, response)
+
+if __name__ == "__main__":
+    main()
diff --git a/src/ollama_service.py b/src/ollama_service.py
new file mode 100644
index 0000000..0fba74f
--- /dev/null
+++ b/src/ollama_service.py
@@ -0,0 +1,28 @@
+import requests
+from config import OLLAMA_API_URL, OLLAMA_MODEL, SYSTEM_PROMPT
+
+
+def chat_with_ollama(user_message):
+    messages = [
+        {"role": "system", "content": SYSTEM_PROMPT},
+        {"role": "user", "content": user_message},
+    ]
+
+    try:
+        response = requests.post(
+            OLLAMA_API_URL,
+            json={
+                "model": OLLAMA_MODEL,
+                "messages": messages,
+                "stream": False,
+                "temperature": 0.8,
+                "num_ctx": 8192,
+                # "mirostat": 1,
+                "num_predict": -2,
+            },
+        )
+        response.raise_for_status()
+        return response.json()["message"]["content"]
+    except requests.RequestException as e:
+        print(f"Error calling OLLAMA API: {e}")
+        return "An error occurred while processing your request."
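
A caveat on the request body in `chat_with_ollama` above: Ollama's `/api/chat` endpoint reads sampler settings such as `temperature`, `num_ctx`, and `num_predict` from a nested `options` object, and silently ignores them when they sit at the top level of the JSON payload, which is where this patch places them. A sketch of the corrected call, reusing the same names (the `timeout` is an added suggestion, not in the patch):

```
# Model parameters belong under "options" for Ollama's /api/chat;
# at the top level of the payload they are silently ignored.
response = requests.post(
    OLLAMA_API_URL,
    json={
        "model": OLLAMA_MODEL,
        "messages": messages,
        "stream": False,
        "options": {
            "temperature": 0.8,
            "num_ctx": 8192,
            "num_predict": -2,  # -2 = generate until the context window is full
        },
    },
    timeout=120,  # keep a hung server from blocking the Streamlit spinner forever
)
```

Relatedly, `config.py` leaves `OLLAMA_API_URL` as `None` when no `.env` file exists; a fallback such as `os.getenv("OLLAMA_API_URL", "http://localhost:11434/api/chat")` would make a first run less brittle.
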
diff --git a/src/ui.py b/src/ui.py
new file mode 100644
index 0000000..543e9ff
--- /dev/null
+++ b/src/ui.py
@@ -0,0 +1,128 @@
+import streamlit as st
+
+
+def set_wiki_style():
+    st.markdown(
+        """
+
+        """,
+        unsafe_allow_html=True,
+    )
+
+
+def render_wiki_interface():
+    set_wiki_style()
+
+    # Create a container for the top bar
+    top_bar = st.container()
+
+    with top_bar:
+        # Center the OLLAMA Wikipedia banner
+        st.markdown(
+            "<div style='text-align: center;'><h1>OLLAMA Wikipedia</h1></div>",
+            unsafe_allow_html=True,
+        )
", + unsafe_allow_html=True, + ) + + user_query = st.text_input("", placeholder="Search OLLAMA Wikipedia") + + search_button = st.button("Search") + + if search_button or ( + user_query and st.session_state.get("last_query") != user_query + ): + st.session_state["last_query"] = user_query + return user_query + + if not user_query: + display_intro() + + return None + + +def display_intro(): + st.title("Welcome to OLLAMA Wikipedia") + st.markdown( + """ + OLLAMA Wikipedia is an AI-powered encyclopedia that generates informative articles on a wide range of topics. + To get started: + + 1. Enter a topic or question in the search bar at the top. + 2. Click the "Search" button or press Enter. + 3. Wait for the AI to generate a comprehensive article based on your query. + + Please note that the information provided is generated by an AI model and should be verified with authoritative sources for critical use. + + Happy exploring! + """ + ) + + +def display_response(query, response): + st.title(query) + st.markdown("---") + st.markdown(response) + + # Add a fake "References" section + st.markdown('
+    st.markdown("## References")
+    st.markdown("1. OLLAMA Language Model (2023)")
+    st.markdown("2. AI-generated content based on user query")
+    st.markdown("</div>", unsafe_allow_html=True)
", unsafe_allow_html=True) + + # Add a fake "Categories" section + st.markdown('
+    st.markdown("Categories: AI-generated content | OLLAMA | Encyclopedia articles")
+    st.markdown("</div>", unsafe_allow_html=True)
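
One small Streamlit note on `render_wiki_interface` above: recent Streamlit releases warn when a widget label is an empty string, and the pinned `streamlit==1.22.0` already supports hiding a real label instead. A possible tweak, offered as a suggestion rather than part of the patch:

```
# Give the search box a real label but hide it, rather than passing "".
user_query = st.text_input(
    "Search query",
    placeholder="Search OLLAMA Wikipedia",
    label_visibility="collapsed",
)
```
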
", unsafe_allow_html=True) diff --git a/system_prompt.txt b/system_prompt.txt new file mode 100644 index 0000000..fdd8e10 --- /dev/null +++ b/system_prompt.txt @@ -0,0 +1,8 @@ +You are a digital encyclopedia like Wikipedia. +You will understand the user query and deliver a Wikipedia-like page about the subject or topic requested by the user. +You cannot add any other elements to the response. +Your aim is to provide a full lenght, informative Wikipedia-like page with all the details. +You will mimic the style of Wikipedia, including the use of headings, subheadings, and concise paragraphs. +You will not add any other elements to the response. +Before you start, think step by step (reasoning) about the task and the response. +Also double check your response for any errors or inconsistencies. \ No newline at end of file