first commit

This commit is contained in:
tcsenpai 2024-10-23 18:39:53 +02:00
commit 7468450648
9 changed files with 227 additions and 0 deletions

2
.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
.env
__pycache__

30
README.md Normal file
View File

@ -0,0 +1,30 @@
# OLLAMA Wikipedia-like Interface
This Streamlit app provides a Wikipedia-like interface for querying an OLLAMA language model. Users can input their queries, and the app will generate informative responses using the configured OLLAMA model.
## Setup
1. Install the required dependencies:
```
pip install -r requirements.txt
```
2. Configure the `.env` file with your OLLAMA API URL and model name.
3. Run the Streamlit app:
```
streamlit run src/main.py
```
4. Open your web browser and navigate to the URL provided by Streamlit (usually `http://localhost:8501`).
## Usage
1. Enter your query in the text area provided.
2. Click the "Submit" button to generate a response.
3. The app will display the OLLAMA-generated response in a Wikipedia-like format.
## Configuration
- Modify the `system_prompt.txt` file to change the system prompt sent to the OLLAMA model.
- Update the `.env` file to change the OLLAMA API URL or model name.

2
env.example Normal file
View File

@ -0,0 +1,2 @@
OLLAMA_API_URL=http://localhost:11434/api/chat
OLLAMA_MODEL=llama3.1:8b

3
requirements.txt Normal file
View File

@ -0,0 +1,3 @@
streamlit==1.22.0
python-dotenv==1.0.0
requests==2.31.0

10
src/config.py Normal file
View File

@ -0,0 +1,10 @@
import os

from dotenv import load_dotenv

# Pull variables from a local .env file into the process environment
# (see env.example for the expected keys).
load_dotenv()

# Ollama endpoint and model name; both are None when the variable is unset.
OLLAMA_API_URL = os.getenv("OLLAMA_API_URL")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL")

# System prompt shipped alongside the app, read once at import time.
# Explicit encoding fixes platform-dependent decoding (e.g. cp1252 on Windows).
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    SYSTEM_PROMPT = f.read().strip()

16
src/main.py Normal file
View File

@ -0,0 +1,16 @@
import streamlit as st

from ollama_service import chat_with_ollama
from ui import display_response, render_wiki_interface


def main():
    """Wire the wiki-style UI to the Ollama backend for one Streamlit run."""
    st.set_page_config(page_title="OLLAMA Wikipedia", page_icon="📚", layout="wide")

    query = render_wiki_interface()
    if not query:
        # No new query this run; the UI already rendered the intro page.
        return

    # Keep the spinner visible while both generating and rendering the answer.
    with st.spinner("Generating response..."):
        answer = chat_with_ollama(query)
        display_response(query, answer)


if __name__ == "__main__":
    main()

28
src/ollama_service.py Normal file
View File

@ -0,0 +1,28 @@
import requests

from config import OLLAMA_API_URL, OLLAMA_MODEL, SYSTEM_PROMPT


def chat_with_ollama(user_message):
    """Ask the configured Ollama model to answer *user_message*.

    Sends a non-streaming request to the Ollama ``/api/chat`` endpoint with
    the configured system prompt prepended.

    Args:
        user_message: The raw query text typed by the user.

    Returns:
        The assistant's reply text, or a generic error string when the HTTP
        call fails.
    """
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": user_message},
    ]
    payload = {
        "model": OLLAMA_MODEL,
        "messages": messages,
        "stream": False,
        # BUG FIX: Ollama's /api/chat reads sampling parameters from the
        # nested "options" object; placed at the top level (as before) they
        # were silently ignored by the server.
        "options": {
            "temperature": 0.8,
            "num_ctx": 8192,
            # "mirostat": 1,
            "num_predict": -2,
        },
    }
    try:
        # Timeout added so a hung or unreachable server cannot block the UI
        # forever; generation can be slow, hence the generous value.
        response = requests.post(OLLAMA_API_URL, json=payload, timeout=300)
        response.raise_for_status()
        return response.json()["message"]["content"]
    except requests.RequestException as e:
        print(f"Error calling OLLAMA API: {e}")
        return "An error occurred while processing your request."

128
src/ui.py Normal file
View File

@ -0,0 +1,128 @@
import streamlit as st


def set_wiki_style():
    """Inject Wikipedia-flavoured CSS overrides into the Streamlit page.

    Must be called before rendering content so the styles apply to the
    whole page. Relies on ``unsafe_allow_html=True`` to pass raw HTML/CSS
    through ``st.markdown``.
    """
    # NOTE(review): the selectors targeting Streamlit internals
    # (.stTextInput>div>div>input, .stButton>button) depend on Streamlit's
    # DOM structure and may break on version upgrades — confirm against the
    # pinned streamlit==1.22.0.
    st.markdown(
        """
        <style>
        .main {
            background-color: #ffffff;
            color: #202122;
            font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Lato,Helvetica,Arial,sans-serif;
        }
        .stApp {
            max-width: 1200px;
            margin: 0 auto;
        }
        h1 {
            font-family: 'Linux Libertine','Georgia','Times',serif;
            color: #202122;
            border-bottom: 1px solid #a2a9b1;
            padding-bottom: 5px;
            margin-bottom: 20px;
        }
        .stTextInput>div>div>input {
            width: 70%;
        }
        .stButton>button {
            background-color: #36c;
            color: white;
            width: 20%;
            margin-left: auto;
        }
        p, li {
            font-size: 14px;
            line-height: 1.6;
            color: #202122;
        }
        h2 {
            font-family: 'Linux Libertine','Georgia','Times',serif;
            border-bottom: 1px solid #a2a9b1;
            padding-bottom: 5px;
            color: #202122;
        }
        .references {
            background-color: #f8f9fa;
            border: 1px solid #a2a9b1;
            padding: 10px;
            margin-top: 20px;
        }
        .categories {
            background-color: #f8f9fa;
            border: 1px solid #a2a9b1;
            padding: 10px;
            margin-top: 20px;
        }
        span {
            color: #202122;
            -webkit-text-stroke: 0.7px white;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )
def render_wiki_interface():
    """Draw the search page chrome and return a freshly submitted query.

    Returns the query string when the Search button was pressed or the text
    differs from the previously handled query; otherwise shows the intro
    page (when the field is empty) and returns None.
    """
    set_wiki_style()

    # Top bar: centred banner plus the search controls.
    with st.container():
        st.markdown(
            "<h1 style='text-align: center;'>OLLAMA Wikipedia</h1>",
            unsafe_allow_html=True,
        )
        query = st.text_input("", placeholder="Search OLLAMA Wikipedia")
        submitted = st.button("Search")

    # A non-empty query that differs from the last handled one counts as a
    # new submission even without a button press (e.g. pressing Enter).
    is_new_query = bool(query) and st.session_state.get("last_query") != query
    if submitted or is_new_query:
        st.session_state["last_query"] = query
        return query

    if not query:
        display_intro()
    return None
def display_intro():
    """Render the landing-page welcome text shown when no query is active."""
    st.title("Welcome to OLLAMA Wikipedia")
    st.markdown(
        """
        OLLAMA Wikipedia is an AI-powered encyclopedia that generates informative articles on a wide range of topics.
        To get started:
        1. Enter a topic or question in the search bar at the top.
        2. Click the "Search" button or press Enter.
        3. Wait for the AI to generate a comprehensive article based on your query.
        Please note that the information provided is generated by an AI model and should be verified with authoritative sources for critical use.
        Happy exploring!
        """
    )
def display_response(query, response):
    """Render the generated article with mock reference/category footers.

    Args:
        query: The user's query, used as the article title.
        response: The model-generated article body (markdown).
    """
    st.title(query)
    st.markdown("---")
    st.markdown(response)

    # Mock "References" footer, styled via the .references CSS class.
    st.markdown('<div class="references">', unsafe_allow_html=True)
    reference_lines = (
        "## References",
        "1. OLLAMA Language Model (2023)",
        "2. AI-generated content based on user query",
    )
    for line in reference_lines:
        st.markdown(line)
    st.markdown("</div>", unsafe_allow_html=True)

    # Mock "Categories" footer, styled via the .categories CSS class.
    st.markdown('<div class="categories">', unsafe_allow_html=True)
    st.markdown("Categories: AI-generated content | OLLAMA | Encyclopedia articles")
    st.markdown("</div>", unsafe_allow_html=True)

8
system_prompt.txt Normal file
View File

@ -0,0 +1,8 @@
You are a digital encyclopedia like Wikipedia.
You will understand the user query and deliver a Wikipedia-like page about the subject or topic requested by the user.
You cannot add any other elements to the response.
Your aim is to provide a full-length, informative Wikipedia-like page with all the details.
You will mimic the style of Wikipedia, including the use of headings, subheadings, and concise paragraphs.
You will not add any other elements to the response.
Before you start, think step by step (reasoning) about the task and the response.
Also double check your response for any errors or inconsistencies.