llm_rng_project/llm_rng/rng_generator.py

import hashlib

import ollama

from llm_rng.utils import map_hash_to_range


class LLMRNG:
    def __init__(self, model_name="llama3.1"):
        self.client = ollama.Client()
        self.model_name = model_name

    def generate_text(self, prompt: str) -> str:
        """Query the LLM with a given prompt to generate a response."""
        response = self.client.generate(
            model=self.model_name,
            prompt=prompt,
            options={
                "num_predict": 50,  # Limit output to 50 tokens
                "temperature": 1.0,  # Keep high temperature for randomness
                "timeout": 10,  # 10 second timeout
            },
        )
        return response["response"].strip()

    def generate_random_number(
        self, prompt: str, range_min: int, range_max: int
    ) -> int:
        """Generate a pseudo-random number by hashing the LLM's output."""
        text_output = self.generate_text(prompt)
        hash_object = hashlib.sha256(text_output.encode())
        return map_hash_to_range(hash_object.hexdigest(), range_min, range_max)
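
A minimal usage sketch, assuming an Ollama server is running locally with the llama3.1 model pulled. The map_hash_to_range helper below is a hypothetical stand-in for the one imported from llm_rng.utils (that module is not shown in this file); a modular reduction of the hex digest is one plausible implementation.

# Hypothetical sketch of llm_rng/utils.py -- an assumed implementation,
# not necessarily the project's actual helper.
def map_hash_to_range(hex_digest: str, range_min: int, range_max: int) -> int:
    """Reduce a SHA-256 hex digest onto the inclusive range [range_min, range_max]."""
    span = range_max - range_min + 1
    return range_min + int(hex_digest, 16) % span


if __name__ == "__main__":
    # Example call: requires a running Ollama instance with llama3.1 available.
    rng = LLMRNG(model_name="llama3.1")
    n = rng.generate_random_number(
        prompt="Describe an unpredictable everyday scene in one sentence.",
        range_min=1,
        range_max=100,
    )
    print(f"Random number in [1, 100]: {n}")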