commit da5ef7a6fe
parent 7ab58c548f

    early support for further fact checking (not active)
```diff
@@ -48,6 +48,10 @@ The extension uses the `marked` library to render Markdown content in the summary
 
 SpaceLLama processes web page content locally through your configured OLLAMA endpoint. No data is sent to external servers beyond what you configure. Always ensure you're using a trusted OLLAMA setup, especially if using a remote endpoint.
 
+## FAQ
+
+- If you get a 403 error, you probably need to set the environment variable `OLLAMA_ORIGINS` to "\*" on your ollama server. On Windows, you will have to set the environment variable in the `SYSTEM` environment, not just the `USER` environment.
+
 ## Contributing
 
 Contributions to SpaceLLama are welcome! Please feel free to submit issues, feature requests, or pull requests to help improve the extension.
```
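The 403 fix in the new FAQ entry is a CORS allowance on the Ollama side. As a rough illustration (assuming the stock `ollama` CLI), on Linux/macOS you can launch the server with `OLLAMA_ORIGINS="*" ollama serve`; on Windows, running `setx OLLAMA_ORIGINS "*" /M` from an elevated prompt writes the variable to the SYSTEM environment, which is what the entry calls for.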
```diff
@@ -156,6 +156,14 @@ async function summarizeChunk(
       }),
     });
 
+    // TODO Add bespoke-minicheck validation here
+    // LINK https://ollama.com/library/bespoke-minicheck
+    let factCheck = false;
+    if (factCheck) {
+      let bespokeResponse = await bespokeMinicheck(chunk, summary);
+      console.log(bespokeResponse);
+    }
+
     if (!response.ok) {
       const errorText = await response.text();
       throw new Error(
```
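The gate above ships disabled: `factCheck` is hard-coded to `false`, and even when flipped on it only logs the raw response. A minimal sketch of what an enabled consumer might look like, assuming the non-streaming Ollama `/api/generate` response shape (a JSON object whose `response` field holds the model text) and that bespoke-minicheck replies with `Yes` or `No`; the wrapper name below is hypothetical:

```js
// Hypothetical wrapper around bespokeMinicheck; not part of this commit.
async function verifySummary(chunk, summary) {
  const raw = await bespokeMinicheck(chunk, summary);
  // Non-streaming /api/generate responses carry the model text in `response`.
  const verdict = JSON.parse(raw).response.trim().toLowerCase();
  if (!verdict.startsWith("yes")) {
    console.warn("bespoke-minicheck could not support this summary");
  }
  return verdict.startsWith("yes");
}
```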
```diff
@@ -191,3 +199,32 @@ function splitContentIntoChunks(content, maxTokens) {
 
   return chunks;
 }
+
+async function bespokeMinicheck(chunk, summary) {
+  let bespoke_prompt = `
+    Document: ${chunk}
+    Claim: This is a correct summary of the document:\n\n ${summary},
+  `;
+
+  let bespoke_body = {
+    prompt: bespoke_prompt,
+    model: "bespoke-minicheck:latest",
+    stream: false,
+    num_ctx: 30000, // Model is 32k but we want to leave some buffer
+    options: {
+      temperature: 0.0,
+      num_predict: 2,
+    },
+  };
+
+  let bespoke_response = await fetch(endpoint, {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+    },
+    body: JSON.stringify(bespoke_body),
+  });
+  // TODO Error handling
+  let response_text = await bespoke_response.text();
+  return response_text
+}
```
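A few things in the new helper are worth flagging: `endpoint` is never defined inside the function, so it must resolve to the configured Ollama URL somewhere in the enclosing module; `num_ctx` sits at the top level of the request body, while Ollama's `/api/generate` reads runtime parameters such as `num_ctx` from the `options` object; and the `// TODO Error handling` is still open. A hedged sketch of a version that addresses those points (the function name and the explicit `endpoint` parameter are illustrative, not from the commit):

```js
// Sketch only: `endpoint` is assumed to be the configured /api/generate URL.
async function bespokeMinicheckStrict(endpoint, chunk, summary) {
  const body = {
    model: "bespoke-minicheck:latest",
    prompt: `Document: ${chunk}\nClaim: This is a correct summary of the document:\n\n${summary}`,
    stream: false,
    options: {
      num_ctx: 30000, // runtime parameters belong under `options`
      temperature: 0.0,
      num_predict: 2, // enough for a "Yes" / "No" verdict
    },
  };

  const response = await fetch(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });
  if (!response.ok) {
    throw new Error(`bespoke-minicheck request failed (${response.status})`);
  }
  const data = await response.json();
  return data.response; // the model's verdict text
}
```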
@ -1,7 +1,7 @@
|
||||
{
|
||||
"manifest_version": 2,
|
||||
"name": "SpaceLLama",
|
||||
"version": "1.2",
|
||||
"version": "1.3",
|
||||
"description": "Summarize web pages using Ollama. Supports custom models, token limits, system prompts, chunking, and more. See https://github.com/tcsenpai/spacellama for more information.",
|
||||
"permissions": ["activeTab", "storage", "<all_urls>", "tabs"],
|
||||
"browser_action": {
|
||||
|