updated token limits and model to be more modern

tcsenpai 2025-03-11 12:06:09 +01:00
parent ad363a946a
commit 96a1882323
3 changed files with 46 additions and 29 deletions

View File

@@ -1,7 +1,7 @@
 {
   "manifest_version": 2,
   "name": "SpaceLLama",
-  "version": "1.4",
+  "version": "1.5",
   "description": "Summarize web pages using Ollama. Supports custom models, token limits, system prompts, chunking, and more. See https://github.com/tcsenpai/spacellama for more information.",
   "permissions": ["activeTab", "storage", "<all_urls>", "tabs"],
   "browser_action": {

View File

@@ -23,11 +23,16 @@
       </div>
       <div class="form-group">
         <label for="model">OLLAMA Model:</label>
-        <input type="text" id="model" placeholder="llama2" />
+        <input type="text" id="model" placeholder="llama3.1:latest" />
       </div>
       <div class="form-group">
         <label for="token-limit">Token Limit:</label>
-        <input type="number" id="token-limit" min="1000" step="1000" placeholder="4096" />
+        <input
+          type="number"
+          id="token-limit"
+          min="1024"
+          placeholder="16384"
+        />
       </div>
       <button type="submit" class="btn btn-primary">Save Settings</button>
       <div class="form-group">
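Note on the new markup: dropping step="1000" and lowering min to 1024 means the field now accepts any integer from 1024 up, rather than only multiples of 1000. A minimal sketch of how the saved value could be clamped to that floor on the JS side (the helper below is hypothetical, not part of this commit):

// Hypothetical clamp mirroring the input's min attribute; not in this commit.
function clampTokenLimit(raw) {
  const n = parseInt(raw, 10);
  if (Number.isNaN(n)) return 16384; // fall back to the new default
  return Math.max(1024, n);
}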

View File

@@ -1,5 +1,10 @@
-let browser = (typeof chrome !== 'undefined') ? chrome : (typeof browser !== 'undefined') ? browser : null;
+let browser =
+  typeof chrome !== "undefined"
+    ? chrome
+    : typeof browser !== "undefined"
+    ? browser
+    : null;
 
 async function validateEndpoint(endpoint) {
   try {
     const response = await fetch(`${endpoint}/api/tags`);
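Aside: validateEndpoint probes the server with a GET to {endpoint}/api/tags, Ollama's standard route for listing locally installed models, and treats any response.ok as a valid endpoint. A sketch of a stricter check that also confirms the configured model is installed, assuming Ollama's documented response shape { models: [{ name, ... }] } — illustrative, not part of the extension:

// Illustrative only: verify a specific model exists, not just that the server answers.
async function hasModel(endpoint, modelName) {
  const response = await fetch(`${endpoint}/api/tags`);
  if (!response.ok) return false;
  const { models = [] } = await response.json();
  return models.some((m) => m.name === modelName);
}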
@@ -25,7 +30,7 @@ async function updateTokenLimit() {
     if (model in modelTokens) {
       tokenLimitInput.value = modelTokens[model];
     } else {
-      tokenLimitInput.value = 4000; // Default value, modified from 4096 to meet even requirement
+      tokenLimitInput.value = 16384; // Default value
     }
   } catch (error) {
     console.error("Error updating token limit:", error.message || error);
@@ -34,7 +39,7 @@ async function updateTokenLimit() {
 
 async function loadModelTokens() {
   try {
-    const response = await fetch(browser.runtime.getURL('model_tokens.json'));
+    const response = await fetch(browser.runtime.getURL("model_tokens.json"));
     return await response.json();
   } catch (error) {
     console.error("Error loading model tokens:", error.message || error);
@@ -47,7 +52,7 @@ async function saveOptions(e) {
   const model = document.getElementById("model").value;
   const systemPrompt = document.getElementById("system-prompt").value;
   const status = document.getElementById("status");
-  const tokenLimit = document.getElementById("token-limit").value || 4096;
+  const tokenLimit = document.getElementById("token-limit").value || 16384;
   // Ensure the endpoint doesn't end with /api/generate
   const cleanEndpoint = endpoint.replace(/\/api\/generate\/?$/, "");
   status.textContent = "Validating endpoint...";
@@ -67,7 +72,7 @@ async function saveOptions(e) {
       }, 2000);
     } else {
       status.textContent =
-      "Invalid endpoint. Please check the URL and try again.";
+        "Invalid endpoint. Please check the URL and try again.";
     }
   } catch (error) {
     console.error("Error saving options:", error.message || error);
@@ -76,33 +81,40 @@ async function saveOptions(e) {
 }
 
 function restoreOptions() {
-  browser.storage.local.get({
-    ollamaEndpoint: "http://localhost:11434",
-    ollamaModel: "llama2",
-    systemPrompt: "You are a helpful AI assistant. Summarize the given text concisely.",
-    tokenLimit: 4096
-  }, function(result) {
-    document.getElementById("endpoint").value = result.ollamaEndpoint || "http://localhost:11434";
-    document.getElementById("model").value = result.ollamaModel || "llama2";
-    document.getElementById("system-prompt").value = result.systemPrompt || "You are a helpful AI assistant. Summarize the given text concisely.";
+  browser.storage.local.get(
+    {
+      ollamaEndpoint: "http://localhost:11434",
+      ollamaModel: "llama3.1:latest",
+      systemPrompt:
+        "You are a helpful AI assistant. Summarize the given text concisely.",
+      tokenLimit: 16384,
+    },
+    function (result) {
+      document.getElementById("endpoint").value =
+        result.ollamaEndpoint || "http://localhost:11434";
+      document.getElementById("model").value =
+        result.ollamaModel || "llama3.1:latest";
+      document.getElementById("system-prompt").value =
+        result.systemPrompt ||
+        "You are a helpful AI assistant. Summarize the given text concisely.";
 
-    // Call to updateTokenLimit remains async
-    updateTokenLimit().then(() => {
-      validateEndpoint(result.ollamaEndpoint).then(isValid => {
-        updateEndpointStatus(isValid);
+      // Call to updateTokenLimit remains async
+      updateTokenLimit().then(() => {
+        validateEndpoint(result.ollamaEndpoint).then((isValid) => {
+          updateEndpointStatus(isValid);
+        });
       });
-    });
-  });
+    }
+  );
 }
 document.addEventListener("DOMContentLoaded", restoreOptions);
 document
   .getElementById("settings-form")
   .addEventListener("submit", saveOptions);
 document.getElementById("endpoint").addEventListener("blur", async (e) => {
-    const isValid = await validateEndpoint(e.target.value);
-    updateEndpointStatus(isValid);
+  const isValid = await validateEndpoint(e.target.value);
+  updateEndpointStatus(isValid);
 
-document.getElementById("model").addEventListener("change", updateTokenLimit);
-  });
+  document.getElementById("model").addEventListener("change", updateTokenLimit);
+});
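A closing note on the reformatted shim at the top of options.js: it works in practice because Chrome defines chrome and Firefox defines both chrome and browser, so the first branch always short-circuits; in an environment with neither global, typeof browser would hit the let binding's temporal dead zone (the declaration shadows any global browser inside its own initializer) and throw. A self-reference-free variant, shown only as an alternative to what this commit ships; the name extensionApi is hypothetical:

// Alternative shim avoiding the self-referencing let binding; illustrative only.
const extensionApi =
  typeof chrome !== "undefined" ? chrome : globalThis.browser ?? null;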