diff --git a/README.md b/README.md
index f5327f4..8d9a590 100644
--- a/README.md
+++ b/README.md
@@ -86,7 +86,6 @@ Open the `~/.memos/config.yaml` file with your preferred text editor and modify
 
 ```yaml
 embedding:
-  enabled: true
   use_local: true
   model: jinaai/jina-embeddings-v2-base-en # Model name used
   num_dim: 768 # Model dimensions
@@ -153,7 +152,6 @@ Open the `~/.memos/config.yaml` file with your preferred text editor and modify
 
 ```yaml
 vlm:
-  enabled: true # Enable VLM feature
   endpoint: http://localhost:11434 # Ollama service address
   modelname: minicpm-v # Model name to use
   force_jpeg: true # Convert images to JPEG format to ensure compatibility
diff --git a/README_ZH.md b/README_ZH.md
index eb2cc63..abaa411 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -86,7 +86,6 @@ Pensieve 通过 embedding 模型来提取语义信息，并构建向量索引。
 
 ```yaml
 embedding:
-  enabled: true
   use_local: true
   model: arkohut/jina-embeddings-v2-base-zh # 使用的模型名称
   num_dim: 768 # 模型的维度
@@ -156,7 +155,6 @@ ollama run minicpm-v "描述一下这是什么服务"
 
 ```yaml
 vlm:
-  enabled: true # 启用 VLM 功能
   endpoint: http://localhost:11434 # Ollama 服务地址
   modelname: minicpm-v # 使用的模型名称
   force_jpeg: true # 将图片转换为 JPEG 格式以确保兼容性
diff --git a/memos/default_config.yaml b/memos/default_config.yaml
index 1bcb0f5..7598482 100644
--- a/memos/default_config.yaml
+++ b/memos/default_config.yaml
@@ -43,7 +43,6 @@ embedding:
 
 # using ollama embedding
 # embedding:
-#   enabled: true
 #   endpoint: http://localhost:11434/api/embed # this is not used
 #   model: arkohut/gte-qwen2-1.5b-instruct:q8_0
 #   num_dim: 1536
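
For reference, a minimal sketch of how the `embedding` example in README.md reads after this change. Only the fields visible in the hunk above are shown; any later fields in the README's example are truncated by the diff context and omitted here.

```yaml
# ~/.memos/config.yaml (embedding section after the change; remaining fields unchanged)
embedding:
  use_local: true
  model: jinaai/jina-embeddings-v2-base-en # Model name used
  num_dim: 768 # Model dimensions
```

The `enabled: true` flag is dropped the same way in the `vlm` example and in the commented-out Ollama embedding block of memos/default_config.yaml.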