add ai docs

This commit is contained in:
perf3ct 2025-04-15 20:18:29 -07:00
parent 3fec87106f
commit bbb382ef65
48 changed files with 755 additions and 2 deletions

View File

@ -10578,6 +10578,369 @@
}
]
},
{
"isClone": false,
"noteId": "LMAv4Uy3Wk6J",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J"
],
"title": "AI",
"notePosition": 320,
"prefix": null,
"isExpanded": false,
"type": "book",
"mime": "",
"attributes": [
{
"type": "label",
"name": "iconClass",
"value": "bx bx-bot",
"isInheritable": false,
"position": 10
},
{
"type": "label",
"name": "viewType",
"value": "list",
"isInheritable": false,
"position": 20
},
{
"type": "label",
"name": "expanded",
"value": "",
"isInheritable": false,
"position": 30
}
],
"attachments": [],
"dirFileName": "AI",
"children": [
{
"isClone": false,
"noteId": "GBBMSlVSOIGP",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"GBBMSlVSOIGP"
],
"title": "Introduction",
"notePosition": 10,
"prefix": null,
"isExpanded": false,
"type": "text",
"mime": "text/html",
"attributes": [
{
"type": "relation",
"name": "internalLink",
"value": "vvUCN7FDkq7G",
"isInheritable": false,
"position": 10
}
],
"format": "markdown",
"dataFileName": "Introduction.md",
"attachments": [
{
"attachmentId": "4UpXwA3WvbmA",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "Introduction_image.png"
},
{
"attachmentId": "8Bn5IsE3Bv1k",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "1_Introduction_image.png"
},
{
"attachmentId": "ABN1rFIIJ8no",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "2_Introduction_image.png"
},
{
"attachmentId": "CK3z7sYw63XT",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "3_Introduction_image.png"
},
{
"attachmentId": "E6Y09N2t7vyA",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "4_Introduction_image.png"
},
{
"attachmentId": "JlIPeTtl5wlV",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "5_Introduction_image.png"
},
{
"attachmentId": "ur4TDJeRqpUC",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "6_Introduction_image.png"
},
{
"attachmentId": "UTH83LkQEA8u",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "7_Introduction_image.png"
},
{
"attachmentId": "V68TCCTUdyl7",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "8_Introduction_image.png"
},
{
"attachmentId": "YbWoNq58T9kB",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "9_Introduction_image.png"
}
]
},
{
"isClone": false,
"noteId": "WkM7gsEUyCXs",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"WkM7gsEUyCXs"
],
"title": "AI Provider Information",
"notePosition": 20,
"prefix": null,
"isExpanded": false,
"type": "text",
"mime": "text/html",
"attributes": [
{
"type": "label",
"name": "viewType",
"value": "list",
"isInheritable": false,
"position": 10
},
{
"type": "relation",
"name": "internalLink",
"value": "7EdTxPADv95W",
"isInheritable": false,
"position": 20
},
{
"type": "relation",
"name": "internalLink",
"value": "ZavFigBX9AwP",
"isInheritable": false,
"position": 30
},
{
"type": "relation",
"name": "internalLink",
"value": "e0lkirXEiSNc",
"isInheritable": false,
"position": 40
}
],
"format": "markdown",
"dataFileName": "AI Provider Information.md",
"attachments": [
{
"attachmentId": "BNN9Vv3JEf2X",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "AI Provider Information_im.png"
},
{
"attachmentId": "diIollN3KEbn",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "1_AI Provider Information_im.png"
}
],
"dirFileName": "AI Provider Information",
"children": [
{
"isClone": false,
"noteId": "7EdTxPADv95W",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"WkM7gsEUyCXs",
"7EdTxPADv95W"
],
"title": "Ollama",
"notePosition": 10,
"prefix": null,
"isExpanded": false,
"type": "book",
"mime": "",
"attributes": [
{
"type": "label",
"name": "viewType",
"value": "list",
"isInheritable": false,
"position": 10
},
{
"type": "label",
"name": "expanded",
"value": "",
"isInheritable": false,
"position": 20
}
],
"attachments": [],
"dirFileName": "Ollama",
"children": [
{
"isClone": false,
"noteId": "vvUCN7FDkq7G",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"WkM7gsEUyCXs",
"7EdTxPADv95W",
"vvUCN7FDkq7G"
],
"title": "Installing Ollama",
"notePosition": 10,
"prefix": null,
"isExpanded": false,
"type": "text",
"mime": "text/html",
"attributes": [],
"format": "markdown",
"dataFileName": "Installing Ollama.md",
"attachments": [
{
"attachmentId": "CG9q2FfKuEsr",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "Installing Ollama_image.png"
},
{
"attachmentId": "GEcgXxUE1IDx",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "1_Installing Ollama_image.png"
},
{
"attachmentId": "OMGDDxjScXCl",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "2_Installing Ollama_image.png"
},
{
"attachmentId": "Qacg7ibmEBkZ",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "3_Installing Ollama_image.png"
},
{
"attachmentId": "vSjU929VnBm4",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "4_Installing Ollama_image.png"
},
{
"attachmentId": "xGrxARTj79Gv",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "5_Installing Ollama_image.png"
}
]
}
]
},
{
"isClone": false,
"noteId": "ZavFigBX9AwP",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"WkM7gsEUyCXs",
"ZavFigBX9AwP"
],
"title": "OpenAI",
"notePosition": 20,
"prefix": null,
"isExpanded": false,
"type": "text",
"mime": "text/html",
"attributes": [],
"format": "markdown",
"dataFileName": "OpenAI.md",
"attachments": []
},
{
"isClone": false,
"noteId": "e0lkirXEiSNc",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"WkM7gsEUyCXs",
"e0lkirXEiSNc"
],
"title": "Anthropic",
"notePosition": 30,
"prefix": null,
"isExpanded": false,
"type": "text",
"mime": "text/html",
"attributes": [],
"format": "markdown",
"dataFileName": "Anthropic.md",
"attachments": []
}
]
}
]
},
{
"isClone": false,
"noteId": "CdNpE2pqjmI6",
@ -10586,7 +10949,7 @@
"CdNpE2pqjmI6"
],
"title": "Scripting",
"notePosition": 320,
"notePosition": 330,
"prefix": null,
"isExpanded": false,
"type": "text",

Binary file not shown.

After

Width:  |  Height:  |  Size: 186 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 168 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 43 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 172 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 167 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 237 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 202 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 49 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 191 KiB

View File

@ -0,0 +1,15 @@
# AI Provider Information
Currently, we support the following providers:
* <a class="reference-link" href="AI%20Provider%20Information/Ollama">Ollama</a>
* <a class="reference-link" href="AI%20Provider%20Information/OpenAI.md">OpenAI</a>
* <a class="reference-link" href="AI%20Provider%20Information/Anthropic.md">Anthropic</a>
* Voyage AI
To set your preferred chat model, you'll want to enter the provider's name here:
<figure class="image image_resized" style="width:88.38%;"><img style="aspect-ratio:1884/1267;" src="AI Provider Information_im.png" width="1884" height="1267"></figure>
And to set your preferred embedding provider:
<figure class="image image_resized" style="width:93.47%;"><img style="aspect-ratio:1907/1002;" src="1_AI Provider Information_im.png" width="1907" height="1002"></figure>

Binary file not shown.

After

Width:  |  Height:  |  Size: 47 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 270 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 89 KiB

View File

@ -0,0 +1,25 @@
# Installing Ollama
[Ollama](https://ollama.com/) can be installed in a variety of ways, and even runs [within a Docker container](https://hub.docker.com/r/ollama/ollama). Ollama will be noticeably quicker when running on a GPU (Nvidia, AMD, Intel), but it can run on CPU and RAM. To install Ollama without any other prerequisites, you can follow their [installer](https://ollama.com/download):
<figure class="image image_resized" style="width:50.49%;"><img style="aspect-ratio:785/498;" src="3_Installing Ollama_image.png" width="785" height="498"></figure><figure class="image image_resized" style="width:40.54%;"><img style="aspect-ratio:467/100;" src="Installing Ollama_image.png" width="467" height="100"></figure><figure class="image image_resized" style="width:55.73%;"><img style="aspect-ratio:1296/1011;" src="1_Installing Ollama_image.png" width="1296" height="1011"></figure>
After their installer completes, if you're on Windows, you should see an entry in the start menu to run it:
<figure class="image image_resized" style="width:66.12%;"><img style="aspect-ratio:1161/480;" src="2_Installing Ollama_image.png" width="1161" height="480"></figure>
Also, you should have access to the `ollama` CLI via Powershell or CMD:
<figure class="image image_resized" style="width:86.09%;"><img style="aspect-ratio:1730/924;" src="5_Installing Ollama_image.png" width="1730" height="924"></figure>
After Ollama is installed, you can go ahead and `pull` the models you want to use and run. Here's a command to pull my favorite tool-compatible model and embedding model as of April 2025:
```sh
ollama pull llama3.1:8b
ollama pull mxbai-embed-large
```
Also, you can make sure it's running by going to [http://localhost:11434](http://localhost:11434) and you should get the following response (port 11434 being the “normal” Ollama port):
<figure class="image"><img style="aspect-ratio:585/202;" src="4_Installing Ollama_image.png" width="585" height="202"></figure>
Now that you have Ollama up and running, and have a few models pulled, you're ready to go ahead and start using Ollama as both a chat provider and embedding provider!

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 198 KiB

View File

@ -0,0 +1,89 @@
# Introduction
<figure class="image image_resized" style="width:63.68%;"><img style="aspect-ratio:1363/1364;" src="Introduction_image.png" width="1363" height="1364"><figcaption>An example chat with an LLM</figcaption></figure>
The AI / LLM features within Trilium Notes are designed to allow you to interact with your Notes in a variety of ways, using as many of the major providers as we can support. 
In addition to being able to send chats to LLM providers such as OpenAI, Anthropic, and Ollama - we also support agentic tool calling, and embeddings.
The quickest way to get started is to navigate to the “AI/LLM” settings:
<figure class="image image_resized" style="width:74.04%;"><img style="aspect-ratio:1916/1906;" src="5_Introduction_image.png" width="1916" height="1906"></figure>
Enable the feature:
<figure class="image image_resized" style="width:82.82%;"><img style="aspect-ratio:1911/997;" src="1_Introduction_image.png" width="1911" height="997"></figure>
## Embeddings
**Embeddings** are important as they allow us to have a compact AI “summary” (it's not human readable text) of each of your Notes, that we can then perform mathematical functions on (such as cosine similarity) to smartly figure out which Notes to send as context to the LLM when you're chatting, among other useful functions.
You will then need to set up the AI “provider” that you wish to use to create the embeddings for your Notes. Currently OpenAI, Voyage AI, and Ollama are supported providers for embedding generation.
In the following example, we're going to use our self-hosted Ollama instance to create the embeddings for our Notes. You can see additional documentation about installing your own Ollama locally in <a class="reference-link" href="AI%20Provider%20Information/Ollama/Installing%20Ollama.md">Installing Ollama</a>.
To see what embedding models Ollama has available, you can check out [this search](https://ollama.com/search?c=embedding) on their website, and then `pull` whichever one you want to try out. As of 4/15/25, my personal favorite is `mxbai-embed-large`.
First, we'll need to select the Ollama provider from the tabs of providers, then we will enter in the Base URL for our Ollama. Since our Ollama is running on our local machine, our Base URL is `http://localhost:11434`. We will then hit the “refresh” button to have it fetch our models:
<figure class="image image_resized" style="width:82.28%;"><img style="aspect-ratio:1912/1075;" src="4_Introduction_image.png" width="1912" height="1075"></figure>
When selecting the dropdown for the “Embedding Model”, embedding models should be at the top of the list, separated from the regular chat models by a horizontal line, as seen below:
<figure class="image image_resized" style="width:61.73%;"><img style="aspect-ratio:1232/959;" src="8_Introduction_image.png" width="1232" height="959"></figure>
After selecting an embedding model, embeddings should automatically begin to be generated by checking the embedding statistics at the top of the “AI/LLM” settings panel:
<figure class="image image_resized" style="width:67.06%;"><img style="aspect-ratio:1333/499;" src="7_Introduction_image.png" width="1333" height="499"></figure>
If you don't see any embeddings being created, you will want to scroll to the bottom of the settings, and hit “Recreate All Embeddings”:
<figure class="image image_resized" style="width:65.69%;"><img style="aspect-ratio:1337/1490;" src="3_Introduction_image.png" width="1337" height="1490"></figure>
Creating the embeddings will take some time, and will be regenerated when a Note is created, updated, or deleted (removed).
If for some reason you choose to change your embedding provider, or the model used, you'll need to recreate all embeddings.
## Tools
Tools are essentially functions that we provide to the various LLM providers, and then LLMs can respond in a specific format that tells us what tool function and parameters they would like to invoke. We then execute these tools, and provide it as additional context in the Chat conversation. 
These are the tools that currently exist, and they will certainly be updated to be more effective (with even more to be added!):
* `search_notes`
* Semantic search
* `keyword_search`
* Keyword-based search
* `attribute_search`
* Attribute-specific search
* `search_suggestion`
* Search syntax helper
* `read_note`
* Read note content (helps the LLM read Notes)
* `create_note`
* Create a Note
* `update_note`
* Update a Note
* `manage_attributes`
* Manage attributes on a Note
* `manage_relationships`
* Manage the various relationships between Notes
* `extract_content`
* Used to smartly extract content from a Note
* `calendar_integration`
* Used to find date notes, create date notes, get the daily note, etc.
When Tools are executed within your Chat, you'll see output like the following:
<figure class="image image_resized" style="width:66.88%;"><img style="aspect-ratio:1372/1591;" src="6_Introduction_image.png" width="1372" height="1591"></figure>
You don't need to tell the LLM to execute a certain tool, it should “smartly” call tools and automatically execute them as needed.
## Overview
Now that you know about embeddings and tools, you can just go ahead and use the “Chat with Notes” button, where you can go ahead and start chatting!:
<figure class="image image_resized" style="width:60.77%;"><img style="aspect-ratio:1378/539;" src="2_Introduction_image.png" width="1378" height="539"></figure>
If you don't see the “Chat with Notes” button on your side launchbar, you might need to move it from the “Available Launchers” section to the “Visible Launchers” section:
<figure class="image image_resized" style="width:69.81%;"><img style="aspect-ratio:1765/1287;" src="9_Introduction_image.png" width="1765" height="1287"></figure>

Binary file not shown.

After

Width:  |  Height:  |  Size: 175 KiB

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 186 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 168 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 43 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 172 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 167 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 237 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 202 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 49 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 191 KiB

View File

@ -0,0 +1,30 @@
<p>&nbsp;</p>
<p>Currently, we support the following providers:</p>
<ul>
<li><a class="reference-link" href="#root/pOsGYCXsbNQG/LMAv4Uy3Wk6J/WkM7gsEUyCXs/_help_7EdTxPADv95W">Ollama</a>
</li>
<li><a class="reference-link" href="#root/pOsGYCXsbNQG/LMAv4Uy3Wk6J/WkM7gsEUyCXs/_help_ZavFigBX9AwP">OpenAI</a>
</li>
<li><a class="reference-link" href="#root/pOsGYCXsbNQG/LMAv4Uy3Wk6J/WkM7gsEUyCXs/_help_e0lkirXEiSNc">Anthropic</a>
</li>
<li>Voyage AI</li>
</ul>
<p>&nbsp;</p>
<p>To set your preferred chat model, you'll want to enter the provider's
name here:</p>
<figure class="image image_resized" style="width:88.38%;">
<img style="aspect-ratio:1884/1267;" src="AI Provider Information_im.png"
width="1884" height="1267">
</figure>
<p>&nbsp;</p>
<p>&nbsp;</p>
<p>&nbsp;</p>
<p>And to set your preferred embedding provider:</p>
<figure class="image image_resized"
style="width:93.47%;">
<img style="aspect-ratio:1907/1002;" src="1_AI Provider Information_im.png"
width="1907" height="1002">
</figure>
<p>&nbsp;</p>
<p>&nbsp;</p>
<p>&nbsp;</p>

Binary file not shown.

After

Width:  |  Height:  |  Size: 47 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 270 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 89 KiB

View File

@ -0,0 +1,54 @@
<p>&nbsp;</p>
<p>&nbsp;</p>
<p><a href="https://ollama.com/">Ollama</a> can be installed in a variety
of ways, and even runs <a href="https://hub.docker.com/r/ollama/ollama">within a Docker container</a>.
Ollama will be noticeably quicker when running on a GPU (Nvidia, AMD, Intel),
but it can run on CPU and RAM. To install Ollama without any other prerequisites,
you can follow their <a href="https://ollama.com/download">installer</a>:</p>
<figure
class="image image_resized" style="width:50.49%;">
<img style="aspect-ratio:785/498;" src="3_Installing Ollama_image.png"
width="785" height="498">
</figure>
<figure class="image image_resized" style="width:40.54%;">
<img style="aspect-ratio:467/100;" src="Installing Ollama_image.png" width="467"
height="100">
</figure>
<figure class="image image_resized" style="width:55.73%;">
<img style="aspect-ratio:1296/1011;" src="1_Installing Ollama_image.png"
width="1296" height="1011">
</figure>
<p>&nbsp;</p>
<p>After their installer completes, if you're on Windows, you should see
an entry in the start menu to run it:</p>
<figure class="image image_resized"
style="width:66.12%;">
<img style="aspect-ratio:1161/480;" src="2_Installing Ollama_image.png"
width="1161" height="480">
</figure>
<p>&nbsp;</p>
<p>Also, you should have access to the <code>ollama</code> CLI via Powershell
or CMD:</p>
<figure class="image image_resized" style="width:86.09%;">
<img style="aspect-ratio:1730/924;" src="5_Installing Ollama_image.png"
width="1730" height="924">
</figure>
<p>&nbsp;</p>
<p>After Ollama is installed, you can go ahead and <code>pull</code> the models
you want to use and run. Here's a command to pull my favorite tool-compatible
model and embedding model as of April 2025:</p><pre><code class="language-text-x-sh">ollama pull llama3.1:8b
ollama pull mxbai-embed-large</code></pre>
<p>&nbsp;</p>
<p>Also, you can make sure it's running by going to <a href="http://localhost:11434">http://localhost:11434</a> and
you should get the following response (port 11434 being the “normal” Ollama
port):</p>
<p>&nbsp;</p>
<figure class="image">
<img style="aspect-ratio:585/202;" src="4_Installing Ollama_image.png"
width="585" height="202">
</figure>
<p>&nbsp;</p>
<p>Now that you have Ollama up and running, and have a few models pulled,
  you're ready to go ahead and start using Ollama as both a chat provider
  and embedding provider!</p>
<p>&nbsp;</p>

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 198 KiB

View File

@ -0,0 +1,177 @@
<p>&nbsp;</p>
<figure class="image image_resized" style="width:63.68%;">
<img style="aspect-ratio:1363/1364;" src="Introduction_image.png" width="1363"
height="1364">
<figcaption>An example chat with an LLM</figcaption>
</figure>
<p>&nbsp;</p>
<p>The AI / LLM features within Trilium Notes are designed to allow you to
interact with your Notes in a variety of ways, using as many of the major
providers as we can support.&nbsp;</p>
<p>&nbsp;</p>
<p>In addition to being able to send chats to LLM providers such as OpenAI,
Anthropic, and Ollama - we also support agentic tool calling, and embeddings.</p>
<p>&nbsp;</p>
<p>The quickest way to get started is to navigate to the “AI/LLM” settings:</p>
<figure
class="image image_resized" style="width:74.04%;">
<img style="aspect-ratio:1916/1906;" src="5_Introduction_image.png" width="1916"
height="1906">
</figure>
<p>&nbsp;</p>
<p>Enable the feature:</p>
<figure class="image image_resized" style="width:82.82%;">
<img style="aspect-ratio:1911/997;" src="1_Introduction_image.png" width="1911"
height="997">
</figure>
<p>&nbsp;</p>
<h2>Embeddings</h2>
<p><strong>Embeddings</strong> are important as they allow us to have a compact
  AI “summary” (it's not human readable text) of each of your Notes, that
  we can then perform mathematical functions on (such as cosine similarity)
  to smartly figure out which Notes to send as context to the LLM when you're
  chatting, among other useful functions.</p>
<p>You will then need to set up the AI “provider” that you wish to use to
create the embeddings for your Notes. Currently OpenAI, Voyage AI, and
Ollama are supported providers for embedding generation.</p>
<p>In the following example, we're going to use our self-hosted Ollama instance
to create the embeddings for our Notes. You can see additional documentation
about installing your own Ollama locally in&nbsp;<a class="reference-link"
href="#root/jdjRLhLV3TtI/LMAv4Uy3Wk6J/7EdTxPADv95W/_help_vvUCN7FDkq7G">Installing Ollama</a>.</p>
<p>To see what embedding models Ollama has available, you can check out
  <a
  href="https://ollama.com/search?c=embedding">this search</a> on their website, and then <code>pull</code> whichever one
  you want to try out. As of 4/15/25, my personal favorite is <code>mxbai-embed-large</code>.</p>
<p>First, we'll need to select the Ollama provider from the tabs of providers,
then we will enter in the Base URL for our Ollama. Since our Ollama is
running on our local machine, our Base URL is <code>http://localhost:11434</code>.
We will then hit the “refresh” button to have it fetch our models:</p>
<figure
class="image image_resized" style="width:82.28%;">
<img style="aspect-ratio:1912/1075;" src="4_Introduction_image.png" width="1912"
height="1075">
</figure>
<p>&nbsp;</p>
<p>When selecting the dropdown for the “Embedding Model”, embedding models
  should be at the top of the list, separated from the regular chat models
  by a horizontal line, as seen below:</p>
<figure class="image image_resized"
style="width:61.73%;">
<img style="aspect-ratio:1232/959;" src="8_Introduction_image.png" width="1232"
height="959">
</figure>
<p>&nbsp;</p>
<p>After selecting an embedding model, embeddings should automatically begin
to be generated by checking the embedding statistics at the top of the
“AI/LLM” settings panel:</p>
<figure class="image image_resized" style="width:67.06%;">
<img style="aspect-ratio:1333/499;" src="7_Introduction_image.png" width="1333"
height="499">
</figure>
<p>&nbsp;</p>
<p>If you don't see any embeddings being created, you will want to scroll
to the bottom of the settings, and hit “Recreate All Embeddings”:</p>
<figure
class="image image_resized" style="width:65.69%;">
<img style="aspect-ratio:1337/1490;" src="3_Introduction_image.png" width="1337"
height="1490">
</figure>
<p>&nbsp;</p>
<p>Creating the embeddings will take some time, and will be regenerated when
a Note is created, updated, or deleted (removed).</p>
<p>If for some reason you choose to change your embedding provider, or the
model used, you'll need to recreate all embeddings.</p>
<p>&nbsp;</p>
<p>&nbsp;</p>
<h2>Tools</h2>
<p>Tools are essentially functions that we provide to the various LLM providers,
and then LLMs can respond in a specific format that tells us what tool
function and parameters they would like to invoke. We then execute these
tools, and provide it as additional context in the Chat conversation.&nbsp;</p>
<p>&nbsp;</p>
<p>These are the tools that currently exist, and they will certainly be updated
  to be more effective (with even more to be added!):</p>
<ul>
<li><code>search_notes</code>
<ul>
<li>Semantic search</li>
</ul>
</li>
<li><code>keyword_search</code>
<ul>
<li>Keyword-based search</li>
</ul>
</li>
<li><code>attribute_search</code>
<ul>
<li>Attribute-specific search</li>
</ul>
</li>
<li><code>search_suggestion</code>
<ul>
<li>Search syntax helper</li>
</ul>
</li>
<li><code>read_note</code>
<ul>
<li>Read note content (helps the LLM read Notes)</li>
</ul>
</li>
<li><code>create_note</code>
<ul>
<li>Create a Note</li>
</ul>
</li>
<li><code>update_note</code>
<ul>
<li>Update a Note</li>
</ul>
</li>
<li><code>manage_attributes</code>
<ul>
<li>Manage attributes on a Note</li>
</ul>
</li>
<li><code>manage_relationships</code>
<ul>
<li>Manage the various relationships between Notes</li>
</ul>
</li>
<li><code>extract_content</code>
<ul>
<li>Used to smartly extract content from a Note</li>
</ul>
</li>
<li><code>calendar_integration</code>
<ul>
<li>Used to find date notes, create date notes, get the daily note, etc.</li>
</ul>
</li>
</ul>
<p>&nbsp;</p>
<p>When Tools are executed within your Chat, you'll see output like the following:</p>
<figure
class="image image_resized" style="width:66.88%;">
<img style="aspect-ratio:1372/1591;" src="6_Introduction_image.png" width="1372"
height="1591">
</figure>
<p>You don't need to tell the LLM to execute a certain tool, it should “smartly”
call tools and automatically execute them as needed.</p>
<p>&nbsp;</p>
<h2>Overview</h2>
<p>&nbsp;</p>
<p>Now that you know about embeddings and tools, you can just go ahead and
use the “Chat with Notes” button, where you can go ahead and start chatting!:</p>
<figure
class="image image_resized" style="width:60.77%;">
<img style="aspect-ratio:1378/539;" src="2_Introduction_image.png" width="1378"
height="539">
</figure>
<p>&nbsp;</p>
<p>If you don't see the “Chat with Notes” button on your side launchbar,
you might need to move it from the “Available Launchers” section to the
“Visible Launchers” section:</p>
<figure class="image image_resized" style="width:69.81%;">
<img style="aspect-ratio:1765/1287;" src="9_Introduction_image.png" width="1765"
height="1287">
</figure>

Binary file not shown.

After

Width:  |  Height:  |  Size: 175 KiB