author    Vika <vika@fireburn.ru>  2024-09-04 19:49:26 +0300
committer Vika <vika@fireburn.ru>  2024-09-04 19:51:50 +0300
commit    a26610044483ddd323479d748af401382b8df210 (patch)
tree      94b6509723210dde2302774bc7e4d18be240ab41 /data
parent    2ac75574d5ac87b194834348e52a2267be23ebcd (diff)
download  bowl-a26610044483ddd323479d748af401382b8df210.tar.zst
Smart Summary is now working!
There's no preferences dialog yet, so you can't really adjust the
prompt or the model it uses outside of GSettings. The default settings
work well for me, but you may want to tweak them depending on your
model preferences and compute budget. (Not everyone can afford to run
Llama 3.1 8B at q8_0 quantization. Conversely, you might have a better
GPU than mine and wish to run a 27B model or bigger.)
Diffstat (limited to 'data')
-rw-r--r--  data/xyz.vikanezrimaya.kittybox.Bowl.gschema.xml.in | 48
1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/data/xyz.vikanezrimaya.kittybox.Bowl.gschema.xml.in b/data/xyz.vikanezrimaya.kittybox.Bowl.gschema.xml.in
index 81e0b13..4cec9d1 100644
--- a/data/xyz.vikanezrimaya.kittybox.Bowl.gschema.xml.in
+++ b/data/xyz.vikanezrimaya.kittybox.Bowl.gschema.xml.in
@@ -1,5 +1,53 @@
 <?xml version="1.0" encoding="utf-8"?>
 <schemalist>
   <schema path="/xyz/vikanezrimaya/kittybox/Bowl/" id="@app-id@" gettext-domain="@gettext-package@">
+    <key name="llm-endpoint" type="s">
+      <default>"http://localhost:11434/"</default>
+      <summary>LLM API endpoint</summary>
+      <description>
+        Ollama API endpoint used to query an LLM for Smart Summary.
+      </description>
+    </key>
+    <key name="smart-summary-model" type="s">
+      <default>"llama3.1:8b-instruct-q8_0"</default>
+      <summary>Smart Summary LLM</summary>
+      <!-- TRANSLATORS: please keep the link intact -->
+      <description>
+        <![CDATA[
+        The model that Ollama will load to produce
+        summaries. Available models can be seen at
+        <a href="https://ollama.com/library">Ollama library</a>.
+        ]]>
+      </description>
+    </key>
+    <key name="smart-summary-system-prompt" type="s">
+      <default>"You are a helpful AI assistant embedded into a blog authoring tool. You will be provided with a text to summarize. Reply only, strictly with a one-sentence summary of the provided text, and don't write anything else."</default>
+      <summary>LLM system prompt</summary>
+      <description>
+        The system prompt provided to the LLM. For best results, it
+        should instruct the LLM to provide a one-sentence summary of
+        the document it receives.
+
+        The default system prompt was tested with Llama 3.1 8B and
+        should work for posts written mainly in English. Its
+        performance with other languages is untested.
+      </description>
+    </key>
+    <key name="smart-summary-prompt-prefix" type="s">
+      <default>"Summarize the following text:"</default>
+      <summary>Smart Summary prompt prefix</summary>
+      <description>
+        The string prepended to the article text when it is pasted
+        into the LLM prompt. Something like "Summarize the following
+        text:" (the default) works well.
+      </description>
+    </key>
+    <key name="smart-summary-prompt-suffix" type="s">
+      <default>""</default>
+      <summary>Smart Summary prompt suffix</summary>
+      <description>
+        Text appended to the prompt after the article text.
+      </description>
+    </key>
   </schema>
 </schemalist>
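
For completeness, a hedged sketch of how a client might read these
keys back through GSettings and assemble the final prompt around the
article text. The schema id (standing in for @app-id@) and the
build_prompt helper are illustrative, not Bowl's actual code:

    // Hypothetical sketch, not Bowl's actual code: read the keys added
    // above and wrap the article text with the prefix and suffix.
    use gio::prelude::*;

    fn build_prompt(article: &str) -> String {
        // Assumes @app-id@ expands to xyz.vikanezrimaya.kittybox.Bowl;
        // gio::Settings::new() aborts if the compiled schema is missing.
        let settings = gio::Settings::new("xyz.vikanezrimaya.kittybox.Bowl");
        let prefix = settings.string("smart-summary-prompt-prefix");
        let suffix = settings.string("smart-summary-prompt-suffix");
        // Prefix, the article itself, then the (empty by default) suffix.
        format!("{prefix}\n\n{article}\n\n{suffix}")
            .trim_end()
            .to_string()
    }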