<?xml version="1.0" encoding="utf-8"?>
<schemalist>
  <schema path="/xyz/vikanezrimaya/kittybox/Bowl/" id="@app-id@" gettext-domain="@gettext-package@">
    <key name="send-html-directly" type="b">
      <default>false</default>
      <summary>Send post content as HTML</summary>
      <description>
        Some Micropub servers can preprocess plain-text content before
        posting. Enable this option to ask the Micropub server to
        treat your post content as HTML and skip the usual
        plain-text processing.

        This can be useful if you wish to customize the post content
        with features unavailable in your Micropub server's
        preprocessor, or if your Micropub server cannot preprocess
        content at all.
      </description>
    </key>
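    <!--
      Illustration (an assumption about Bowl's payload, based on the
      Micropub JSON syntax rather than on this file): with this key
      disabled, content is sent as plain text,
        { "type": ["h-entry"], "properties": { "content": ["..."] } }
      while with it enabled, the content is marked as pre-rendered HTML:
        { "type": ["h-entry"], "properties": { "content": [{ "html": "<p>...</p>" }] } }
    -->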
    <key name="llm-endpoint" type="s">
      <default>"http://localhost:11434/"</default>
      <summary>LLM API endpoint</summary>
      <description>
        Ollama API endpoint used to query an LLM for Smart Summary.
      </description>
    </key>
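    <!--
      Sketch of the request Smart Summary presumably sends to this
      endpoint (Ollama's documented /api/generate route; the exact
      field set Bowl uses is an assumption):
        POST http://localhost:11434/api/generate
        { "model": "llama3.1:8b-instruct-q8_0",
          "system": "<smart-summary-system-prompt>",
          "prompt": "<assembled user prompt>",
          "stream": false }
    -->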
    <key name="smart-summary-model" type="s">
      <default>"llama3.1:8b-instruct-q8_0"</default>
      <summary>Smart Summary LLM</summary>
      <!-- TRANSLATORS: please keep the link intact -->
      <description>
        <![CDATA[
        The model that Ollama will load to produce
        summaries. Available models are listed in the
        <a href="https://ollama.com/library">Ollama library</a>.
        ]]>
      </description>
    </key>
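    <!--
      The selected model must be present locally before Ollama can
      serve it; with the standard Ollama CLI:
        ollama pull llama3.1:8b-instruct-q8_0
    -->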
    <key name="smart-summary-show-warning" type="b">
      <default>true</default>
      <summary>Show warnings on LLM enhancement features</summary>
      <description>
        If enabled, Bowl will show warnings about LLM enhancement
        features.
      </description>
    </key>
    <key name="smart-summary-system-prompt" type="s">
      <default>"You are a helpful AI assistant embedded into a blog authoring tool. You will be provided with a text to summarize. Reply only, strictly with a one-sentence summary of the provided text, and don't write anything else."</default>
      <summary>LLM system prompt</summary>
      <description>
        The system prompt provided to the LLM. For best results, it
        should instruct the LLM to provide a one-sentence summary of
        the document it receives.

        The default system prompt was tested with Llama 3.1 8B and
        should work for posts written mainly in English. Performance
        with other languages is untested.
      </description>
    </key>
    <key name="smart-summary-prompt-prefix" type="s">
      <default>"Summarize the following text:"</default>
      <summary>Smart Summary prompt prefix</summary>
      <description>
        Text prepended to the article content when it is inserted
        into the LLM prompt.

        Something like "Summarize this text:" works well.
      </description>
    </key>
    <key name="smart-summary-prompt-suffix" type="s">
      <default>""</default>
      <summary>Smart Summary prompt suffix</summary>
      <description>
        Text appended to the prompt after the article content.
      </description>
    </key>
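    <!--
      How the three prompt keys presumably combine into the user
      prompt sent alongside the system prompt (the exact separators
      are an assumption):
        <smart-summary-prompt-prefix>
        <article text>
        <smart-summary-prompt-suffix>
    -->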
  </schema>
</schemalist>
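<!--
  This template is filled in at build time (@app-id@ and
  @gettext-package@ get substituted) and the result must be compiled
  before GSettings can read it:
    glib-compile-schemas <schema directory>
  A minimal read sketch in Rust via the gio crate (key names come from
  this file; the schema id shown assumes @app-id@ matches the path):
    let settings = gio::Settings::new("xyz.vikanezrimaya.kittybox.Bowl");
    let endpoint = settings.string("llm-endpoint");
    let send_html = settings.boolean("send-html-directly");
-->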