author    Vika <vika@fireburn.ru>  2023-07-09 01:39:58 +0300
committer Vika <vika@fireburn.ru>  2023-07-09 01:39:58 +0300
commit    a863a2b27902d2d8b87dae07c03f94e96d06d92b (patch)
tree      960487de597f02f28b44d77966189d86d885e43c
parent    63148c502c11fcbe99f335c5d214fba84eda1c1c (diff)
download  kittybox-a863a2b27902d2d8b87dae07c03f94e96d06d92b.tar.zst
Implement Postgres backend
A single giga-commit that took me weeks to produce. I know this is
not exactly the best thing ever, but I wanted to experiment first
before "committing" to the implementation, so that I would arrive at
the best solution.
-rw-r--r--  configuration.nix                          11
-rw-r--r--  flake.nix                                   2
-rwxr-xr-x  kittybox-rs/dev.sh                          2
-rw-r--r--  kittybox-rs/migrations/0001_init.sql       62
-rw-r--r--  kittybox-rs/src/database/mod.rs            29
-rw-r--r--  kittybox-rs/src/database/postgres/mod.rs  360
-rw-r--r--  kittybox-rs/src/main.rs                    51
-rw-r--r--  kittybox.nix                               15
-rw-r--r--  postgres-smoke-test.nix                    49
9 files changed, 577 insertions, 4 deletions
diff --git a/configuration.nix b/configuration.nix
index b3b7ada..937b1c8 100644
--- a/configuration.nix
+++ b/configuration.nix
@@ -44,8 +44,9 @@ in {
         example = "redis://192.168.1.200:6379";
         description = lib.mdDoc ''
           Set the backend used for storing data. Available backends are:
-           - `file://` - static folder backend (recommended)
-           - `redis://` - Redis backend (currently unavailable)
+           - `postgres://` - PostgreSQL backend (recommended)
+           - `file://`     - static folder backend
+           - `redis://`    - Redis backend (currently unavailable)
 
           Make sure that if you are using the file backend, the state
           directory is accessible by Kittybox. By default, the unit config
@@ -114,6 +115,12 @@ in {
     };
   };
   config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = lib.strings.hasPrefix "postgres://" cfg.backendUri -> cfg.package.hasPostgres;
+        message = "To use the Postgres backend, Kittybox has to be compiled with Postgres support enabled.";
+      }
+    ];
     systemd.services.kittybox = {
       description = "An IndieWeb-enabled blog engine";
 
diff --git a/flake.nix b/flake.nix
index bb4ef65..e258dc4 100644
--- a/flake.nix
+++ b/flake.nix
@@ -34,6 +34,7 @@
         inherit (pkgs.nodePackages) typescript;
         nixosTests = {
           smokeTest = self.checks.${system}.nixos-test;
+          postgresSmokeTest = self.checks.${system}.nixos-test-postgres;
         };
       };
       default = self.packages.${system}.kittybox;
@@ -43,6 +44,7 @@
       kittybox = self.packages.${system}.kittybox;
       distributed-test = pkgs.nixosTest (import ./distributed-test.nix self);
       nixos-test = pkgs.nixosTest (import ./smoke-test.nix self);
+      nixos-test-postgres = pkgs.nixosTest (import ./postgres-smoke-test.nix self);
       dockerContainer = pkgs.callPackage ./docker.nix {
         inherit (self.packages.${system}) kittybox;
         rev = self.rev or "development";
diff --git a/kittybox-rs/dev.sh b/kittybox-rs/dev.sh
index 8a15d58..65d6143 100755
--- a/kittybox-rs/dev.sh
+++ b/kittybox-rs/dev.sh
@@ -1,5 +1,5 @@
 #!/bin/sh
-export RUST_LOG="kittybox=debug,retainer::cache=warn,h2=info,rustls=info,tokio=info,tower_http::trace=debug"
+export RUST_LOG="kittybox=debug,retainer::cache=warn,h2=info,rustls=info,tokio=info,tower_http::trace=debug,sqlx=trace"
 #export BACKEND_URI=file://./test-dir
 export BACKEND_URI="postgres://localhost?dbname=kittybox&host=/run/postgresql"
 export BLOBSTORE_URI=file://./media-store
diff --git a/kittybox-rs/migrations/0001_init.sql b/kittybox-rs/migrations/0001_init.sql
new file mode 100644
index 0000000..c9915eb
--- /dev/null
+++ b/kittybox-rs/migrations/0001_init.sql
@@ -0,0 +1,62 @@
+CREATE SCHEMA IF NOT EXISTS kittybox;
+
+CREATE TABLE kittybox.users (
+    user_domain TEXT NOT NULL PRIMARY KEY,
+    site_name   JSONB NOT NULL DEFAULT '"Kittybox"'::jsonb,
+    webring     JSONB NOT NULL DEFAULT 'false'::jsonb
+);
+
+CREATE TABLE kittybox.mf2_json (
+    uid TEXT NOT NULL PRIMARY KEY,
+    mf2 JSONB NOT NULL,
+    owner TEXT NOT NULL -- REFERENCES kittybox.users(user_domain)
+);
+
+CREATE INDEX mf2props ON kittybox.mf2_json USING GIN (mf2);
+CREATE INDEX published_date ON kittybox.mf2_json ((mf2 #>> '{properties,published,0}'));
+
+CREATE TABLE kittybox.children (
+    parent TEXT NOT NULL REFERENCES kittybox.mf2_json(uid) ON DELETE CASCADE,
+    child  TEXT NOT NULL REFERENCES kittybox.mf2_json(uid) ON DELETE CASCADE,
+    UNIQUE(parent, child)
+);
+
+CREATE INDEX fulltext ON kittybox.mf2_json USING GIN (
+    to_tsvector('english', mf2['properties']['content'])
+);
+
+CREATE FUNCTION kittybox.set_setting(user_domain text, setting text, val anyelement) RETURNS void AS $$
+BEGIN
+EXECUTE format('INSERT INTO kittybox.users (user_domain, %I) VALUES ($1, $2) ON CONFLICT (user_domain) DO UPDATE SET %I = $2', setting, setting)
+    USING user_domain, val;
+    RETURN;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE FUNCTION kittybox.get_setting(user_domain text, setting text) RETURNS jsonb AS $$
+DECLARE
+  val jsonb;
+BEGIN
+EXECUTE format('SELECT %I FROM kittybox.users WHERE user_domain = $1', setting) USING user_domain INTO val;
+
+RETURN val;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE FUNCTION kittybox.hydrate_author(mf2 jsonb) RETURNS jsonb AS $$
+DECLARE
+  author jsonb;
+  author_uid text;
+BEGIN
+
+author_uid := mf2 #>> '{properties,author,0}';
+IF NOT (author_uid IS NULL) THEN
+   SELECT mf2_json.mf2 INTO author FROM kittybox.mf2_json WHERE uid = author_uid;
+END IF;
+IF NOT FOUND THEN
+    RETURN mf2;
+ELSE
+    RETURN jsonb_set(mf2, '{properties,author,0}', author);
+END IF;
+END;
+$$ LANGUAGE plpgsql;
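
The set_setting and get_setting helpers above build their queries dynamically with format('%I', ...), so a single function can address any per-user settings column. As a minimal sketch (not part of this commit), assuming an already-connected sqlx PgPool and an illustrative domain and value, a round trip through them looks roughly like:

    use sqlx::PgPool;

    // Hedged sketch: round-trip a "site_name" setting through the SQL helpers
    // defined in the migration above. Pool, domain and value are illustrative.
    async fn settings_round_trip(pool: &PgPool) -> Result<(), sqlx::Error> {
        sqlx::query("SELECT kittybox.set_setting($1, $2, $3)")
            .bind("example.com")                        // user_domain
            .bind("site_name")                          // column name, quoted via %I
            .bind(serde_json::json!("My Example Blog")) // stored as jsonb
            .execute(pool)
            .await?;

        let name: serde_json::Value =
            sqlx::query_scalar("SELECT kittybox.get_setting($1, $2)")
                .bind("example.com")
                .bind("site_name")
                .fetch_one(pool)
                .await?;
        assert_eq!(name, serde_json::json!("My Example Blog"));
        Ok(())
    }
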
diff --git a/kittybox-rs/src/database/mod.rs b/kittybox-rs/src/database/mod.rs
index 3086623..231fd26 100644
--- a/kittybox-rs/src/database/mod.rs
+++ b/kittybox-rs/src/database/mod.rs
@@ -6,6 +6,11 @@ use async_trait::async_trait;
 mod file;
 pub use crate::database::file::FileStorage;
 use crate::micropub::MicropubUpdate;
+#[cfg(feature = "postgres")]
+mod postgres;
+#[cfg(feature = "postgres")]
+pub use postgres::PostgresStorage;
+
 #[cfg(test)]
 mod memory;
 #[cfg(test)]
@@ -649,5 +654,29 @@ mod tests {
         };
     }
 
+    macro_rules! postgres_test {
+        ($func_name:ident) => {
+            #[cfg(feature = "sqlx")]
+            #[sqlx::test]
+            #[tracing_test::traced_test]
+            async fn $func_name(
+                pool_opts: sqlx::postgres::PgPoolOptions,
+                mut connect_opts: sqlx::postgres::PgConnectOptions
+            ) -> Result<(), sqlx::Error> {
+                use sqlx::ConnectOptions;
+
+                let db = {
+                    //connect_opts.log_statements(log::LevelFilter::Debug);
+
+                    pool_opts.connect_with(connect_opts).await?
+                };
+                let backend = super::super::PostgresStorage::from_pool(db).await.unwrap();
+
+                Ok(super::$func_name(backend).await)
+            }
+        };
+    }
+
     test_all!(file_test, file);
+    test_all!(postgres_test, postgres);
 }
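
For reference, assuming test_all! (defined earlier in this module, outside the diff context) stamps the given macro out once per shared test case, an invocation such as postgres_test!(test_settings) would expand to roughly the following, where test_settings is a hypothetical helper from the shared suite:

    // sqlx::test provisions a throwaway database per test and hands over the
    // pool and connection options, which get wired into PostgresStorage.
    #[cfg(feature = "sqlx")]
    #[sqlx::test]
    #[tracing_test::traced_test]
    async fn test_settings(
        pool_opts: sqlx::postgres::PgPoolOptions,
        connect_opts: sqlx::postgres::PgConnectOptions,
    ) -> Result<(), sqlx::Error> {
        let db = pool_opts.connect_with(connect_opts).await?;
        let backend = super::super::PostgresStorage::from_pool(db).await.unwrap();
        Ok(super::test_settings(backend).await)
    }
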
diff --git a/kittybox-rs/src/database/postgres/mod.rs b/kittybox-rs/src/database/postgres/mod.rs
new file mode 100644
index 0000000..4ac2abe
--- /dev/null
+++ b/kittybox-rs/src/database/postgres/mod.rs
@@ -0,0 +1,360 @@
+#![allow(unused_variables)]
+use std::borrow::Cow;
+use std::str::FromStr;
+
+use kittybox_util::MicropubChannel;
+use sqlx::PgPool;
+use crate::micropub::{MicropubUpdate, MicropubPropertyDeletion};
+
+use super::settings::Setting;
+use super::{Storage, Result, StorageError, ErrorKind};
+
+static MIGRATOR: sqlx::migrate::Migrator = sqlx::migrate!();
+
+impl From<sqlx::Error> for StorageError {
+    fn from(value: sqlx::Error) -> Self {
+        Self::with_source(
+            super::ErrorKind::Backend,
+            Cow::Owned(format!("sqlx error: {}", &value)),
+            Box::new(value)
+        )
+    }
+}
+
+impl From<sqlx::migrate::MigrateError> for StorageError {
+    fn from(value: sqlx::migrate::MigrateError) -> Self {
+        Self::with_source(
+            super::ErrorKind::Backend,
+            Cow::Owned(format!("sqlx migration error: {}", &value)),
+            Box::new(value)
+        )
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct PostgresStorage {
+    db: PgPool
+}
+
+impl PostgresStorage {
+    /// Construct a new [`PostgresStorage`] from an URI string and run
+    /// migrations on the database.
+    ///
+    /// If `PGPASS_FILE` environment variable is defined, read the
+    /// password from the file at the specified path. If, instead,
+    /// the `PGPASS` environment variable is present, read the
+    /// password from it.
+    pub async fn new(uri: &str) -> Result<Self> {
+        tracing::debug!("Postgres URL: {uri}");
+        let mut options = sqlx::postgres::PgConnectOptions::from_str(uri)?;
+        if let Ok(password_file) = std::env::var("PGPASS_FILE") {
+            let password = tokio::fs::read_to_string(password_file).await.unwrap();
+            options = options.password(&password);
+        } else if let Ok(password) = std::env::var("PGPASS") {
+            options = options.password(&password)
+        }
+        Self::from_pool(
+            sqlx::postgres::PgPoolOptions::new()
+                .max_connections(50)
+                .connect_with(options)
+                .await?
+        ).await
+        
+    }
+
+    /// Construct a [`PostgresStorage`] from a [`sqlx::PgPool`],
+    /// running appropriate migrations.
+    pub async fn from_pool(db: sqlx::PgPool) -> Result<Self> {
+        MIGRATOR.run(&db).await?;
+        Ok(Self { db })
+    }
+}
+
+#[async_trait::async_trait]
+impl Storage for PostgresStorage {
+    #[tracing::instrument(skip(self))]
+    async fn post_exists(&self, url: &str) -> Result<bool> {
+        sqlx::query_as::<_, (bool,)>("SELECT exists(SELECT 1 FROM kittybox.mf2_json WHERE uid = $1 OR mf2['properties']['url'] ? $1)")
+            .bind(url)
+            .fetch_one(&self.db)
+            .await
+            .map(|v| v.0)
+            .map_err(|err| err.into())
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn get_post(&self, url: &str) -> Result<Option<serde_json::Value>> {
+        sqlx::query_as::<_, (serde_json::Value,)>("SELECT mf2 FROM kittybox.mf2_json WHERE uid = $1 OR mf2['properties']['url'] ? $1")
+            .bind(url)
+            .fetch_optional(&self.db)
+            .await
+            .map(|v| v.map(|v| v.0))
+            .map_err(|err| err.into())
+
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn put_post(&self, post: &'_ serde_json::Value, user: &'_ str) -> Result<()> {
+        tracing::debug!("New post: {}", post);
+        sqlx::query("INSERT INTO kittybox.mf2_json (uid, mf2, owner) VALUES ($1 #>> '{properties,uid,0}', $1, $2)")
+            .bind(post)
+            .bind(user)
+            .execute(&self.db)
+            .await
+            .map(|_| ())
+            .map_err(Into::into)
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn add_to_feed(&self, feed: &'_ str, post: &'_ str) -> Result<()> {
+        tracing::debug!("Inserting {} into {}", post, feed);
+        sqlx::query("INSERT INTO kittybox.children (parent, child) VALUES ($1, $2) ON CONFLICT DO NOTHING")
+            .bind(feed)
+            .bind(post)
+            .execute(&self.db)
+            .await
+            .map(|_| ())
+            .map_err(Into::into)
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn remove_from_feed(&self, feed: &'_ str, post: &'_ str) -> Result<()> {
+        sqlx::query("DELETE FROM kittybox.children WHERE parent = $1 AND child = $2")
+            .bind(feed)
+            .bind(post)
+            .execute(&self.db)
+            .await
+            .map_err(Into::into)
+            .map(|_| ())
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn update_post(&self, url: &'_ str, update: MicropubUpdate) -> Result<()> {
+        tracing::debug!("Updating post {}", url);
+        let mut txn = self.db.begin().await?;
+        let (uid, mut post) = sqlx::query_as::<_, (String, serde_json::Value)>("SELECT uid, mf2 FROM kittybox.mf2_json WHERE uid = $1 OR mf2['properties']['url'] ? $1 FOR UPDATE")
+            .bind(url)
+            .fetch_optional(&mut txn)
+            .await?
+            .ok_or(StorageError::from_static(
+                ErrorKind::NotFound,
+                "The specified post wasn't found in the database."
+            ))?;
+
+        if let Some(MicropubPropertyDeletion::Properties(ref delete)) = update.delete {
+            if let Some(props) = post["properties"].as_object_mut() {
+                for key in delete {
+                    props.remove(key);
+                }
+            }
+        } else if let Some(MicropubPropertyDeletion::Values(ref delete)) = update.delete {
+            if let Some(props) = post["properties"].as_object_mut() {
+                for (key, values) in delete {
+                    if let Some(prop) = props.get_mut(key).and_then(serde_json::Value::as_array_mut) {
+                        prop.retain(|v| { values.iter().all(|i| i != v) })
+                    }
+                }
+            }
+        }
+        if let Some(replace) = update.replace {
+            if let Some(props) = post["properties"].as_object_mut() {
+                for (key, value) in replace {
+                    props.insert(key, serde_json::Value::Array(value));
+                }
+            }
+        }
+        if let Some(add) = update.add {
+            if let Some(props) = post["properties"].as_object_mut() {
+                for (key, value) in add {
+                    if let Some(prop) = props.get_mut(&key).and_then(serde_json::Value::as_array_mut) {
+                        prop.extend_from_slice(value.as_slice());
+                    } else {
+                        props.insert(key, serde_json::Value::Array(value));
+                    }
+                }
+            }
+        }
+
+        sqlx::query("UPDATE kittybox.mf2_json SET mf2 = $2 WHERE uid = $1")
+            .bind(uid)
+            .bind(post)
+            .execute(&mut txn)
+            .await?;
+            
+        txn.commit().await.map_err(Into::into)
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn get_channels(&self, user: &'_ str) -> Result<Vec<MicropubChannel>> {
+        /*sqlx::query_as::<_, MicropubChannel>("SELECT name, uid FROM kittybox.channels WHERE owner = $1")
+            .bind(user)
+            .fetch_all(&self.db)
+            .await
+            .map_err(|err| err.into())*/
+        sqlx::query_as::<_, MicropubChannel>(r#"SELECT mf2 #>> '{properties,name,0}' as name, uid FROM kittybox.mf2_json WHERE '["h-feed"]'::jsonb @> mf2['type'] AND owner = $1"#)
+            .bind(user)
+            .fetch_all(&self.db)
+            .await
+            .map_err(|err| err.into())
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn read_feed_with_limit(
+        &self,
+        url: &'_ str,
+        after: &'_ Option<String>,
+        limit: usize,
+        user: &'_ Option<String>,
+    ) -> Result<Option<serde_json::Value>> {
+        let mut feed = match sqlx::query_as::<_, (serde_json::Value,)>("
+SELECT jsonb_set(
+    mf2,
+    '{properties,author,0}',
+    (SELECT mf2 FROM kittybox.mf2_json
+     WHERE uid = mf2 #>> '{properties,author,0}')
+) FROM kittybox.mf2_json WHERE uid = $1
+")
+            .bind(url)
+            .fetch_optional(&self.db)
+            .await?
+            .map(|v| v.0)
+        {
+            Some(feed) => feed,
+            None => return Ok(None)
+        };
+
+        let posts: Vec<String> = {
+            let mut posts_iter = feed["children"]
+                .as_array()
+                .cloned()
+                .unwrap_or_default()
+                .into_iter()
+                .map(|s| s.as_str().unwrap().to_string());
+            if let Some(after) = after {
+                for s in posts_iter.by_ref() {
+                    if &s == after {
+                        break;
+                    }
+                }
+            };
+
+            posts_iter.take(limit).collect::<Vec<_>>()
+        };
+        feed["children"] = serde_json::Value::Array(
+            sqlx::query_as::<_, (serde_json::Value,)>("
+SELECT jsonb_set(
+    mf2,
+    '{properties,author,0}',
+    (SELECT mf2 FROM kittybox.mf2_json
+     WHERE uid = mf2 #>> '{properties,author,0}')
+) FROM kittybox.mf2_json
+WHERE uid = ANY($1)
+ORDER BY mf2 #>> '{properties,published,0}' DESC
+")
+                .bind(&posts[..])
+                .fetch_all(&self.db)
+                .await?
+                .into_iter()
+                .map(|v| v.0)
+                .collect::<Vec<_>>()
+        );
+
+        Ok(Some(feed))
+            
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn read_feed_with_cursor(
+        &self,
+        url: &'_ str,
+        cursor: Option<&'_ str>,
+        limit: usize,
+        user: Option<&'_ str>
+    ) -> Result<Option<(serde_json::Value, Option<String>)>> {
+        let mut txn = self.db.begin().await?;
+        sqlx::query("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY")
+            .execute(&mut txn)
+            .await?;
+        tracing::debug!("Started txn: {:?}", txn);
+        let mut feed = match sqlx::query_scalar::<_, serde_json::Value>("
+SELECT kittybox.hydrate_author(mf2) FROM kittybox.mf2_json WHERE uid = $1
+")
+            .bind(url)
+            .fetch_optional(&mut txn)
+            .await?
+        {
+            Some(feed) => feed,
+            None => return Ok(None)
+        };
+
+        feed["children"] = sqlx::query_scalar::<_, serde_json::Value>("
+SELECT kittybox.hydrate_author(mf2) FROM kittybox.mf2_json
+INNER JOIN kittybox.children
+ON mf2_json.uid = children.child
+WHERE
+    children.parent = $1
+    AND (
+        (
+          (mf2 #>> '{properties,visibility,0}') = 'public'
+          OR
+          NOT (mf2['properties'] ? 'visibility')
+        )
+        OR
+        (
+            $3 IS NOT NULL AND (
+                mf2['properties']['audience'] ? $3
+                OR mf2['properties']['author'] ? $3
+            )
+        )
+    )
+    AND ($4 IS NULL OR ((mf2_json.mf2 #>> '{properties,published,0}') < $4))
+ORDER BY (mf2_json.mf2 #>> '{properties,published,0}') DESC
+LIMIT $2"
+        )
+            .bind(url)
+            .bind(limit as i64)
+            .bind(user)
+            .bind(cursor)
+            .fetch_all(&mut txn)
+            .await
+            .map(serde_json::Value::Array)?;
+
+        let new_cursor = feed["children"].as_array().unwrap()
+            .last()
+            .map(|v| v["properties"]["published"][0].as_str().unwrap().to_owned());
+
+        txn.commit().await?;
+
+        Ok(Some((feed, new_cursor)))
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn delete_post(&self, url: &'_ str) -> Result<()> {
+        todo!()
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn get_setting<S: Setting<'a>, 'a>(&'_ self, user: &'_ str) -> Result<S> {
+        match sqlx::query_as::<_, (serde_json::Value,)>("SELECT kittybox.get_setting($1, $2)")
+            .bind(user)
+            .bind(S::ID)
+            .fetch_one(&self.db)
+            .await
+        {
+            Ok((value,)) => Ok(serde_json::from_value(value)?),
+            Err(err) => Err(err.into())
+        }
+    }
+
+    #[tracing::instrument(skip(self))]
+    async fn set_setting<S: Setting<'a> + 'a, 'a>(&self, user: &'a str, value: S::Data) -> Result<()> {
+        sqlx::query("SELECT kittybox.set_setting($1, $2, $3)")
+            .bind(user)
+            .bind(S::ID)
+            .bind(serde_json::to_value(S::new(value)).unwrap())
+            .execute(&self.db)
+            .await
+            .map_err(Into::into)
+            .map(|_| ())
+    }
+}
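
Putting the constructor and the Storage implementation together, the backend can be driven directly along the lines of the minimal sketch below (not from this commit): the URI and the post JSON are illustrative, errors are unwrapped for brevity, and as the doc comment on new explains, the password may instead arrive via PGPASS or PGPASS_FILE.

    use kittybox::database::{PostgresStorage, Storage};

    // Hedged usage sketch; assumes the postgres feature is enabled and a
    // local PostgreSQL socket is reachable at the URI below.
    async fn demo() {
        let db = PostgresStorage::new(
            "postgres://localhost?dbname=kittybox&host=/run/postgresql"
        ).await.unwrap();

        let post = serde_json::json!({
            "type": ["h-entry"],
            "properties": {
                "uid": ["https://example.com/posts/hello-postgres"],
                "url": ["https://example.com/posts/hello-postgres"],
                "content": ["Hello from the Postgres backend!"]
            }
        });
        db.put_post(&post, "example.com").await.unwrap();
        assert!(db.post_exists("https://example.com/posts/hello-postgres").await.unwrap());
    }
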
diff --git a/kittybox-rs/src/main.rs b/kittybox-rs/src/main.rs
index 68d3b01..7131200 100644
--- a/kittybox-rs/src/main.rs
+++ b/kittybox-rs/src/main.rs
@@ -78,7 +78,58 @@ where A: kittybox::indieauth::backend::AuthBackend
                 )
         },
         "redis" => unimplemented!("Redis backend is not supported."),
+        #[cfg(feature = "postgres")]
+        "postgres" => {
+            use kittybox::database::PostgresStorage;
 
+            let database = {
+                match PostgresStorage::new(backend_uri).await {
+                    Ok(db) => db,
+                    Err(err) => {
+                        error!("Error creating database: {:?}", err);
+                        std::process::exit(1);
+                    }
+                }
+            };
+
+            // Technically, if we don't construct the micropub router,
+            // we could use some wrapper that makes the database
+            // read-only.
+            //
+            // This would allow to exclude all code to write to the
+            // database and separate reader and writer processes of
+            // Kittybox to improve security.
+            let homepage: axum::routing::MethodRouter<_> = axum::routing::get(
+                kittybox::frontend::homepage::<PostgresStorage>
+            )
+                .layer(axum::Extension(database.clone()));
+            let fallback = axum::routing::get(
+                kittybox::frontend::catchall::<PostgresStorage>
+            )
+                .layer(axum::Extension(database.clone()));
+
+            let micropub = kittybox::micropub::router(
+                database.clone(),
+                http.clone(),
+                auth_backend.clone()
+            );
+            let onboarding = kittybox::frontend::onboarding::router(
+                database.clone(), http.clone()
+            );
+
+            axum::Router::new()
+                .route("/", homepage)
+                .fallback(fallback)
+                .route("/.kittybox/micropub", micropub)
+                .route("/.kittybox/onboarding", onboarding)
+                .nest("/.kittybox/media", init_media(auth_backend.clone(), blobstore_uri))
+                .merge(kittybox::indieauth::router(auth_backend.clone(), database.clone(), http.clone()))
+                .route(
+                    "/.kittybox/health",
+                    axum::routing::get(health_check::<kittybox::database::PostgresStorage>)
+                        .layer(axum::Extension(database))
+                )
+        },
         other => unimplemented!("Unsupported backend: {other}")
     }
 }
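
The comment in this hunk about a read-only wrapper is only an aside, but a rough sketch of the idea could be a newtype that re-exposes just the read half of the Storage API, so a reader-only process never links the write paths. Everything below, including the ReadOnlyStorage name and the kittybox::database::Result path, is a hypothetical illustration rather than part of this commit:

    use kittybox::database::Storage;

    /// Hypothetical wrapper for a reader-only Kittybox process: it owns a
    /// full backend but only delegates the read-side methods.
    #[derive(Clone)]
    struct ReadOnlyStorage<S: Storage>(S);

    impl<S: Storage> ReadOnlyStorage<S> {
        async fn get_post(
            &self, url: &str
        ) -> kittybox::database::Result<Option<serde_json::Value>> {
            self.0.get_post(url).await
        }

        async fn read_feed_with_cursor(
            &self,
            url: &str,
            cursor: Option<&str>,
            limit: usize,
            user: Option<&str>,
        ) -> kittybox::database::Result<Option<(serde_json::Value, Option<String>)>> {
            self.0.read_feed_with_cursor(url, cursor, limit, user).await
        }

        // put_post, update_post, delete_post and friends are deliberately absent.
    }
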
diff --git a/kittybox.nix b/kittybox.nix
index d7577fd..ccf3ea8 100644
--- a/kittybox.nix
+++ b/kittybox.nix
@@ -1,8 +1,10 @@
 { stdenv, lib, naersk, lld, mold, typescript
 , useWebAuthn ? false, openssl, zlib, pkg-config, protobuf
+, usePostgres ? true, postgresql, postgresqlTestHook
 , nixosTests }:
 
 assert useWebAuthn -> openssl != null && pkg-config != null;
+assert usePostgres -> postgresql != null && postgresqlTestHook != null;
 
 naersk.buildPackage {
   pname = "kittybox";
@@ -12,13 +14,24 @@ naersk.buildPackage {
 
   doCheck = stdenv.hostPlatform == stdenv.targetPlatform;
   cargoOptions = x: x ++ (lib.optionals useWebAuthn [
-    "--no-default-features" "--features=\"webauthn\""
+    "--no-default-features" "--features=\"webauthn${lib.optionalString usePostgres " sqlx"}\""
   ]);
   buildInputs = lib.optional useWebAuthn openssl;
   nativeBuildInputs = [ typescript ] ++ (lib.optional useWebAuthn pkg-config);
 
+  nativeCheckInputs = lib.optionals usePostgres [
+    postgresql postgresqlTestHook
+  ];
+
+  # Tests create arbitrary databases; we need to be prepared for that
+  postgresqlTestUserOptions = "LOGIN SUPERUSER";
+  postgresqlTestSetupPost = ''
+    export DATABASE_URL="postgres://localhost?host=$PGHOST&user=$PGUSER&dbname=$PGDATABASE"
+  '';
+
   passthru = {
     tests = nixosTests;
+    hasPostgres = usePostgres;
   };
 
   meta = with lib.meta; {
diff --git a/postgres-smoke-test.nix b/postgres-smoke-test.nix
new file mode 100644
index 0000000..51d53c7
--- /dev/null
+++ b/postgres-smoke-test.nix
@@ -0,0 +1,49 @@
+kittybox:
+{ lib, ... }: {
+  name = "nixos-kittybox";
+
+  nodes = {
+    kittybox = { config, pkgs, lib, ... }: {
+      imports = [ kittybox.nixosModules.default ];
+
+      services.postgresql = {
+        enable = true;
+        ensureDatabases = ["kittybox"];
+        ensureUsers = [ {
+          name = "kittybox";
+          ensurePermissions = {
+            "DATABASE kittybox" = "ALL PRIVILEGES";
+          };
+        } ];
+      };
+
+      services.kittybox = {
+        enable = true;
+        logLevel = "info,kittybox=debug,retainer::cache=warn,h2=warn,rustls=warn";
+        backendUri = "postgres://localhost?host=/run/postgresql&dbname=kittybox";
+      };
+
+      systemd.services.kittybox.wants = [ "postgresql.service" ];
+      systemd.services.kittybox.after = [ "postgresql.service" ];
+
+      environment.systemPackages = with pkgs; [
+        xh
+      ];
+    };
+  };
+
+  # TODO: Make e2e tests for authentication endpoints and such
+  # Potentially using WebDriver
+  # Could also be implemented with fantoccini
+  testScript = ''
+    with subtest("Verify that Kittybox started correctly..."):
+        kittybox.wait_for_open_port(8080)
+        kittybox.succeed("xh --no-check-status http://localhost:8080/.kittybox/micropub")
+
+    with subtest("Onboarding should correctly work..."):
+        kittybox.copy_from_host("${./onboarding.json}", "/root/onboarding.json")
+        kittybox.succeed("xh --follow http://localhost:8080/.kittybox/onboarding -j @/root/onboarding.json")
+        # Testing for a known string is the easiest way to determine that the onboarding worked
+        kittybox.succeed("xh http://localhost:8080/ | grep 'vestige of the past long gone'")
+'';
+}