Diffstat (limited to 'kittybox-rs/src/database/mod.rs')
 kittybox-rs/src/database/mod.rs | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/kittybox-rs/src/database/mod.rs b/kittybox-rs/src/database/mod.rs
index 1d1cf15..98fe6ca 100644
--- a/kittybox-rs/src/database/mod.rs
+++ b/kittybox-rs/src/database/mod.rs
@@ -257,7 +257,7 @@ pub trait Storage: std::fmt::Debug + Clone + Send + Sync {
     /// the `user` domain to write to.
     async fn get_channels(&self, user: &'_ str) -> Result<Vec<MicropubChannel>>;
 
-    /// Fetch a feed at `url` and return a an h-feed object containing
+    /// Fetch a feed at `url` and return an h-feed object containing
     /// `limit` posts after a post by url `after`, filtering the content
     /// in context of a user specified by `user` (or an anonymous user).
     ///
@@ -281,6 +281,24 @@ pub trait Storage: std::fmt::Debug + Clone + Send + Sync {
         user: &'_ Option<String>,
     ) -> Result<Option<serde_json::Value>>;
 
+    /// Fetch a feed at `url` and return an h-feed object containing
+    /// `limit` posts after a `cursor` (filtering the content in
+    /// context of a user specified by `user`, or an anonymous user),
+    /// as well as a new cursor to paginate with.
+    ///
+    /// This method MUST hydrate the `author` property with an h-card
+    /// from the database by replacing URLs with corresponding h-cards.
+    ///
+    /// When encountering posts which the `user` is not authorized to
+    /// access, this method MUST elide such posts (as an optimization
+    /// for the frontend) and not return them, but still return a
+    /// number of posts as close to `limit` as possible (to avoid
+    /// revealing the existence of hidden posts).
+    ///
+    /// Note for implementors: if you use streams to fetch posts in
+    /// parallel from the database, prefer backing this method with
+    /// a connection pool to reduce the overhead of opening a
+    /// database connection per post.
     async fn read_feed_with_cursor(
         &self,
         url: &'_ str,
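
The implementor note above deserves a sketch. A connection pool lets a stream-based implementation fan out one query per post without opening a fresh database connection each time. Below is a minimal, hypothetical example of that pattern using sqlx and futures-util; the `fetch_post` helper and the table and column names are illustrative assumptions, not kittybox's actual schema:

```rust
use futures_util::{stream, StreamExt, TryStreamExt};
use sqlx::PgPool;

/// Hypothetical helper: fetch one post as JSON, checking a
/// connection out of the pool instead of dialing a new one.
async fn fetch_post(pool: &PgPool, url: &str) -> sqlx::Result<Option<serde_json::Value>> {
    sqlx::query_scalar("SELECT mf2 FROM posts WHERE uid = $1")
        .bind(url)
        .fetch_optional(pool)
        .await
}

/// Fetch several posts in parallel over pooled connections.
async fn fetch_posts(pool: &PgPool, urls: &[String]) -> sqlx::Result<Vec<serde_json::Value>> {
    stream::iter(urls)
        .map(|url| fetch_post(pool, url))
        .buffered(8) // at most 8 fetches in flight at once
        .try_filter_map(|post| async move { Ok(post) }) // skip posts that vanished
        .try_collect()
        .await
}
```

`buffered(8)` bounds how many fetches run concurrently, so the stream never demands more connections than the pool is sized for.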
@@ -645,7 +663,7 @@ mod tests {
         );
 
         tracing::debug!("Continuing with cursor: {:?}", cursor);
-        let (result4, cursor4) = backend
+        let (result4, _) = backend
             .read_feed_with_cursor(
                 key,
                 cursor3.as_deref(),
@@ -666,7 +684,7 @@ mod tests {
         // Results for a bogus cursor are undefined, so we aren't
         // checking them. But the function at least shouldn't hang.
         let nonsense_after = Some("1010101010");
-        let result_bogus = tokio::time::timeout(tokio::time::Duration::from_secs(10), async move {
+        let _ = tokio::time::timeout(tokio::time::Duration::from_secs(10), async move {
             backend
                 .read_feed_with_cursor(key, nonsense_after, limit, None)
                 .await
@@ -735,11 +753,10 @@ mod tests {
             #[tracing_test::traced_test]
             async fn $func_name(
                 pool_opts: sqlx::postgres::PgPoolOptions,
-                mut connect_opts: sqlx::postgres::PgConnectOptions
+                connect_opts: sqlx::postgres::PgConnectOptions
             ) -> Result<(), sqlx::Error> {
-                use sqlx::ConnectOptions;
-
                 let db = {
+                    //use sqlx::ConnectOptions;
                     //connect_opts.log_statements(log::LevelFilter::Debug);
 
                     pool_opts.connect_with(connect_opts).await?
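
Aside: the `use sqlx::ConnectOptions;` import and the statement-logging line were commented out rather than deleted, so they can be restored while debugging. With the builder-style `ConnectOptions` of recent sqlx releases that would look roughly like the sketch below (older releases took `&mut self`, which is why the parameter used to be `mut connect_opts`):

```rust
use sqlx::ConnectOptions;

let db = {
    // Assumption: a recent sqlx where log_statements() consumes
    // and returns the options value builder-style.
    let connect_opts = connect_opts.log_statements(log::LevelFilter::Debug);
    pool_opts.connect_with(connect_opts).await?
};
```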