Diffstat (limited to 'src/database/redis')
 src/database/redis/mod.rs | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/src/database/redis/mod.rs b/src/database/redis/mod.rs
index f1724b7..eeaa3f2 100644
--- a/src/database/redis/mod.rs
+++ b/src/database/redis/mod.rs
@@ -2,6 +2,7 @@ use async_trait::async_trait;
 use futures::stream;
 use futures_util::FutureExt;
 use futures_util::StreamExt;
+use futures_util::TryStream;
 use futures_util::TryStreamExt;
 use lazy_static::lazy_static;
 use log::error;
@@ -225,9 +226,13 @@ impl Storage for RedisStorage {
                     }
                 }
             }
+            async fn fetch_post_for_feed(url: String) -> Option<serde_json::Value> {
+                return Some(serde_json::json!({}));
+            }
             let posts = stream::iter(posts_iter)
-                .map(|url| async move {
-                    match self.redis.get().await {
+                .map(|url: String| async move {
+                    return Ok(fetch_post_for_feed(url).await);
+                    /*match self.redis.get().await {
                         Ok(mut conn) => {
                             match conn.hget::<&str, &str, Option<String>>("posts", &url).await {
                                 Ok(post) => match post {
@@ -241,18 +246,21 @@ impl Storage for RedisStorage {
                             }
                         }
                         Err(err) => Err(StorageError::with_source(ErrorKind::Backend, "Error getting a connection from the pool", Box::new(err)))
-                    }
+                    }*/
                 })
                 // TODO: determine the optimal value for this buffer
                 // It will probably depend on how often can you encounter a private post on the page
                 // It shouldn't be too large, or we'll start fetching too many posts from the database
                 // It MUST NOT be larger than the typical page size
                 // It MUST NOT be a significant amount of the connection pool size
-                .buffered(std::cmp::min(3, limit))
+                //.buffered(std::cmp::min(3, limit))
                 // Hack to unwrap the Option and sieve out broken links
                 // Broken links return None, and Stream::filter_map skips all Nones.
-                .try_filter_map(|post: Option<serde_json::Value>| async move { Ok(post) })
-                .try_filter_map(|post| async move { Ok(filter_post(post, user)) })
+                // I wonder if one can use try_flatten() here somehow akin to iters
+                .try_filter_map(|post| async move { Ok(post) })
+                .try_filter_map(|post| async move {
+                    Ok(filter_post(post, user))
+                })
                 .take(limit);
             match posts.try_collect::<Vec<serde_json::Value>>().await {
                 Ok(posts) => feed["children"] = json!(posts),
@@ -312,6 +320,12 @@ impl RedisStorage {
             Err(e) => Err(e.into()),
         }
     }
+
+    pub async fn conn(&self) -> Result<mobc::Connection<mobc_redis::RedisConnectionManager>> {
+        self.redis.get().await.map_err(|e| StorageError::with_source(
+            ErrorKind::Backend, "Error getting a connection from the pool", Box::new(e)
+        ))
+    }
 }
 
 #[cfg(test)]
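
For reference, below is a minimal, self-contained sketch of the stream pipeline this change is toggling. It is not the repository's code: a hypothetical `fetch` stand-in replaces the pooled-connection `hget` lookup (the pool access the new `conn()` helper now wraps), `filter_post` and `user` are left out, and the only assumed dependencies are `futures`, `futures-util`, and `serde_json`. It only illustrates how `map` + `buffered` + `try_filter_map` + `take` cooperate: `buffered(n)` drives up to `n` lookups concurrently while preserving order, and `try_filter_map` with `Ok(post)` is what sieves out the `Ok(None)` results from broken links while still propagating any `Err`.

```rust
use futures::executor::block_on;
use futures::stream;
use futures_util::{StreamExt, TryStreamExt};
use serde_json::{json, Value};

/// Hypothetical stand-in for the per-URL `hget`: `Ok(None)` models a broken
/// link, `Ok(Some(_))` a post that was found in the backend.
async fn fetch(url: &str) -> Result<Option<Value>, String> {
    match url {
        "broken" => Ok(None),
        _ => Ok(Some(json!({ "url": url }))),
    }
}

fn main() -> Result<(), String> {
    block_on(async {
        let urls = vec!["a", "broken", "b", "c"];
        let limit = 2;
        let posts: Vec<Value> = stream::iter(urls)
            .map(|url| async move { fetch(url).await })
            // Poll up to min(3, limit) lookups at once, yielding results in order.
            .buffered(std::cmp::min(3, limit))
            // Drop Ok(None) items (broken links); an Err would end the stream early.
            .try_filter_map(|post| async move { Ok(post) })
            .take(limit)
            .try_collect()
            .await?;
        // "broken" was sieved out, so the first `limit` surviving posts are "a" and "b".
        assert_eq!(posts.len(), limit);
        Ok(())
    })
}
```

On the question raised in the comment, `try_flatten()` does not fit directly here: it flattens a stream of inner streams, whereas each element is an `Option`, so `try_filter_map(|post| async move { Ok(post) })` remains the idiomatic way to discard the `None`s.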