Diffstat (limited to 'src/database/mod.rs')
-rw-r--r-- | src/database/mod.rs | 150
1 file changed, 106 insertions, 44 deletions
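Editor's note, for readers skimming the change below: this is a pure formatting pass over src/database/mod.rs (imports reordered, trailing commas added, long expressions reflowed across lines, consistent with a `cargo fmt` run), with no functional changes. For context on how the `Storage` trait touched here is consumed, a hypothetical caller could look like the following sketch. It is not part of the commit: the handler name, feed URL and page size are invented for illustration, and it assumes the code sits in the same crate, so that `Storage`, `StorageError` and the `From<StorageError> for tide::Response` impl from this file are in scope.

use crate::database::Storage;

// Hypothetical example (not in this commit): serve an h-feed to an optionally
// authenticated user, relying only on methods defined by the Storage trait.
async fn serve_feed<S: Storage>(backend: &S, user: Option<String>) -> tide::Response {
    // read_feed_with_limit already elides private posts and strips non-public
    // location data, so its output can be returned to the client as-is.
    match backend
        .read_feed_with_limit("https://fireburn.ru/feeds/main", &None, 20, &user)
        .await
    {
        Ok(Some(feed)) => tide::Response::builder(200).body(feed).build(),
        Ok(None) => tide::Response::new(404),
        // The From<StorageError> for tide::Response impl in this file maps the
        // error kind to a status code (400/404/500) and a JSON error body.
        Err(err) => err.into(),
    }
}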
diff --git a/src/database/mod.rs b/src/database/mod.rs
index 98c2cae..8579125 100644
--- a/src/database/mod.rs
+++ b/src/database/mod.rs
@@ -1,17 +1,17 @@
 #![warn(missing_docs)]
-use async_trait::async_trait;
-use serde::{Serialize,Deserialize};
 use crate::indieauth::User;
+use async_trait::async_trait;
+use serde::{Deserialize, Serialize};
 
 mod redis;
 pub use crate::database::redis::RedisStorage;
 #[cfg(test)]
-pub use redis::tests::{RedisInstance, get_redis_instance};
+pub use redis::tests::{get_redis_instance, RedisInstance};
 
 #[derive(Serialize, Deserialize, PartialEq, Debug)]
 pub struct MicropubChannel {
     pub uid: String,
-    pub name: String
+    pub name: String,
 }
 
 #[derive(Debug, Clone, Copy)]
@@ -21,14 +21,14 @@ pub enum ErrorKind {
     JsonParsing,
     NotFound,
     BadRequest,
-    Other
+    Other,
 }
 
 #[derive(Debug)]
 pub struct StorageError {
     msg: String,
     source: Option<Box<dyn std::error::Error>>,
-    kind: ErrorKind
+    kind: ErrorKind,
 }
 unsafe impl Send for StorageError {}
 unsafe impl Sync for StorageError {}
@@ -38,14 +38,16 @@ impl From<StorageError> for tide::Response {
             ErrorKind::BadRequest => 400,
             ErrorKind::NotFound => 404,
             _ => 500,
-        }).body(serde_json::json!({
+        })
+        .body(serde_json::json!({
             "error": match err.kind() {
                 ErrorKind::BadRequest => "invalid_request",
                 ErrorKind::NotFound => "not_found",
                 _ => "database_error"
             },
             "error_description": err
-        })).build()
+        }))
+        .build()
     }
 }
 impl std::error::Error for StorageError {
@@ -58,7 +60,7 @@ impl From<serde_json::Error> for StorageError {
         Self {
             msg: format!("{}", err),
             source: Some(Box::new(err)),
-            kind: ErrorKind::JsonParsing
+            kind: ErrorKind::JsonParsing,
         }
     }
 }
@@ -70,15 +72,18 @@ impl std::fmt::Display for StorageError {
             ErrorKind::PermissionDenied => write!(f, "permission denied: "),
             ErrorKind::NotFound => write!(f, "not found: "),
             ErrorKind::BadRequest => write!(f, "bad request: "),
-            ErrorKind::Other => write!(f, "generic storage layer error: ")
+            ErrorKind::Other => write!(f, "generic storage layer error: "),
         } {
             Ok(_) => write!(f, "{}", self.msg),
-            Err(err) => Err(err)
+            Err(err) => Err(err),
         }
     }
 }
 impl serde::Serialize for StorageError {
-    fn serialize<S: serde::Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
+    fn serialize<S: serde::Serializer>(
+        &self,
+        serializer: S,
+    ) -> std::result::Result<S::Ok, S::Error> {
         serializer.serialize_str(&self.to_string())
     }
 }
@@ -88,7 +93,7 @@ impl StorageError {
         StorageError {
             msg: msg.to_string(),
             source: None,
-            kind
+            kind,
         }
     }
     /// Get the kind of an error.
@@ -100,12 +105,11 @@ impl StorageError {
     }
 }
 
-
 /// A special Result type for the Micropub backing storage.
 pub type Result<T> = std::result::Result<T, StorageError>;
 
 /// A storage backend for the Micropub server.
-/// 
+///
 /// Implementations should note that all methods listed on this trait MUST be fully atomic
 /// or lock the database so that write conflicts or reading half-written data should not occur.
 #[async_trait]
@@ -117,21 +121,21 @@ pub trait Storage: Clone + Send + Sync {
     async fn get_post(&self, url: &str) -> Result<Option<serde_json::Value>>;
 
     /// Save a post to the database as an MF2-JSON structure.
-    /// 
+    ///
     /// Note that the `post` object MUST have `post["properties"]["uid"][0]` defined.
    async fn put_post<'a>(&self, post: &'a serde_json::Value) -> Result<()>;
 
     /*/// Save a post and add it to the relevant feeds listed in `post["properties"]["channel"]`.
-    /// 
+    ///
     /// Note that the `post` object MUST have `post["properties"]["uid"][0]` defined
     /// and `post["properties"]["channel"]` defined, even if it's empty.
     async fn put_and_index_post<'a>(&mut self, post: &'a serde_json::Value) -> Result<()>;*/
-    
+
     /// Modify a post using an update object as defined in the Micropub spec.
-    /// 
+    ///
     /// Note to implementors: the update operation MUST be atomic OR MUST lock the database
     /// to prevent two clients overwriting each other's changes.
-    /// 
+    ///
     /// You can assume concurrent updates will never contradict each other, since that will be dumb.
     /// The last update always wins.
     async fn update_post<'a>(&self, url: &'a str, update: serde_json::Value) -> Result<()>;
@@ -142,21 +146,27 @@ pub trait Storage: Clone + Send + Sync {
     /// Fetch a feed at `url` and return a an h-feed object containing
     /// `limit` posts after a post by url `after`, filtering the content
     /// in context of a user specified by `user` (or an anonymous user).
-    /// 
+    ///
     /// Specifically, private posts that don't include the user in the audience
     /// will be elided from the feed, and the posts containing location and not
     /// specifying post["properties"]["location-visibility"][0] == "public"
     /// will have their location data (but not check-in data) stripped.
-    /// 
+    ///
     /// This function is used as an optimization so the client, whatever it is,
     /// doesn't have to fetch posts, then realize some of them are private, and
     /// fetch more posts.
-    /// 
+    ///
     /// Note for implementors: if you use streams to fetch posts in parallel
     /// from the database, preferably make this method use a connection pool
     /// to reduce overhead of creating a database connection per post for
     /// parallel fetching.
-    async fn read_feed_with_limit<'a>(&self, url: &'a str, after: &'a Option<String>, limit: usize, user: &'a Option<String>) -> Result<Option<serde_json::Value>>;
+    async fn read_feed_with_limit<'a>(
+        &self,
+        url: &'a str,
+        after: &'a Option<String>,
+        limit: usize,
+        user: &'a Option<String>,
+    ) -> Result<Option<serde_json::Value>>;
 
     /// Deletes a post from the database irreversibly. 'nuff said. Must be idempotent.
     async fn delete_post<'a>(&self, url: &'a str) -> Result<()>;
@@ -170,9 +180,9 @@ pub trait Storage: Clone + Send + Sync {
 
 #[cfg(test)]
 mod tests {
-    use super::{Storage, MicropubChannel};
-    use serde_json::json;
     use super::redis::tests::get_redis_instance;
+    use super::{MicropubChannel, Storage};
+    use serde_json::json;
 
     async fn test_backend_basic_operations<Backend: Storage>(backend: Backend) {
         let post: serde_json::Value = json!({
@@ -191,23 +201,47 @@ mod tests {
         backend.put_post(&post).await.unwrap();
         if let Ok(Some(returned_post)) = backend.get_post(&key).await {
             assert!(returned_post.is_object());
-            assert_eq!(returned_post["type"].as_array().unwrap().len(), post["type"].as_array().unwrap().len());
-            assert_eq!(returned_post["type"].as_array().unwrap(), post["type"].as_array().unwrap());
-            let props: &serde_json::Map<String, serde_json::Value> = post["properties"].as_object().unwrap();
+            assert_eq!(
+                returned_post["type"].as_array().unwrap().len(),
+                post["type"].as_array().unwrap().len()
+            );
+            assert_eq!(
+                returned_post["type"].as_array().unwrap(),
+                post["type"].as_array().unwrap()
+            );
+            let props: &serde_json::Map<String, serde_json::Value> =
+                post["properties"].as_object().unwrap();
             for key in props.keys() {
-                assert_eq!(returned_post["properties"][key].as_array().unwrap(), post["properties"][key].as_array().unwrap())
+                assert_eq!(
+                    returned_post["properties"][key].as_array().unwrap(),
+                    post["properties"][key].as_array().unwrap()
+                )
             }
-        } else { panic!("For some reason the backend did not return the post.") }
+        } else {
+            panic!("For some reason the backend did not return the post.")
+        }
         // Check the alternative URL - it should return the same post
         if let Ok(Some(returned_post)) = backend.get_post(&alt_url).await {
             assert!(returned_post.is_object());
-            assert_eq!(returned_post["type"].as_array().unwrap().len(), post["type"].as_array().unwrap().len());
-            assert_eq!(returned_post["type"].as_array().unwrap(), post["type"].as_array().unwrap());
-            let props: &serde_json::Map<String, serde_json::Value> = post["properties"].as_object().unwrap();
+            assert_eq!(
+                returned_post["type"].as_array().unwrap().len(),
+                post["type"].as_array().unwrap().len()
+            );
+            assert_eq!(
+                returned_post["type"].as_array().unwrap(),
+                post["type"].as_array().unwrap()
+            );
+            let props: &serde_json::Map<String, serde_json::Value> =
+                post["properties"].as_object().unwrap();
             for key in props.keys() {
-                assert_eq!(returned_post["properties"][key].as_array().unwrap(), post["properties"][key].as_array().unwrap())
+                assert_eq!(
+                    returned_post["properties"][key].as_array().unwrap(),
+                    post["properties"][key].as_array().unwrap()
+                )
            }
-        } else { panic!("For some reason the backend did not return the post.") }
+        } else {
+            panic!("For some reason the backend did not return the post.")
+        }
     }
 
     async fn test_backend_get_channel_list<Backend: Storage>(backend: Backend) {
@@ -221,33 +255,61 @@ mod tests {
             "children": []
         });
         backend.put_post(&feed).await.unwrap();
-        let chans = backend.get_channels(&crate::indieauth::User::new("https://fireburn.ru/", "https://quill.p3k.io/", "create update media")).await.unwrap();
+        let chans = backend
+            .get_channels(&crate::indieauth::User::new(
+                "https://fireburn.ru/",
+                "https://quill.p3k.io/",
+                "create update media",
+            ))
+            .await
+            .unwrap();
         assert_eq!(chans.len(), 1);
-        assert_eq!(chans[0], MicropubChannel { uid: "https://fireburn.ru/feeds/main".to_string(), name: "Main Page".to_string() });
+        assert_eq!(
+            chans[0],
+            MicropubChannel {
+                uid: "https://fireburn.ru/feeds/main".to_string(),
+                name: "Main Page".to_string()
+            }
+        );
     }
 
     async fn test_backend_settings<Backend: Storage>(backend: Backend) {
-        backend.set_setting("site_name", "https://fireburn.ru/", "Vika's Hideout").await.unwrap();
-        assert_eq!(backend.get_setting("site_name", "https://fireburn.ru/").await.unwrap(), "Vika's Hideout");
+        backend
+            .set_setting("site_name", "https://fireburn.ru/", "Vika's Hideout")
+            .await
+            .unwrap();
+        assert_eq!(
+            backend
+                .get_setting("site_name", "https://fireburn.ru/")
+                .await
+                .unwrap(),
+            "Vika's Hideout"
+        );
     }
     #[async_std::test]
     async fn test_redis_storage_basic_operations() {
         let redis_instance = get_redis_instance().await;
-        let backend = super::RedisStorage::new(redis_instance.uri().to_string()).await.unwrap();
+        let backend = super::RedisStorage::new(redis_instance.uri().to_string())
+            .await
+            .unwrap();
        test_backend_basic_operations(backend).await;
     }
 
     #[async_std::test]
     async fn test_redis_storage_channel_list() {
         let redis_instance = get_redis_instance().await;
-        let backend = super::RedisStorage::new(redis_instance.uri().to_string()).await.unwrap();
+        let backend = super::RedisStorage::new(redis_instance.uri().to_string())
+            .await
+            .unwrap();
         test_backend_get_channel_list(backend).await;
     }
 
     #[async_std::test]
     async fn test_redis_settings() {
         let redis_instance = get_redis_instance().await;
-        let backend = super::RedisStorage::new(redis_instance.uri().to_string()).await.unwrap();
+        let backend = super::RedisStorage::new(redis_instance.uri().to_string())
+            .await
+            .unwrap();
         test_backend_settings(backend).await;
     }
 }