diff --git a/Cargo.toml b/Cargo.toml index 086b78ae..251a18cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ members = ["codegen", "examples", "performance_measurement", "performance_measur [package] name = "worktable" -version = "0.9.0-beta0.1.4" +version = "0.9.0-beta0.2.0" edition = "2024" authors = ["Handy-caT"] license = "MIT" @@ -19,9 +19,9 @@ s3-support = ["dep:rusty-s3", "dep:url", "dep:reqwest", "dep:walkdir", "worktabl [dependencies] async-trait = "0.1.89" convert_case = "0.6.0" -data_bucket = "=0.3.14" +data_bucket = "=0.3.15" # data_bucket = { git = "https://github.com/pathscale/DataBucket", branch = "page_cdc_correction", version = "0.2.7" } -# data_bucket = { path = "../DataBucket", version = "0.3.11" } +# data_bucket = { path = "../DataBucket", version = "0.3.14" } derive_more = { version = "2.0.1", features = ["from", "error", "display", "debug", "into"] } eyre = "0.6.12" fastrand = "2.3.0" @@ -46,7 +46,7 @@ tracing = "0.1" url = { version = "2", optional = true } uuid = { version = "1.10.0", features = ["v4", "v7"] } walkdir = { version = "2", optional = true } -worktable_codegen = { path = "codegen", version = "=0.9.0-beta0.1.3" } +worktable_codegen = { path = "codegen", version = "=0.9.0-beta0.2.0" } [dev-dependencies] chrono = "0.4.43" diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml index 1da31705..da37a32d 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "worktable_codegen" -version = "0.9.0-beta0.1.3" +version = "0.9.0-beta0.2.0" edition = "2024" license = "MIT" description = "WorkTable codegeneration crate" diff --git a/codegen/src/common/mod.rs b/codegen/src/common/mod.rs new file mode 100644 index 00000000..ec73bce8 --- /dev/null +++ b/codegen/src/common/mod.rs @@ -0,0 +1,7 @@ +pub mod model; +pub mod name_generator; +pub mod parser; + +#[allow(unused_imports)] +pub use model::*; +pub use parser::Parser; diff --git a/codegen/src/worktable/model/column.rs 
b/codegen/src/common/model/column.rs similarity index 96% rename from codegen/src/worktable/model/column.rs rename to codegen/src/common/model/column.rs index e0de2dfb..a9fa4b1f 100644 --- a/codegen/src/worktable/model/column.rs +++ b/codegen/src/common/model/column.rs @@ -1,8 +1,8 @@ use indexmap::IndexMap; use std::collections::HashMap; -use crate::worktable::model::GeneratorType; -use crate::worktable::model::index::Index; +use crate::common::model::GeneratorType; +use crate::common::model::index::Index; use proc_macro2::{Ident, TokenStream}; use quote::quote; use syn::spanned::Spanned; diff --git a/codegen/src/worktable/model/config.rs b/codegen/src/common/model/config.rs similarity index 100% rename from codegen/src/worktable/model/config.rs rename to codegen/src/common/model/config.rs diff --git a/codegen/src/worktable/model/index.rs b/codegen/src/common/model/index.rs similarity index 100% rename from codegen/src/worktable/model/index.rs rename to codegen/src/common/model/index.rs diff --git a/codegen/src/worktable/model/mod.rs b/codegen/src/common/model/mod.rs similarity index 100% rename from codegen/src/worktable/model/mod.rs rename to codegen/src/common/model/mod.rs diff --git a/codegen/src/worktable/model/operation.rs b/codegen/src/common/model/operation.rs similarity index 100% rename from codegen/src/worktable/model/operation.rs rename to codegen/src/common/model/operation.rs diff --git a/codegen/src/worktable/model/primary_key.rs b/codegen/src/common/model/primary_key.rs similarity index 100% rename from codegen/src/worktable/model/primary_key.rs rename to codegen/src/common/model/primary_key.rs diff --git a/codegen/src/worktable/model/queries.rs b/codegen/src/common/model/queries.rs similarity index 85% rename from codegen/src/worktable/model/queries.rs rename to codegen/src/common/model/queries.rs index fdc8b170..7ad81643 100644 --- a/codegen/src/worktable/model/queries.rs +++ b/codegen/src/common/model/queries.rs @@ -2,7 +2,7 @@ use 
std::collections::HashMap; use proc_macro2::Ident; -use crate::worktable::model::Operation; +use crate::common::model::Operation; #[derive(Debug, Default)] pub struct Queries { diff --git a/codegen/src/name_generator.rs b/codegen/src/common/name_generator.rs similarity index 82% rename from codegen/src/name_generator.rs rename to codegen/src/common/name_generator.rs index 52c8c8ce..df5fa9f8 100644 --- a/codegen/src/name_generator.rs +++ b/codegen/src/common/name_generator.rs @@ -33,6 +33,24 @@ impl WorktableNameGenerator { Self { name } } + pub fn get_dir_name(&self) -> String { + self.name.from_case(Case::Pascal).to_case(Case::Snake) + } + + pub fn get_update_query_lock_ident(snake_case_name: &String) -> Ident { + Ident::new( + format!("lock_update_{snake_case_name}").as_str(), + Span::mixed_site(), + ) + } + + pub fn get_update_in_place_query_lock_ident(snake_case_name: &String) -> Ident { + Ident::new( + format!("lock_update_in_place_{snake_case_name}").as_str(), + Span::mixed_site(), + ) + } + pub fn get_work_table_literal_name(&self) -> Literal { Literal::string(self.name.as_str()) } @@ -118,6 +136,14 @@ impl WorktableNameGenerator { ) } + pub fn get_version_const_ident(&self) -> Ident { + let upper_snake_case_name = self.name.from_case(Case::Pascal).to_case(Case::UpperSnake); + Ident::new( + format!("{}_VERSION", upper_snake_case_name.to_uppercase()).as_str(), + Span::mixed_site(), + ) + } + pub fn get_space_secondary_index_ident(&self) -> Ident { Ident::new( format!("{}SpaceSecondaryIndex", self.name).as_str(), diff --git a/codegen/src/worktable/parser/attribute.rs b/codegen/src/common/parser/attribute.rs similarity index 97% rename from codegen/src/worktable/parser/attribute.rs rename to codegen/src/common/parser/attribute.rs index 47b10d0c..0686da9b 100644 --- a/codegen/src/worktable/parser/attribute.rs +++ b/codegen/src/common/parser/attribute.rs @@ -1,7 +1,7 @@ use proc_macro2::TokenTree; use syn::spanned::Spanned as _; -use 
crate::worktable::parser::Parser; +use crate::common::parser::Parser; // TODO: Move this to separate attributes section because now it only parses persist. impl Parser { @@ -45,7 +45,7 @@ impl Parser { mod tests { use quote::quote; - use crate::worktable::Parser; + use crate::common::Parser; #[test] fn test_empty() { diff --git a/codegen/src/worktable/parser/columns.rs b/codegen/src/common/parser/columns.rs similarity index 98% rename from codegen/src/worktable/parser/columns.rs rename to codegen/src/common/parser/columns.rs index 8a7aeaa5..53af7f0d 100644 --- a/codegen/src/worktable/parser/columns.rs +++ b/codegen/src/common/parser/columns.rs @@ -1,8 +1,8 @@ use proc_macro2::{Delimiter, TokenTree}; use syn::spanned::Spanned as _; -use crate::worktable::Parser; -use crate::worktable::model::{Columns, GeneratorType, Row}; +use crate::common::Parser; +use crate::common::model::{Columns, GeneratorType, Row}; impl Parser { pub fn parse_columns(&mut self) -> syn::Result { @@ -132,7 +132,7 @@ mod tests { use quote::quote; - use crate::worktable::Parser; + use crate::common::Parser; #[test] fn test_columns_parse() { diff --git a/codegen/src/worktable/parser/config.rs b/codegen/src/common/parser/config.rs similarity index 98% rename from codegen/src/worktable/parser/config.rs rename to codegen/src/common/parser/config.rs index 1c831d34..8b64c89a 100644 --- a/codegen/src/worktable/parser/config.rs +++ b/codegen/src/common/parser/config.rs @@ -3,8 +3,8 @@ use std::str::FromStr; use proc_macro2::{Delimiter, TokenTree}; use syn::spanned::Spanned; -use crate::worktable::Parser; -use crate::worktable::model::Config; +use crate::common::Parser; +use crate::common::model::Config; const CONFIG_FIELD_NAME: &str = "config"; diff --git a/codegen/src/worktable/parser/index.rs b/codegen/src/common/parser/index.rs similarity index 97% rename from codegen/src/worktable/parser/index.rs rename to codegen/src/common/parser/index.rs index e40c9201..8bb85220 100644 --- 
a/codegen/src/worktable/parser/index.rs +++ b/codegen/src/common/parser/index.rs @@ -1,5 +1,5 @@ -use crate::worktable::Parser; -use crate::worktable::model::Index; +use crate::common::Parser; +use crate::common::model::Index; use indexmap::IndexMap; use proc_macro2::{Delimiter, Ident, TokenTree}; use syn::spanned::Spanned; diff --git a/codegen/src/worktable/parser/mod.rs b/codegen/src/common/parser/mod.rs similarity index 100% rename from codegen/src/worktable/parser/mod.rs rename to codegen/src/common/parser/mod.rs diff --git a/codegen/src/common/parser/name.rs b/codegen/src/common/parser/name.rs new file mode 100644 index 00000000..4d631338 --- /dev/null +++ b/codegen/src/common/parser/name.rs @@ -0,0 +1,159 @@ +use proc_macro2::Ident; +use proc_macro2::TokenTree; +use syn::spanned::Spanned as _; + +use crate::common::parser::Parser; + +impl Parser { + pub fn parse_name(&mut self) -> syn::Result { + let ident = self.input_iter.next().ok_or(syn::Error::new( + self.input.span(), + "Expected `name` field in declaration", + ))?; + if let TokenTree::Ident(ident) = ident { + if ident.to_string().as_str() != "name" { + return Err(syn::Error::new( + ident.span(), + "Expected `name` field. 
`WorkTable` name must be specified", + )); + } + } else { + return Err(syn::Error::new( + ident.span(), + "Expected field name identifier.", + )); + }; + + self.parse_colon()?; + + let name = self + .input_iter + .next() + .ok_or(syn::Error::new(self.input.span(), "Expected token."))?; + let name = if let TokenTree::Ident(name) = name { + name + } else { + return Err(syn::Error::new(name.span(), "Expected identifier.")); + }; + + self.try_parse_comma()?; + + Ok(name) + } + + pub fn parse_version(&mut self) -> syn::Result> { + if let Some(ident) = self.peek_next() + && ident.to_string().as_str() == "version" { + self.input_iter.next(); + + self.parse_colon()?; + + let value = self.input_iter.next().ok_or(syn::Error::new( + self.input.span(), + "Expected version value", + ))?; + let value = if let TokenTree::Literal(value) = value { + value + } else { + return Err(syn::Error::new(value.span(), "Expected literal for version.")); + }; + + self.try_parse_comma()?; + + let value_str = value.to_string().replace("_", ""); + let version = value_str.parse::().map_err(|_| { + syn::Error::new(value.span(), "Expected valid u32 number for version.") + })?; + + return Ok(Some(version)); + } + Ok(None) + } +} + +#[cfg(test)] +mod tests { + use quote::quote; + + use crate::common::Parser; + + #[test] + fn test_name_parse() { + let tokens = quote! {name: TestName,}; + + let mut parser = Parser::new(tokens); + let name = parser.parse_name(); + + assert!(name.is_ok()); + let name = name.unwrap(); + + assert_eq!(name, "TestName"); + } + + #[test] + fn test_empty() { + let tokens = quote! {}; + + let mut parser = Parser::new(tokens); + let name = parser.parse_name(); + + assert!(name.is_err()); + } + + #[test] + fn test_literal_field() { + let tokens = quote! {"nme": TestName,}; + + let mut parser = Parser::new(tokens); + let name = parser.parse_name(); + + assert!(name.is_err()); + } + + #[test] + fn test_wrong_field() { + let tokens = quote! 
{nme: TestName,}; + + let mut parser = Parser::new(tokens); + let name = parser.parse_name(); + + assert!(name.is_err()); + } + + #[test] + fn test_version_parse() { + let tokens = quote! {name: TestName, version: 2,}; + + let mut parser = Parser::new(tokens); + let name = parser.parse_name().unwrap(); + assert_eq!(name, "TestName"); + + let version = parser.parse_version().unwrap(); + assert_eq!(version, Some(2)); + } + + #[test] + fn test_version_default() { + let tokens = quote! {name: TestName,}; + + let mut parser = Parser::new(tokens); + let name = parser.parse_name().unwrap(); + assert_eq!(name, "TestName"); + + let version = parser.parse_version().unwrap(); + assert_eq!(version, None); + } + + #[test] + fn test_version_before_other_fields() { + let tokens = quote! {name: TestName, version: 5, columns: { id: u64 primary_key },}; + + let mut parser = Parser::new(tokens); + let name = parser.parse_name().unwrap(); + let version = parser.parse_version().unwrap(); + assert_eq!(version, Some(5)); + + let next = parser.peek_next().unwrap(); + assert_eq!(next.to_string(), "columns"); + } +} diff --git a/codegen/src/worktable/parser/punct.rs b/codegen/src/common/parser/punct.rs similarity index 53% rename from codegen/src/worktable/parser/punct.rs rename to codegen/src/common/parser/punct.rs index f04ac465..160956f1 100644 --- a/codegen/src/worktable/parser/punct.rs +++ b/codegen/src/common/parser/punct.rs @@ -1,7 +1,7 @@ use proc_macro2::TokenTree; use syn::spanned::Spanned; -use crate::worktable::parser::Parser; +use crate::common::parser::Parser; impl Parser { /// Parses ':' from [`proc_macro2::TokenStream`]. @@ -37,6 +37,38 @@ impl Parser { Ok(()) } + + /// Parses '=>' from token stream. 
+ pub fn parse_fat_arrow(&mut self) -> syn::Result<()> { + let iter = &mut self.input_iter; + + let first = iter + .next() + .ok_or(syn::Error::new(self.input.span(), "Expected token."))?; + if let TokenTree::Punct(p) = first { + if p.as_char() == '=' { + // next should be '>' + let second = iter + .next() + .ok_or(syn::Error::new(self.input.span(), "Expected '>' after '='"))?; + if let TokenTree::Punct(p2) = second { + if p2.as_char() == '>' { + return Ok(()); + } + return Err(syn::Error::new( + p2.span(), + format!("Expected '>' found: '{}'", p2.as_char()), + )); + } + return Err(syn::Error::new(second.span(), "Expected '>'")); + } + return Err(syn::Error::new( + p.span(), + format!("Expected '=' found: '{}'", p.as_char()), + )); + } + Err(syn::Error::new(first.span(), "Expected '=>'")) + } } fn comma(tt: &TokenTree) -> syn::Result<()> { diff --git a/codegen/src/worktable/parser/queries/delete.rs b/codegen/src/common/parser/queries/delete.rs similarity index 94% rename from codegen/src/worktable/parser/queries/delete.rs rename to codegen/src/common/parser/queries/delete.rs index 59252478..168de96a 100644 --- a/codegen/src/worktable/parser/queries/delete.rs +++ b/codegen/src/common/parser/queries/delete.rs @@ -3,8 +3,8 @@ use std::collections::HashMap; use proc_macro2::{Ident, TokenTree}; use syn::spanned::Spanned; -use crate::worktable::Parser; -use crate::worktable::model::Operation; +use crate::common::Parser; +use crate::common::model::Operation; impl Parser { pub fn parse_deletes(&mut self) -> syn::Result> { @@ -46,7 +46,7 @@ mod tests { use proc_macro2::{Ident, Span}; use quote::quote; - use crate::worktable::Parser; + use crate::common::Parser; #[test] fn test_update() { diff --git a/codegen/src/worktable/parser/queries/in_place.rs b/codegen/src/common/parser/queries/in_place.rs similarity index 94% rename from codegen/src/worktable/parser/queries/in_place.rs rename to codegen/src/common/parser/queries/in_place.rs index cc928068..5f1fa773 100644 --- 
a/codegen/src/worktable/parser/queries/in_place.rs +++ b/codegen/src/common/parser/queries/in_place.rs @@ -3,8 +3,8 @@ use std::collections::HashMap; use proc_macro2::{Ident, TokenTree}; use syn::spanned::Spanned; -use crate::worktable::Parser; -use crate::worktable::model::Operation; +use crate::common::Parser; +use crate::common::model::Operation; impl Parser { pub fn parse_in_place(&mut self) -> syn::Result> { @@ -46,7 +46,7 @@ mod tests { use proc_macro2::{Ident, Span}; use quote::quote; - use crate::worktable::Parser; + use crate::common::Parser; #[test] fn test_update() { diff --git a/codegen/src/worktable/parser/queries/mod.rs b/codegen/src/common/parser/queries/mod.rs similarity index 96% rename from codegen/src/worktable/parser/queries/mod.rs rename to codegen/src/common/parser/queries/mod.rs index 824c225d..bad8a328 100644 --- a/codegen/src/worktable/parser/queries/mod.rs +++ b/codegen/src/common/parser/queries/mod.rs @@ -7,8 +7,8 @@ mod update; use proc_macro2::TokenTree; use syn::spanned::Spanned; -use crate::worktable::Parser; -use crate::worktable::model::Queries; +use crate::common::Parser; +use crate::common::model::Queries; impl Parser { pub fn parse_queries(&mut self) -> syn::Result { diff --git a/codegen/src/worktable/parser/queries/operation.rs b/codegen/src/common/parser/queries/operation.rs similarity index 96% rename from codegen/src/worktable/parser/queries/operation.rs rename to codegen/src/common/parser/queries/operation.rs index 142ba796..a75efb22 100644 --- a/codegen/src/worktable/parser/queries/operation.rs +++ b/codegen/src/common/parser/queries/operation.rs @@ -2,8 +2,8 @@ use proc_macro2::{Ident, TokenTree}; use std::collections::HashMap; use syn::spanned::Spanned; -use crate::worktable::model::Operation; -use crate::worktable::parser::Parser; +use crate::common::model::Operation; +use crate::common::parser::Parser; impl Parser { pub fn parse_operations(&mut self) -> syn::Result> { @@ -105,7 +105,7 @@ impl Parser { mod tests { use 
quote::quote; - use crate::worktable::parser::Parser; + use crate::common::parser::Parser; #[test] fn test_operation() { diff --git a/codegen/src/worktable/parser/queries/select.rs b/codegen/src/common/parser/queries/select.rs similarity index 94% rename from codegen/src/worktable/parser/queries/select.rs rename to codegen/src/common/parser/queries/select.rs index 60fb3d8d..f52dfdfd 100644 --- a/codegen/src/worktable/parser/queries/select.rs +++ b/codegen/src/common/parser/queries/select.rs @@ -3,8 +3,8 @@ use std::collections::HashMap; use proc_macro2::{Ident, TokenTree}; use syn::spanned::Spanned; -use crate::worktable::Parser; -use crate::worktable::model::Operation; +use crate::common::Parser; +use crate::common::model::Operation; impl Parser { pub fn _parse_selects(&mut self) -> syn::Result> { @@ -46,7 +46,7 @@ mod tests { use proc_macro2::{Ident, Span}; use quote::quote; - use crate::worktable::Parser; + use crate::common::Parser; #[test] fn test_update() { diff --git a/codegen/src/worktable/parser/queries/update.rs b/codegen/src/common/parser/queries/update.rs similarity index 95% rename from codegen/src/worktable/parser/queries/update.rs rename to codegen/src/common/parser/queries/update.rs index 7b0a7696..af6ec283 100644 --- a/codegen/src/worktable/parser/queries/update.rs +++ b/codegen/src/common/parser/queries/update.rs @@ -3,8 +3,8 @@ use std::collections::HashMap; use proc_macro2::{Ident, TokenTree}; use syn::spanned::Spanned; -use crate::worktable::Parser; -use crate::worktable::model::Operation; +use crate::common::Parser; +use crate::common::model::Operation; impl Parser { pub fn parse_updates(&mut self) -> syn::Result> { @@ -48,7 +48,7 @@ mod tests { use proc_macro2::{Ident, Span}; use quote::quote; - use crate::worktable::Parser; + use crate::common::Parser; #[test] fn test_update() { diff --git a/codegen/src/worktable/generator/index/cdc.rs b/codegen/src/generators/in_memory/index/cdc.rs similarity index 98% rename from 
codegen/src/worktable/generator/index/cdc.rs rename to codegen/src/generators/in_memory/index/cdc.rs index 21783339..e590b307 100644 --- a/codegen/src/worktable/generator/index/cdc.rs +++ b/codegen/src/generators/in_memory/index/cdc.rs @@ -1,11 +1,11 @@ -use crate::name_generator::{WorktableNameGenerator, is_float}; -use crate::worktable::generator::Generator; -use crate::worktable::generator::queries::r#type::map_to_uppercase; +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::in_memory::InMemoryGenerator; +use crate::generators::in_memory::queries::map_to_uppercase; use convert_case::{Case, Casing}; use proc_macro2::{Ident, Literal, Span, TokenStream}; use quote::quote; -impl Generator { +impl InMemoryGenerator { pub fn gen_secondary_index_cdc_impl_def(&mut self) -> TokenStream { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); let index_type_ident = name_generator.get_index_type_ident(); diff --git a/codegen/src/generators/in_memory/index/info.rs b/codegen/src/generators/in_memory/index/info.rs new file mode 100644 index 00000000..eda1a254 --- /dev/null +++ b/codegen/src/generators/in_memory/index/info.rs @@ -0,0 +1,91 @@ +use proc_macro2::TokenStream; +use quote::quote; + +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; + +impl InMemoryGenerator { + pub fn gen_secondary_index_info_impl_def(&mut self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let index_type_ident = name_generator.get_index_type_ident(); + + let info_fn = self.gen_index_info_fn(); + let is_empty_fn = self.gen_index_is_empty_fn(); + + quote! 
{ + impl TableSecondaryIndexInfo for #index_type_ident { + #info_fn + #is_empty_fn + } + } + } + + fn gen_index_info_fn(&self) -> TokenStream { + let rows = self.columns.indexes.values().map(|idx| { + let index_field_name = &idx.name; + let index_name_str = index_field_name.to_string(); + + if idx.is_unique { + quote! { + info.push(IndexInfo { + name: #index_name_str.to_string(), + index_type: IndexKind::Unique, + key_count: self.#index_field_name.len(), + capacity: self.#index_field_name.capacity(), + heap_size: self.#index_field_name.heap_size(), + used_size: self.#index_field_name.used_size(), + node_count: self.#index_field_name.node_count(), + }); + } + } else { + quote! { + info.push(IndexInfo { + name: #index_name_str.to_string(), + index_type: IndexKind::NonUnique, + key_count: self.#index_field_name.len(), + capacity: self.#index_field_name.capacity(), + heap_size: self.#index_field_name.heap_size(), + used_size: self.#index_field_name.used_size(), + node_count: self.#index_field_name.node_count(), + }); + } + } + }); + + quote! { + fn index_info(&self) -> Vec { + let mut info = Vec::new(); + #(#rows)* + info + } + } + } + + fn gen_index_is_empty_fn(&self) -> TokenStream { + let is_empty = self + .columns + .indexes + .values() + .map(|idx| { + let index_field_name = &idx.name; + quote! { + self.#index_field_name.len() == 0 + } + }) + .collect::>(); + + if is_empty.is_empty() { + quote! { + fn is_empty(&self) -> bool { + true + } + } + } else { + quote! 
{ + fn is_empty(&self) -> bool { + #(#is_empty) &&* + } + } + } + } +} diff --git a/codegen/src/worktable/generator/index/mod.rs b/codegen/src/generators/in_memory/index/mod.rs similarity index 92% rename from codegen/src/worktable/generator/index/mod.rs rename to codegen/src/generators/in_memory/index/mod.rs index af30d1aa..4ecd15bf 100644 --- a/codegen/src/worktable/generator/index/mod.rs +++ b/codegen/src/generators/in_memory/index/mod.rs @@ -2,19 +2,19 @@ mod cdc; mod info; mod usual; -use crate::name_generator::{WorktableNameGenerator, is_float, is_unsized}; -use crate::worktable::generator::Generator; +use crate::common::name_generator::{WorktableNameGenerator, is_float, is_unsized}; +use crate::generators::in_memory::InMemoryGenerator; use convert_case::{Case, Casing}; use proc_macro2::TokenStream; use quote::quote; -impl Generator { +impl InMemoryGenerator { /// Generates index type and it's impls. pub fn gen_index_def(&mut self) -> syn::Result { let type_def = self.gen_type_def()?; let impl_def = self.gen_secondary_index_impl_def(); let info_def = self.gen_secondary_index_info_impl_def(); - let cdc_impl_def = if self.is_persist { + let cdc_impl_def = if false { self.gen_secondary_index_cdc_impl_def() } else { quote! {} @@ -76,9 +76,16 @@ impl Generator { }) .collect::, syn::Error>>()?; - let derive = if self.is_persist { - quote! { - #[derive(Debug, MemStat, PersistIndex)] + let derive = if false { + if false { + quote! { + #[derive(Debug, MemStat, PersistIndex)] + #[index(read_only)] + } + } else { + quote! { + #[derive(Debug, MemStat, PersistIndex)] + } } } else { quote! 
{ diff --git a/codegen/src/worktable/generator/index/usual.rs b/codegen/src/generators/in_memory/index/usual.rs similarity index 98% rename from codegen/src/worktable/generator/index/usual.rs rename to codegen/src/generators/in_memory/index/usual.rs index 6989b311..8ffdddc9 100644 --- a/codegen/src/worktable/generator/index/usual.rs +++ b/codegen/src/generators/in_memory/index/usual.rs @@ -1,11 +1,11 @@ -use crate::name_generator::{WorktableNameGenerator, is_float}; -use crate::worktable::generator::Generator; -use crate::worktable::generator::queries::r#type::map_to_uppercase; +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::in_memory::InMemoryGenerator; +use crate::generators::in_memory::queries::map_to_uppercase; use convert_case::{Case, Casing}; use proc_macro2::{Ident, Literal, Span, TokenStream}; use quote::quote; -impl Generator { +impl InMemoryGenerator { /// Generates implementation of `TableSecondaryIndex` trait for index. pub fn gen_secondary_index_impl_def(&mut self) -> TokenStream { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); diff --git a/codegen/src/generators/in_memory/locks.rs b/codegen/src/generators/in_memory/locks.rs new file mode 100644 index 00000000..41422774 --- /dev/null +++ b/codegen/src/generators/in_memory/locks.rs @@ -0,0 +1,193 @@ +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +impl InMemoryGenerator { + pub fn gen_locks_def(&self) -> TokenStream { + let type_ = self.gen_locks_type(); + let impl_ = self.gen_locks_impl(); + + quote! 
{ + #type_ + #impl_ + } + } + + fn gen_locks_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let lock_ident = name_generator.get_lock_type_ident(); + + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|i| { + let name = Ident::new(format!("{i}_lock").as_str(), Span::mixed_site()); + quote! { #name: Option>, } + }) + .collect(); + + quote! { + #[derive(Debug, Clone)] + pub struct #lock_ident { + #(#rows)* + } + } + } + + fn gen_locks_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let lock_ident = name_generator.get_lock_type_ident(); + + let new_fn = self.gen_new_fn(); + let row_impl = self.gen_lock_row_impl(); + + quote! { + impl #lock_ident { + #new_fn + } + + #row_impl + } + } + + fn gen_lock_row_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let lock_ident = name_generator.get_lock_type_ident(); + + let is_locked_fn = self.gen_is_locked_fn(); + let with_lock_fn = self.gen_with_lock_fn(); + let lock_fn = self.gen_lock_fn(); + let merge_fn = self.gen_merge_fn(); + + quote! { + impl RowLock for #lock_ident { + #is_locked_fn + #lock_fn + #with_lock_fn + #merge_fn + } + } + } + + fn gen_is_locked_fn(&self) -> TokenStream { + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|i| { + let col = Ident::new(format!("{i}_lock").as_str(), Span::mixed_site()); + quote! { self.#col.as_ref().map(|l| l.is_locked()).unwrap_or(false) } + }) + .collect(); + + quote! { + fn is_locked(&self) -> bool { + #(#rows) ||* + } + } + } + + fn gen_new_fn(&self) -> TokenStream { + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|i| { + let col = Ident::new(format!("{i}_lock").as_str(), Span::mixed_site()); + quote! { #col: None } + }) + .collect(); + + quote! 
{ + pub fn new() -> Self { + Self { + #(#rows),* + } + } + } + } + + fn gen_with_lock_fn(&self) -> TokenStream { + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|i| { + let col = Ident::new(format!("{i}_lock").as_str(), Span::mixed_site()); + quote! { #col: Some(lock.clone()) } + }) + .collect(); + + quote! { + fn with_lock(id: u16) -> (Self, std::sync::Arc) { + let lock = std::sync::Arc::new(Lock::new(id)); + ( + Self { + #(#rows),* + }, + lock + ) + } + } + } + + fn gen_lock_fn(&self) -> TokenStream { + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|i| { + let col = Ident::new(format!("{i}_lock").as_str(), Span::mixed_site()); + quote! { + if let Some(lock) = &self.#col { + set.insert(lock.clone()); + } + self.#col = Some(lock.clone()); + } + }) + .collect(); + + quote! { + #[allow(clippy::mutable_key_type)] + fn lock(&mut self, id: u16) -> (std::collections::HashSet>, std::sync::Arc) { + let mut set = std::collections::HashSet::new(); + let lock = std::sync::Arc::new(Lock::new(id)); + #(#rows)* + + (set, lock) + } + } + } + + fn gen_merge_fn(&self) -> TokenStream { + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|col| { + let col = Ident::new(format!("{col}_lock").as_str(), Span::mixed_site()); + quote! { + if let Some(#col) = &other.#col { + if self.#col.is_none() { + self.#col = Some(#col.clone()); + } else { + set.insert(#col.clone()); + } + } + other.#col = self.#col.clone(); + } + }) + .collect(); + + quote! 
{ + #[allow(clippy::mutable_key_type)] + fn merge(&mut self, other: &mut Self) -> std::collections::HashSet> { + let mut set = std::collections::HashSet::new(); + #(#rows)* + set + } + } + } +} diff --git a/codegen/src/generators/in_memory/mod.rs b/codegen/src/generators/in_memory/mod.rs new file mode 100644 index 00000000..07c37795 --- /dev/null +++ b/codegen/src/generators/in_memory/mod.rs @@ -0,0 +1,130 @@ +mod locks; +mod primary_key; +pub mod queries; +mod row; +mod table; +mod index; +mod wrapper; + +use proc_macro2::{Ident, TokenStream}; +use quote::quote; + +use crate::common::model::{Columns, Config, PrimaryKey, Queries}; + +pub struct InMemoryGenerator { + pub name: Ident, + pub pk: Option, + pub queries: Option, + pub config: Option, + pub columns: Columns, +} + +impl InMemoryGenerator { + pub fn new(name: Ident, columns: Columns) -> Self { + Self { + name, + pk: None, + queries: None, + config: None, + columns, + } + } + + pub fn set_queries(&mut self, queries: Queries) { + self.queries = Some(queries); + } + + pub fn set_config(&mut self, config: Config) { + self.config = Some(config); + } +} + +#[allow(dead_code)] +pub fn expand(input: TokenStream) -> syn::Result { + let mut parser = crate::common::parser::Parser::new(input); + let mut columns = None; + let mut queries = None; + let mut indexes = None; + let mut config = None; + + let name = parser.parse_name()?; + while let Some(ident) = parser.peek_next() { + match ident.to_string().as_str() { + "columns" => { + let res = parser.parse_columns()?; + columns = Some(res) + } + "indexes" => { + let res = parser.parse_indexes()?; + indexes = Some(res); + } + "queries" => { + let res = parser.parse_queries()?; + queries = Some(res) + } + "config" => { + let res = parser.parse_configs()?; + config = Some(res) + } + "persist" => { + // Skip persist flag for in_memory - it's always false + parser.parse_persist()?; + } + _ => return Err(syn::Error::new(ident.span(), "Unexpected identifier")), + } + } + + let 
mut columns = columns.expect("defined"); + if let Some(i) = indexes { + columns.indexes = i + } + + expand_from_parsed(name, columns, queries, config) +} + +pub fn expand_from_parsed( + name: proc_macro2::Ident, + columns: crate::common::model::Columns, + queries: Option, + config: Option, +) -> syn::Result { + let mut generator = InMemoryGenerator::new(name, columns); + if let Some(q) = queries { + generator.set_queries(q); + } + if let Some(c) = config { + generator.set_config(c); + } + + let pk_def = generator.gen_primary_key_def()?; + let row_def = generator.gen_row_def(); + let wrapper_def = generator.gen_wrapper_def(); + let locks_def = generator.gen_locks_def(); + let index_def = generator.gen_index_def()?; + let table_def = generator.gen_table_def()?; + let query_types_def = generator.gen_result_types_def()?; + let query_available_def = generator.gen_available_types_def()?; + let query_locks_impls = generator.gen_query_locks_impl()?; + let select_impls = generator.gen_query_select_impl()?; + let update_impls = generator.gen_query_update_impl()?; + let update_in_place_impls = generator.gen_query_in_place_impl()?; + let delete_impls = generator.gen_query_delete_impl()?; + let unsized_impl = generator.gen_unsized_impls(); + + Ok(quote! 
{ + #pk_def + #row_def + #query_available_def + #wrapper_def + #locks_def + #index_def + #table_def + #query_types_def + #query_locks_impls + #select_impls + #update_impls + #update_in_place_impls + #delete_impls + #unsized_impl + }) +} \ No newline at end of file diff --git a/codegen/src/worktable/generator/primary_key.rs b/codegen/src/generators/in_memory/primary_key.rs similarity index 95% rename from codegen/src/worktable/generator/primary_key.rs rename to codegen/src/generators/in_memory/primary_key.rs index 8398c8a0..cc982762 100644 --- a/codegen/src/worktable/generator/primary_key.rs +++ b/codegen/src/generators/in_memory/primary_key.rs @@ -1,13 +1,13 @@ use std::collections::HashMap; -use crate::name_generator::{WorktableNameGenerator, is_unsized_vec}; -use crate::worktable::generator::Generator; -use crate::worktable::model::{GeneratorType, PrimaryKey}; +use crate::common::name_generator::{WorktableNameGenerator, is_unsized_vec}; +use crate::generators::in_memory::InMemoryGenerator; +use crate::common::model::{GeneratorType, PrimaryKey}; use proc_macro2::{Ident, TokenStream}; use quote::quote; -impl Generator { +impl InMemoryGenerator { /// Generates primary key type and it's impls. 
pub fn gen_primary_key_def(&mut self) -> syn::Result { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); diff --git a/codegen/src/worktable/generator/queries/delete.rs b/codegen/src/generators/in_memory/queries/delete.rs similarity index 97% rename from codegen/src/worktable/generator/queries/delete.rs rename to codegen/src/generators/in_memory/queries/delete.rs index 66c72b24..b7337459 100644 --- a/codegen/src/worktable/generator/queries/delete.rs +++ b/codegen/src/generators/in_memory/queries/delete.rs @@ -4,11 +4,11 @@ use convert_case::{Case, Casing}; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -use crate::name_generator::{WorktableNameGenerator, is_float}; -use crate::worktable::generator::Generator; -use crate::worktable::model::Operation; +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::in_memory::InMemoryGenerator; +use crate::common::model::Operation; -impl Generator { +impl InMemoryGenerator { pub fn gen_query_delete_impl(&mut self) -> syn::Result { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); let table_ident = name_generator.get_work_table_ident(); @@ -79,7 +79,7 @@ impl Generator { let pk_ident = name_generator.get_primary_key_type_ident(); let secondary_events_ident = name_generator.get_space_secondary_index_events_ident(); - let process = if self.is_persist { + let process = if false { quote! 
{ let (secondary_keys_events, res) = self.0.indexes.delete_row_cdc(row, link); res?; diff --git a/codegen/src/worktable/generator/queries/in_place.rs b/codegen/src/generators/in_memory/queries/in_place.rs similarity index 96% rename from codegen/src/worktable/generator/queries/in_place.rs rename to codegen/src/generators/in_memory/queries/in_place.rs index a3aa6ea9..bc812963 100644 --- a/codegen/src/worktable/generator/queries/in_place.rs +++ b/codegen/src/generators/in_memory/queries/in_place.rs @@ -1,12 +1,12 @@ -use crate::name_generator::WorktableNameGenerator; -use crate::worktable::generator::Generator; -use crate::worktable::model::Operation; +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; +use crate::common::model::Operation; use convert_case::{Case, Casing}; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; use std::collections::HashMap; -impl Generator { +impl InMemoryGenerator { pub fn gen_query_in_place_impl(&self) -> syn::Result { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); let table_ident = name_generator.get_work_table_ident(); diff --git a/codegen/src/worktable/generator/queries/locks.rs b/codegen/src/generators/in_memory/queries/locks.rs similarity index 91% rename from codegen/src/worktable/generator/queries/locks.rs rename to codegen/src/generators/in_memory/queries/locks.rs index 7e80f548..f9015da4 100644 --- a/codegen/src/worktable/generator/queries/locks.rs +++ b/codegen/src/generators/in_memory/queries/locks.rs @@ -4,27 +4,11 @@ use convert_case::{Case, Casing}; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -use crate::name_generator::WorktableNameGenerator; -use crate::worktable::generator::Generator; -use crate::worktable::model::Operation; - -impl WorktableNameGenerator { - pub fn get_update_query_lock_ident(snake_case_name: &String) -> Ident { - Ident::new( - 
format!("lock_update_{snake_case_name}").as_str(), - Span::mixed_site(), - ) - } - - pub fn get_update_in_place_query_lock_ident(snake_case_name: &String) -> Ident { - Ident::new( - format!("lock_update_in_place_{snake_case_name}").as_str(), - Span::mixed_site(), - ) - } -} +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; +use crate::common::model::Operation; -impl Generator { +impl InMemoryGenerator { pub fn gen_query_locks_impl(&mut self) -> syn::Result { if let Some(q) = &self.queries { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); diff --git a/codegen/src/generators/in_memory/queries/mod.rs b/codegen/src/generators/in_memory/queries/mod.rs new file mode 100644 index 00000000..41d89634 --- /dev/null +++ b/codegen/src/generators/in_memory/queries/mod.rs @@ -0,0 +1,9 @@ +mod delete; +mod in_place; +mod locks; +mod select; +pub mod r#type; +mod unsized_; +mod update; + +pub use r#type::map_to_uppercase; diff --git a/codegen/src/generators/in_memory/queries/select.rs b/codegen/src/generators/in_memory/queries/select.rs new file mode 100644 index 00000000..a86ddea1 --- /dev/null +++ b/codegen/src/generators/in_memory/queries/select.rs @@ -0,0 +1,40 @@ +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; +use proc_macro2::TokenStream; +use quote::quote; + +impl InMemoryGenerator { + pub fn gen_query_select_impl(&mut self) -> syn::Result { + let select_all = self.gen_select_all(); + + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let table_ident = name_generator.get_work_table_ident(); + + Ok(quote! 
{ + impl #table_ident { + #select_all + } + }) + } + + fn gen_select_all(&mut self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_ident = name_generator.get_row_type_ident(); + let column_range_type = name_generator.get_column_range_type_ident(); + let row_fields_ident = name_generator.get_row_fields_enum_ident(); + + quote! { + pub fn select_all(&self) -> SelectQueryBuilder<#row_ident, + impl DoubleEndedIterator + '_ + Sized, + #column_range_type, + #row_fields_ident> + { + let iter = self.0.primary_index.pk_map + .iter() + .filter_map(|(_, link)| self.0.data.select_non_ghosted(link.0).ok()); + + SelectQueryBuilder::new(iter) + } + } + } +} diff --git a/codegen/src/generators/in_memory/queries/type.rs b/codegen/src/generators/in_memory/queries/type.rs new file mode 100644 index 00000000..7b516fa1 --- /dev/null +++ b/codegen/src/generators/in_memory/queries/type.rs @@ -0,0 +1,173 @@ +use std::collections::HashSet; + +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; + +pub fn map_to_uppercase(str: &str) -> String { + if str.contains("OrderedFloat") { + let mut split = str.split("<"); + let _ = split.next(); + let inner_type = split + .next() + .expect("OrderedFloat def contains inner type") + .replace(">", ""); + format!("Ordered{}", inner_type.to_uppercase().trim()) + } else if str.contains("Option") { + let mut split = str.split("<"); + let _ = split.next(); + let inner_type = split + .next() + .expect("Option def contains inner type") + .replace(">", ""); + format!("Option{}", inner_type.to_uppercase().trim()) + } else { + str.to_uppercase() + } +} + +impl InMemoryGenerator { + pub fn gen_available_types_def(&mut self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = 
name_generator.get_available_type_ident(); + + let unique_types: HashSet = self + .columns + .indexes + .iter() + .filter_map(|(_, idx)| self.columns.columns_map.get(&idx.field)) + .map(|ty| ty.to_string()) + .collect(); + + let rows: Vec<_> = unique_types + .iter() + .map(|s| { + let type_ident: TokenStream = s + .to_string() + .parse() + .expect("should be valid because parsed from declaration"); + let type_upper = map_to_uppercase(s); + let type_upper = Ident::new(type_upper.as_str(), Span::mixed_site()); + Some(quote! { + #[from] + #type_upper(#type_ident), + }) + }) + .collect(); + + if !rows.is_empty() { + Ok(quote! { + #[derive(Clone, Debug, From, PartialEq)] + #[non_exhaustive] + pub enum #avt_type_ident { + #(#rows)* + } + }) + } else { + Ok(quote! { + type #avt_type_ident = (); + }) + } + } + + pub fn gen_result_types_def(&mut self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_ident = name_generator.get_row_type_ident(); + + if let Some(queries) = &self.queries { + let query_defs = queries + .updates + .keys() + .map(|v| { + let ident = Ident::new(format!("{v}Query").as_str(), Span::mixed_site()); + let (rows, updates): (Vec<_>, Vec<_>) = queries + .updates + .get(v) + .expect("exists") + .columns + .iter() + .map(|i| { + let type_ = self + .columns + .columns_map + .get(i) + .ok_or(syn::Error::new(i.span(), "Unexpected column name"))?; + + let def = if type_.to_string().contains("OrderedFloat") { + let inner_type = type_.to_string(); + let mut split = inner_type.split("<"); + let _ = split.next(); + let inner_type = split + .next() + .expect("OrderedFloat def contains inner type") + .to_uppercase() + .replace(">", ""); + let ident = Ident::new( + format!("Ordered{}Def", inner_type.trim()).as_str(), + Span::call_site(), + ); + quote! { + #[rkyv(with = #ident)] + pub #i: #type_, + } + } else { + quote! {pub #i: #type_,} + }; + + let update = quote! 
{ + row.#i = self.#i; + }; + + Ok::<_, syn::Error>((def, update)) + }) + .collect::, _>>()? + .into_iter() + .unzip(); + + Ok::<_, syn::Error>(quote! { + + #[derive(rkyv::Archive, Debug, rkyv::Deserialize, Clone, rkyv::Serialize)] + #[repr(C)] + pub struct #ident { + #(#rows)* + } + + impl Query<#row_ident> for #ident { + fn merge(self, mut row: #row_ident) -> #row_ident { + #(#updates)* + + row + } + } + }) + }) + .collect::, _>>()?; + + let by_defs = queries + .updates + .values() + .map(|op| { + let ident = Ident::new(format!("{}By", &op.name).as_str(), Span::mixed_site()); + let field_type = self + .columns + .columns_map + .get(&op.by) + .ok_or(syn::Error::new(op.by.span(), "Unexpected column name"))?; + + Ok::<_, syn::Error>(quote! { + pub type #ident = #field_type; + }) + }) + .collect::, _>>()?; + + Ok(quote! { + #(#query_defs)* + #(#by_defs)* + }) + } else { + Ok(quote! {}) + } + } +} diff --git a/codegen/src/generators/in_memory/queries/unsized_.rs b/codegen/src/generators/in_memory/queries/unsized_.rs new file mode 100644 index 00000000..513898f5 --- /dev/null +++ b/codegen/src/generators/in_memory/queries/unsized_.rs @@ -0,0 +1,100 @@ +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; + +impl InMemoryGenerator { + pub fn gen_unsized_impls(&self) -> TokenStream { + if self.columns.is_sized { + quote! {} + } else { + let unsized_field_len_fns = self.gen_get_unsized_field_len_wt_fn(); + let unsized_field_len_query_fns = self.gen_get_unsized_field_len_query_fn(); + quote! 
{ + #unsized_field_len_fns + #unsized_field_len_query_fns + } + } + } + + fn gen_get_unsized_field_len_wt_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let table_ident = name_generator.get_work_table_ident(); + + let unsized_fields: Vec<_> = self + .columns + .columns_map + .iter() + .filter_map(|(k, v)| { + if v.to_string() == "String" { + Some(k) + } else { + None + } + }) + .map(|f| { + let fn_ident = Ident::new(format!("get_{f}_size").as_str(), Span::call_site()); + quote! { + fn #fn_ident(&self, link: Link) -> core::result::Result { + self.0.data + .with_ref(link, |row_ref| row_ref.inner.#f.as_str().to_string().aligned_size()) + .map_err(WorkTableError::PagesError) + } + } + }) + .collect(); + + quote! { + impl #table_ident { + #(#unsized_fields)* + } + } + } + + fn gen_get_unsized_field_len_query_fn(&self) -> TokenStream { + if let Some(q) = &self.queries { + let query_impls: Vec<_> = q + .updates + .iter() + .filter(|(_, op)| { + op.columns + .iter() + .any(|c| self.columns.columns_map.get(c).unwrap().to_string() == "String") + }) + .map(|(i, op)| { + let archived_ident = + Ident::new(format!("Archived{i}Query").as_str(), Span::call_site()); + let unsized_fields: Vec<_> = op + .columns + .iter() + .filter(|c| { + self.columns.columns_map.get(c).unwrap().to_string() == "String" + }) + .map(|c| { + let fn_ident = + Ident::new(format!("get_{c}_size").as_str(), Span::call_site()); + quote! { + pub fn #fn_ident(&self) -> usize { + self.#c.as_str().to_string().aligned_size() + } + } + }) + .collect(); + + quote! { + impl #archived_ident { + #(#unsized_fields)* + } + } + }) + .collect(); + + quote! { + #(#query_impls)* + } + } else { + quote! 
{} + } + } +} diff --git a/codegen/src/worktable/generator/queries/update.rs b/codegen/src/generators/in_memory/queries/update.rs similarity index 98% rename from codegen/src/worktable/generator/queries/update.rs rename to codegen/src/generators/in_memory/queries/update.rs index 50c2953a..adf9d0a9 100644 --- a/codegen/src/worktable/generator/queries/update.rs +++ b/codegen/src/generators/in_memory/queries/update.rs @@ -1,14 +1,14 @@ use proc_macro2::Literal; use std::collections::HashMap; -use crate::name_generator::{WorktableNameGenerator, is_float}; -use crate::worktable::generator::Generator; -use crate::worktable::model::Operation; +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::in_memory::InMemoryGenerator; +use crate::common::model::Operation; use convert_case::{Case, Casing}; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -impl Generator { +impl InMemoryGenerator { pub fn gen_query_update_impl(&mut self) -> syn::Result { let custom_updates = if let Some(q) = &self.queries { let custom_updates = self.gen_custom_updates(q.updates.clone()); @@ -218,7 +218,7 @@ impl Generator { } fn gen_persist_call(&self) -> TokenStream { - if self.is_persist { + if false { quote! { if let Operation::Update(op) = &mut op { op.bytes = self.0.data.select_raw(link)?; @@ -287,7 +287,7 @@ impl Generator { let secondary_events_ident = name_generator.get_space_secondary_index_events_ident(); let primary_key_ident = name_generator.get_primary_key_type_ident(); - if self.is_persist { + if false { quote! { let mut op: Operation< <<#primary_key_ident as TablePrimaryKey>::Generator as PrimaryKeyGeneratorState>::State, @@ -350,7 +350,7 @@ impl Generator { vec![] }; - let process_difference = if self.is_persist { + let process_difference = if false { let secondary_events_ident = name_generator.get_space_secondary_index_events_ident(); if idx_idents.is_some() { quote! 
{ @@ -422,7 +422,7 @@ impl Generator { } fn gen_process_diffs_remove_on_index(&self, idx_idents: Option<&Vec>) -> TokenStream { - let process_difference = if self.is_persist { + let process_difference = if false { if idx_idents.is_some() { quote! { let (secondary_keys_events_remove, res) = self.0.indexes.process_difference_remove_cdc(link, diffs); diff --git a/codegen/src/worktable/generator/row.rs b/codegen/src/generators/in_memory/row.rs similarity index 97% rename from codegen/src/worktable/generator/row.rs rename to codegen/src/generators/in_memory/row.rs index 5d353687..4d99e29b 100644 --- a/codegen/src/worktable/generator/row.rs +++ b/codegen/src/generators/in_memory/row.rs @@ -1,10 +1,10 @@ -use crate::name_generator::WorktableNameGenerator; -use crate::worktable::generator::Generator; +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; use convert_case::{Case, Casing}; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -impl Generator { +impl InMemoryGenerator { /// Generates row type and it's impls. 
pub fn gen_row_def(&mut self) -> TokenStream { let def = self.gen_row_type(); diff --git a/codegen/src/generators/in_memory/table/impls.rs b/codegen/src/generators/in_memory/table/impls.rs new file mode 100644 index 00000000..526220aa --- /dev/null +++ b/codegen/src/generators/in_memory/table/impls.rs @@ -0,0 +1,253 @@ +use proc_macro2::TokenStream; +use quote::quote; + +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; +use crate::common::model::GeneratorType; + +impl InMemoryGenerator { + pub fn gen_table_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_work_table_ident(); + + let persisted_impl = self.gen_table_new_fn(); + let name_fn = self.gen_table_name_fn(); + let select_fn = self.gen_table_select_fn(); + let insert_fn = self.gen_table_insert_fn(); + let reinsert_fn = self.gen_table_reinsert_fn(); + let upsert_fn = self.gen_table_upsert_fn(); + let get_next_fn = self.gen_table_get_next_fn(); + let iter_with_fn = self.gen_table_iter_with_fn(); + let iter_with_async_fn = self.gen_table_iter_with_async_fn(); + let count_fn = self.gen_table_count_fn(); + let system_info_fn = self.gen_system_info_fn(); + let vacuum_fn = self.gen_table_vacuum_fn(); + + quote! { + #persisted_impl + impl #ident { + #name_fn + #select_fn + #insert_fn + #reinsert_fn + #upsert_fn + #count_fn + #get_next_fn + #iter_with_fn + #iter_with_async_fn + #system_info_fn + #vacuum_fn + } + } + } + + fn gen_table_new_fn(&self) -> TokenStream { + // InMemory tables don't have PersistedWorkTable impl + quote! {} + } + + fn gen_table_name_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let dir_name = name_generator.get_dir_name(); + + quote! 
{ + pub fn name(&self) -> &'static str { + &self.0.table_name + } + + pub fn name_snake_case() -> &'static str { + #dir_name + } + } + } + + fn gen_table_select_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + let primary_key_type = name_generator.get_primary_key_type_ident(); + + quote! { + pub fn select(&self, pk: Pk) -> Option<#row_type> + where #primary_key_type: From { + self.0.select(pk.into()) + } + } + } + + fn gen_table_insert_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + let primary_key_type = name_generator.get_primary_key_type_ident(); + + quote! { + pub fn insert(&self, row: #row_type) -> core::result::Result<#primary_key_type, WorkTableError> { + self.0.insert(row) + } + } + } + + fn gen_table_reinsert_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + let primary_key_type = name_generator.get_primary_key_type_ident(); + + quote! { + pub async fn reinsert(&self, row_old: #row_type, row_new: #row_type) -> core::result::Result<#primary_key_type, WorkTableError> { + self.0.reinsert(row_old, row_new).await + } + } + } + + fn gen_table_upsert_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + + quote! 
{ + pub async fn upsert(&self, row: #row_type) -> core::result::Result<(), WorkTableError> { + let pk = row.get_primary_key(); + let need_to_update = { + if let Some(link) = self.0.primary_index.pk_map.get(&pk) { + true + } else { + false + } + }; + if need_to_update { + self.update(row).await?; + } else { + self.insert(row)?; + } + core::result::Result::Ok(()) + } + } + } + + fn gen_table_get_next_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let primary_key_type = name_generator.get_primary_key_type_ident(); + + match self.columns.generator_type { + GeneratorType::Custom | GeneratorType::Autoincrement => { + quote! { + pub fn get_next_pk(&self) -> #primary_key_type { + self.0.get_next_pk() + } + } + } + GeneratorType::None => { + quote! {} + } + } + } + + fn gen_table_iter_with_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + let inner = self.gen_table_iter_inner(quote! { + f(data)?; + }); + + quote! { + pub fn iter_with< + F: Fn(#row_type) -> core::result::Result<(), WorkTableError> + >(&self, f: F) -> core::result::Result<(), WorkTableError> { + #inner + } + } + } + + fn gen_table_iter_with_async_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + let inner = self.gen_table_iter_inner(quote! { + f(data).await?; + }); + + quote! { + pub async fn iter_with_async< + F: Fn(#row_type) -> Fut, + Fut: std::future::Future> + >(&self, f: F) -> core::result::Result<(), WorkTableError> { + #inner + } + } + } + + fn gen_table_iter_inner(&self, func: TokenStream) -> TokenStream { + quote! 
{ + let first = self.0.primary_index.pk_map.iter().next().map(|(k, v)| (k.clone(), v.0)); + let Some((mut k, link)) = first else { + return Ok(()) + }; + + let data = self.0.data.select_non_ghosted(link).map_err(WorkTableError::PagesError)?; + #func + + let mut ind = false; + while !ind { + let next = { + let mut iter = self.0.primary_index.pk_map.range(k.clone()..); + let next = iter.next().map(|(k, v)| (k.clone(), v.0)).filter(|(key, _)| key != &k); + if next.is_some() { + next + } else { + iter.next().map(|(k, v)| (k.clone(), v.0)) + } + }; + if let Some((key, link)) = next { + let data = self.0.data.select_non_ghosted(link).map_err(WorkTableError::PagesError)?; + #func + k = key + } else { + ind = true; + }; + } + + core::result::Result::Ok(()) + } + } + + fn gen_table_count_fn(&self) -> TokenStream { + quote! { + pub fn count(&self) -> usize { + let count = self.0.primary_index.pk_map.len(); + count + } + } + } + + fn gen_system_info_fn(&self) -> TokenStream { + quote! { + pub fn system_info(&self) -> SystemInfo { + self.0.system_info() + } + } + } + + fn gen_table_vacuum_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let table_name = name_generator.get_work_table_literal_name(); + let lock_type = name_generator.get_lock_type_ident(); + + quote! 
{ + pub fn vacuum(&self) -> std::sync::Arc { + std::sync::Arc::new(EmptyDataVacuum::< + _, + _, + _, + _, + _, + _, + #lock_type, + _ + >::new( + #table_name, + std::sync::Arc::clone(&self.0.data), + std::sync::Arc::clone(&self.0.lock_manager), + std::sync::Arc::clone(&self.0.primary_index), + std::sync::Arc::clone(&self.0.indexes), + )) + } + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/in_memory/table/index_fns.rs b/codegen/src/generators/in_memory/table/index_fns.rs new file mode 100644 index 00000000..673a539e --- /dev/null +++ b/codegen/src/generators/in_memory/table/index_fns.rs @@ -0,0 +1,114 @@ +use std::collections::HashMap; + +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::in_memory::InMemoryGenerator; +use crate::common::model::Index; + +impl InMemoryGenerator { + pub fn gen_table_index_fns(&self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_work_table_ident(); + let row_ident = name_generator.get_row_type_ident(); + let column_range_type = name_generator.get_column_range_type_ident(); + let row_fields_ident = name_generator.get_row_fields_enum_ident(); + + let fn_defs = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + if idx.is_unique { + Self::gen_unique_index_fn(i, idx, &self.columns.columns_map, row_ident.clone()) + } else { + Self::gen_non_unique_index_fn( + i, + idx, + &self.columns.columns_map, + row_ident.clone(), + &column_range_type, + &row_fields_ident, + ) + } + }) + .collect::, syn::Error>>()?; + + Ok(quote! 
{ + impl #ident { + #(#fn_defs)* + } + }) + } + + fn gen_unique_index_fn( + i: &Ident, + idx: &Index, + columns_map: &HashMap, + row_ident: Ident, + ) -> syn::Result { + let type_ = columns_map + .get(i) + .ok_or(syn::Error::new(i.span(), "Row not found"))?; + let fn_name = Ident::new(format!("select_by_{i}").as_str(), Span::mixed_site()); + let field_ident = &idx.name; + let by = if is_float(type_.to_string().as_str()) { + quote! { + &OrderedFloat(by) + } + } else { + quote! { + &by + } + }; + + Ok(quote! { + pub fn #fn_name(&self, by: #type_) -> Option<#row_ident> { + let link: Link = self.0.indexes.#field_ident.get(#by).map(|kv| kv.get().value.into())?; + self.0.data.select_non_ghosted(link).ok() + } + }) + } + + fn gen_non_unique_index_fn( + i: &Ident, + idx: &Index, + columns_map: &HashMap, + row_ident: Ident, + column_range_type: &Ident, + row_fields_ident: &Ident, + ) -> syn::Result { + let type_ = columns_map + .get(i) + .ok_or(syn::Error::new(i.span(), "Row not found"))?; + let fn_name = Ident::new(format!("select_by_{i}").as_str(), Span::mixed_site()); + let field_ident = &idx.name; + let row_field_ident = &idx.field; + let by = if is_float(type_.to_string().as_str()) { + quote! { + &OrderedFloat(by) + } + } else { + quote! { + &by + } + }; + + Ok(quote! 
{ + pub fn #fn_name(&self, by: #type_) -> SelectQueryBuilder<#row_ident, + impl DoubleEndedIterator + '_, + #column_range_type, + #row_fields_ident> + { + let rows = self.0.indexes.#field_ident + .get(#by) + .into_iter() + .filter_map(|(_, link)| self.0.data.select_non_ghosted(link.0).ok()) + .filter(move |r| &r.#row_field_ident == &by); + + SelectQueryBuilder::new(rows) + } + }) + } +} diff --git a/codegen/src/worktable/generator/table/mod.rs b/codegen/src/generators/in_memory/table/mod.rs similarity index 78% rename from codegen/src/worktable/generator/table/mod.rs rename to codegen/src/generators/in_memory/table/mod.rs index 07f7554d..819f8a80 100644 --- a/codegen/src/worktable/generator/table/mod.rs +++ b/codegen/src/generators/in_memory/table/mod.rs @@ -1,14 +1,14 @@ use proc_macro2::{Literal, TokenStream}; use quote::quote; -use crate::name_generator::{WorktableNameGenerator, is_unsized_vec}; -use crate::worktable::generator::Generator; +use crate::common::name_generator::{WorktableNameGenerator, is_unsized_vec}; +use crate::generators::in_memory::InMemoryGenerator; mod impls; mod index_fns; mod select_executor; -impl Generator { +impl InMemoryGenerator { pub fn gen_table_def(&mut self) -> syn::Result { let page_size_consts = self.gen_page_size_consts(); let type_ = self.gen_table_type(); @@ -34,16 +34,12 @@ impl Generator { let ident = name_generator.get_work_table_ident(); let table_name = name_generator.get_work_table_literal_name(); - if self.is_persist { - quote! {} - } else { - quote! { - impl Default for #ident { - fn default() -> Self { - let mut inner = WorkTable::default(); - inner.table_name = #table_name; - Self(inner) - } + quote! 
{ + impl Default for #ident { + fn default() -> Self { + let mut inner = WorkTable::default(); + inner.table_name = #table_name; + Self(inner) } } } @@ -58,17 +54,8 @@ impl Generator { let inner_const_name = name_generator.get_page_inner_size_const_ident(); let avt_type_ident = name_generator.get_available_type_ident(); let avt_index_ident = name_generator.get_available_indexes_ident(); - let persistence_task = name_generator.get_persistence_task_ident(); let lock_ident = name_generator.get_lock_type_ident(); - let persist_type_part = if self.is_persist { - quote! { - , #persistence_task - } - } else { - quote! {} - }; - let pk_types = &self .columns .primary_keys @@ -82,22 +69,11 @@ impl Generator { }) .collect::>(); let pk_types_unsized = is_unsized_vec(pk_types); - let derive = if self.is_persist { - if pk_types_unsized { - quote! { - #[derive(Debug, PersistTable)] - #[table(pk_unsized)] - } - } else { - quote! { - #[derive(Debug, PersistTable)] - } - } - } else { - quote! { - #[derive(Debug)] - } + + let derive = quote! { + #[derive(Debug)] }; + let node_type = if pk_types_unsized { quote! 
{ UnsizedNode>> @@ -123,7 +99,6 @@ impl Generator { #inner_const_name, #node_type > - #persist_type_part ); } } else { @@ -141,7 +116,6 @@ impl Generator { { INNER_PAGE_SIZE }, #node_type > - #persist_type_part ); } } @@ -165,4 +139,4 @@ impl Generator { } } } -} +} \ No newline at end of file diff --git a/codegen/src/generators/in_memory/table/select_executor.rs b/codegen/src/generators/in_memory/table/select_executor.rs new file mode 100644 index 00000000..3a2f6254 --- /dev/null +++ b/codegen/src/generators/in_memory/table/select_executor.rs @@ -0,0 +1,227 @@ +use convert_case::{Case, Casing}; +use proc_macro2::Ident; +use proc_macro2::Span; +use proc_macro2::TokenStream; +use quote::quote; + +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; +use quote::ToTokens; +use syn::Type; + +const RANGE_VARIANTS: &[&str] = &["", "Inclusive", "From", "To", "ToInclusive"]; + +fn is_numeric_type(ty: &Type) -> bool { + matches!( + ty.to_token_stream().to_string().as_str(), + "i8" | "i16" + | "i32" + | "i64" + | "i128" + | "u8" + | "u16" + | "u32" + | "u64" + | "u128" + | "f32" + | "f64" + ) +} + +impl InMemoryGenerator { + pub fn gen_table_column_range_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let column_range_type = name_generator.get_column_range_type_ident(); + + let unique_types: std::collections::HashSet = self + .columns + .columns_map + .values() + .map(|ty| ty.to_token_stream().to_string()) + .filter(|ty| is_numeric_type(&syn::parse_str::(ty).unwrap())) + .map(|ty| ty.to_string()) + .collect(); + + let column_range_variants = unique_types.iter().map(|type_name| { + let ty_ident = Ident::new(&type_name.to_string(), Span::call_site()); + let variants: Vec<_> = RANGE_VARIANTS + .iter() + .map(|variant| { + let variant_ident = Ident::new( + &format!("{}{}", type_name.to_string().to_case(Case::Pascal), variant), + Span::call_site(), + 
); + let range_ident = Ident::new(&format!("Range{variant}"), Span::call_site()); + quote! { + #variant_ident(std::ops::#range_ident<#ty_ident>), + } + }) + .collect(); + + quote! { + #(#variants)* + } + }); + + let from_impls = unique_types.iter().map(|type_name| { + let ty_ident = Ident::new(&type_name.to_string(), Span::call_site()); + let variants: Vec<_> = RANGE_VARIANTS + .iter() + .map(|variant| { + let variant_ident = Ident::new( + &format!("{}{}", type_name.to_string().to_case(Case::Pascal), variant), + Span::call_site(), + ); + let range_ident = Ident::new(&format!("Range{variant}"), Span::call_site()); + quote! { + impl From> for #column_range_type { + fn from(range: std::ops::#range_ident<#ty_ident>) -> Self { + Self::#variant_ident(range) + } + } + } + }) + .collect(); + + quote! { + #(#variants)* + } + }); + + quote! { + #[derive(Debug, Clone)] + pub enum #column_range_type { + #(#column_range_variants)* + } + + #(#from_impls)* + } + } + + pub fn gen_table_select_query_executor_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + let column_range_type = name_generator.get_column_range_type_ident(); + let row_fields_ident = name_generator.get_row_fields_enum_ident(); + + let order_matches = self.columns.columns_map.keys().map(|column| { + let column_variant = Ident::new(&column.to_string().to_case(Case::Pascal), Span::mixed_site()); + let col_ident = Ident::new(&column.to_string(), Span::call_site()); + quote! 
{ + #row_fields_ident::#column_variant => { + let cmp = a.#col_ident.partial_cmp(&b.#col_ident).unwrap_or(std::cmp::Ordering::Equal); + if cmp != std::cmp::Ordering::Equal { + return match order { + Order::Asc => cmp, + Order::Desc => cmp.reverse(), + }; + } + } + } + }); + + let range_matches = self + .columns + .columns_map + .iter() + .filter(|(_, ty)| { + is_numeric_type(&syn::parse_str::(&ty.to_token_stream().to_string()).unwrap()) + }) + .map(|(column, ty)| { + let variants: Vec<_> = RANGE_VARIANTS + .iter() + .map(|v| { + let column_variant = Ident::new(&column.to_string().to_case(Case::Pascal), Span::mixed_site()); + let col_ident = Ident::new(&column.to_string(), Span::call_site()); + let variant_ident = Ident::new( + &format!("{}{}", ty.to_string().to_case(Case::Pascal), v), + Span::call_site(), + ); + quote! { + (#row_fields_ident::#column_variant, #column_range_type::#variant_ident(range)) => { + Box::new(iter.filter(move |row| range.contains(&row.#col_ident))) + as Box> + }, + } + }) + .collect(); + + quote! { + #(#variants)* + } + }).collect::>(); + + let range = if range_matches.is_empty() { + quote! {} + } else { + quote! { + if !self.params.range.is_empty() { + for (range, column) in &self.params.range { + iter = match (column, range.clone().into()) { + #(#range_matches)* + _ => unreachable!(), + }; + } + } + } + }; + + quote! 
{ + impl SelectQueryExecutor<#row_type, I, #column_range_type, #row_fields_ident> + for SelectQueryBuilder<#row_type, I, #column_range_type, #row_fields_ident> + where + I: DoubleEndedIterator + Sized, + { + + fn where_by(self, predicate: F) -> SelectQueryBuilder<#row_type, + impl DoubleEndedIterator + Sized, + #column_range_type, + #row_fields_ident> + where + F: FnMut(&#row_type) -> bool, + { + SelectQueryBuilder { + params: self.params, + iter: self.iter.filter(predicate), + } + } + + fn execute(self) -> Result, WorkTableError> { + let mut iter: Box> = Box::new(self.iter); + + #range + + if !self.params.order.is_empty() { + let mut items: Vec<#row_type> = iter.collect(); + + items.sort_by(|a, b| { + for (order, col) in &self.params.order { + match col { + #(#order_matches)* + _ => continue, + } + } + std::cmp::Ordering::Equal + }); + + iter = Box::new(items.into_iter()); + } + + let iter_result: Box> = if let Some(offset) = self.params.offset { + Box::new(iter.skip(offset)) + } else { + Box::new(iter) + }; + + let iter_result: Box> = if let Some(limit) = self.params.limit { + Box::new(iter_result.take(limit)) + } else { + Box::new(iter_result) + }; + + Ok(iter_result.collect()) + } + } + } + } +} diff --git a/codegen/src/generators/in_memory/wrapper.rs b/codegen/src/generators/in_memory/wrapper.rs new file mode 100644 index 00000000..48c31da7 --- /dev/null +++ b/codegen/src/generators/in_memory/wrapper.rs @@ -0,0 +1,107 @@ +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::in_memory::InMemoryGenerator; +use proc_macro2::TokenStream; +use quote::quote; + +impl InMemoryGenerator { + pub fn gen_wrapper_def(&self) -> TokenStream { + let type_ = self.gen_wrapper_type(); + let impl_ = self.gen_wrapper_impl(); + let storable_impl = self.get_wrapper_storable_impl(); + let archived_wrapper_impl = self.get_archived_wrapper_impl(); + + quote! 
{ + #type_ + #impl_ + #storable_impl + #archived_wrapper_impl + } + } + + fn gen_wrapper_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_ident = name_generator.get_row_type_ident(); + let wrapper_ident = name_generator.get_wrapper_type_ident(); + + quote! { + #[derive(rkyv::Archive, Debug, rkyv::Deserialize, rkyv::Serialize)] + #[repr(C)] + pub struct #wrapper_ident { + inner: #row_ident, + is_ghosted: bool, + is_deleted: bool, + is_in_vacuum_process: bool, + } + } + } + + pub fn gen_wrapper_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let wrapper_ident = name_generator.get_wrapper_type_ident(); + let row_ident = name_generator.get_row_type_ident(); + + quote! { + + impl RowWrapper<#row_ident> for #wrapper_ident { + fn get_inner(self) -> #row_ident { + self.inner + } + + fn is_ghosted(&self) -> bool { + self.is_ghosted + } + + fn is_vacuumed(&self) -> bool { + self.is_in_vacuum_process + } + + fn is_deleted(&self) -> bool { + self.is_deleted + } + + fn from_inner(inner: #row_ident) -> Self { + Self { + inner, + is_ghosted: true, + is_deleted: false, + is_in_vacuum_process: false, + } + } + } + } + } + + fn get_wrapper_storable_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_ident = name_generator.get_row_type_ident(); + let wrapper_ident = name_generator.get_wrapper_type_ident(); + + quote! { + impl StorableRow for #row_ident { + type WrappedRow = #wrapper_ident; + } + } + } + + fn get_archived_wrapper_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_ident = name_generator.get_archived_wrapper_type_ident(); + + quote! 
{ + impl ArchivedRowWrapper for #row_ident { + fn unghost(&mut self) { + self.is_ghosted = false; + } + fn set_in_vacuum_process(&mut self) { + self.is_in_vacuum_process = true; + } + fn delete(&mut self) { + self.is_deleted = true; + } + fn is_deleted(&self) -> bool { + self.is_deleted + } + } + } + } +} diff --git a/codegen/src/generators/mod.rs b/codegen/src/generators/mod.rs new file mode 100644 index 00000000..c62c37c6 --- /dev/null +++ b/codegen/src/generators/mod.rs @@ -0,0 +1,3 @@ +pub mod in_memory; +pub mod read_only; +pub mod persist; \ No newline at end of file diff --git a/codegen/src/generators/persist/index/cdc.rs b/codegen/src/generators/persist/index/cdc.rs new file mode 100644 index 00000000..55c7a001 --- /dev/null +++ b/codegen/src/generators/persist/index/cdc.rs @@ -0,0 +1,425 @@ +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::persist::PersistGenerator; +use crate::generators::persist::queries::r#type::map_to_uppercase; +use convert_case::{Case, Casing}; +use proc_macro2::{Ident, Literal, Span, TokenStream}; +use quote::quote; + +impl PersistGenerator { + pub fn gen_secondary_index_cdc_impl_def(&mut self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let index_type_ident = name_generator.get_index_type_ident(); + let row_type_ident = name_generator.get_row_type_ident(); + let events_ident = name_generator.get_space_secondary_index_events_ident(); + let available_types_ident = name_generator.get_available_type_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let save_row_cdc = self.gen_save_row_cdc_index_fn(); + let reinsert_row_cdc = self.gen_reinsert_row_cdc_index_fn(); + let delete_row_cdc = self.gen_delete_row_cdc_index_fn(); + let delete_from_indexes_cdc = self.gen_delete_from_indexes_cdc_index_fn(); + let process_difference_insert_cdc = self.gen_process_difference_insert_cdc_index_fn(); + let 
process_difference_remove_cdc = self.gen_process_difference_remove_cdc_index_fn(); + + quote! { + impl TableSecondaryIndexCdc<#row_type_ident, #available_types_ident, #events_ident, #available_index_ident> for #index_type_ident { + #reinsert_row_cdc + #save_row_cdc + #delete_row_cdc + #delete_from_indexes_cdc + #process_difference_insert_cdc + #process_difference_remove_cdc + } + } + } + + fn gen_save_row_cdc_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let events_ident = name_generator.get_space_secondary_index_events_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let save_rows = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + + quote! { + partial_events.#index_field_name = vec![]; + let #index_field_name = if let Some(events) = self.#index_field_name.insert_checked_cdc(row.#i.clone(), link) { + let evs: Vec<_> = events.into_iter().map(|ev| ev.into()).collect(); + partial_events.#index_field_name = evs.clone(); + evs + } else { + return (partial_events, Err(IndexError::AlreadyExists { + at: #available_index_ident::#index_variant, + inserted_already: inserted_indexes.clone(), + })); + }; + inserted_indexes.push(#available_index_ident::#index_variant); + } + }) + .collect::>(); + let idents = self + .columns + .indexes + .values() + .map(|idx| &idx.name) + .collect::>(); + + quote! 
{ + fn save_row_cdc(&self, row: #row_type_ident, link: Link) -> (#events_ident, Result<(), IndexError<#available_index_ident>>) { + let mut inserted_indexes: Vec<#available_index_ident> = vec![]; + let mut partial_events = #events_ident::default(); + + #(#save_rows)* + (#events_ident { + #(#idents,)* + }, Ok(())) + } + } + } + + fn gen_reinsert_row_cdc_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let events_ident = name_generator.get_space_secondary_index_events_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let (insert_rows, remove_rows): (Vec<_>, Vec<_>) = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + + let remove = if idx.is_unique { + quote! { + if row_new.#i == row_old.#i { + let events = TableIndexCdc::insert_cdc(&self.#index_field_name, row_new.#i.clone(), link_new).1; + #index_field_name.extend(events.into_iter().map(|ev| ev.into()).collect::>()); + } else { + let (_, events) = TableIndexCdc::remove_cdc(&self.#index_field_name, row_old.#i.clone(), link_old); + #index_field_name.extend(events.into_iter().map(|ev| ev.into()).collect::>()); + } + } + } else { + quote! { + let events = TableIndexCdc::insert_cdc(&self.#index_field_name, row_new.#i.clone(), link_new).1; + #index_field_name.extend(events.into_iter().map(|ev| ev.into()).collect::>()); + let (_, events) = TableIndexCdc::remove_cdc(&self.#index_field_name, row_old.#i.clone(), link_old); + #index_field_name.extend(events.into_iter().map(|ev| ev.into()).collect::>()); + } + }; + let insert = if idx.is_unique { + quote! 
{ + let mut #index_field_name = if row_new.#i != row_old.#i { + partial_events.#index_field_name = vec![]; + let #index_field_name: Vec<_> = if let Some(events) = self.#index_field_name.insert_checked_cdc(row_new.#i.clone(), link_new) { + let evs: Vec<_> = events.into_iter().map(|ev| ev.into()).collect(); + partial_events.#index_field_name = evs.clone(); + evs + } else { + return (partial_events, Err(IndexError::AlreadyExists { + at: #available_index_ident::#index_variant, + inserted_already: inserted_indexes.clone(), + })); + }; + inserted_indexes.push(#available_index_ident::#index_variant); + + #index_field_name + } else { + vec![] + }; + } + } else { + quote! { + let mut #index_field_name = vec![]; + } + }; + (insert, remove) + }) + .unzip(); + let idents = self + .columns + .indexes + .values() + .map(|idx| &idx.name) + .collect::>(); + + quote! { + fn reinsert_row_cdc( + &self, + row_old: #row_type_ident, + link_old: Link, + row_new: #row_type_ident, + link_new: Link + ) -> (#events_ident, Result<(), IndexError<#available_index_ident>>) { + let mut inserted_indexes: Vec<#available_index_ident> = vec![]; + let mut partial_events = #events_ident::default(); + + #(#insert_rows)* + #(#remove_rows)* + (#events_ident { + #(#idents,)* + }, Ok(())) + } + } + } + + fn gen_delete_row_cdc_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let events_ident = name_generator.get_space_secondary_index_events_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let delete_rows = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + quote! 
{ + let (_, events) = TableIndexCdc::remove_cdc(&self.#index_field_name, row.#i, link); + let #index_field_name = events.into_iter().map(|ev| ev.into()).collect(); + } + }) + .collect::>(); + let idents = self + .columns + .indexes + .values() + .map(|idx| &idx.name) + .collect::>(); + + quote! { + fn delete_row_cdc(&self, row: #row_type_ident, link: Link) -> (#events_ident, Result<(), IndexError<#available_index_ident>>) { + #(#delete_rows)* + (#events_ident { + #(#idents,)* + }, Ok(())) + } + } + } + + fn gen_delete_from_indexes_cdc_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let events_ident = name_generator.get_space_secondary_index_events_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let matches = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + let type_str = self.columns + .columns_map + .get(i) + .unwrap() + .to_string(); + let row = if is_float(type_str.as_str()) { + quote! { + OrderedFloat(row.#i) + } + } else if type_str == "String" { + quote! { + row.#i.clone() + } + } else { + quote! { + row.#i + } + }; + + quote! { + #available_index_ident::#index_variant => { + let (_, events) = TableIndexCdc::remove_cdc(&self.#index_field_name, #row, link); + partial_events.#index_field_name = events.into_iter().map(|ev| ev.into()).collect(); + }, + } + }) + .collect::>(); + + let inner = if matches.is_empty() { + quote! {} + } else { + quote! { + for index in indexes { + match index { + #(#matches)* + } + } + } + }; + + quote! 
{ + fn delete_from_indexes_cdc( + &self, + row: #row_type_ident, + link: Link, + indexes: Vec<#available_index_ident>, + ) -> (#events_ident, Result<(), IndexError<#available_index_ident>>) { + let mut partial_events = #events_ident::default(); + #inner + (partial_events, Ok(())) + } + } + } + + fn gen_process_difference_remove_cdc_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = name_generator.get_available_type_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + let events_ident = name_generator.get_space_secondary_index_events_ident(); + + let process_difference_rows = self.columns.indexes.iter().map(|(i, idx)| { + let index_field_name = &idx.name; + let diff_key = Literal::string(i.to_string().as_str()); + + if let Some(t) = self.columns.columns_map.get(&idx.field) { + let type_str = t.to_string(); + let variant_ident = Ident::new(&map_to_uppercase(&type_str), Span::mixed_site()); + + let old_value_expr = if type_str == "String" { + quote! { old.to_string() } + } else if is_float(type_str.as_str()) { + quote! { OrderedFloat(*old) } + } else { + quote! { *old } + }; + + quote! { + let #index_field_name = if let Some(diff) = difference.get(#diff_key) { + let mut events = vec![]; + if let #avt_type_ident::#variant_ident(old) = &diff.old { + let key_old = #old_value_expr; + let (_, evs) = TableIndexCdc::remove_cdc(&self.#index_field_name, key_old, link); + events.extend_from_slice(evs.as_ref()); + } + events + } else { + vec![] + }; + } + } else { + quote! {} + } + }); + let idents = self + .columns + .indexes + .values() + .map(|idx| &idx.name) + .collect::>(); + + quote! 
{ + fn process_difference_remove_cdc( + &self, + link: Link, + difference: std::collections::HashMap<&str, Difference<#avt_type_ident>> + ) -> (#events_ident, Result<(), IndexError<#available_index_ident>>) { + #(#process_difference_rows)* + (#events_ident { + #(#idents,)* + }, Ok(())) + } + } + } + + fn gen_process_difference_insert_cdc_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = name_generator.get_available_type_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + let events_ident = name_generator.get_space_secondary_index_events_ident(); + + let process_difference_insert_rows = self.columns.indexes.iter().map(|(i, idx)| { + let index_field_name = &idx.name; + let diff_key = Literal::string(i.to_string().as_str()); + + if let Some(t) = self.columns.columns_map.get(&idx.field) { + let type_str = t.to_string(); + let variant_ident = Ident::new(&map_to_uppercase(&type_str), Span::mixed_site()); + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + + let new_value_expr = if type_str == "String" { + quote! { new.to_string() } + } else if is_float(type_str.as_str()) { + quote! { OrderedFloat(*new) } + } else { + quote! { *new } + }; + + quote! 
{ + let #index_field_name = if let Some(diff) = difference.get(#diff_key) { + let mut events = vec![]; + if let #avt_type_ident::#variant_ident(new) = &diff.new { + let key_new = #new_value_expr; + partial_events.#index_field_name = vec![]; + if let Some(evs) = TableIndexCdc::insert_checked_cdc(&self.#index_field_name, key_new, link) { + let evs: Vec<_> = evs.into_iter().collect(); + partial_events.#index_field_name = evs.clone(); + events.extend_from_slice(evs.as_ref()); + } else { + return (partial_events, Err(IndexError::AlreadyExists { + at: #available_index_ident::#index_variant, + inserted_already: inserted_indexes.clone(), + })); + } + inserted_indexes.push(#available_index_ident::#index_variant); + } + events + } else { + vec![] + }; + } + } else { + quote! {} + } + }); + let idents = self + .columns + .indexes + .values() + .map(|idx| &idx.name) + .collect::>(); + + quote! { + fn process_difference_insert_cdc( + &self, + link: Link, + difference: std::collections::HashMap<&str, Difference<#avt_type_ident>> + ) -> (#events_ident, Result<(), IndexError<#available_index_ident>>) { + let mut inserted_indexes: Vec<#available_index_ident> = vec![]; + let mut partial_events = #events_ident::default(); + + #(#process_difference_insert_rows)* + (#events_ident { + #(#idents,)* + }, Ok(())) + } + } + } +} diff --git a/codegen/src/worktable/generator/index/info.rs b/codegen/src/generators/persist/index/info.rs similarity index 95% rename from codegen/src/worktable/generator/index/info.rs rename to codegen/src/generators/persist/index/info.rs index 10da64cd..b28f96f2 100644 --- a/codegen/src/worktable/generator/index/info.rs +++ b/codegen/src/generators/persist/index/info.rs @@ -1,10 +1,10 @@ use proc_macro2::TokenStream; use quote::quote; -use crate::name_generator::WorktableNameGenerator; -use crate::worktable::generator::Generator; +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::persist::PersistGenerator; -impl Generator { +impl 
PersistGenerator { pub fn gen_secondary_index_info_impl_def(&mut self) -> TokenStream { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); let index_type_ident = name_generator.get_index_type_ident(); @@ -88,4 +88,4 @@ impl Generator { } } } -} +} \ No newline at end of file diff --git a/codegen/src/generators/persist/index/mod.rs b/codegen/src/generators/persist/index/mod.rs new file mode 100644 index 00000000..9e9b682b --- /dev/null +++ b/codegen/src/generators/persist/index/mod.rs @@ -0,0 +1,173 @@ +mod cdc; +mod info; +mod usual; + +use crate::common::name_generator::{WorktableNameGenerator, is_float, is_unsized}; +use crate::generators::persist::PersistGenerator; +use convert_case::{Case, Casing}; +use proc_macro2::TokenStream; +use quote::quote; + +impl PersistGenerator { + pub fn gen_index_def(&mut self) -> syn::Result { + let type_def = self.gen_type_def()?; + let impl_def = self.gen_secondary_index_impl_def(); + let info_def = self.gen_secondary_index_info_impl_def(); + let cdc_impl_def = self.gen_secondary_index_cdc_impl_def(); + let default_impl = self.gen_index_default_impl()?; + let available_indexes = self.gen_available_indexes(); + + Ok(quote! { + #type_def + #impl_def + #info_def + #cdc_impl_def + #default_impl + #available_indexes + }) + } + + fn gen_type_def(&mut self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_index_type_ident(); + let index_rows = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let Some(t) = self.columns.columns_map.get(i) else { + return Err(syn::Error::new( + i.span(), + format!("cannot find column `{i}` in this table"), + )); + }; + let t = if is_float(t.to_string().as_str()) { + quote! { OrderedFloat<#t> } + } else { + quote! { #t } + }; + let i = &idx.name; + + #[allow(clippy::collapsible_else_if)] + let res = if idx.is_unique { + if is_unsized(&t.to_string()) { + quote! 
{ + #i: IndexMap<#t, OffsetEqLink, UnsizedNode>> + } + } else { + quote! {#i: IndexMap<#t, OffsetEqLink>} + } + } else { + if is_unsized(&t.to_string()) { + quote! {#i: IndexMultiMap<#t, OffsetEqLink, UnsizedNode>>} + } else { + quote! {#i: IndexMultiMap<#t, OffsetEqLink>} + } + }; + Ok::<_, syn::Error>(res) + }) + .collect::, syn::Error>>()?; + + let derive = quote! { + #[derive(Debug, MemStat, PersistIndex)] + }; + + Ok(quote! { + #derive + pub struct #ident { + #(#index_rows),* + } + }) + } + + fn gen_index_default_impl(&self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let index_type_ident = name_generator.get_index_type_ident(); + let const_name = name_generator.get_page_inner_size_const_ident(); + + let index_rows = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let Some(t) = self.columns.columns_map.get(i) else { + return Err(syn::Error::new( + i.span(), + format!("cannot find column `{i}` in this table"), + )); + }; + let t = if is_float(t.to_string().as_str()) { + quote! { OrderedFloat<#t> } + } else { + quote! { #t } + }; + let i = &idx.name; + + #[allow(clippy::collapsible_else_if)] + let res = if idx.is_unique { + if is_unsized(&t.to_string()) { + quote! { + #i: IndexMap::with_maximum_node_size(#const_name), + } + } else { + quote! {#i: IndexMap::with_maximum_node_size(get_index_page_size_from_data_length::<#t>(#const_name)),} + } + } else { + if is_unsized(&t.to_string()) { + quote! {#i: IndexMultiMap::with_maximum_node_size(#const_name), } + } else { + quote! {#i: IndexMultiMap::with_maximum_node_size(get_index_page_size_from_data_length::<#t>(#const_name)),} + } + }; + + Ok::<_, syn::Error>(res) + }) + .collect::, syn::Error>>()?; + + Ok(quote! 
{ + impl Default for #index_type_ident { + fn default() -> Self { + Self { + #(#index_rows)* + } + } + } + }) + } + + fn gen_available_indexes(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = name_generator.get_available_indexes_ident(); + + let indexes = self.columns.indexes.values().map(|i| { + let camel_case_name = i + .name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let i: TokenStream = camel_case_name.parse().unwrap(); + quote! { + #i, + } + }); + + if self.columns.indexes.is_empty() { + quote! { + pub type #avt_type_ident = (); + } + } else { + quote! { + #[derive(Debug, Clone, Copy, MoreDisplay, PartialEq, PartialOrd, Ord, Hash, Eq)] + pub enum #avt_type_ident { + #(#indexes)* + } + + impl AvailableIndex for #avt_type_ident { + fn to_string_value(&self) -> String { + ToString::to_string(&self) + } + } + } + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/persist/index/usual.rs b/codegen/src/generators/persist/index/usual.rs new file mode 100644 index 00000000..8c0c5303 --- /dev/null +++ b/codegen/src/generators/persist/index/usual.rs @@ -0,0 +1,391 @@ +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::persist::PersistGenerator; +use crate::generators::persist::queries::r#type::map_to_uppercase; +use convert_case::{Case, Casing}; +use proc_macro2::{Ident, Literal, Span, TokenStream}; +use quote::quote; + +impl PersistGenerator { + pub fn gen_secondary_index_impl_def(&mut self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let index_type_ident = name_generator.get_index_type_ident(); + let avt_type_ident = name_generator.get_available_type_ident(); + let avt_index_ident = name_generator.get_available_indexes_ident(); + + let save_row_fn = 
self.gen_save_row_index_fn(); + let reinsert_row_fn = self.gen_reinsert_row_index_fn(); + let delete_row_fn = self.gen_delete_row_index_fn(); + let process_difference_insert_fn = self.gen_process_difference_insert_index_fn(); + let process_difference_remove_fn = self.gen_process_difference_remove_index_fn(); + let delete_from_indexes = self.gen_index_delete_from_indexes_fn(); + + quote! { + impl TableSecondaryIndex<#row_type_ident, #avt_type_ident, #avt_index_ident> for #index_type_ident { + #save_row_fn + #reinsert_row_fn + #delete_row_fn + #process_difference_insert_fn + #process_difference_remove_fn + #delete_from_indexes + } + } + } + + fn gen_save_row_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let save_rows = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + + let row = if is_float( + self.columns + .columns_map + .get(i) + .unwrap() + .to_string() + .as_str(), + ) { + quote! { + OrderedFloat(row.#i) + } + } else { + quote! { + row.#i + } + }; + quote! { + if self.#index_field_name.insert_checked(#row.clone(), link).is_none() { + return Err(IndexError::AlreadyExists { + at: #available_index_ident::#index_variant, + inserted_already: inserted_indexes.clone(), + }) + } + inserted_indexes.push(#available_index_ident::#index_variant); + } + }) + .collect::>(); + + quote! 
{ + fn save_row(&self, row: #row_type_ident, link: Link) -> core::result::Result<(), IndexError<#available_index_ident>> { + let mut inserted_indexes: Vec<#available_index_ident> = vec![]; + #(#save_rows)* + core::result::Result::Ok(()) + } + } + } + + fn gen_reinsert_row_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let (insert_rows, remove_rows): (Vec<_>, Vec<_>) = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + let row = if is_float( + self.columns + .columns_map + .get(i) + .unwrap() + .to_string() + .as_str(), + ) { + quote! { + OrderedFloat(row.#i) + } + } else { + quote! { + row.#i + } + }; + let remove = if idx.is_unique { + quote! { + if val_new == val_old { + TableIndex::insert(&self.#index_field_name, val_new.clone(), link_new); + } else { + TableIndex::remove(&self.#index_field_name, &val_old, link_old); + } + } + } else { + quote! { + TableIndex::insert(&self.#index_field_name, val_new.clone(), link_new); + TableIndex::remove(&self.#index_field_name, &val_old, link_old); + } + }; + let insert = if idx.is_unique { + quote! { + let row = &row_new; + let val_new = #row.clone(); + let row = &row_old; + let val_old = #row.clone(); + if val_new != val_old { + if self.#index_field_name.insert_checked(val_new.clone(), link_new).is_none() { + return Err(IndexError::AlreadyExists { + at: #available_index_ident::#index_variant, + inserted_already: inserted_indexes.clone(), + }) + } + inserted_indexes.push(#available_index_ident::#index_variant); + } + } + } else { + quote! {} + }; + let remove = quote! 
{ + let row = &row_new; + let val_new = #row.clone(); + let row = &row_old; + let val_old = #row.clone(); + #remove + }; + (insert, remove) + }) + .unzip(); + + quote! { + fn reinsert_row(&self, + row_old: #row_type_ident, + link_old: Link, + row_new: #row_type_ident, + link_new: Link + ) -> core::result::Result<(), IndexError<#available_index_ident>> + { + let mut inserted_indexes: Vec<#available_index_ident> = vec![]; + #(#insert_rows)* + #(#remove_rows)* + core::result::Result::Ok(()) + } + } + } + + fn gen_delete_row_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let delete_rows = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let row = if is_float( + self.columns + .columns_map + .get(i) + .unwrap() + .to_string() + .as_str(), + ) { + quote! { + OrderedFloat(row.#i) + } + } else { + quote! { + row.#i + } + }; + quote! { + TableIndex::remove(&self.#index_field_name, &#row, link); + } + }) + .collect::>(); + + quote! 
{ + fn delete_row(&self, row: #row_type_ident, link: Link) -> core::result::Result<(), IndexError<#available_index_ident>> { + #(#delete_rows)* + core::result::Result::Ok(()) + } + } + } + + fn gen_process_difference_remove_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = name_generator.get_available_type_ident(); + let avt_index_ident = name_generator.get_available_indexes_ident(); + + let process_difference_remove_rows = self.columns.indexes.iter().map(|(i, idx)| { + let index_field_name = &idx.name; + let diff_key = Literal::string(i.to_string().as_str()); + + if let Some(t) = self.columns.columns_map.get(&idx.field) { + let type_str = t.to_string(); + let variant_ident = Ident::new(&map_to_uppercase(&type_str), Span::mixed_site()); + + let old_value_expr = if type_str == "String" { + quote! { old.to_string() } + } else if is_float(type_str.as_str()) { + quote! { OrderedFloat(*old) } + } else { + quote! { *old } + }; + + quote! { + if let Some(diff) = difference.get(#diff_key) { + if let #avt_type_ident::#variant_ident(old) = &diff.old { + let key_old = #old_value_expr; + TableIndex::remove(&self.#index_field_name, &key_old, link); + } + } + } + } else { + quote! {} + } + }); + + quote! 
{ + fn process_difference_remove( + &self, + link: Link, + difference: std::collections::HashMap<&str, Difference<#avt_type_ident>> + ) -> core::result::Result<(), IndexError<#avt_index_ident>> { + #(#process_difference_remove_rows)* + core::result::Result::Ok(()) + } + } + } + + fn gen_process_difference_insert_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = name_generator.get_available_type_ident(); + let avt_index_ident = name_generator.get_available_indexes_ident(); + + let process_difference_insert_rows = self.columns.indexes.iter().map(|(i, idx)| { + let index_field_name = &idx.name; + let diff_key = Literal::string(i.to_string().as_str()); + + if let Some(t) = self.columns.columns_map.get(&idx.field) { + let type_str = t.to_string(); + let variant_ident = Ident::new(&map_to_uppercase(&type_str), Span::mixed_site()); + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + + let new_value_expr = if type_str == "String" { + quote! { new.to_string() } + } else if is_float(type_str.as_str()) { + quote! { OrderedFloat(*new) } + } else { + quote! { *new } + }; + + quote! { + if let Some(diff) = difference.get(#diff_key) { + if let #avt_type_ident::#variant_ident(new) = &diff.new { + let key_new = #new_value_expr; + if TableIndex::insert_checked(&self.#index_field_name, key_new, link).is_none() { + return Err(IndexError::AlreadyExists { + at: #avt_index_ident::#index_variant, + inserted_already: inserted_indexes.clone(), + }) + } + inserted_indexes.push(#avt_index_ident::#index_variant); + } + } + } + } else { + quote! {} + } + }); + + quote! 
{ + fn process_difference_insert( + &self, + link: Link, + difference: std::collections::HashMap<&str, Difference<#avt_type_ident>> + ) -> core::result::Result<(), IndexError<#avt_index_ident>> { + let mut inserted_indexes: Vec<#avt_index_ident> = vec![]; + #(#process_difference_insert_rows)* + core::result::Result::Ok(()) + } + } + } + + fn gen_index_delete_from_indexes_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_index_ident = name_generator.get_available_indexes_ident(); + let row_type_ident = name_generator.get_row_type_ident(); + + let matches = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + let row = if is_float( + self.columns + .columns_map + .get(i) + .unwrap() + .to_string() + .as_str(), + ) { + quote! { + OrderedFloat(row.#i) + } + } else { + quote! { + row.#i + } + }; + + quote! { + #avt_index_ident::#index_variant => { + TableIndex::remove(&self.#index_field_name, &#row, link); + }, + } + }) + .collect::>(); + + let inner = if matches.is_empty() { + quote! {} + } else { + quote! { + for index in indexes { + match index { + #(#matches)* + } + } + } + }; + + quote! 
{ + fn delete_from_indexes( + &self, + row: #row_type_ident, + link: Link, + indexes: Vec<#avt_index_ident>, + ) -> core::result::Result<(), IndexError<#avt_index_ident>> { + #inner + core::result::Result::Ok(()) + } + } + } +} \ No newline at end of file diff --git a/codegen/src/worktable/generator/locks.rs b/codegen/src/generators/persist/locks.rs similarity index 97% rename from codegen/src/worktable/generator/locks.rs rename to codegen/src/generators/persist/locks.rs index 535d5316..af4ab50d 100644 --- a/codegen/src/worktable/generator/locks.rs +++ b/codegen/src/generators/persist/locks.rs @@ -1,9 +1,9 @@ -use crate::name_generator::WorktableNameGenerator; -use crate::worktable::generator::Generator; +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::persist::PersistGenerator; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -impl Generator { +impl PersistGenerator { pub fn gen_locks_def(&self) -> TokenStream { let type_ = self.gen_locks_type(); let impl_ = self.gen_locks_impl(); @@ -190,4 +190,4 @@ impl Generator { } } } -} +} \ No newline at end of file diff --git a/codegen/src/generators/persist/mod.rs b/codegen/src/generators/persist/mod.rs new file mode 100644 index 00000000..a37da957 --- /dev/null +++ b/codegen/src/generators/persist/mod.rs @@ -0,0 +1,84 @@ +mod index; +mod locks; +mod primary_key; +mod queries; +mod row; +mod table; +mod wrapper; + +use proc_macro2::Ident; +use quote::quote; + +use crate::common::model::{Columns, Config, Queries}; + +pub struct PersistGenerator { + pub name: Ident, + pub columns: Columns, + pub pk: Option, + pub queries: Option, + pub config: Option, + pub version: u32, +} + +impl PersistGenerator { + pub fn new(name: Ident, columns: Columns, version: u32) -> Self { + Self { + name, + columns, + pk: None, + queries: None, + config: None, + version, + } + } + + pub fn set_queries(&mut self, queries: Queries) { + self.queries = Some(queries); + } + + pub fn set_config(&mut 
self, config: Config) { + self.config = Some(config); + } +} + +pub fn expand(name: proc_macro2::Ident, columns: crate::common::model::Columns, queries: Option, config: Option, version: u32) -> syn::Result { + let mut generator = PersistGenerator::new(name, columns, version); + if let Some(q) = queries { + generator.set_queries(q); + } + if let Some(c) = config { + generator.set_config(c); + } + + let pk_def = generator.gen_primary_key_def()?; + let row_def = generator.gen_row_def(); + let wrapper_def = generator.gen_wrapper_def(); + let locks_def = generator.gen_locks_def(); + let index_def = generator.gen_index_def()?; + let table_def = generator.gen_table_def()?; + let query_types_def = generator.gen_result_types_def()?; + let query_available_def = generator.gen_available_types_def()?; + let query_locks_impls = generator.gen_query_locks_impl()?; + let select_impls = generator.gen_query_select_impl()?; + let update_impls = generator.gen_query_update_impl()?; + let update_in_place_impls = generator.gen_query_in_place_impl()?; + let delete_impls = generator.gen_query_delete_impl()?; + let unsized_impl = generator.gen_unsized_impls(); + + Ok(quote! 
{ + #pk_def + #row_def + #query_available_def + #wrapper_def + #locks_def + #index_def + #table_def + #query_types_def + #query_locks_impls + #select_impls + #update_impls + #update_in_place_impls + #delete_impls + #unsized_impl + }) +} \ No newline at end of file diff --git a/codegen/src/generators/persist/primary_key.rs b/codegen/src/generators/persist/primary_key.rs new file mode 100644 index 00000000..16d59729 --- /dev/null +++ b/codegen/src/generators/persist/primary_key.rs @@ -0,0 +1,144 @@ +use std::collections::HashMap; + +use crate::common::name_generator::{WorktableNameGenerator, is_unsized_vec}; +use crate::common::model::{GeneratorType, PrimaryKey}; +use crate::generators::persist::PersistGenerator; + +use proc_macro2::{Ident, TokenStream}; +use quote::quote; + +impl PersistGenerator { + pub fn gen_primary_key_def(&mut self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_primary_key_type_ident(); + let values = self + .columns + .primary_keys + .iter() + .map(|i| { + ( + i.clone(), + self.columns + .columns_map + .get(i) + .expect("should exist as got from definition") + .clone(), + ) + }) + .collect::>(); + + let def = self.gen_primary_key_type(); + let impl_ = self.gen_table_primary_key_impl()?; + + self.pk = Some(PrimaryKey { ident, values }); + + Ok(quote! { + #def + #impl_ + }) + } + + fn gen_primary_key_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_primary_key_type_ident(); + + let types = &self + .columns + .primary_keys + .iter() + .map(|i| { + self.columns + .columns_map + .get(i) + .expect("should exist as got from definition") + }) + .collect::>(); + let unsized_derive = + if is_unsized_vec(&types.iter().map(|v| v.to_string()).collect::>()) { + quote! { + VariableSizeMeasure, + } + } else { + quote! {} + }; + + quote! 
{ + #[derive( + Clone, + rkyv::Archive, + Debug, + Default, + rkyv::Deserialize, + Hash, + rkyv::Serialize, + From, + Eq, + Into, + PartialEq, + PartialOrd, + Ord, + SizeMeasure, + MemStat, + #unsized_derive + )] + #[rkyv(derive(PartialEq, Eq, PartialOrd, Ord, Debug))] + pub struct #ident(#(#types),*); + } + } + + fn gen_table_primary_key_impl(&self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_primary_key_type_ident(); + + Ok(match self.columns.generator_type { + GeneratorType::None => { + quote! { + impl TablePrimaryKey for #ident { + type Generator = (); + } + } + } + GeneratorType::Autoincrement => { + let i = self + .columns + .primary_keys + .first() + .expect("at least one primary key should exist if autoincrement"); + let type_ = self + .columns + .columns_map + .get(i) + .expect("primary key column name always exists if in primary keys list"); + + let generator = Self::get_generator_from_type(type_, i)?; + quote! { + impl TablePrimaryKey for #ident { + type Generator = #generator; + } + } + } + GeneratorType::Custom => { + quote! {} + } + }) + } + + fn get_generator_from_type(type_: &TokenStream, i: &Ident) -> syn::Result { + Ok(match type_.to_string().as_str() { + "u8" => quote! { std::sync::atomic::AtomicU8 }, + "u16" => quote! { std::sync::atomic::AtomicU16 }, + "u32" => quote! { std::sync::atomic::AtomicU32 }, + "u64" => quote! { std::sync::atomic::AtomicU64 }, + "i8" => quote! { std::sync::atomic::AtomicI8 }, + "i16" => quote! { std::sync::atomic::AtomicI16 }, + "i32" => quote! { std::sync::atomic::AtomicI32 }, + "i64" => quote! 
{ std::sync::atomic::AtomicI64 }, + _ => { + return Err(syn::Error::new( + i.span(), + "Type is not supported for autoincrement", + )); + } + }) + } +} \ No newline at end of file diff --git a/codegen/src/generators/persist/queries/delete.rs b/codegen/src/generators/persist/queries/delete.rs new file mode 100644 index 00000000..d56ae8b4 --- /dev/null +++ b/codegen/src/generators/persist/queries/delete.rs @@ -0,0 +1,229 @@ +use std::collections::HashMap; + +use convert_case::{Case, Casing}; +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::persist::PersistGenerator; +use crate::common::model::Operation; + +impl PersistGenerator { + pub fn gen_query_delete_impl(&mut self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let table_ident = name_generator.get_work_table_ident(); + + let custom_deletes = if let Some(q) = &self.queries { + let custom_deletes = self.gen_custom_deletes(q.deletes.clone()); + quote! { + #custom_deletes + } + } else { + quote! {} + }; + let full_row_delete = self.gen_full_row_delete(); + let full_row_delete_without_lock = self.gen_full_row_delete_without_lock(); + + Ok(quote! { + impl #table_ident { + #full_row_delete + #full_row_delete_without_lock + #custom_deletes + } + }) + } + + fn gen_full_row_delete(&mut self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let pk_ident = name_generator.get_primary_key_type_ident(); + let delete_logic = self.gen_delete_logic(true); + let full_row_lock = self.gen_full_lock_for_update(); + + quote! 
{ + pub async fn delete(&self, pk: Pk) -> core::result::Result<(), WorkTableError> + where #pk_ident: From + { + let pk: #pk_ident = pk.into(); + let op_lock = { #full_row_lock }; + let _guard = LockGuard::new( + op_lock, + self.0.lock_manager.clone(), + pk.clone(), + ); + + #delete_logic + + core::result::Result::Ok(()) + } + } + } + + fn gen_full_row_delete_without_lock(&mut self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let pk_ident = name_generator.get_primary_key_type_ident(); + let delete_logic = self.gen_delete_logic(false); + + quote! { + pub async fn delete_without_lock(&self, pk: Pk) -> core::result::Result<(), WorkTableError> + where #pk_ident: From + { + let pk: #pk_ident = pk.into(); + #delete_logic + core::result::Result::Ok(()) + } + } + } + + fn gen_delete_logic(&self, is_locked: bool) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let pk_ident = name_generator.get_primary_key_type_ident(); + let secondary_events_ident = name_generator.get_space_secondary_index_events_ident(); + + let process = quote! { + let (secondary_keys_events, res) = self.0.indexes.delete_row_cdc(row, link); + res?; + let (_, primary_key_events) = self.0.primary_index.remove_cdc(pk.clone(), link); + self.0.data.delete(link).map_err(WorkTableError::PagesError)?; + let mut op: Operation< + <<#pk_ident as TablePrimaryKey>::Generator as PrimaryKeyGeneratorState>::State, + #pk_ident, + #secondary_events_ident + > = Operation::Delete(DeleteOperation { + id: uuid::Uuid::now_v7().into(), + secondary_keys_events, + primary_key_events, + link, + }); + self.1.apply_operation(op); + }; + + if is_locked { + quote! 
{ + let link = match self.0 + .primary_index + .pk_map + .get(&pk) + .map(|v| v.get().value.into()) + .ok_or(WorkTableError::NotFound) { + Ok(l) => l, + Err(e) => { + return Err(e); + } + }; + let row = self.0.select(pk.clone()).unwrap(); + #process + } + } else { + quote! { + let link = self.0 + .primary_index + .pk_map + .get(&pk) + .map(|v| v.get().value.into()) + .ok_or(WorkTableError::NotFound)?; + let row = self.0.select(pk.clone()).unwrap(); + #process + } + } + } + + fn gen_custom_deletes(&mut self, deleted: HashMap) -> TokenStream { + let defs = deleted + .iter() + .map(|(name, op)| { + let snake_case_name = name + .to_string() + .from_case(Case::Pascal) + .to_case(Case::Snake); + let method_ident = Ident::new( + format!("delete_{snake_case_name}").as_str(), + Span::mixed_site(), + ); + let index = self.columns.indexes.values().find(|idx| idx.field == op.by); + let type_ = self.columns.columns_map.get(&op.by).unwrap(); + if let Some(index) = index { + let index_name = &index.name; + + if index.is_unique { + Self::gen_unique_delete(type_, &method_ident, index_name) + } else { + Self::gen_non_unique_delete(type_, &method_ident, index_name) + } + } else { + Self::gen_brute_force_delete_field(&op.by, type_, &method_ident) + } + }) + .collect::>(); + + quote! { + #(#defs)* + } + } + + fn gen_brute_force_delete_field( + field: &Ident, + type_: &TokenStream, + name: &Ident, + ) -> TokenStream { + quote! { + pub async fn #name(&self, by: #type_) -> core::result::Result<(), WorkTableError> { + self.iter_with_async(|row| { + if row.#field == by { + futures::future::Either::Left(async move { + self.delete::<_>(row.get_primary_key()).await + }) + } else { + futures::future::Either::Right(async { + Ok(()) + }) + } + }).await?; + core::result::Result::Ok(()) + } + } + } + + fn gen_non_unique_delete(type_: &TokenStream, name: &Ident, index: &Ident) -> TokenStream { + let by = if is_float(type_.to_string().as_str()) { + quote! { + &OrderedFloat(by) + } + } else { + quote! 
{ + &by + } + }; + quote! { + pub async fn #name(&self, by: #type_) -> core::result::Result<(), WorkTableError> { + let rows_to_update = self.0.indexes.#index.get(#by).map(|kv| kv.1).collect::>(); + for link in rows_to_update { + let row = self.0.data.select_non_ghosted(link.0).map_err(WorkTableError::PagesError)?; + self.delete(row.get_primary_key()).await?; + } + core::result::Result::Ok(()) + } + } + } + + fn gen_unique_delete(type_: &TokenStream, name: &Ident, index: &Ident) -> TokenStream { + let by = if is_float(type_.to_string().as_str()) { + quote! { + &OrderedFloat(by) + } + } else { + quote! { + &by + } + }; + quote! { + pub async fn #name(&self, by: #type_) -> core::result::Result<(), WorkTableError> { + let row_to_update = self.0.indexes.#index.get(#by).map(|v| v.get().value.into()); + if let Some(link) = row_to_update { + let row = self.0.data.select_non_ghosted(link).map_err(WorkTableError::PagesError)?; + self.delete(row.get_primary_key()).await?; + } + core::result::Result::Ok(()) + } + } + } +} diff --git a/codegen/src/generators/persist/queries/in_place.rs b/codegen/src/generators/persist/queries/in_place.rs new file mode 100644 index 00000000..1e6bce4e --- /dev/null +++ b/codegen/src/generators/persist/queries/in_place.rs @@ -0,0 +1,137 @@ +use proc_macro2::TokenStream; +use quote::quote; + +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::persist::PersistGenerator; +use crate::common::model::Operation; +use convert_case::{Case, Casing}; +use proc_macro2::{Ident, Span}; +use std::collections::HashMap; + +impl PersistGenerator { + pub fn gen_query_in_place_impl(&self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let table_ident = name_generator.get_work_table_ident(); + + let custom_in_place = if let Some(q) = &self.queries { + let custom_in_place = self.gen_in_place_queries(q.in_place.clone()); + quote! { + #custom_in_place + } + } else { + quote! 
{} + }; + + Ok(quote! { + impl #table_ident { + #custom_in_place + } + }) + } + + fn gen_in_place_queries(&self, in_place_queries: HashMap) -> TokenStream { + let defs = in_place_queries + .iter() + .map(|(name, op)| { + let snake_case_name = name + .to_string() + .from_case(Case::Pascal) + .to_case(Case::Snake); + let index = self.columns.indexes.values().find(|idx| idx.field == op.by); + if let Some(index) = index { + let _index_name = &index.name; + + if index.is_unique { todo!() } else { todo!() } + } else if self.columns.primary_keys.len() == 1 { + self.gen_primary_key_in_place(snake_case_name, &op.columns) + } else { + todo!() + } + }) + .collect::>(); + + quote! { + #(#defs)* + } + } + + fn gen_primary_key_in_place(&self, snake_case_name: String, columns: &[Ident]) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let pk_type = name_generator.get_primary_key_type_ident(); + let lock_ident = + WorktableNameGenerator::get_update_in_place_query_lock_ident(&snake_case_name); + + let method_ident = Ident::new( + format!("update_{snake_case_name}_in_place").as_str(), + Span::mixed_site(), + ); + + let types = columns + .iter() + .map(|c| self.columns.columns_map.get(c).unwrap()) + .collect::>(); + let column_types = if types.len() == 1 { + let t = types[0]; + quote! { + &mut <#t as rkyv::Archive>::Archived + } + } else { + let types = types.iter().map(|t| { + quote! { + &mut <#t as rkyv::Archive>::Archived + } + }); + quote! { + ( #(#types),* ) + } + }; + let column_fields = if columns.len() == 1 { + let i = &columns[0]; + quote! { + &mut archived.inner.#i + } + } else { + let columns = columns.iter().map(|i| { + quote! { + &mut archived.inner.#i + } + }); + quote! { + ( #(#columns),* ) + } + }; + let custom_lock = self.gen_custom_lock_for_update(lock_ident); + + quote! 
{ + pub async fn #method_ident( + &self, + mut f: F, + by: Pk, + ) -> eyre::Result<()> + where #pk_type: From + { + let pk: #pk_type = by.into(); + let op_lock = { #custom_lock }; + let _guard = LockGuard::new( + op_lock, + self.0.lock_manager.clone(), + pk.clone(), + ); + let link = self + .0 + .primary_index.pk_map + .get(&pk) + .map(|v| v.get().value.into()) + .ok_or(WorkTableError::NotFound)?; + unsafe { + self.0 + .data + .with_mut_ref(link, move |archived| f(#column_fields)) + .map_err(WorkTableError::PagesError)? + }; + + Ok(()) + } + } + } +} diff --git a/codegen/src/generators/persist/queries/locks.rs b/codegen/src/generators/persist/queries/locks.rs new file mode 100644 index 00000000..49f7e311 --- /dev/null +++ b/codegen/src/generators/persist/queries/locks.rs @@ -0,0 +1,175 @@ +use std::collections::HashMap; + +use convert_case::{Case, Casing}; +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::persist::PersistGenerator; +use crate::common::model::Operation; + +impl PersistGenerator { + pub fn gen_query_locks_impl(&mut self) -> syn::Result { + if let Some(q) = &self.queries { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let lock_type_ident = name_generator.get_lock_type_ident(); + + let update_fns = Self::gen_update_query_locks(&q.updates); + let update_in_place_fns = Self::gen_in_place_update_query_locks(&q.in_place); + + Ok(quote! { + impl #lock_type_ident { + #update_fns + #update_in_place_fns + } + }) + } else { + Ok(quote! 
{}) + } + } + + fn gen_in_place_update_query_locks(updates: &HashMap) -> TokenStream { + let fns = updates + .keys() + .map(|name| { + let snake_case_name = name + .to_string() + .from_case(Case::Pascal) + .to_case(Case::Snake); + + let lock_ident = + WorktableNameGenerator::get_update_in_place_query_lock_ident(&snake_case_name); + + let columns = &updates.get(name).as_ref().expect("exists").columns; + let lock_fn = Self::gen_rows_lock_fn(columns, lock_ident); + + quote! { + #lock_fn + } + }) + .collect::>(); + + quote! { + #(#fns)* + } + } + + fn gen_update_query_locks(updates: &HashMap) -> TokenStream { + let fns = updates + .keys() + .map(|name| { + let snake_case_name = name + .to_string() + .from_case(Case::Pascal) + .to_case(Case::Snake); + + let lock_ident = + WorktableNameGenerator::get_update_query_lock_ident(&snake_case_name); + + let columns = &updates.get(name).as_ref().expect("exists").columns; + let lock_fn = Self::gen_rows_lock_fn(columns, lock_ident); + + quote! { + #lock_fn + } + }) + .collect::>(); + + quote! { + #(#fns)* + } + } + + fn gen_rows_lock_fn(columns: &[Ident], ident: Ident) -> TokenStream { + let inner = columns + .iter() + .map(|col| { + let col = Ident::new(format!("{col}_lock").as_str(), Span::mixed_site()); + quote! { + if let Some(lock) = &self.#col { + set.insert(lock.clone()); + } + self.#col = Some(new_lock.clone()); + } + }) + .collect::>(); + + quote! { + #[allow(clippy::mutable_key_type)] + pub fn #ident(&mut self, id: u16) -> (std::collections::HashSet>, std::sync::Arc) { + let mut set = std::collections::HashSet::new(); + let new_lock = std::sync::Arc::new(Lock::new(id)); + #(#inner)* + (set, new_lock) + } + } + } + + pub fn gen_full_lock_for_update(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let lock_ident = name_generator.get_lock_type_ident(); + + quote! 
{ + let lock_id = self.0.lock_manager.next_id(); + if let Some(lock) = self.0.lock_manager.get(&pk) { + let mut lock_guard = lock.write().await; + #[allow(clippy::mutable_key_type)] + let (locks, op_lock) = lock_guard.lock(lock_id); + drop(lock_guard); + futures::future::join_all(locks.iter().map(|l| l.wait()).collect::>()).await; + + op_lock + } else { + #[allow(clippy::mutable_key_type)] + let (lock, op_lock) = #lock_ident::with_lock(lock_id); + let lock = std::sync::Arc::new(tokio::sync::RwLock::new(lock)); + let mut guard = lock.write().await; + if let Some(old_lock) = self.0.lock_manager.insert(pk.clone(), lock.clone()) { + let mut old_lock_guard = old_lock.write().await; + #[allow(clippy::mutable_key_type)] + let locks = guard.merge(&mut *old_lock_guard); + drop(old_lock_guard); + drop(guard); + + futures::future::join_all(locks.iter().map(|l| l.wait()).collect::>()).await; + } + + op_lock + } + } + } + + pub fn gen_custom_lock_for_update(&self, ident: Ident) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let lock_ident = name_generator.get_lock_type_ident(); + + quote! 
{ + let lock_id = self.0.lock_manager.next_id(); + if let Some(lock) = self.0.lock_manager.get(&pk) { + let mut lock_guard = lock.write().await; + #[allow(clippy::mutable_key_type)] + let (locks, op_lock) = lock_guard.#ident(lock_id); + drop(lock_guard); + futures::future::join_all(locks.iter().map(|l| l.wait()).collect::>()).await; + op_lock + } else { + let mut lock = #lock_ident::new(); + #[allow(clippy::mutable_key_type)] + let (_, op_lock) = lock.#ident(lock_id); + let lock = std::sync::Arc::new(tokio::sync::RwLock::new(lock)); + let mut guard = lock.write().await; + if let Some(old_lock) = self.0.lock_manager.insert(pk.clone(), lock.clone()) { + let mut old_lock_guard = old_lock.write().await; + #[allow(clippy::mutable_key_type)] + let locks = guard.merge(&mut *old_lock_guard); + drop(old_lock_guard); + drop(guard); + + futures::future::join_all(locks.iter().map(|l| l.wait()).collect::>()).await; + } + + op_lock + } + } + } +} diff --git a/codegen/src/worktable/generator/queries/mod.rs b/codegen/src/generators/persist/queries/mod.rs similarity index 86% rename from codegen/src/worktable/generator/queries/mod.rs rename to codegen/src/generators/persist/queries/mod.rs index 83f7a274..8dd5b19e 100644 --- a/codegen/src/worktable/generator/queries/mod.rs +++ b/codegen/src/generators/persist/queries/mod.rs @@ -1,7 +1,7 @@ +pub mod r#type; mod delete; mod in_place; mod locks; mod select; -pub mod r#type; mod unsized_; -mod update; +mod update; \ No newline at end of file diff --git a/codegen/src/worktable/generator/queries/select.rs b/codegen/src/generators/persist/queries/select.rs similarity index 91% rename from codegen/src/worktable/generator/queries/select.rs rename to codegen/src/generators/persist/queries/select.rs index 91873e9d..567b8ada 100644 --- a/codegen/src/worktable/generator/queries/select.rs +++ b/codegen/src/generators/persist/queries/select.rs @@ -1,9 +1,9 @@ -use crate::name_generator::WorktableNameGenerator; -use 
crate::worktable::generator::Generator; +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::persist::PersistGenerator; use proc_macro2::TokenStream; use quote::quote; -impl Generator { +impl PersistGenerator { pub fn gen_query_select_impl(&mut self) -> syn::Result { let select_all = self.gen_select_all(); @@ -37,4 +37,4 @@ impl Generator { } } } -} +} \ No newline at end of file diff --git a/codegen/src/worktable/generator/queries/type.rs b/codegen/src/generators/persist/queries/type.rs similarity index 97% rename from codegen/src/worktable/generator/queries/type.rs rename to codegen/src/generators/persist/queries/type.rs index a0c93e3f..1758671d 100644 --- a/codegen/src/worktable/generator/queries/type.rs +++ b/codegen/src/generators/persist/queries/type.rs @@ -3,8 +3,8 @@ use std::collections::HashSet; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -use crate::name_generator::WorktableNameGenerator; -use crate::worktable::generator::Generator; +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::persist::PersistGenerator; pub fn map_to_uppercase(str: &str) -> String { if str.contains("OrderedFloat") { @@ -28,7 +28,7 @@ pub fn map_to_uppercase(str: &str) -> String { } } -impl Generator { +impl PersistGenerator { pub fn gen_available_types_def(&mut self) -> syn::Result { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); let avt_type_ident = name_generator.get_available_type_ident(); @@ -170,4 +170,4 @@ impl Generator { Ok(quote! 
{}) } } -} +} \ No newline at end of file diff --git a/codegen/src/worktable/generator/queries/unsized_.rs b/codegen/src/generators/persist/queries/unsized_.rs similarity index 96% rename from codegen/src/worktable/generator/queries/unsized_.rs rename to codegen/src/generators/persist/queries/unsized_.rs index a1392d41..63510db6 100644 --- a/codegen/src/worktable/generator/queries/unsized_.rs +++ b/codegen/src/generators/persist/queries/unsized_.rs @@ -1,10 +1,10 @@ use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -use crate::name_generator::WorktableNameGenerator; -use crate::worktable::generator::Generator; +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::persist::PersistGenerator; -impl Generator { +impl PersistGenerator { pub fn gen_unsized_impls(&self) -> TokenStream { if self.columns.is_sized { quote! {} diff --git a/codegen/src/generators/persist/queries/update.rs b/codegen/src/generators/persist/queries/update.rs new file mode 100644 index 00000000..6a32fe2e --- /dev/null +++ b/codegen/src/generators/persist/queries/update.rs @@ -0,0 +1,712 @@ +use proc_macro2::Literal; +use std::collections::HashMap; + +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::persist::PersistGenerator; +use crate::common::model::Operation; +use convert_case::{Case, Casing}; +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +impl PersistGenerator { + pub fn gen_query_update_impl(&mut self) -> syn::Result { + let custom_updates = if let Some(q) = &self.queries { + let custom_updates = self.gen_custom_updates(q.updates.clone()); + + quote! { + #custom_updates + } + } else { + quote! {} + }; + let full_row_update = self.gen_full_row_update(); + + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let table_ident = name_generator.get_work_table_ident(); + Ok(quote! 
{ + impl #table_ident { + #full_row_update + #custom_updates + } + }) + } + + fn gen_full_row_update(&mut self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_ident = name_generator.get_row_type_ident(); + + let row_updates = self + .columns + .columns_map + .keys() + .map(|i| { + quote! { + std::mem::swap(&mut archived.inner.#i, &mut archived_row.#i); + } + }) + .collect::>(); + + let idents: Vec<_> = self + .columns + .indexes + .values() + .map(|idx| idx.field.clone()) + .collect(); + + let diff_process_insert = + self.gen_process_diffs_insert_on_index(idents.as_slice(), Some(&idents)); + let diff_process_remove = self.gen_process_diffs_remove_on_index(Some(&idents)); + let persist_call = self.gen_persist_call(); + let persist_op = self.gen_persist_op(); + let full_row_lock = self.gen_full_lock_for_update(); + let size_check = if self.columns.is_sized { + quote! {} + } else { + quote! { + if true { + drop(_guard); + let op_lock = { #full_row_lock }; + let _guard = LockGuard::new( + op_lock, + self.0.lock_manager.clone(), + pk.clone(), + ); + let row_old = self.0.data.select_non_ghosted(link)?; + if let Err(e) = self.reinsert(row_old, row).await { + self.0.update_state.remove(&pk); + + return Err(e); + } + + self.0.update_state.remove(&pk); + + return core::result::Result::Ok(()); + } + } + }; + + quote! 
{ + pub async fn update(&self, row: #row_ident) -> core::result::Result<(), WorkTableError> { + let pk = row.get_primary_key(); + let op_lock = { #full_row_lock }; + let _guard = LockGuard::new( + op_lock, + self.0.lock_manager.clone(), + pk.clone(), + ); + + let mut link: Link = self.0 + .primary_index + .pk_map + .get(&pk) + .map(|v| v.get().value.into()) + .ok_or(WorkTableError::NotFound)?; + + let row_old = self.0.data.select_non_ghosted(link)?; + self.0.update_state.insert(pk.clone(), row_old); + + let mut bytes = rkyv::to_bytes::(&row).map_err(|_| WorkTableError::SerializeError)?; + #size_check + + let mut archived_row = unsafe { rkyv::access_unchecked_mut::<<#row_ident as rkyv::Archive>::Archived>(&mut bytes[..]).unseal_unchecked() }; + + let op_id = OperationId::Single(uuid::Uuid::now_v7()); + #diff_process_insert + #persist_op + + unsafe { self.0.data.with_mut_ref(link, move |archived| { + #(#row_updates)* + }).map_err(WorkTableError::PagesError)? }; + + #diff_process_remove + + self.0.update_state.remove(&pk); + + #persist_call + + core::result::Result::Ok(()) + } + } + } + + fn gen_custom_updates(&mut self, updates: HashMap) -> TokenStream { + let defs = updates + .iter() + .map(|(name, op)| { + let snake_case_name = name + .to_string() + .from_case(Case::Pascal) + .to_case(Case::Snake); + let index = self.columns.indexes.values().find(|idx| idx.field == op.by); + + let indexes_columns: Option> = { + let columns: Vec<_> = self + .columns + .indexes + .values() + .filter(|idx| op.columns.contains(&idx.field)) + .map(|idx| idx.field.clone()) + .collect(); + + if columns.is_empty() { + None + } else { + Some(columns) + } + }; + let unsized_columns = if self.columns.is_sized { + None + } else { + let fields = op + .columns + .iter() + .filter(|c| { + self.columns.columns_map.get(c).unwrap().to_string() == "String" + }) + .collect::>(); + if fields.is_empty() { + None + } else { + Some(fields) + } + }; + + let idents = &op.columns; + if let Some(index) = 
index { + let index_name = &index.name; + + if index.is_unique { + self.gen_unique_update( + snake_case_name, + name, + index_name, + idents, + indexes_columns.as_ref(), + unsized_columns, + ) + } else { + self.gen_non_unique_update( + snake_case_name, + name, + index_name, + idents, + indexes_columns.as_ref(), + unsized_columns, + ) + } + } else if self.columns.primary_keys.len() == 1 { + if *self.columns.primary_keys.first().unwrap() == op.by { + self.gen_pk_update( + snake_case_name, + name, + idents, + indexes_columns.as_ref(), + unsized_columns, + ) + } else { + todo!() + } + } else { + todo!() + } + }) + .collect::>(); + + quote! { + #(#defs)* + } + } + + fn gen_persist_call(&self) -> TokenStream { + quote! { + if let Operation::Update(op) = &mut op { + op.bytes = self.0.data.select_raw(link)?; + } else { + unreachable!("") + }; + self.1.apply_operation(op); + } + } + + fn gen_size_check(&self, unsized_fields: Option>, idents: &[Ident]) -> TokenStream { + if let Some(f) = unsized_fields { + let fields_check: Vec<_> = f + .iter() + .map(|f| { + let fn_ident = Ident::new(format!("get_{f}_size").as_str(), Span::call_site()); + quote! { + need_to_reinsert |= archived_row.#fn_ident() != self.#fn_ident(link)?; + } + }) + .collect(); + let row_updates = idents + .iter() + .map(|i| { + quote! { + row_new.#i = row.#i; + } + }) + .collect::>(); + let full_row_lock = self.gen_full_lock_for_update(); + + quote! { + let mut need_to_reinsert = true; + #(#fields_check)* + if need_to_reinsert { + drop(_guard); + let op_lock = { #full_row_lock }; + let _guard = LockGuard::new( + op_lock, + self.0.lock_manager.clone(), + pk.clone(), + ); + + let row_old = self.0.select(pk.clone()).expect("should not be deleted by other thread"); + let mut row_new = row_old.clone(); + #(#row_updates)* + if let Err(e) = self.reinsert(row_old, row_new).await { + self.0.update_state.remove(&pk); + + return Err(e); + } + + return core::result::Result::Ok(()); + } + } + } else { + quote! 
{} + } + } + + fn gen_persist_op(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let secondary_events_ident = name_generator.get_space_secondary_index_events_ident(); + let primary_key_ident = name_generator.get_primary_key_type_ident(); + + quote! { + let mut op: Operation< + <<#primary_key_ident as TablePrimaryKey>::Generator as PrimaryKeyGeneratorState>::State, + #primary_key_ident, + #secondary_events_ident + > = Operation::Update(UpdateOperation { + id: op_id, + secondary_keys_events, + bytes: updated_bytes, + link, + }); + } + } + + fn gen_process_diffs_insert_on_index( + &self, + idents: &[Ident], + idx_idents: Option<&Vec>, + ) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = name_generator.get_available_type_ident(); + let diff_container = if idx_idents.is_some() { + quote! { + let row_old = self.0.data.select_non_ghosted(link)?; + let row_new = row.clone(); + let updated_bytes: Vec = vec![]; + let mut diffs: std::collections::HashMap<&str, Difference<#avt_type_ident>> = std::collections::HashMap::new(); + } + } else { + quote! { + let updated_bytes: Vec = vec![]; + } + }; + + let diff = if let Some(idx_idents) = idx_idents { + idents + .iter() + .filter(|i| idx_idents.contains(i)) + .map(|i| { + let diff_key = Literal::string(i.to_string().as_str()); + quote! { + let old = &row_old.#i; + let new = &row_new.#i; + + if old != new { + let diff = Difference::<#avt_type_ident> { + old: old.clone().into(), + new: new.clone().into(), + }; + + diffs.insert(#diff_key, diff); + } + } + }) + .collect::>() + } else { + vec![] + }; + + let process_difference = { + let secondary_events_ident = name_generator.get_space_secondary_index_events_ident(); + if idx_idents.is_some() { + quote! 
{ + let (secondary_events, indexes_res): (#secondary_events_ident, _) = self.0.indexes.process_difference_insert_cdc(link, diffs.clone()); + if let Err(e) = indexes_res { + return match e { + IndexError::AlreadyExists { + at, + inserted_already, + } => { + let (rollback_secondary_events, _): (#secondary_events_ident, _) = self.0.indexes.delete_from_indexes_cdc( + row_new.merge(row_old.clone()), + link, + inserted_already + ); + + let mut merged_events = secondary_events.clone(); + merged_events.extend(rollback_secondary_events); + + let ack_op = Operation::Acknowledge(AcknowledgeOperation { + id: OperationId::Single(uuid::Uuid::now_v7()), + primary_key_events: vec![], + secondary_keys_events: merged_events, + }); + self.1.apply_operation(ack_op); + + Err(WorkTableError::AlreadyExists(at.to_string_value())) + } + IndexError::NotFound => Err(WorkTableError::NotFound), + }; + } + let mut secondary_keys_events = secondary_events; + } + } else { + quote! { + let secondary_keys_events: #secondary_events_ident = core::default::Default::default(); + } + } + }; + + quote! { + #diff_container + #(#diff)* + #process_difference + } + } + + fn gen_process_diffs_remove_on_index(&self, idx_idents: Option<&Vec>) -> TokenStream { + if idx_idents.is_some() { + quote! { + let (secondary_keys_events_remove, res) = self.0.indexes.process_difference_remove_cdc(link, diffs); + res?; + op.extend_secondary_key_events(secondary_keys_events_remove); + } + } else { + quote! 
{} + } + } + + fn gen_pk_update( + &self, + snake_case_name: String, + name: &Ident, + idents: &[Ident], + idx_idents: Option<&Vec>, + unsized_fields: Option>, + ) -> TokenStream { + let pk_ident = &self.pk.as_ref().unwrap().ident; + let method_ident = Ident::new( + format!("update_{snake_case_name}").as_str(), + Span::mixed_site(), + ); + let query_ident = Ident::new(format!("{name}Query").as_str(), Span::mixed_site()); + let lock_ident = WorktableNameGenerator::get_update_query_lock_ident(&snake_case_name); + + let row_updates = idents + .iter() + .map(|i| { + quote! { + std::mem::swap(&mut archived.inner.#i, &mut archived_row.#i); + } + }) + .collect::>(); + + let size_check = self.gen_size_check(unsized_fields, idents); + let diff_process_insert = self.gen_process_diffs_insert_on_index(idents, idx_idents); + let diff_process_remove = self.gen_process_diffs_remove_on_index(idx_idents); + let persist_call = self.gen_persist_call(); + let persist_op = self.gen_persist_op(); + let custom_lock = self.gen_custom_lock_for_update(lock_ident); + + quote! { + pub async fn #method_ident(&self, row: #query_ident, pk: Pk) -> core::result::Result<(), WorkTableError> + where #pk_ident: From + { + let pk = pk.into(); + let op_lock = { #custom_lock }; + let _guard = LockGuard::new( + op_lock, + self.0.lock_manager.clone(), + pk.clone(), + ); + + let mut link: Link = self.0 + .primary_index + .pk_map + .get(&pk) + .map(|v| v.get().value.into()) + .ok_or(WorkTableError::NotFound)?; + + let mut bytes = rkyv::to_bytes::(&row).map_err(|_| WorkTableError::SerializeError)?; + let mut archived_row = unsafe { rkyv::access_unchecked_mut::<<#query_ident as rkyv::Archive>::Archived>(&mut bytes[..]).unseal_unchecked() }; + + let op_id = OperationId::Single(uuid::Uuid::now_v7()); + #size_check + #diff_process_insert + #persist_op + + unsafe { self.0.data.with_mut_ref(link, |archived| { + #(#row_updates)* + }).map_err(WorkTableError::PagesError)? 
}; + + #diff_process_remove + + #persist_call + + core::result::Result::Ok(()) + } + } + } + + fn gen_non_unique_update( + &self, + snake_case_name: String, + name: &Ident, + index: &Ident, + idents: &[Ident], + idx_idents: Option<&Vec>, + unsized_fields: Option>, + ) -> TokenStream { + let method_ident = Ident::new( + format!("update_{snake_case_name}").as_str(), + Span::mixed_site(), + ); + + let query_ident = Ident::new(format!("{name}Query").as_str(), Span::mixed_site()); + let by_ident = Ident::new(format!("{name}By").as_str(), Span::mixed_site()); + let lock_ident = WorktableNameGenerator::get_update_query_lock_ident(&snake_case_name); + + let row_updates = idents + .iter() + .map(|i| { + quote! { + std::mem::swap(&mut archived.inner.#i, &mut archived_row.#i); + } + }) + .collect::>(); + + let size_check = if let Some(f) = unsized_fields { + let fields_check: Vec<_> = f + .iter() + .map(|f| { + let fn_ident = Ident::new(format!("get_{f}_size").as_str(), Span::call_site()); + quote! { + need_to_reinsert |= archived_row.#fn_ident() != self.#fn_ident(link)?; + } + }) + .collect(); + let row_updates = idents + .iter() + .map(|i| { + quote! { + row_new.#i = row.#i.clone(); + } + }) + .collect::>(); + let full_row_lock = self.gen_full_lock_for_update(); + + quote! { + let mut need_to_reinsert = true; + #(#fields_check)* + if need_to_reinsert { + let old_guard = guards.remove(&pk).expect("guard should exist for this pk"); + drop(old_guard); + + let op_lock = { #full_row_lock }; + let _guard = LockGuard::new( + op_lock, + self.0.lock_manager.clone(), + pk.clone(), + ); + let row_old = self.0.select(pk.clone()).expect("should not be deleted by other thread"); + let mut row_new = row_old.clone(); + #(#row_updates)* + if let Err(e) = self.reinsert(row_old, row_new).await { + self.0.update_state.remove(&pk); + return Err(e); + } + + continue; + } + } + } else { + quote! 
{} + }; + let diff_process_insert = self.gen_process_diffs_insert_on_index(idents, idx_idents); + let diff_process_remove = self.gen_process_diffs_remove_on_index(idx_idents); + let persist_call = self.gen_persist_call(); + let persist_op = self.gen_persist_op(); + let by = if is_float(by_ident.to_string().as_str()) { + quote! { + &OrderedFloat(by) + } + } else { + quote! { + &by + } + }; + let custom_lock = self.gen_custom_lock_for_update(lock_ident); + + quote! { + pub async fn #method_ident(&self, row: #query_ident, by: #by_ident) -> core::result::Result<(), WorkTableError> { + let links: Vec<_> = self.0.indexes.#index.get(#by).map(|(_, l)| l.0).collect(); + + let mut guards: std::collections::HashMap<_, _> = std::collections::HashMap::new(); + for link in links.iter() { + let pk = self.0.data.select_non_ghosted(*link)?.get_primary_key().clone(); + let op_lock = { #custom_lock }; + guards.insert(pk.clone(), LockGuard::new(op_lock, self.0.lock_manager.clone(), pk.clone())); + } + + let links: Vec<_> = self.0.indexes.#index.get(#by).map(|(_, l)| l.0).collect(); + let op_id = OperationId::Multi(uuid::Uuid::now_v7()); + for link in links.into_iter() { + let pk = self.0.data.select_non_ghosted(link)?.get_primary_key().clone(); + let mut bytes = rkyv::to_bytes::(&row) + .map_err(|_| WorkTableError::SerializeError)?; + + let mut archived_row = unsafe { + rkyv::access_unchecked_mut::<<#query_ident as rkyv::Archive>::Archived>(&mut bytes[..]) + .unseal_unchecked() + }; + + #size_check + #diff_process_insert + #persist_op + + unsafe { + self.0.data.with_mut_ref(link, |archived| { + #(#row_updates)* + }).map_err(WorkTableError::PagesError)?; + } + + #diff_process_remove + + #persist_call + + guards.remove(&pk); + } + core::result::Result::Ok(()) + } + } + } + + fn gen_unique_update( + &self, + snake_case_name: String, + name: &Ident, + index: &Ident, + idents: &[Ident], + idx_idents: Option<&Vec>, + unsized_fields: Option>, + ) -> TokenStream { + let method_ident = 
Ident::new( + format!("update_{snake_case_name}").as_str(), + Span::mixed_site(), + ); + + let query_ident = Ident::new(format!("{name}Query").as_str(), Span::mixed_site()); + let by_ident = Ident::new(format!("{name}By").as_str(), Span::mixed_site()); + let lock_ident = WorktableNameGenerator::get_update_query_lock_ident(&snake_case_name); + + let row_updates = idents + .iter() + .map(|i| { + quote! { + std::mem::swap(&mut archived.inner.#i, &mut archived_row.#i); + } + }) + .collect::>(); + let size_check = self.gen_size_check(unsized_fields, idents); + let diff_process_insert = self.gen_process_diffs_insert_on_index(idents, idx_idents); + let diff_process_remove = self.gen_process_diffs_remove_on_index(idx_idents); + let persist_call = self.gen_persist_call(); + let persist_op = self.gen_persist_op(); + let by = if is_float(by_ident.to_string().as_str()) { + quote! { + &OrderedFloat(by) + } + } else { + quote! { + &by + } + }; + let custom_lock = self.gen_custom_lock_for_update(lock_ident); + + quote! 
{ + pub async fn #method_ident(&self, row: #query_ident, by: #by_ident) -> core::result::Result<(), WorkTableError> { + let mut bytes = rkyv::to_bytes::(&row) + .map_err(|_| WorkTableError::SerializeError)?; + + let mut archived_row = unsafe { + rkyv::access_unchecked_mut::<<#query_ident as rkyv::Archive>::Archived>(&mut bytes[..]) + .unseal_unchecked() + }; + + let mut link: Link = self.0.indexes + .#index + .get(#by) + .map(|v| v.get().value.into()) + .ok_or(WorkTableError::NotFound)?; + + let pk = self.0.data.select_non_ghosted(link)?.get_primary_key().clone(); + + let op_lock = { #custom_lock }; + let _guard = LockGuard::new( + op_lock, + self.0.lock_manager.clone(), + pk.clone(), + ); + + let link = loop { + let link = self.0.indexes.#index + .get(#by) + .map(|v| v.get().value.into()) + .ok_or(WorkTableError::NotFound)?; + + if let Err(e) = self.0.data.select_non_vacuumed(link) { + if e.is_vacuumed() { + continue; + } + return Err(e.into()); + } else { + break link; + } + }; + + let op_id = OperationId::Single(uuid::Uuid::now_v7()); + #size_check + #diff_process_insert + #persist_op + + unsafe { + self.0.data.with_mut_ref(link, |archived| { + #(#row_updates)* + }).map_err(WorkTableError::PagesError)?; + } + + #diff_process_remove + + #persist_call + + core::result::Result::Ok(()) + } + } + } +} diff --git a/codegen/src/generators/persist/row.rs b/codegen/src/generators/persist/row.rs new file mode 100644 index 00000000..c03ef280 --- /dev/null +++ b/codegen/src/generators/persist/row.rs @@ -0,0 +1,136 @@ +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::persist::PersistGenerator; +use convert_case::{Case, Casing}; +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +impl PersistGenerator { + pub fn gen_row_def(&mut self) -> TokenStream { + let def = self.gen_row_type(); + let table_row_impl = self.gen_row_table_row_impl(); + let row_fields_enum = self.gen_row_fields_enum(); + let query_impl = 
self.gen_query_impl(); + + quote! { + #def + #table_row_impl + #row_fields_enum + #query_impl + } + } + + fn gen_row_table_row_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_row_type_ident(); + let primary_key_ident = name_generator.get_primary_key_type_ident(); + + let primary_key = self + .pk + .clone() + .expect("should be set in `Generator` at this point"); + let primary_key_columns_clone = if primary_key.values.len() == 1 { + let pk_field = primary_key + .values + .keys() + .next() + .expect("should exist as length is checked"); + quote! { + self.#pk_field.clone().into() + } + } else { + let vals = primary_key + .values + .keys() + .map(|i| { + quote! { + self.#i.clone() + } + }) + .collect::>(); + quote! { + (#(#vals),*).into() + } + }; + + quote! { + impl TableRow<#primary_key_ident> for #ident { + + fn get_primary_key(&self) -> #primary_key_ident { + #primary_key_columns_clone + } + } + } + } + + fn gen_row_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_row_type_ident(); + + let mut rows = vec![quote! {}; self.columns.field_positions.len()]; + for (i, pos) in &self.columns.field_positions { + let type_ = self.columns.columns_map.get(i).unwrap(); + rows[*pos] = quote! {pub #i: #type_,} + } + + let custom_derives = + if let Some(custom_derives) = &self.config.as_ref().map(|c| &c.row_derives) { + quote! { + #[derive(#(#custom_derives),*)] + } + } else { + quote! {} + }; + + quote! 
{ + #[derive(rkyv::Archive, Debug, rkyv::Deserialize, Clone, rkyv::Serialize, PartialEq, MemStat)] + #custom_derives + #[rkyv(derive(Debug))] + #[repr(C)] + pub struct #ident { + #(#rows)* + } + } + } + + fn gen_query_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_row_type_ident(); + + quote! { + impl Query<#ident> for #ident { + fn merge(self, row: #ident) -> #ident { + self + } + } + } + } + + fn gen_row_fields_enum(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_row_type_ident(); + + let enum_name = Ident::new(format!("{ident}Fields").as_str(), Span::mixed_site()); + + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|name| { + let name_pascal = Ident::new( + name.to_string().to_case(Case::Pascal).as_str(), + Span::mixed_site(), + ); + quote! { #name_pascal, } + }) + .collect(); + + quote! 
{ + #[derive(rkyv::Archive, Debug, rkyv::Deserialize, Clone, rkyv::Serialize, PartialEq)] + #[rkyv(derive(Debug))] + #[repr(C)] + pub enum #enum_name { + #(#rows)* + } + } + } +} \ No newline at end of file diff --git a/codegen/src/worktable/generator/table/impls.rs b/codegen/src/generators/persist/table/impls.rs similarity index 65% rename from codegen/src/worktable/generator/table/impls.rs rename to codegen/src/generators/persist/table/impls.rs index 172a85d5..5f6c845f 100644 --- a/codegen/src/worktable/generator/table/impls.rs +++ b/codegen/src/generators/persist/table/impls.rs @@ -1,17 +1,18 @@ use proc_macro2::TokenStream; use quote::quote; -use crate::name_generator::{WorktableNameGenerator, is_unsized_vec}; -use crate::worktable::generator::Generator; -use crate::worktable::model::GeneratorType; +use crate::common::name_generator::{WorktableNameGenerator, is_unsized_vec}; +use crate::generators::persist::PersistGenerator; +use crate::common::model::GeneratorType; -impl Generator { +impl PersistGenerator { pub fn gen_table_impl(&self) -> TokenStream { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); let ident = name_generator.get_work_table_ident(); let persisted_impl = self.gen_table_new_fn(); let name_fn = self.gen_table_name_fn(); + let version_fn = self.gen_table_version_fn(); let select_fn = self.gen_table_select_fn(); let insert_fn = self.gen_table_insert_fn(); let reinsert_fn = self.gen_table_reinsert_fn(); @@ -27,6 +28,7 @@ impl Generator { #persisted_impl impl #ident { #name_fn + #version_fn #select_fn #insert_fn #reinsert_fn @@ -52,72 +54,69 @@ impl Generator { let secondary_index_events = name_generator.get_space_secondary_index_events_ident(); let avt_index_ident = name_generator.get_available_indexes_ident(); - if self.is_persist { - let pk_types = &self - .columns - .primary_keys - .iter() - .map(|i| { - self.columns - .columns_map - .get(i) - .expect("should exist as got from definition") - .to_string() - }) 
- .collect::>(); - let pk_types_unsized = is_unsized_vec(pk_types); - let index_setup = if pk_types_unsized { - quote! { - inner.primary_index = std::sync::Arc::new(PrimaryIndex { - pk_map: IndexMap::<#pk_type, OffsetEqLink<#const_name>, UnsizedNode<_>>::with_maximum_node_size(#const_name), - reverse_pk_map: IndexMap::new(), - }); - } - } else { - quote! { - let size = get_index_page_size_from_data_length::<#pk_type>(#const_name); - inner.primary_index = std::sync::Arc::new(PrimaryIndex { - pk_map: IndexMap::<_, OffsetEqLink<#const_name>>::with_maximum_node_size(size), - reverse_pk_map: IndexMap::new(), - }); - } - }; + let pk_types = &self + .columns + .primary_keys + .iter() + .map(|i| { + self.columns + .columns_map + .get(i) + .expect("should exist as got from definition") + .to_string() + }) + .collect::>(); + let pk_types_unsized = is_unsized_vec(pk_types); + let index_setup = if pk_types_unsized { quote! { - impl PersistedWorkTable for #ident - where - E: PersistenceEngine< - <<#pk_type as TablePrimaryKey>::Generator as PrimaryKeyGeneratorState>::State, - #pk_type, - #secondary_index_events, - #avt_index_ident, - Config=C - > + Send - + 'static, - C: Clone + PersistenceConfig, - { - async fn new(engine: E) -> eyre::Result { - let mut inner = WorkTable::default(); - inner.table_name = #table_name; - #index_setup - core::result::Result::Ok(Self( - inner, - #task::run_engine(engine) - )) - } + inner.primary_index = std::sync::Arc::new(PrimaryIndex { + pk_map: IndexMap::<#pk_type, OffsetEqLink<#const_name>, UnsizedNode<_>>::with_maximum_node_size(#const_name), + reverse_pk_map: IndexMap::new(), + }); + } + } else { + quote! 
{ + let size = get_index_page_size_from_data_length::<#pk_type>(#const_name); + inner.primary_index = std::sync::Arc::new(PrimaryIndex { + pk_map: IndexMap::<_, OffsetEqLink<#const_name>>::with_maximum_node_size(size), + reverse_pk_map: IndexMap::new(), + }); + } + }; - async fn load(engine: E) -> eyre::Result { - let table_path = engine.config().table_path(); - if !std::path::Path::new(table_path).exists() { - return Self::new(engine).await; - }; - let space = #space_ident::parse_file(table_path).await?; - let table = space.into_worktable(engine).await; - Ok(table) - } + quote! { + impl PersistedWorkTable for #ident + where + E: PersistenceEngine< + <<#pk_type as TablePrimaryKey>::Generator as PrimaryKeyGeneratorState>::State, + #pk_type, + #secondary_index_events, + #avt_index_ident, + Config=C + > + Send + + 'static, + C: Clone + PersistenceConfig, + { + async fn new(engine: E) -> eyre::Result { + let mut inner = WorkTable::default(); + inner.table_name = #table_name; + #index_setup + core::result::Result::Ok(Self( + inner, + #task::run_engine(engine) + )) + } + + async fn load(engine: E) -> eyre::Result { + let table_path = engine.config().table_path(); + if !std::path::Path::new(table_path).exists() { + return Self::new(engine).await; + }; + let space = #space_ident::parse_file(table_path).await?; + let table = space.into_worktable(engine).await; + Ok(table) } } - } else { - quote! {} } } @@ -136,6 +135,16 @@ impl Generator { } } + fn gen_table_version_fn(&self) -> TokenStream { + let version = self.version; + + quote! 
{ + pub fn version() -> u32 { + #version + } + } + } + fn gen_table_select_fn(&self) -> TokenStream { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); let row_type = name_generator.get_row_type_ident(); @@ -155,24 +164,14 @@ impl Generator { let primary_key_type = name_generator.get_primary_key_type_ident(); let secondary_events_ident = name_generator.get_space_secondary_index_events_ident(); - let insert = if self.is_persist { - quote! { + quote! { + pub fn insert(&self, row: #row_type) -> core::result::Result<#primary_key_type, WorkTableError> { let (op, res) = self.0.insert_cdc::<#secondary_events_ident>(row); if let Some(op) = op { self.1.apply_operation(op); } res } - } else { - quote! { - self.0.insert(row) - } - }; - - quote! { - pub fn insert(&self, row: #row_type) -> core::result::Result<#primary_key_type, WorkTableError> { - #insert - } } } @@ -182,24 +181,14 @@ impl Generator { let primary_key_type = name_generator.get_primary_key_type_ident(); let secondary_events_ident = name_generator.get_space_secondary_index_events_ident(); - let reinsert = if self.is_persist { - quote! { + quote! { + pub async fn reinsert(&self, row_old: #row_type, row_new: #row_type) -> core::result::Result<#primary_key_type, WorkTableError> { let (op, res) = self.0.reinsert_cdc::<#secondary_events_ident>(row_old, row_new); if let Some(op) = op { self.1.apply_operation(op); } res } - } else { - quote! { - self.0.reinsert(row_old, row_new).await - } - }; - - quote! { - pub async fn reinsert(&self, row_old: #row_type, row_new: #row_type) -> core::result::Result<#primary_key_type, WorkTableError> { - #reinsert - } } } @@ -335,49 +324,26 @@ impl Generator { let secondary_index_events = name_generator.get_space_secondary_index_events_ident(); let lock_type = name_generator.get_lock_type_ident(); - if self.is_persist { - quote! 
{ - pub fn vacuum(&self) -> std::sync::Arc { - std::sync::Arc::new(EmptyDataVacuum::< - _, - _, - _, - _, - _, - _, - #lock_type, - _, - #secondary_index_events - >::new( - #table_name, - std::sync::Arc::clone(&self.0.data), - std::sync::Arc::clone(&self.0.lock_manager), - std::sync::Arc::clone(&self.0.primary_index), - std::sync::Arc::clone(&self.0.indexes), - )) - } - } - } else { - quote! { - pub fn vacuum(&self) -> std::sync::Arc { - std::sync::Arc::new(EmptyDataVacuum::< - _, - _, - _, - _, - _, - _, - #lock_type, - _ - >::new( - #table_name, - std::sync::Arc::clone(&self.0.data), - std::sync::Arc::clone(&self.0.lock_manager), - std::sync::Arc::clone(&self.0.primary_index), - std::sync::Arc::clone(&self.0.indexes), - )) - } + quote! { + pub fn vacuum(&self) -> std::sync::Arc { + std::sync::Arc::new(EmptyDataVacuum::< + _, + _, + _, + _, + _, + _, + #lock_type, + _, + #secondary_index_events + >::new( + #table_name, + std::sync::Arc::clone(&self.0.data), + std::sync::Arc::clone(&self.0.lock_manager), + std::sync::Arc::clone(&self.0.primary_index), + std::sync::Arc::clone(&self.0.indexes), + )) } } } -} +} \ No newline at end of file diff --git a/codegen/src/worktable/generator/table/index_fns.rs b/codegen/src/generators/persist/table/index_fns.rs similarity index 95% rename from codegen/src/worktable/generator/table/index_fns.rs rename to codegen/src/generators/persist/table/index_fns.rs index d864fc2c..1ca1eca5 100644 --- a/codegen/src/worktable/generator/table/index_fns.rs +++ b/codegen/src/generators/persist/table/index_fns.rs @@ -3,11 +3,11 @@ use std::collections::HashMap; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -use crate::name_generator::{WorktableNameGenerator, is_float}; -use crate::worktable::generator::Generator; -use crate::worktable::model::Index; +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::persist::PersistGenerator; +use crate::common::model::Index; -impl Generator { +impl 
PersistGenerator { pub fn gen_table_index_fns(&self) -> syn::Result { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); let ident = name_generator.get_work_table_ident(); @@ -111,4 +111,4 @@ impl Generator { } }) } -} +} \ No newline at end of file diff --git a/codegen/src/generators/persist/table/mod.rs b/codegen/src/generators/persist/table/mod.rs new file mode 100644 index 00000000..d41ea519 --- /dev/null +++ b/codegen/src/generators/persist/table/mod.rs @@ -0,0 +1,146 @@ +mod impls; +mod index_fns; +mod select_executor; + +use proc_macro2::{Literal, TokenStream}; +use quote::quote; + +use crate::common::name_generator::{WorktableNameGenerator, is_unsized_vec}; +use crate::generators::persist::PersistGenerator; + +impl PersistGenerator { + pub fn gen_table_def(&mut self) -> syn::Result { + let page_size_consts = self.gen_page_size_consts(); + let version_const = self.gen_version_const(); + let type_ = self.gen_table_type(); + let impl_ = self.gen_table_impl(); + let index_fns = self.gen_table_index_fns()?; + let select_query_executor_impl = self.gen_table_select_query_executor_impl(); + let column_range_type = self.gen_table_column_range_type(); + + Ok(quote! { + #page_size_consts + #version_const + #type_ + #impl_ + #index_fns + #select_query_executor_impl + #column_range_type + }) + } + + fn gen_page_size_consts(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let page_const_name = name_generator.get_page_size_const_ident(); + let inner_const_name = name_generator.get_page_inner_size_const_ident(); + + if let Some(page_size) = &self.config.as_ref().and_then(|c| c.page_size) { + let page_size = Literal::usize_unsuffixed(*page_size as usize); + quote! { + const #page_const_name: usize = #page_size; + const #inner_const_name: usize = #page_size - GENERAL_HEADER_SIZE; + } + } else { + quote! 
{ + const #page_const_name: usize = PAGE_SIZE; + const #inner_const_name: usize = #page_const_name - GENERAL_HEADER_SIZE; + } + } + } + + fn gen_version_const(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let version_const_name = name_generator.get_version_const_ident(); + let version = self.version; + + quote! { + const #version_const_name: u32 = #version; + } + } + + fn gen_table_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_work_table_ident(); + let row_type = name_generator.get_row_type_ident(); + let primary_key_type = name_generator.get_primary_key_type_ident(); + let index_type = name_generator.get_index_type_ident(); + let inner_const_name = name_generator.get_page_inner_size_const_ident(); + let avt_type_ident = name_generator.get_available_type_ident(); + let avt_index_ident = name_generator.get_available_indexes_ident(); + let persistence_task = name_generator.get_persistence_task_ident(); + let lock_ident = name_generator.get_lock_type_ident(); + + let pk_types = &self + .columns + .primary_keys + .iter() + .map(|i| { + self.columns + .columns_map + .get(i) + .expect("should exist as got from definition") + .to_string() + }) + .collect::>(); + let pk_types_unsized = is_unsized_vec(pk_types); + + let derive = if pk_types_unsized { + quote! { + #[derive(Debug, PersistTable)] + #[table(pk_unsized)] + } + } else { + quote! { + #[derive(Debug, PersistTable)] + } + }; + + let node_type = if pk_types_unsized { + quote! { + UnsizedNode>> + } + } else { + quote! { + Vec>> + } + }; + + if self.config.as_ref().and_then(|c| c.page_size).is_some() { + quote! 
{ + #derive + pub struct #ident( + WorkTable< + #row_type, + #primary_key_type, + #avt_type_ident, + #avt_index_ident, + #index_type, + #lock_ident, + <#primary_key_type as TablePrimaryKey>::Generator, + #inner_const_name, + #node_type + > + , #persistence_task + ); + } + } else { + quote! { + #derive + pub struct #ident( + WorkTable< + #row_type, + #primary_key_type, + #avt_type_ident, + #avt_index_ident, + #index_type, + #lock_ident, + <#primary_key_type as TablePrimaryKey>::Generator, + { INNER_PAGE_SIZE }, + #node_type + > + , #persistence_task + ); + } + } + } +} \ No newline at end of file diff --git a/codegen/src/worktable/generator/table/select_executor.rs b/codegen/src/generators/persist/table/select_executor.rs similarity index 98% rename from codegen/src/worktable/generator/table/select_executor.rs rename to codegen/src/generators/persist/table/select_executor.rs index f43858b2..a97a3388 100644 --- a/codegen/src/worktable/generator/table/select_executor.rs +++ b/codegen/src/generators/persist/table/select_executor.rs @@ -4,8 +4,8 @@ use proc_macro2::Span; use proc_macro2::TokenStream; use quote::quote; -use crate::name_generator::WorktableNameGenerator; -use crate::worktable::generator::Generator; +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::persist::PersistGenerator; use quote::ToTokens; use syn::Type; @@ -28,7 +28,7 @@ fn is_numeric_type(ty: &Type) -> bool { ) } -impl Generator { +impl PersistGenerator { pub fn gen_table_column_range_type(&self) -> TokenStream { let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); let column_range_type = name_generator.get_column_range_type_ident(); @@ -224,4 +224,4 @@ impl Generator { } } } -} +} \ No newline at end of file diff --git a/codegen/src/worktable/generator/wrapper.rs b/codegen/src/generators/persist/wrapper.rs similarity index 96% rename from codegen/src/worktable/generator/wrapper.rs rename to codegen/src/generators/persist/wrapper.rs 
index 7c82e9a2..10d0098d 100644 --- a/codegen/src/worktable/generator/wrapper.rs +++ b/codegen/src/generators/persist/wrapper.rs @@ -1,9 +1,9 @@ -use crate::name_generator::WorktableNameGenerator; -use crate::worktable::generator::Generator; +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::persist::PersistGenerator; use proc_macro2::TokenStream; use quote::quote; -impl Generator { +impl PersistGenerator { pub fn gen_wrapper_def(&self) -> TokenStream { let type_ = self.gen_wrapper_type(); let impl_ = self.gen_wrapper_impl(); @@ -104,4 +104,4 @@ impl Generator { } } } -} +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/index/info.rs b/codegen/src/generators/read_only/index/info.rs new file mode 100644 index 00000000..9da65644 --- /dev/null +++ b/codegen/src/generators/read_only/index/info.rs @@ -0,0 +1,91 @@ +use proc_macro2::TokenStream; +use quote::quote; + +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::read_only::ReadOnlyGenerator; + +impl ReadOnlyGenerator { + pub fn gen_secondary_index_info_impl_def(&mut self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let index_type_ident = name_generator.get_index_type_ident(); + + let info_fn = self.gen_index_info_fn(); + let is_empty_fn = self.gen_index_is_empty_fn(); + + quote! { + impl TableSecondaryIndexInfo for #index_type_ident { + #info_fn + #is_empty_fn + } + } + } + + fn gen_index_info_fn(&self) -> TokenStream { + let rows = self.columns.indexes.values().map(|idx| { + let index_field_name = &idx.name; + let index_name_str = index_field_name.to_string(); + + if idx.is_unique { + quote! 
{ + info.push(IndexInfo { + name: #index_name_str.to_string(), + index_type: IndexKind::Unique, + key_count: self.#index_field_name.len(), + capacity: self.#index_field_name.capacity(), + heap_size: self.#index_field_name.heap_size(), + used_size: self.#index_field_name.used_size(), + node_count: self.#index_field_name.node_count(), + }); + } + } else { + quote! { + info.push(IndexInfo { + name: #index_name_str.to_string(), + index_type: IndexKind::NonUnique, + key_count: self.#index_field_name.len(), + capacity: self.#index_field_name.capacity(), + heap_size: self.#index_field_name.heap_size(), + used_size: self.#index_field_name.used_size(), + node_count: self.#index_field_name.node_count(), + }); + } + } + }); + + quote! { + fn index_info(&self) -> Vec { + let mut info = Vec::new(); + #(#rows)* + info + } + } + } + + fn gen_index_is_empty_fn(&self) -> TokenStream { + let is_empty = self + .columns + .indexes + .values() + .map(|idx| { + let index_field_name = &idx.name; + quote! { + self.#index_field_name.len() == 0 + } + }) + .collect::>(); + + if is_empty.is_empty() { + quote! { + fn is_empty(&self) -> bool { + true + } + } + } else { + quote! 
{ + fn is_empty(&self) -> bool { + #(#is_empty) &&* + } + } + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/index/mod.rs b/codegen/src/generators/read_only/index/mod.rs new file mode 100644 index 00000000..3864bf00 --- /dev/null +++ b/codegen/src/generators/read_only/index/mod.rs @@ -0,0 +1,171 @@ +mod info; +mod usual; + +use crate::common::name_generator::{WorktableNameGenerator, is_float, is_unsized}; +use crate::generators::read_only::ReadOnlyGenerator; +use convert_case::{Case, Casing}; +use proc_macro2::TokenStream; +use quote::quote; + +impl ReadOnlyGenerator { + pub fn gen_index_def(&mut self) -> syn::Result { + let type_def = self.gen_type_def()?; + let impl_def = self.gen_secondary_index_impl_def(); + let info_def = self.gen_secondary_index_info_impl_def(); + let default_impl = self.gen_index_default_impl()?; + let available_indexes = self.gen_available_indexes(); + + Ok(quote! { + #type_def + #impl_def + #info_def + #default_impl + #available_indexes + }) + } + + fn gen_type_def(&mut self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_index_type_ident(); + let index_rows = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let Some(t) = self.columns.columns_map.get(i) else { + return Err(syn::Error::new( + i.span(), + format!("cannot find column `{i}` in this table"), + )); + }; + let t = if is_float(t.to_string().as_str()) { + quote! { OrderedFloat<#t> } + } else { + quote! { #t } + }; + let i = &idx.name; + + #[allow(clippy::collapsible_else_if)] + let res = if idx.is_unique { + if is_unsized(&t.to_string()) { + quote! { + #i: IndexMap<#t, OffsetEqLink, UnsizedNode>> + } + } else { + quote! {#i: IndexMap<#t, OffsetEqLink>} + } + } else { + if is_unsized(&t.to_string()) { + quote! {#i: IndexMultiMap<#t, OffsetEqLink, UnsizedNode>>} + } else { + quote! 
{#i: IndexMultiMap<#t, OffsetEqLink>} + } + }; + Ok::<_, syn::Error>(res) + }) + .collect::, syn::Error>>()?; + + let derive = quote! { + #[derive(Debug, MemStat, PersistIndex)] + #[index(read_only)] + }; + + Ok(quote! { + #derive + pub struct #ident { + #(#index_rows),* + } + }) + } + + fn gen_index_default_impl(&self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let index_type_ident = name_generator.get_index_type_ident(); + let const_name = name_generator.get_page_inner_size_const_ident(); + + let index_rows = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let Some(t) = self.columns.columns_map.get(i) else { + return Err(syn::Error::new( + i.span(), + format!("cannot find column `{i}` in this table"), + )); + }; + let t = if is_float(t.to_string().as_str()) { + quote! { OrderedFloat<#t> } + } else { + quote! { #t } + }; + let i = &idx.name; + + #[allow(clippy::collapsible_else_if)] + let res = if idx.is_unique { + if is_unsized(&t.to_string()) { + quote! { + #i: IndexMap::with_maximum_node_size(#const_name), + } + } else { + quote! {#i: IndexMap::with_maximum_node_size(get_index_page_size_from_data_length::<#t>(#const_name)),} + } + } else { + if is_unsized(&t.to_string()) { + quote! {#i: IndexMultiMap::with_maximum_node_size(#const_name), } + } else { + quote! {#i: IndexMultiMap::with_maximum_node_size(get_index_page_size_from_data_length::<#t>(#const_name)),} + } + }; + + Ok::<_, syn::Error>(res) + }) + .collect::, syn::Error>>()?; + + Ok(quote! 
{ + impl Default for #index_type_ident { + fn default() -> Self { + Self { + #(#index_rows)* + } + } + } + }) + } + + fn gen_available_indexes(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = name_generator.get_available_indexes_ident(); + + let indexes = self.columns.indexes.values().map(|i| { + let camel_case_name = i + .name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let i: TokenStream = camel_case_name.parse().unwrap(); + quote! { + #i, + } + }); + + if self.columns.indexes.is_empty() { + quote! { + pub type #avt_type_ident = (); + } + } else { + quote! { + #[derive(Debug, Clone, Copy, MoreDisplay, PartialEq, PartialOrd, Ord, Hash, Eq)] + pub enum #avt_type_ident { + #(#indexes)* + } + + impl AvailableIndex for #avt_type_ident { + fn to_string_value(&self) -> String { + ToString::to_string(&self) + } + } + } + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/index/usual.rs b/codegen/src/generators/read_only/index/usual.rs new file mode 100644 index 00000000..5277d32d --- /dev/null +++ b/codegen/src/generators/read_only/index/usual.rs @@ -0,0 +1,391 @@ +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::read_only::ReadOnlyGenerator; +use crate::generators::read_only::queries::r#type::map_to_uppercase; +use convert_case::{Case, Casing}; +use proc_macro2::{Ident, Literal, Span, TokenStream}; +use quote::quote; + +impl ReadOnlyGenerator { + pub fn gen_secondary_index_impl_def(&mut self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let index_type_ident = name_generator.get_index_type_ident(); + let avt_type_ident = name_generator.get_available_type_ident(); + let avt_index_ident = name_generator.get_available_indexes_ident(); + + let save_row_fn = 
self.gen_save_row_index_fn(); + let reinsert_row_fn = self.gen_reinsert_row_index_fn(); + let delete_row_fn = self.gen_delete_row_index_fn(); + let process_difference_insert_fn = self.gen_process_difference_insert_index_fn(); + let process_difference_remove_fn = self.gen_process_difference_remove_index_fn(); + let delete_from_indexes = self.gen_index_delete_from_indexes_fn(); + + quote! { + impl TableSecondaryIndex<#row_type_ident, #avt_type_ident, #avt_index_ident> for #index_type_ident { + #save_row_fn + #reinsert_row_fn + #delete_row_fn + #process_difference_insert_fn + #process_difference_remove_fn + #delete_from_indexes + } + } + } + + fn gen_save_row_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let save_rows = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + + let row = if is_float( + self.columns + .columns_map + .get(i) + .unwrap() + .to_string() + .as_str(), + ) { + quote! { + OrderedFloat(row.#i) + } + } else { + quote! { + row.#i + } + }; + quote! { + if self.#index_field_name.insert_checked(#row.clone(), link).is_none() { + return Err(IndexError::AlreadyExists { + at: #available_index_ident::#index_variant, + inserted_already: inserted_indexes.clone(), + }) + } + inserted_indexes.push(#available_index_ident::#index_variant); + } + }) + .collect::>(); + + quote! 
{ + fn save_row(&self, row: #row_type_ident, link: Link) -> core::result::Result<(), IndexError<#available_index_ident>> { + let mut inserted_indexes: Vec<#available_index_ident> = vec![]; + #(#save_rows)* + core::result::Result::Ok(()) + } + } + } + + fn gen_reinsert_row_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let (insert_rows, remove_rows): (Vec<_>, Vec<_>) = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + let row = if is_float( + self.columns + .columns_map + .get(i) + .unwrap() + .to_string() + .as_str(), + ) { + quote! { + OrderedFloat(row.#i) + } + } else { + quote! { + row.#i + } + }; + let remove = if idx.is_unique { + quote! { + if val_new == val_old { + TableIndex::insert(&self.#index_field_name, val_new.clone(), link_new); + } else { + TableIndex::remove(&self.#index_field_name, &val_old, link_old); + } + } + } else { + quote! { + TableIndex::insert(&self.#index_field_name, val_new.clone(), link_new); + TableIndex::remove(&self.#index_field_name, &val_old, link_old); + } + }; + let insert = if idx.is_unique { + quote! { + let row = &row_new; + let val_new = #row.clone(); + let row = &row_old; + let val_old = #row.clone(); + if val_new != val_old { + if self.#index_field_name.insert_checked(val_new.clone(), link_new).is_none() { + return Err(IndexError::AlreadyExists { + at: #available_index_ident::#index_variant, + inserted_already: inserted_indexes.clone(), + }) + } + inserted_indexes.push(#available_index_ident::#index_variant); + } + } + } else { + quote! {} + }; + let remove = quote! 
{ + let row = &row_new; + let val_new = #row.clone(); + let row = &row_old; + let val_old = #row.clone(); + #remove + }; + (insert, remove) + }) + .unzip(); + + quote! { + fn reinsert_row(&self, + row_old: #row_type_ident, + link_old: Link, + row_new: #row_type_ident, + link_new: Link + ) -> core::result::Result<(), IndexError<#available_index_ident>> + { + let mut inserted_indexes: Vec<#available_index_ident> = vec![]; + #(#insert_rows)* + #(#remove_rows)* + core::result::Result::Ok(()) + } + } + } + + fn gen_delete_row_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type_ident = name_generator.get_row_type_ident(); + let available_index_ident = name_generator.get_available_indexes_ident(); + + let delete_rows = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let row = if is_float( + self.columns + .columns_map + .get(i) + .unwrap() + .to_string() + .as_str(), + ) { + quote! { + OrderedFloat(row.#i) + } + } else { + quote! { + row.#i + } + }; + quote! { + TableIndex::remove(&self.#index_field_name, &#row, link); + } + }) + .collect::>(); + + quote! 
{ + fn delete_row(&self, row: #row_type_ident, link: Link) -> core::result::Result<(), IndexError<#available_index_ident>> { + #(#delete_rows)* + core::result::Result::Ok(()) + } + } + } + + fn gen_process_difference_remove_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = name_generator.get_available_type_ident(); + let avt_index_ident = name_generator.get_available_indexes_ident(); + + let process_difference_remove_rows = self.columns.indexes.iter().map(|(i, idx)| { + let index_field_name = &idx.name; + let diff_key = Literal::string(i.to_string().as_str()); + + if let Some(t) = self.columns.columns_map.get(&idx.field) { + let type_str = t.to_string(); + let variant_ident = Ident::new(&map_to_uppercase(&type_str), Span::mixed_site()); + + let old_value_expr = if type_str == "String" { + quote! { old.to_string() } + } else if is_float(type_str.as_str()) { + quote! { OrderedFloat(*old) } + } else { + quote! { *old } + }; + + quote! { + if let Some(diff) = difference.get(#diff_key) { + if let #avt_type_ident::#variant_ident(old) = &diff.old { + let key_old = #old_value_expr; + TableIndex::remove(&self.#index_field_name, &key_old, link); + } + } + } + } else { + quote! {} + } + }); + + quote! 
{ + fn process_difference_remove( + &self, + link: Link, + difference: std::collections::HashMap<&str, Difference<#avt_type_ident>> + ) -> core::result::Result<(), IndexError<#avt_index_ident>> { + #(#process_difference_remove_rows)* + core::result::Result::Ok(()) + } + } + } + + fn gen_process_difference_insert_index_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = name_generator.get_available_type_ident(); + let avt_index_ident = name_generator.get_available_indexes_ident(); + + let process_difference_insert_rows = self.columns.indexes.iter().map(|(i, idx)| { + let index_field_name = &idx.name; + let diff_key = Literal::string(i.to_string().as_str()); + + if let Some(t) = self.columns.columns_map.get(&idx.field) { + let type_str = t.to_string(); + let variant_ident = Ident::new(&map_to_uppercase(&type_str), Span::mixed_site()); + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + + let new_value_expr = if type_str == "String" { + quote! { new.to_string() } + } else if is_float(type_str.as_str()) { + quote! { OrderedFloat(*new) } + } else { + quote! { *new } + }; + + quote! { + if let Some(diff) = difference.get(#diff_key) { + if let #avt_type_ident::#variant_ident(new) = &diff.new { + let key_new = #new_value_expr; + if TableIndex::insert_checked(&self.#index_field_name, key_new, link).is_none() { + return Err(IndexError::AlreadyExists { + at: #avt_index_ident::#index_variant, + inserted_already: inserted_indexes.clone(), + }) + } + inserted_indexes.push(#avt_index_ident::#index_variant); + } + } + } + } else { + quote! {} + } + }); + + quote! 
{ + fn process_difference_insert( + &self, + link: Link, + difference: std::collections::HashMap<&str, Difference<#avt_type_ident>> + ) -> core::result::Result<(), IndexError<#avt_index_ident>> { + let mut inserted_indexes: Vec<#avt_index_ident> = vec![]; + #(#process_difference_insert_rows)* + core::result::Result::Ok(()) + } + } + } + + fn gen_index_delete_from_indexes_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_index_ident = name_generator.get_available_indexes_ident(); + let row_type_ident = name_generator.get_row_type_ident(); + + let matches = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + let index_field_name = &idx.name; + let camel_case_name = index_field_name + .to_string() + .from_case(Case::Snake) + .to_case(Case::Pascal); + let index_variant: TokenStream = camel_case_name.parse().unwrap(); + let row = if is_float( + self.columns + .columns_map + .get(i) + .unwrap() + .to_string() + .as_str(), + ) { + quote! { + OrderedFloat(row.#i) + } + } else { + quote! { + row.#i + } + }; + + quote! { + #avt_index_ident::#index_variant => { + TableIndex::remove(&self.#index_field_name, &#row, link); + }, + } + }) + .collect::>(); + + let inner = if matches.is_empty() { + quote! {} + } else { + quote! { + for index in indexes { + match index { + #(#matches)* + } + } + } + }; + + quote! 
{ + fn delete_from_indexes( + &self, + row: #row_type_ident, + link: Link, + indexes: Vec<#avt_index_ident>, + ) -> core::result::Result<(), IndexError<#avt_index_ident>> { + #inner + core::result::Result::Ok(()) + } + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/locks.rs b/codegen/src/generators/read_only/locks.rs new file mode 100644 index 00000000..08f8db30 --- /dev/null +++ b/codegen/src/generators/read_only/locks.rs @@ -0,0 +1,193 @@ +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::read_only::ReadOnlyGenerator; +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +impl ReadOnlyGenerator { + pub fn gen_locks_def(&self) -> TokenStream { + let type_ = self.gen_locks_type(); + let impl_ = self.gen_locks_impl(); + + quote! { + #type_ + #impl_ + } + } + + fn gen_locks_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let lock_ident = name_generator.get_lock_type_ident(); + + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|i| { + let name = Ident::new(format!("{i}_lock").as_str(), Span::mixed_site()); + quote! { #name: Option>, } + }) + .collect(); + + quote! { + #[derive(Debug, Clone)] + pub struct #lock_ident { + #(#rows)* + } + } + } + + fn gen_locks_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let lock_ident = name_generator.get_lock_type_ident(); + + let new_fn = self.gen_new_fn(); + let row_impl = self.gen_lock_row_impl(); + + quote! 
{ + impl #lock_ident { + #new_fn + } + + #row_impl + } + } + + fn gen_lock_row_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let lock_ident = name_generator.get_lock_type_ident(); + + let is_locked_fn = self.gen_is_locked_fn(); + let with_lock_fn = self.gen_with_lock_fn(); + let lock_fn = self.gen_lock_fn(); + let merge_fn = self.gen_merge_fn(); + + quote! { + impl RowLock for #lock_ident { + #is_locked_fn + #lock_fn + #with_lock_fn + #merge_fn + } + } + } + + fn gen_is_locked_fn(&self) -> TokenStream { + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|i| { + let col = Ident::new(format!("{i}_lock").as_str(), Span::mixed_site()); + quote! { self.#col.as_ref().map(|l| l.is_locked()).unwrap_or(false) } + }) + .collect(); + + quote! { + fn is_locked(&self) -> bool { + #(#rows) ||* + } + } + } + + fn gen_new_fn(&self) -> TokenStream { + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|i| { + let col = Ident::new(format!("{i}_lock").as_str(), Span::mixed_site()); + quote! { #col: None } + }) + .collect(); + + quote! { + pub fn new() -> Self { + Self { + #(#rows),* + } + } + } + } + + fn gen_with_lock_fn(&self) -> TokenStream { + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|i| { + let col = Ident::new(format!("{i}_lock").as_str(), Span::mixed_site()); + quote! { #col: Some(lock.clone()) } + }) + .collect(); + + quote! { + fn with_lock(id: u16) -> (Self, std::sync::Arc<Lock>) { + let lock = std::sync::Arc::new(Lock::new(id)); + ( + Self { + #(#rows),* + }, + lock + ) + } + } + } + + fn gen_lock_fn(&self) -> TokenStream { + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|i| { + let col = Ident::new(format!("{i}_lock").as_str(), Span::mixed_site()); + quote! { + if let Some(lock) = &self.#col { + set.insert(lock.clone()); + } + self.#col = Some(lock.clone()); + } + }) + .collect(); + + quote!
{ + #[allow(clippy::mutable_key_type)] + fn lock(&mut self, id: u16) -> (std::collections::HashSet>, std::sync::Arc) { + let mut set = std::collections::HashSet::new(); + let lock = std::sync::Arc::new(Lock::new(id)); + #(#rows)* + + (set, lock) + } + } + } + + fn gen_merge_fn(&self) -> TokenStream { + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|col| { + let col = Ident::new(format!("{col}_lock").as_str(), Span::mixed_site()); + quote! { + if let Some(#col) = &other.#col { + if self.#col.is_none() { + self.#col = Some(#col.clone()); + } else { + set.insert(#col.clone()); + } + } + other.#col = self.#col.clone(); + } + }) + .collect(); + + quote! { + #[allow(clippy::mutable_key_type)] + fn merge(&mut self, other: &mut Self) -> std::collections::HashSet> { + let mut set = std::collections::HashSet::new(); + #(#rows)* + set + } + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/mod.rs b/codegen/src/generators/read_only/mod.rs new file mode 100644 index 00000000..55933742 --- /dev/null +++ b/codegen/src/generators/read_only/mod.rs @@ -0,0 +1,54 @@ +mod index; +mod locks; +mod primary_key; +mod queries; +mod row; +mod table; +mod wrapper; + +use proc_macro2::Ident; +use quote::quote; + +use crate::common::model::Columns; + +pub struct ReadOnlyGenerator { + pub name: Ident, + pub columns: Columns, + pub pk: Option, + pub version: u32, +} + +impl ReadOnlyGenerator { + pub fn new(name: Ident, columns: Columns, version: u32) -> Self { + Self { + name, + columns, + pk: None, + version, + } + } +} + +pub fn expand(name: proc_macro2::Ident, columns: crate::common::model::Columns, version: u32) -> syn::Result { + let mut generator = ReadOnlyGenerator::new(name, columns, version); + + let pk_def = generator.gen_primary_key_def()?; + let row_def = generator.gen_row_def(); + let query_available_def = generator.gen_available_types_def()?; + let wrapper_def = generator.gen_wrapper_def(); + let locks_def = 
generator.gen_locks_def(); + let index_def = generator.gen_index_def()?; + let table_def = generator.gen_table_def()?; + let select_impls = generator.gen_query_select_impl()?; + + Ok(quote! { + #pk_def + #row_def + #query_available_def + #wrapper_def + #locks_def + #index_def + #table_def + #select_impls + }) +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/primary_key.rs b/codegen/src/generators/read_only/primary_key.rs new file mode 100644 index 00000000..55b74898 --- /dev/null +++ b/codegen/src/generators/read_only/primary_key.rs @@ -0,0 +1,144 @@ +use std::collections::HashMap; + +use crate::common::name_generator::{WorktableNameGenerator, is_unsized_vec}; +use crate::common::model::{GeneratorType, PrimaryKey}; +use crate::generators::read_only::ReadOnlyGenerator; + +use proc_macro2::{Ident, TokenStream}; +use quote::quote; + +impl ReadOnlyGenerator { + pub fn gen_primary_key_def(&mut self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_primary_key_type_ident(); + let values = self + .columns + .primary_keys + .iter() + .map(|i| { + ( + i.clone(), + self.columns + .columns_map + .get(i) + .expect("should exist as got from definition") + .clone(), + ) + }) + .collect::>(); + + let def = self.gen_primary_key_type(); + let impl_ = self.gen_table_primary_key_impl()?; + + self.pk = Some(PrimaryKey { ident, values }); + + Ok(quote! { + #def + #impl_ + }) + } + + fn gen_primary_key_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_primary_key_type_ident(); + + let types = &self + .columns + .primary_keys + .iter() + .map(|i| { + self.columns + .columns_map + .get(i) + .expect("should exist as got from definition") + }) + .collect::>(); + let unsized_derive = + if is_unsized_vec(&types.iter().map(|v| v.to_string()).collect::>()) { + quote! 
{ + VariableSizeMeasure, + } + } else { + quote! {} + }; + + quote! { + #[derive( + Clone, + rkyv::Archive, + Debug, + Default, + rkyv::Deserialize, + Hash, + rkyv::Serialize, + From, + Eq, + Into, + PartialEq, + PartialOrd, + Ord, + SizeMeasure, + MemStat, + #unsized_derive + )] + #[rkyv(derive(PartialEq, Eq, PartialOrd, Ord, Debug))] + pub struct #ident(#(#types),*); + } + } + + fn gen_table_primary_key_impl(&self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_primary_key_type_ident(); + + Ok(match self.columns.generator_type { + GeneratorType::None => { + quote! { + impl TablePrimaryKey for #ident { + type Generator = (); + } + } + } + GeneratorType::Autoincrement => { + let i = self + .columns + .primary_keys + .first() + .expect("at least one primary key should exist if autoincrement"); + let type_ = self + .columns + .columns_map + .get(i) + .expect("primary key column name always exists if in primary keys list"); + + let generator = Self::get_generator_from_type(type_, i)?; + quote! { + impl TablePrimaryKey for #ident { + type Generator = #generator; + } + } + } + GeneratorType::Custom => { + quote! {} + } + }) + } + + fn get_generator_from_type(type_: &TokenStream, i: &Ident) -> syn::Result { + Ok(match type_.to_string().as_str() { + "u8" => quote! { std::sync::atomic::AtomicU8 }, + "u16" => quote! { std::sync::atomic::AtomicU16 }, + "u32" => quote! { std::sync::atomic::AtomicU32 }, + "u64" => quote! { std::sync::atomic::AtomicU64 }, + "i8" => quote! { std::sync::atomic::AtomicI8 }, + "i16" => quote! { std::sync::atomic::AtomicI16 }, + "i32" => quote! { std::sync::atomic::AtomicI32 }, + "i64" => quote! 
{ std::sync::atomic::AtomicI64 }, + _ => { + return Err(syn::Error::new( + i.span(), + "Type is not supported for autoincrement", + )); + } + }) + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/queries/mod.rs b/codegen/src/generators/read_only/queries/mod.rs new file mode 100644 index 00000000..4121177b --- /dev/null +++ b/codegen/src/generators/read_only/queries/mod.rs @@ -0,0 +1,2 @@ +pub mod r#type; +mod select; \ No newline at end of file diff --git a/codegen/src/generators/read_only/queries/select.rs b/codegen/src/generators/read_only/queries/select.rs new file mode 100644 index 00000000..89bef9aa --- /dev/null +++ b/codegen/src/generators/read_only/queries/select.rs @@ -0,0 +1,40 @@ +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::read_only::ReadOnlyGenerator; +use proc_macro2::TokenStream; +use quote::quote; + +impl ReadOnlyGenerator { + pub fn gen_query_select_impl(&mut self) -> syn::Result<TokenStream> { + let select_all = self.gen_select_all(); + + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let table_ident = name_generator.get_work_table_ident(); + + Ok(quote! { + impl #table_ident { + #select_all + } + }) + } + + fn gen_select_all(&mut self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_ident = name_generator.get_row_type_ident(); + let column_range_type = name_generator.get_column_range_type_ident(); + let row_fields_ident = name_generator.get_row_fields_enum_ident(); + + quote!
{ + pub fn select_all(&self) -> SelectQueryBuilder<#row_ident, + impl DoubleEndedIterator<Item = #row_ident> + '_ + Sized, + #column_range_type, + #row_fields_ident> + { + let iter = self.0.primary_index.pk_map + .iter() + .filter_map(|(_, link)| self.0.data.select_non_ghosted(link.0).ok()); + + SelectQueryBuilder::new(iter) + } + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/queries/type.rs b/codegen/src/generators/read_only/queries/type.rs new file mode 100644 index 00000000..a55060ab --- /dev/null +++ b/codegen/src/generators/read_only/queries/type.rs @@ -0,0 +1,74 @@ +use std::collections::HashSet; + +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::read_only::ReadOnlyGenerator; + +pub fn map_to_uppercase(str: &str) -> String { + if str.contains("OrderedFloat") { + let mut split = str.split("<"); + let _ = split.next(); + let inner_type = split + .next() + .expect("OrderedFloat def contains inner type") + .replace(">", ""); + format!("Ordered{}", inner_type.to_uppercase().trim()) + } else if str.contains("Option") { + let mut split = str.split("<"); + let _ = split.next(); + let inner_type = split + .next() + .expect("Option def contains inner type") + .replace(">", ""); + format!("Option{}", inner_type.to_uppercase().trim()) + } else { + str.to_uppercase() + } +} + +impl ReadOnlyGenerator { + pub fn gen_available_types_def(&mut self) -> syn::Result<TokenStream> { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let avt_type_ident = name_generator.get_available_type_ident(); + + let unique_types: HashSet<String> = self + .columns + .indexes + .iter() + .filter_map(|(_, idx)| self.columns.columns_map.get(&idx.field)) + .map(|ty| ty.to_string()) + .collect(); + + let rows: Vec<_> = unique_types + .iter() + .map(|s| { + let type_ident: TokenStream = s + .to_string() + .parse() + .expect("should be valid because parsed from
declaration"); + let type_upper = map_to_uppercase(s); + let type_upper = Ident::new(type_upper.as_str(), Span::mixed_site()); + Some(quote! { + #[from] + #type_upper(#type_ident), + }) + }) + .collect(); + + if !rows.is_empty() { + Ok(quote! { + #[derive(Clone, Debug, From, PartialEq)] + #[non_exhaustive] + pub enum #avt_type_ident { + #(#rows)* + } + }) + } else { + Ok(quote! { + type #avt_type_ident = (); + }) + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/row.rs b/codegen/src/generators/read_only/row.rs new file mode 100644 index 00000000..cb49b679 --- /dev/null +++ b/codegen/src/generators/read_only/row.rs @@ -0,0 +1,126 @@ +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::read_only::ReadOnlyGenerator; +use convert_case::{Case, Casing}; +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +impl ReadOnlyGenerator { + pub fn gen_row_def(&mut self) -> TokenStream { + let def = self.gen_row_type(); + let table_row_impl = self.gen_row_table_row_impl(); + let row_fields_enum = self.gen_row_fields_enum(); + let query_impl = self.gen_query_impl(); + + quote! { + #def + #table_row_impl + #row_fields_enum + #query_impl + } + } + + fn gen_row_table_row_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_row_type_ident(); + let primary_key_ident = name_generator.get_primary_key_type_ident(); + + let primary_key = self + .pk + .clone() + .expect("should be set in `Generator` at this point"); + let primary_key_columns_clone = if primary_key.values.len() == 1 { + let pk_field = primary_key + .values + .keys() + .next() + .expect("should exist as length is checked"); + quote! { + self.#pk_field.clone().into() + } + } else { + let vals = primary_key + .values + .keys() + .map(|i| { + quote! { + self.#i.clone() + } + }) + .collect::>(); + quote! { + (#(#vals),*).into() + } + }; + + quote! 
{ + impl TableRow<#primary_key_ident> for #ident { + + fn get_primary_key(&self) -> #primary_key_ident { + #primary_key_columns_clone + } + } + } + } + + fn gen_row_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_row_type_ident(); + + let mut rows = vec![quote! {}; self.columns.field_positions.len()]; + for (i, pos) in &self.columns.field_positions { + let type_ = self.columns.columns_map.get(i).unwrap(); + rows[*pos] = quote! {pub #i: #type_,} + } + + quote! { + #[derive(rkyv::Archive, Debug, rkyv::Deserialize, Clone, rkyv::Serialize, PartialEq, MemStat)] + #[rkyv(derive(Debug))] + #[repr(C)] + pub struct #ident { + #(#rows)* + } + } + } + + fn gen_query_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_row_type_ident(); + + quote! { + impl Query<#ident> for #ident { + fn merge(self, row: #ident) -> #ident { + self + } + } + } + } + + fn gen_row_fields_enum(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_row_type_ident(); + + let enum_name = Ident::new(format!("{ident}Fields").as_str(), Span::mixed_site()); + + let rows: Vec<_> = self + .columns + .columns_map + .keys() + .map(|name| { + let name_pascal = Ident::new( + name.to_string().to_case(Case::Pascal).as_str(), + Span::mixed_site(), + ); + quote! { #name_pascal, } + }) + .collect(); + + quote! 
{ + #[derive(rkyv::Archive, Debug, rkyv::Deserialize, Clone, rkyv::Serialize, PartialEq)] + #[rkyv(derive(Debug))] + #[repr(C)] + pub enum #enum_name { + #(#rows)* + } + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/table/impls.rs b/codegen/src/generators/read_only/table/impls.rs new file mode 100644 index 00000000..060b17b2 --- /dev/null +++ b/codegen/src/generators/read_only/table/impls.rs @@ -0,0 +1,274 @@ +use proc_macro2::TokenStream; +use quote::quote; + +use crate::common::name_generator::{WorktableNameGenerator, is_unsized_vec}; +use crate::generators::read_only::ReadOnlyGenerator; + +impl ReadOnlyGenerator { + pub fn gen_table_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_work_table_ident(); + + let persisted_impl = self.gen_table_new_fn(); + let name_fn = self.gen_table_name_fn(); + let version_fn = self.gen_table_version_fn(); + let select_fn = self.gen_table_select_fn(); + let insert_fn = self.gen_table_insert_fn(); + let reinsert_fn = self.gen_table_reinsert_fn(); + let upsert_fn = self.gen_table_upsert_fn(); + let get_next_fn = self.gen_table_get_next_fn(); + let iter_with_fn = self.gen_table_iter_with_fn(); + let iter_with_async_fn = self.gen_table_iter_with_async_fn(); + let count_fn = self.gen_table_count_fn(); + let system_info_fn = self.gen_system_info_fn(); + let vacuum_fn = self.gen_table_vacuum_fn(); + + quote! 
{ + #persisted_impl + impl #ident { + #name_fn + #version_fn + #select_fn + #insert_fn + #reinsert_fn + #upsert_fn + #count_fn + #get_next_fn + #iter_with_fn + #iter_with_async_fn + #system_info_fn + #vacuum_fn + } + } + } + + fn gen_table_new_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_work_table_ident(); + let table_name = name_generator.get_work_table_literal_name(); + let space_ident = name_generator.get_space_file_ident(); + let pk_type = name_generator.get_primary_key_type_ident(); + let const_name = name_generator.get_page_inner_size_const_ident(); + let secondary_index_events = name_generator.get_space_secondary_index_events_ident(); + let avt_index_ident = name_generator.get_available_indexes_ident(); + + let pk_types = &self + .columns + .primary_keys + .iter() + .map(|i| { + self.columns + .columns_map + .get(i) + .expect("should exist as got from definition") + .to_string() + }) + .collect::>(); + let pk_types_unsized = is_unsized_vec(pk_types); + + let index_setup = if pk_types_unsized { + quote! { + inner.primary_index = std::sync::Arc::new(PrimaryIndex { + pk_map: IndexMap::<#pk_type, OffsetEqLink<#const_name>, UnsizedNode<_>>::with_maximum_node_size(#const_name), + reverse_pk_map: IndexMap::new(), + }); + } + } else { + quote! { + let size = get_index_page_size_from_data_length::<#pk_type>(#const_name); + inner.primary_index = std::sync::Arc::new(PrimaryIndex { + pk_map: IndexMap::<_, OffsetEqLink<#const_name>>::with_maximum_node_size(size), + reverse_pk_map: IndexMap::new(), + }); + } + }; + + quote! 
{ + impl PersistedWorkTable for #ident + where + E: PersistenceEngine< + <<#pk_type as TablePrimaryKey>::Generator as PrimaryKeyGeneratorState>::State, + #pk_type, + #secondary_index_events, + #avt_index_ident, + Config=C + > + Send + + 'static, + C: Clone + PersistenceConfig, + { + async fn new(engine: E) -> eyre::Result { + let mut inner = WorkTable::default(); + inner.table_name = #table_name; + #index_setup + core::result::Result::Ok(Self(inner)) + } + + async fn load(engine: E) -> eyre::Result { + let table_path = engine.config().table_path(); + if !std::path::Path::new(table_path).exists() { + return Self::new(engine).await; + }; + let space = #space_ident::parse_file(table_path).await?; + let table = space.into_worktable(); + Ok(table) + } + } + } + } + + fn gen_table_name_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let dir_name = name_generator.get_dir_name(); + + quote! { + pub fn name(&self) -> &'static str { + &self.0.table_name + } + + pub fn name_snake_case() -> &'static str { + #dir_name + } + } + } + + fn gen_table_version_fn(&self) -> TokenStream { + let version = self.version; + + quote! { + pub fn version() -> u32 { + #version + } + } + } + + fn gen_table_select_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + let primary_key_type = name_generator.get_primary_key_type_ident(); + + quote! { + pub fn select(&self, pk: Pk) -> Option<#row_type> + where #primary_key_type: From { + self.0.select(pk.into()) + } + } + } + + fn gen_table_insert_fn(&self) -> TokenStream { + quote! {} + } + + fn gen_table_reinsert_fn(&self) -> TokenStream { + quote! {} + } + + fn gen_table_upsert_fn(&self) -> TokenStream { + quote! 
{} + } + + fn gen_table_get_next_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let primary_key_type = name_generator.get_primary_key_type_ident(); + + match self.columns.generator_type { + crate::common::model::GeneratorType::Custom | crate::common::model::GeneratorType::Autoincrement => { + quote! { + pub fn get_next_pk(&self) -> #primary_key_type { + self.0.get_next_pk() + } + } + } + crate::common::model::GeneratorType::None => { + quote! {} + } + } + } + + fn gen_table_iter_with_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + let inner = self.gen_table_iter_inner(quote! { + f(data)?; + }); + + quote! { + pub fn iter_with< + F: Fn(#row_type) -> core::result::Result<(), WorkTableError> + >(&self, f: F) -> core::result::Result<(), WorkTableError> { + #inner + } + } + } + + fn gen_table_iter_with_async_fn(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + let inner = self.gen_table_iter_inner(quote! { + f(data).await?; + }); + + quote! { + pub async fn iter_with_async< + F: Fn(#row_type) -> Fut, + Fut: std::future::Future> + >(&self, f: F) -> core::result::Result<(), WorkTableError> { + #inner + } + } + } + + fn gen_table_iter_inner(&self, func: TokenStream) -> TokenStream { + quote! 
{ + let first = self.0.primary_index.pk_map.iter().next().map(|(k, v)| (k.clone(), v.0)); + let Some((mut k, link)) = first else { + return Ok(()) + }; + + let data = self.0.data.select_non_ghosted(link).map_err(WorkTableError::PagesError)?; + #func + + let mut ind = false; + while !ind { + let next = { + let mut iter = self.0.primary_index.pk_map.range(k.clone()..); + let next = iter.next().map(|(k, v)| (k.clone(), v.0)).filter(|(key, _)| key != &k); + if next.is_some() { + next + } else { + iter.next().map(|(k, v)| (k.clone(), v.0)) + } + }; + if let Some((key, link)) = next { + let data = self.0.data.select_non_ghosted(link).map_err(WorkTableError::PagesError)?; + #func + k = key + } else { + ind = true; + }; + } + + core::result::Result::Ok(()) + } + } + + fn gen_table_count_fn(&self) -> TokenStream { + quote! { + pub fn count(&self) -> usize { + let count = self.0.primary_index.pk_map.len(); + count + } + } + } + + fn gen_system_info_fn(&self) -> TokenStream { + quote! { + pub fn system_info(&self) -> SystemInfo { + self.0.system_info() + } + } + } + + fn gen_table_vacuum_fn(&self) -> TokenStream { + quote! 
{} + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/table/index_fns.rs b/codegen/src/generators/read_only/table/index_fns.rs new file mode 100644 index 00000000..d5a9e9f4 --- /dev/null +++ b/codegen/src/generators/read_only/table/index_fns.rs @@ -0,0 +1,114 @@ +use std::collections::HashMap; + +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; + +use crate::common::name_generator::{WorktableNameGenerator, is_float}; +use crate::generators::read_only::ReadOnlyGenerator; +use crate::common::model::Index; + +impl ReadOnlyGenerator { + pub fn gen_table_index_fns(&self) -> syn::Result { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_work_table_ident(); + let row_ident = name_generator.get_row_type_ident(); + let column_range_type = name_generator.get_column_range_type_ident(); + let row_fields_ident = name_generator.get_row_fields_enum_ident(); + + let fn_defs = self + .columns + .indexes + .iter() + .map(|(i, idx)| { + if idx.is_unique { + Self::gen_unique_index_fn(i, idx, &self.columns.columns_map, row_ident.clone()) + } else { + Self::gen_non_unique_index_fn( + i, + idx, + &self.columns.columns_map, + row_ident.clone(), + &column_range_type, + &row_fields_ident, + ) + } + }) + .collect::, syn::Error>>()?; + + Ok(quote! { + impl #ident { + #(#fn_defs)* + } + }) + } + + fn gen_unique_index_fn( + i: &Ident, + idx: &Index, + columns_map: &HashMap, + row_ident: Ident, + ) -> syn::Result { + let type_ = columns_map + .get(i) + .ok_or(syn::Error::new(i.span(), "Row not found"))?; + let fn_name = Ident::new(format!("select_by_{i}").as_str(), Span::mixed_site()); + let field_ident = &idx.name; + let by = if is_float(type_.to_string().as_str()) { + quote! { + &OrderedFloat(by) + } + } else { + quote! { + &by + } + }; + + Ok(quote! 
{ + pub fn #fn_name(&self, by: #type_) -> Option<#row_ident> { + let link: Link = self.0.indexes.#field_ident.get(#by).map(|kv| kv.get().value.into())?; + self.0.data.select_non_ghosted(link).ok() + } + }) + } + + fn gen_non_unique_index_fn( + i: &Ident, + idx: &Index, + columns_map: &HashMap, + row_ident: Ident, + column_range_type: &Ident, + row_fields_ident: &Ident, + ) -> syn::Result { + let type_ = columns_map + .get(i) + .ok_or(syn::Error::new(i.span(), "Row not found"))?; + let fn_name = Ident::new(format!("select_by_{i}").as_str(), Span::mixed_site()); + let field_ident = &idx.name; + let row_field_ident = &idx.field; + let by = if is_float(type_.to_string().as_str()) { + quote! { + &OrderedFloat(by) + } + } else { + quote! { + &by + } + }; + + Ok(quote! { + pub fn #fn_name(&self, by: #type_) -> SelectQueryBuilder<#row_ident, + impl DoubleEndedIterator + '_, + #column_range_type, + #row_fields_ident> + { + let rows = self.0.indexes.#field_ident + .get(#by) + .into_iter() + .filter_map(|(_, link)| self.0.data.select_non_ghosted(link.0).ok()) + .filter(move |r| &r.#row_field_ident == &by); + + SelectQueryBuilder::new(rows) + } + }) + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/table/mod.rs b/codegen/src/generators/read_only/table/mod.rs new file mode 100644 index 00000000..6c90fb9e --- /dev/null +++ b/codegen/src/generators/read_only/table/mod.rs @@ -0,0 +1,117 @@ +mod impls; +mod index_fns; +mod select_executor; + +use proc_macro2::TokenStream; +use quote::quote; + +use crate::common::name_generator::{WorktableNameGenerator, is_unsized_vec}; +use crate::generators::read_only::ReadOnlyGenerator; + +impl ReadOnlyGenerator { + pub fn gen_table_def(&mut self) -> syn::Result { + let page_size_consts = self.gen_page_size_consts(); + let version_const = self.gen_version_const(); + let type_ = self.gen_table_type(); + let impl_ = self.gen_table_impl(); + let index_fns = self.gen_table_index_fns()?; + let select_query_executor_impl 
= self.gen_table_select_query_executor_impl(); + let column_range_type = self.gen_table_column_range_type(); + + Ok(quote! { + #page_size_consts + #version_const + #type_ + #impl_ + #index_fns + #select_query_executor_impl + #column_range_type + }) + } + + fn gen_page_size_consts(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let page_const_name = name_generator.get_page_size_const_ident(); + let inner_const_name = name_generator.get_page_inner_size_const_ident(); + + quote! { + const #page_const_name: usize = PAGE_SIZE; + const #inner_const_name: usize = #page_const_name - GENERAL_HEADER_SIZE; + } + } + + fn gen_version_const(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let version_const_name = name_generator.get_version_const_ident(); + let version = self.version; + + quote! { + const #version_const_name: u32 = #version; + } + } + + fn gen_table_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let ident = name_generator.get_work_table_ident(); + let row_type = name_generator.get_row_type_ident(); + let primary_key_type = name_generator.get_primary_key_type_ident(); + let index_type = name_generator.get_index_type_ident(); + let inner_const_name = name_generator.get_page_inner_size_const_ident(); + let avt_type_ident = name_generator.get_available_type_ident(); + let avt_index_ident = name_generator.get_available_indexes_ident(); + let lock_ident = name_generator.get_lock_type_ident(); + + let pk_types = &self + .columns + .primary_keys + .iter() + .map(|i| { + self.columns + .columns_map + .get(i) + .expect("should exist as got from definition") + .to_string() + }) + .collect::>(); + let pk_types_unsized = is_unsized_vec(pk_types); + + let derive = if pk_types_unsized { + quote! { + #[derive(Debug, PersistTable)] + #[table(pk_unsized)] + } + } else { + quote! 
{ + #[derive(Debug, PersistTable)] + #[table(read_only)] + } + }; + + let node_type = if pk_types_unsized { + quote! { + UnsizedNode>> + } + } else { + quote! { + Vec>> + } + }; + + quote! { + #derive + pub struct #ident( + WorkTable< + #row_type, + #primary_key_type, + #avt_type_ident, + #avt_index_ident, + #index_type, + #lock_ident, + <#primary_key_type as TablePrimaryKey>::Generator, + #inner_const_name, + #node_type + > + ); + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/table/select_executor.rs b/codegen/src/generators/read_only/table/select_executor.rs new file mode 100644 index 00000000..a247a31d --- /dev/null +++ b/codegen/src/generators/read_only/table/select_executor.rs @@ -0,0 +1,227 @@ +use convert_case::{Case, Casing}; +use proc_macro2::Ident; +use proc_macro2::Span; +use proc_macro2::TokenStream; +use quote::quote; + +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::read_only::ReadOnlyGenerator; +use quote::ToTokens; +use syn::Type; + +const RANGE_VARIANTS: &[&str] = &["", "Inclusive", "From", "To", "ToInclusive"]; + +fn is_numeric_type(ty: &Type) -> bool { + matches!( + ty.to_token_stream().to_string().as_str(), + "i8" | "i16" + | "i32" + | "i64" + | "i128" + | "u8" + | "u16" + | "u32" + | "u64" + | "u128" + | "f32" + | "f64" + ) +} + +impl ReadOnlyGenerator { + pub fn gen_table_column_range_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let column_range_type = name_generator.get_column_range_type_ident(); + + let unique_types: std::collections::HashSet = self + .columns + .columns_map + .values() + .map(|ty| ty.to_token_stream().to_string()) + .filter(|ty| is_numeric_type(&syn::parse_str::(ty).unwrap())) + .map(|ty| ty.to_string()) + .collect(); + + let column_range_variants = unique_types.iter().map(|type_name| { + let ty_ident = Ident::new(&type_name.to_string(), Span::call_site()); + let variants: Vec<_> 
= RANGE_VARIANTS + .iter() + .map(|variant| { + let variant_ident = Ident::new( + &format!("{}{}", type_name.to_string().to_case(Case::Pascal), variant), + Span::call_site(), + ); + let range_ident = Ident::new(&format!("Range{variant}"), Span::call_site()); + quote! { + #variant_ident(std::ops::#range_ident<#ty_ident>), + } + }) + .collect(); + + quote! { + #(#variants)* + } + }); + + let from_impls = unique_types.iter().map(|type_name| { + let ty_ident = Ident::new(&type_name.to_string(), Span::call_site()); + let variants: Vec<_> = RANGE_VARIANTS + .iter() + .map(|variant| { + let variant_ident = Ident::new( + &format!("{}{}", type_name.to_string().to_case(Case::Pascal), variant), + Span::call_site(), + ); + let range_ident = Ident::new(&format!("Range{variant}"), Span::call_site()); + quote! { + impl From> for #column_range_type { + fn from(range: std::ops::#range_ident<#ty_ident>) -> Self { + Self::#variant_ident(range) + } + } + } + }) + .collect(); + + quote! { + #(#variants)* + } + }); + + quote! { + #[derive(Debug, Clone)] + pub enum #column_range_type { + #(#column_range_variants)* + } + + #(#from_impls)* + } + } + + pub fn gen_table_select_query_executor_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_type = name_generator.get_row_type_ident(); + let column_range_type = name_generator.get_column_range_type_ident(); + let row_fields_ident = name_generator.get_row_fields_enum_ident(); + + let order_matches = self.columns.columns_map.keys().map(|column| { + let column_variant = Ident::new(&column.to_string().to_case(Case::Pascal), Span::mixed_site()); + let col_ident = Ident::new(&column.to_string(), Span::call_site()); + quote! 
{ + #row_fields_ident::#column_variant => { + let cmp = a.#col_ident.partial_cmp(&b.#col_ident).unwrap_or(std::cmp::Ordering::Equal); + if cmp != std::cmp::Ordering::Equal { + return match order { + Order::Asc => cmp, + Order::Desc => cmp.reverse(), + }; + } + } + } + }); + + let range_matches = self + .columns + .columns_map + .iter() + .filter(|(_, ty)| { + is_numeric_type(&syn::parse_str::(&ty.to_token_stream().to_string()).unwrap()) + }) + .map(|(column, ty)| { + let variants: Vec<_> = RANGE_VARIANTS + .iter() + .map(|v| { + let column_variant = Ident::new(&column.to_string().to_case(Case::Pascal), Span::mixed_site()); + let col_ident = Ident::new(&column.to_string(), Span::call_site()); + let variant_ident = Ident::new( + &format!("{}{}", ty.to_string().to_case(Case::Pascal), v), + Span::call_site(), + ); + quote! { + (#row_fields_ident::#column_variant, #column_range_type::#variant_ident(range)) => { + Box::new(iter.filter(move |row| range.contains(&row.#col_ident))) + as Box> + }, + } + }) + .collect(); + + quote! { + #(#variants)* + } + }).collect::>(); + + let range = if range_matches.is_empty() { + quote! {} + } else { + quote! { + if !self.params.range.is_empty() { + for (range, column) in &self.params.range { + iter = match (column, range.clone().into()) { + #(#range_matches)* + _ => unreachable!(), + }; + } + } + } + }; + + quote! 
{ + impl SelectQueryExecutor<#row_type, I, #column_range_type, #row_fields_ident> + for SelectQueryBuilder<#row_type, I, #column_range_type, #row_fields_ident> + where + I: DoubleEndedIterator + Sized, + { + + fn where_by(self, predicate: F) -> SelectQueryBuilder<#row_type, + impl DoubleEndedIterator + Sized, + #column_range_type, + #row_fields_ident> + where + F: FnMut(&#row_type) -> bool, + { + SelectQueryBuilder { + params: self.params, + iter: self.iter.filter(predicate), + } + } + + fn execute(self) -> Result, WorkTableError> { + let mut iter: Box> = Box::new(self.iter); + + #range + + if !self.params.order.is_empty() { + let mut items: Vec<#row_type> = iter.collect(); + + items.sort_by(|a, b| { + for (order, col) in &self.params.order { + match col { + #(#order_matches)* + _ => continue, + } + } + std::cmp::Ordering::Equal + }); + + iter = Box::new(items.into_iter()); + } + + let iter_result: Box> = if let Some(offset) = self.params.offset { + Box::new(iter.skip(offset)) + } else { + Box::new(iter) + }; + + let iter_result: Box> = if let Some(limit) = self.params.limit { + Box::new(iter_result.take(limit)) + } else { + Box::new(iter_result) + }; + + Ok(iter_result.collect()) + } + } + } + } +} \ No newline at end of file diff --git a/codegen/src/generators/read_only/wrapper.rs b/codegen/src/generators/read_only/wrapper.rs new file mode 100644 index 00000000..99950cb7 --- /dev/null +++ b/codegen/src/generators/read_only/wrapper.rs @@ -0,0 +1,107 @@ +use crate::common::name_generator::WorktableNameGenerator; +use crate::generators::read_only::ReadOnlyGenerator; +use proc_macro2::TokenStream; +use quote::quote; + +impl ReadOnlyGenerator { + pub fn gen_wrapper_def(&self) -> TokenStream { + let type_ = self.gen_wrapper_type(); + let impl_ = self.gen_wrapper_impl(); + let storable_impl = self.get_wrapper_storable_impl(); + let archived_wrapper_impl = self.get_archived_wrapper_impl(); + + quote! 
{ + #type_ + #impl_ + #storable_impl + #archived_wrapper_impl + } + } + + fn gen_wrapper_type(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_ident = name_generator.get_row_type_ident(); + let wrapper_ident = name_generator.get_wrapper_type_ident(); + + quote! { + #[derive(rkyv::Archive, Debug, rkyv::Deserialize, rkyv::Serialize)] + #[repr(C)] + pub struct #wrapper_ident { + inner: #row_ident, + is_ghosted: bool, + is_deleted: bool, + is_in_vacuum_process: bool, + } + } + } + + pub fn gen_wrapper_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let wrapper_ident = name_generator.get_wrapper_type_ident(); + let row_ident = name_generator.get_row_type_ident(); + + quote! { + + impl RowWrapper<#row_ident> for #wrapper_ident { + fn get_inner(self) -> #row_ident { + self.inner + } + + fn is_ghosted(&self) -> bool { + self.is_ghosted + } + + fn is_vacuumed(&self) -> bool { + self.is_in_vacuum_process + } + + fn is_deleted(&self) -> bool { + self.is_deleted + } + + fn from_inner(inner: #row_ident) -> Self { + Self { + inner, + is_ghosted: true, + is_deleted: false, + is_in_vacuum_process: false, + } + } + } + } + } + + fn get_wrapper_storable_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_ident = name_generator.get_row_type_ident(); + let wrapper_ident = name_generator.get_wrapper_type_ident(); + + quote! { + impl StorableRow for #row_ident { + type WrappedRow = #wrapper_ident; + } + } + } + + fn get_archived_wrapper_impl(&self) -> TokenStream { + let name_generator = WorktableNameGenerator::from_table_name(self.name.to_string()); + let row_ident = name_generator.get_archived_wrapper_type_ident(); + + quote! 
{ + impl ArchivedRowWrapper for #row_ident { + fn unghost(&mut self) { + self.is_ghosted = false; + } + fn set_in_vacuum_process(&mut self) { + self.is_in_vacuum_process = true; + } + fn delete(&mut self) { + self.is_deleted = true; + } + fn is_deleted(&self) -> bool { + self.is_deleted + } + } + } + } +} \ No newline at end of file diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index df324c57..bc7b2ce9 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -1,10 +1,13 @@ +mod common; +mod generators; mod mem_stat; -mod name_generator; +mod migration_engine; mod persist_index; mod persist_table; #[cfg(feature = "s3-support")] mod s3_persistence; mod worktable; +mod worktable_version; use proc_macro::TokenStream; // TODO: Refactor this codegen stuff because it's now too strange. @@ -24,7 +27,7 @@ pub fn s3_sync_persistence(input: TokenStream) -> TokenStream { .into() } -#[proc_macro_derive(PersistIndex)] +#[proc_macro_derive(PersistIndex, attributes(index))] pub fn persist_index(input: TokenStream) -> TokenStream { persist_index::expand(input.into()) .unwrap_or_else(|e| e.to_compile_error()) @@ -44,3 +47,17 @@ pub fn mem_stat(input: TokenStream) -> TokenStream { .unwrap_or_else(|e| e.to_compile_error()) .into() } + +#[proc_macro] +pub fn worktable_version(input: TokenStream) -> TokenStream { + worktable_version::expand(input.into()) + .unwrap_or_else(|e| e.to_compile_error()) + .into() +} + +#[proc_macro] +pub fn migration_engine(input: TokenStream) -> TokenStream { + migration_engine::expand(input.into()) + .unwrap_or_else(|e| e.to_compile_error()) + .into() +} diff --git a/codegen/src/migration_engine/generator.rs b/codegen/src/migration_engine/generator.rs new file mode 100644 index 00000000..f23a91bc --- /dev/null +++ b/codegen/src/migration_engine/generator.rs @@ -0,0 +1,175 @@ +use proc_macro2::{Ident, TokenStream}; +use quote::{quote, ToTokens}; + +use super::parser::MigrationEngineInput; + +pub fn generate(input: MigrationEngineInput) -> 
TokenStream { + let migration = &input.migration; + let current_table = &input.current; + let ctx_type = &input.ctx; + let engine_name = Ident::new( + &format!("{}Engine", input.migration), + input.migration.span(), + ); + + let table_name_snake = input.name_generator.get_dir_name(); + let table_name_lit = proc_macro2::Literal::string(&table_name_snake); + let persistence_engine = input.name_generator.get_persistence_engine_ident(); + let pk_type = input.name_generator.get_primary_key_type_ident(); + + let current_table_path = syn::Path::from(input.current.clone()); + let current_row = MigrationEngineInput::row_type_for(¤t_table_path); + + let sorted_versions: Vec = input.version_tables.keys().copied().collect(); + let current_version: u32 = sorted_versions.last().map(|v| v + 1).unwrap_or(1); + + let version_fns = input.version_tables.iter().map(|(version, table_path)| { + let fn_name = Ident::new( + &format!("migrate_v{}", version), + current_table.span(), + ); + + // Build the migration chain from this version to current + let chain_steps = build_chain_steps( + &sorted_versions, + *version, + table_path, + migration, + ¤t_row, + &input.version_tables, + ); + + quote! { + async fn #fn_name( + source_path: &str, + target: &#current_table, + ctx: &#ctx_type, + ) -> eyre::Result<()> { + let config = DiskConfig::new_with_table_name(source_path, #table_name_lit, #version); + let engine = ReadOnlyPersistenceEngine::create(config).await?; + let source = #table_path::load(engine).await?; + + let rows = source.select_all().execute()?; + for row in rows { + #chain_steps + target.insert(current_row)?; + } + Ok(()) + } + } + }); + + // Generate the version match arms + let match_arms = input.version_tables.keys().map(|version| { + let fn_name = Ident::new(&format!("migrate_v{}", version), current_table.span()); + quote! { + #version => Self::#fn_name(source_path, &target, ctx).await?, + } + }); + + quote! 
{ + pub struct #engine_name; + + impl #engine_name { + #( #version_fns )* + + pub async fn migrate( + source_path: &str, + target_path: &str, + ctx: &#ctx_type, + ) -> eyre::Result { + let source_table_path = format!("{}/{}", source_path, #table_name_lit); + let version = worktable::migration::detect_version::<<<#pk_type as worktable::prelude::TablePrimaryKey>::Generator as worktable::prelude::PrimaryKeyGeneratorState>::State>(&source_table_path).await?; + + let target_config = DiskConfig::new_with_table_name(target_path, #table_name_lit, #current_version); + let target_engine = #persistence_engine::new(target_config).await?; + let target = #current_table::new(target_engine).await?; + + match version { + #( #match_arms )* + v => return Err(eyre::eyre!("Unsupported version: {}", v)), + }; + + target.wait_for_ops().await; + + Ok(MigrationReport { source_version: version }) + } + } + + pub struct MigrationReport { + pub source_version: u32, + } + } +} + +/// Build the chain of migration steps from a version table to the current row. +fn build_chain_steps( + sorted_versions: &[u32], + start_version: u32, + start_table: &syn::Path, + migration_type: &Ident, + current_row: &syn::Path, + version_tables: &std::collections::BTreeMap, +) -> TokenStream { + let start_idx = sorted_versions + .iter() + .position(|v| *v == start_version) + .unwrap_or(0); + let total = sorted_versions.len(); + + let span = start_table + .segments + .last() + .map(|s| s.ident.span()) + .unwrap_or(proc_macro2::Span::call_site()); + + if sorted_versions.is_empty() { + let from_row = MigrationEngineInput::row_type_for(start_table); + let to_row = current_row.to_token_stream(); + return quote! { + let current_row = <#migration_type as Migration<#from_row, #to_row>>::migrate(row, ctx); + }; + } + + if start_idx == total - 1 { + let from_row = MigrationEngineInput::row_type_for(start_table); + let to_row = current_row.to_token_stream(); + return quote! 
{ + let current_row = <#migration_type as Migration<#from_row, #to_row>>::migrate(row, ctx); + }; + } + + let mut steps = TokenStream::new(); + let mut current_var = quote! { row }; + + for i in (start_idx + 1)..=total { + let from_row_tokens = if i == start_idx + 1 { + MigrationEngineInput::row_type_for(start_table).to_token_stream() + } else { + let from_version = sorted_versions[i - 1]; + let from_table = version_tables.get(&from_version).unwrap(); + MigrationEngineInput::row_type_for(from_table).to_token_stream() + }; + + let to_var = Ident::new(&format!("next_{}", i), span); + + let to_row_tokens = if i < total { + let to_version = sorted_versions[i]; + let to_table = version_tables.get(&to_version).unwrap(); + MigrationEngineInput::row_type_for(to_table).to_token_stream() + } else { + current_row.to_token_stream() + }; + + steps = quote! { + #steps + let #to_var = <#migration_type as Migration<#from_row_tokens, #to_row_tokens>>::migrate(#current_var, ctx); + }; + current_var = quote! { #to_var }; + } + + quote! 
{ + #steps + let current_row = #current_var; + } +} diff --git a/codegen/src/migration_engine/mod.rs b/codegen/src/migration_engine/mod.rs new file mode 100644 index 00000000..b67a9d80 --- /dev/null +++ b/codegen/src/migration_engine/mod.rs @@ -0,0 +1,9 @@ +mod generator; +mod parser; + +use proc_macro2::TokenStream; + +pub fn expand(input: TokenStream) -> syn::Result { + let parsed = parser::MigrationEngineInput::parse(input)?; + Ok(generator::generate(parsed)) +} diff --git a/codegen/src/migration_engine/parser.rs b/codegen/src/migration_engine/parser.rs new file mode 100644 index 00000000..520d62f0 --- /dev/null +++ b/codegen/src/migration_engine/parser.rs @@ -0,0 +1,164 @@ +use proc_macro2::{Ident, TokenStream, TokenTree}; +use std::collections::BTreeMap; +use syn::spanned::Spanned as _; + +use crate::common::{name_generator::WorktableNameGenerator, Parser}; + +pub struct MigrationEngineInput { + pub migration: Ident, + pub current: Ident, + pub ctx: Ident, + pub version_tables: BTreeMap, + pub name_generator: WorktableNameGenerator, +} + +impl MigrationEngineInput { + pub fn parse(input: TokenStream) -> syn::Result { + let span = input.span(); + let mut parser = Parser::new(input); + let mut migration = None; + let mut current = None; + let mut ctx = None; + let mut version_tables = None; + + while let Some(ident) = parser.peek_next() { + if let TokenTree::Punct(p) = ident + && p.as_char() == ',' + { + parser.input_iter.next(); + continue; + } + match ident.to_string().as_str() { + "migration" => migration = Some(parse_ident_field(&mut parser)?), + "current" => current = Some(parse_ident_field(&mut parser)?), + "ctx" => ctx = Some(parse_ident_field(&mut parser)?), + "version_tables" => version_tables = Some(parse_version_tables(&mut parser)?), + _ => { + return Err(syn::Error::new( + ident.span(), + format!("Unexpected identifier: {}", ident), + )); + } + } + } + + let current = current.ok_or_else(|| syn::Error::new(span, "missing `current`"))?; + let 
name_generator = WorktableNameGenerator::from_struct_ident(¤t); + + Ok(Self { + migration: migration.ok_or_else(|| syn::Error::new(span, "missing `migration`"))?, + current, + ctx: ctx.ok_or_else(|| syn::Error::new(span, "missing `ctx`"))?, + version_tables: version_tables + .ok_or_else(|| syn::Error::new(span, "missing `version_tables`"))?, + name_generator, + }) + } + + /// Derive the row type path from a table path. + /// e.g. `v1::UserV1WorkTable` -> `v1::UserV1Row` + /// e.g. `UserWorkTable` -> `UserRow` + pub fn row_type_for(table: &syn::Path) -> syn::Path { + let last_segment = table + .segments + .last() + .expect("path should have at least one segment"); + let ident_str = last_segment.ident.to_string(); + let base = ident_str + .strip_suffix("WorkTable") + .expect("table type should end with `WorkTable`"); + let row_ident = Ident::new(&format!("{}Row", base), last_segment.ident.span()); + + let leading_segments: Vec<_> = table + .segments + .iter() + .take(table.segments.len() - 1) + .cloned() + .collect(); + + let mut new_path = syn::Path { + leading_colon: table.leading_colon, + segments: Default::default(), + }; + + for seg in leading_segments { + new_path.segments.push(seg); + } + new_path.segments.push(syn::PathSegment { + ident: row_ident, + arguments: syn::PathArguments::None, + }); + + new_path + } +} + +fn parse_ident_field(parser: &mut Parser) -> syn::Result { + let _key = parser.input_iter.next().unwrap(); // consume the key ident + parser.parse_colon()?; + let value = parser + .input_iter + .next() + .ok_or_else(|| syn::Error::new(parser.input.span(), "Expected identifier"))?; + let ident = if let TokenTree::Ident(ident) = value { + ident + } else { + return Err(syn::Error::new(value.span(), "Expected identifier")); + }; + parser.try_parse_comma()?; + Ok(ident) +} + +fn parse_version_tables(parser: &mut Parser) -> syn::Result> { + let _key = parser.input_iter.next().unwrap(); + parser.parse_colon()?; + + let brace = parser + .input_iter + 
.next() + .ok_or_else(|| syn::Error::new(parser.input.span(), "Expected `{`"))?; + let span = brace.span(); + + let inner_tokens = match brace { + TokenTree::Group(group) => group.stream(), + _ => return Err(syn::Error::new(span, "Expected `{` for version_tables")), + }; + + let mut result = BTreeMap::new(); + let mut inner_parser = Parser::new(inner_tokens); + + while inner_parser.peek_next().is_some() { + let key_token = inner_parser + .input_iter + .next() + .ok_or_else(|| syn::Error::new(span, "Expected version number"))?; + let version = if let TokenTree::Literal(lit) = key_token { + let s = lit.to_string().replace('_', ""); + s.parse::() + .map_err(|_| syn::Error::new(lit.span(), "Expected u32 version number"))? + } else { + return Err(syn::Error::new(key_token.span(), "Expected version number")); + }; + + inner_parser.parse_fat_arrow()?; + + let path_tokens: TokenStream = inner_parser + .input_iter + .by_ref() + .take_while(|t| { + if let TokenTree::Punct(p) = t { + p.as_char() != ',' + } else { + true + } + }) + .collect(); + let path: syn::Path = syn::parse2(path_tokens.clone()) + .map_err(|e| syn::Error::new(path_tokens.span(), format!("Invalid path: {}", e)))?; + + inner_parser.try_parse_comma()?; + result.insert(version, path); + } + + Ok(result) +} diff --git a/codegen/src/persist_index/generator.rs b/codegen/src/persist_index/generator.rs index 4492c198..82cdb3b9 100644 --- a/codegen/src/persist_index/generator.rs +++ b/codegen/src/persist_index/generator.rs @@ -2,15 +2,21 @@ use std::collections::HashMap; use proc_macro2::{Ident, Literal, TokenStream}; use quote::__private::Span; -use quote::{ToTokens, quote}; +use quote::{quote, ToTokens}; use syn::ItemStruct; -use crate::name_generator::{WorktableNameGenerator, is_unsized}; +use crate::common::name_generator::{is_unsized, WorktableNameGenerator}; use crate::persist_table::WT_INDEX_EXTENSION; +#[derive(Default)] +pub struct PersistIndexAttributes { + pub read_only: bool, +} + pub struct Generator { 
pub struct_def: ItemStruct, pub field_types: HashMap, + pub attributes: PersistIndexAttributes, } impl WorktableNameGenerator { @@ -33,7 +39,7 @@ impl WorktableNameGenerator { } impl Generator { - pub fn new(struct_def: ItemStruct) -> Self { + pub fn with_attributes(struct_def: ItemStruct, attributes: PersistIndexAttributes) -> Self { let mut fields = vec![]; let mut types = vec![]; @@ -46,7 +52,7 @@ impl Generator { ); let syn::Type::Path(type_path) = &field.ty else { - unreachable!() + unreachable!(); }; let last_segment = type_path @@ -75,6 +81,7 @@ impl Generator { Self { struct_def, field_types: map, + attributes, } } @@ -114,7 +121,11 @@ impl Generator { let name_generator = WorktableNameGenerator::from_index_ident(&self.struct_def.ident); let name_ident = name_generator.get_persisted_index_ident(); - let persist_fn = self.gen_persist_fn(); + let persist_fn = if self.attributes.read_only { + quote! {} + } else { + self.gen_persist_fn() + }; let parse_from_file_fn = self.gen_parse_from_file_fn(); Ok(quote! { @@ -237,7 +248,17 @@ impl Generator { let name_generator = WorktableNameGenerator::from_index_ident(&self.struct_def.ident); let name_ident = name_generator.get_persisted_index_ident(); - let get_persisted_index_fn = self.gen_get_persisted_index_fn(); + let get_persisted_index_fn = if self.attributes.read_only { + let name_generator = WorktableNameGenerator::from_index_ident(&self.struct_def.ident); + let name_ident = name_generator.get_persisted_index_ident(); + quote! { + fn get_persisted_index(&self) -> Self::PersistedIndex { + #name_ident::default() + } + } + } else { + self.gen_get_persisted_index_fn() + }; let from_persisted_fn = self.gen_from_persisted_fn()?; Ok(quote! 
{ @@ -489,7 +510,7 @@ mod tests { use proc_macro2::{Ident, Span}; use quote::quote; - use crate::persist_index::generator::Generator; + use crate::persist_index::generator::{Generator, PersistIndexAttributes}; use crate::persist_index::parser::Parser; #[test] @@ -502,7 +523,7 @@ mod tests { } }; let struct_ = Parser::parse_struct(input).unwrap(); - let generator = Generator::new(struct_); + let generator = Generator::with_attributes(struct_, PersistIndexAttributes::default()); assert_eq!( generator @@ -523,4 +544,39 @@ mod tests { "String" ); } + + #[test] + fn parses_read_only_attribute() { + let input = quote! { + #[derive(Debug, Default, Clone)] + #[index(read_only)] + pub struct TestIndex { + test_idx: TreeIndex, + } + }; + let struct_ = Parser::parse_struct(input).unwrap(); + let attrs = Parser::parse_attributes(&struct_.attrs); + + assert!(attrs.read_only); + } + + #[test] + fn default_attributes_are_false() { + let attrs = PersistIndexAttributes::default(); + assert!(!attrs.read_only); + } + + #[test] + fn without_read_only_attribute() { + let input = quote! 
{ + #[derive(Debug, Default, Clone)] + pub struct TestIndex { + test_idx: TreeIndex, + } + }; + let struct_ = Parser::parse_struct(input).unwrap(); + let attrs = Parser::parse_attributes(&struct_.attrs); + + assert!(!attrs.read_only); + } } diff --git a/codegen/src/persist_index/mod.rs b/codegen/src/persist_index/mod.rs index 1f040e2b..10cf91d3 100644 --- a/codegen/src/persist_index/mod.rs +++ b/codegen/src/persist_index/mod.rs @@ -10,7 +10,8 @@ mod space; pub fn expand(input: TokenStream) -> syn::Result { let input_struct = Parser::parse_struct(input)?; - let mut generator = Generator::new(input_struct); + let attributes = Parser::parse_attributes(&input_struct.attrs); + let mut generator = Generator::with_attributes(input_struct, attributes); let type_def = generator.gen_persist_type()?; let persistable_def = generator.gen_persistable_impl()?; @@ -51,7 +52,37 @@ mod tests { } }; + let _res = expand(input).unwrap(); + } + + #[test] + fn test_read_only() { + let input = quote! { + #[derive(Debug, Default, Clone)] + #[index(read_only)] + pub struct ReadOnlyIndex { + test_idx: TreeIndex, + } + }; + let res = expand(input).unwrap(); - println!("{:?}", res.to_string()) + let output = res.to_string(); + + assert!( + !output.contains("pub async fn persist"), + "read_only index should not have persist method" + ); + assert!( + output.contains("fn get_persisted_index"), + "read_only index should have get_persisted_index returning default" + ); + assert!( + output.contains("pub async fn parse_from_file"), + "read_only index should have parse_from_file method" + ); + assert!( + output.contains("fn from_persisted"), + "read_only index should have from_persisted method" + ); } } diff --git a/codegen/src/persist_index/parser.rs b/codegen/src/persist_index/parser.rs index 1a407325..5eefef7f 100644 --- a/codegen/src/persist_index/parser.rs +++ b/codegen/src/persist_index/parser.rs @@ -1,7 +1,10 @@ use proc_macro2::TokenStream; -use syn::ItemStruct; +use quote::ToTokens; +use 
syn::{Attribute, ItemStruct}; use syn::spanned::Spanned; +use crate::persist_index::generator::PersistIndexAttributes; + pub struct Parser; impl Parser { @@ -11,6 +14,25 @@ impl Parser { Err(err) => Err(syn::Error::new(input.span(), err.to_string())), } } + + pub fn parse_attributes(attrs: &Vec) -> PersistIndexAttributes { + let mut res = PersistIndexAttributes::default(); + + for attr in attrs { + if attr.path().to_token_stream().to_string().as_str() == "index" { + attr.parse_nested_meta(|meta| { + if meta.path.is_ident("read_only") { + res.read_only = true; + return Ok(()); + } + Ok(()) + }) + .expect("always ok even on unrecognized attrs"); + } + } + + res + } } #[cfg(test)] diff --git a/codegen/src/persist_index/space/events.rs b/codegen/src/persist_index/space/events.rs index b99f68ec..28b66819 100644 --- a/codegen/src/persist_index/space/events.rs +++ b/codegen/src/persist_index/space/events.rs @@ -2,7 +2,7 @@ use convert_case::{Case, Casing}; use proc_macro2::TokenStream; use quote::quote; -use crate::name_generator::WorktableNameGenerator; +use crate::common::name_generator::WorktableNameGenerator; use crate::persist_index::generator::Generator; impl Generator { diff --git a/codegen/src/persist_index/space/index.rs b/codegen/src/persist_index/space/index.rs index 27688283..48bbba0d 100644 --- a/codegen/src/persist_index/space/index.rs +++ b/codegen/src/persist_index/space/index.rs @@ -1,7 +1,7 @@ use proc_macro2::{Literal, TokenStream}; use quote::quote; -use crate::name_generator::{WorktableNameGenerator, is_unsized}; +use crate::common::name_generator::{WorktableNameGenerator, is_unsized}; use crate::persist_index::generator::Generator; impl Generator { @@ -62,18 +62,18 @@ impl Generator { let literal_name = Literal::string(i.to_string().as_str()); if is_unsized(&t.to_string()) { quote! 
{ - #i: SpaceIndexUnsized::secondary_from_table_files_path(path, #literal_name).await?, + #i: SpaceIndexUnsized::secondary_from_table_files_path(path, #literal_name, version).await?, } } else { quote! { - #i: SpaceIndex::secondary_from_table_files_path(path, #literal_name).await?, + #i: SpaceIndex::secondary_from_table_files_path(path, #literal_name, version).await?, } } }) .collect(); quote! { - async fn from_table_files_path>(path: S) -> eyre::Result { + async fn from_table_files_path>(path: S, version: u32) -> eyre::Result { let path = path.as_ref(); Ok(Self { #(#fields)* diff --git a/codegen/src/persist_table/generator/mod.rs b/codegen/src/persist_table/generator/mod.rs index bab3211b..bd9e2d35 100644 --- a/codegen/src/persist_table/generator/mod.rs +++ b/codegen/src/persist_table/generator/mod.rs @@ -1,8 +1,7 @@ -use convert_case::{Case, Casing}; use proc_macro2::{Ident, Span}; use syn::ItemStruct; -use crate::name_generator::WorktableNameGenerator; +use crate::common::name_generator::WorktableNameGenerator; pub use space_file::WT_INDEX_EXTENSION; @@ -11,6 +10,7 @@ mod space_file; pub struct PersistTableAttributes { pub pk_unsized: bool, + pub read_only: bool, } pub struct Generator { @@ -27,10 +27,6 @@ impl WorktableNameGenerator { ) } - pub fn get_dir_name(&self) -> String { - self.name.from_case(Case::Pascal).to_case(Case::Snake) - } - pub fn get_persistence_engine_ident(&self) -> Ident { Ident::new( format!("{}PersistenceEngine", self.name).as_str(), diff --git a/codegen/src/persist_table/generator/space.rs b/codegen/src/persist_table/generator/space.rs index da3e4174..9690ce2c 100644 --- a/codegen/src/persist_table/generator/space.rs +++ b/codegen/src/persist_table/generator/space.rs @@ -1,10 +1,13 @@ -use crate::name_generator::WorktableNameGenerator; +use crate::common::name_generator::WorktableNameGenerator; use crate::persist_table::generator::Generator; use proc_macro2::TokenStream; use quote::quote; impl Generator { pub fn 
get_persistence_task_type(&self) -> TokenStream { + if self.attributes.read_only { + return quote! {}; + } let name_generator = WorktableNameGenerator::from_struct_ident(&self.struct_def.ident); let ident = name_generator.get_persistence_task_ident(); let primary_key_type = name_generator.get_primary_key_type_ident(); @@ -23,6 +26,9 @@ impl Generator { } pub fn get_persistence_engine_type(&self) -> TokenStream { + if self.attributes.read_only { + return quote! {}; + } let name_generator = WorktableNameGenerator::from_struct_ident(&self.struct_def.ident); let ident = name_generator.get_persistence_engine_ident(); let primary_key_type = name_generator.get_primary_key_type_ident(); diff --git a/codegen/src/persist_table/generator/space_file/mod.rs b/codegen/src/persist_table/generator/space_file/mod.rs index b0f9e29c..bd7c8200 100644 --- a/codegen/src/persist_table/generator/space_file/mod.rs +++ b/codegen/src/persist_table/generator/space_file/mod.rs @@ -3,7 +3,7 @@ mod worktable_impls; use proc_macro2::{Literal, TokenStream}; use quote::quote; -use crate::name_generator::WorktableNameGenerator; +use crate::common::name_generator::WorktableNameGenerator; use crate::persist_table::generator::Generator; pub const WT_INDEX_EXTENSION: &str = ".wt.idx"; @@ -52,20 +52,22 @@ impl Generator { fn gen_space_file_get_primary_index_info_fn(&self) -> TokenStream { let name_generator = WorktableNameGenerator::from_struct_ident(&self.struct_def.ident); let literal_name = name_generator.get_work_table_literal_name(); + let version_const = name_generator.get_version_const_ident(); quote! 
{ fn get_primary_index_info(&self) -> eyre::Result>> { let mut info = { let inner = SpaceInfoPage { - id: 0.into(), - page_count: 0, - name: #literal_name.to_string(), - pk_gen_state: (), - empty_links_list: vec![], - primary_key_fields: vec![], - row_schema: vec![], - secondary_index_types: vec![], - }; + id: 0.into(), + version: #version_const, + page_count: 0, + name: #literal_name.to_string(), + pk_gen_state: (), + empty_links_list: vec![], + primary_key_fields: vec![], + row_schema: vec![], + secondary_index_types: vec![], + }; let header = GeneralHeader { data_version: DATA_VERSION, page_id: 0.into(), @@ -165,49 +167,83 @@ impl Generator { } }; - quote! { - pub async fn into_worktable(self, engine: E) -> #wt_ident - where - E: PersistenceEngine< - <<#pk_type as TablePrimaryKey>::Generator as PrimaryKeyGeneratorState>::State, - #pk_type, - #secondary_index_events, - #avt_index_ident, - Config=C - > + Send - + 'static, - C: Clone + PersistenceConfig, - { - let mut page_id = 1; - let data = self.data.into_iter().map(|p| { - let mut data = Data::from_data_page(p); - data.set_page_id(page_id.into()); - page_id += 1; + if self.attributes.read_only { + quote! 
{ + pub fn into_worktable(self) -> #wt_ident { + let mut page_id = 1; + let data = self.data.into_iter().map(|p| { + let mut data = Data::from_data_page(p); + data.set_page_id(page_id.into()); + page_id += 1; - std::sync::Arc::new(data) - }) - .collect(); - let data = DataPages::from_data(data) - .with_empty_links(self.data_info.inner.empty_links_list); - let indexes = #index_ident::from_persisted(self.indexes); + std::sync::Arc::new(data) + }) + .collect(); + let data = DataPages::from_data(data) + .with_empty_links(self.data_info.inner.empty_links_list); + let indexes = #index_ident::from_persisted(self.indexes); - #primary_index_init + #primary_index_init - let table = WorkTable { - data: std::sync::Arc::new(data), - primary_index: std::sync::Arc::new(primary_index), - indexes: std::sync::Arc::new(indexes), - pk_gen: PrimaryKeyGeneratorState::from_state(self.data_info.inner.pk_gen_state), - lock_manager: std::sync::Arc::new(LockMap::<#lock_type, #pk_type>::default()), - update_state: IndexMap::default(), - table_name: #table_name, - pk_phantom: std::marker::PhantomData, - }; + let table = WorkTable { + data: std::sync::Arc::new(data), + primary_index: std::sync::Arc::new(primary_index), + indexes: std::sync::Arc::new(indexes), + pk_gen: PrimaryKeyGeneratorState::from_state(self.data_info.inner.pk_gen_state), + lock_manager: std::sync::Arc::new(LockMap::<#lock_type, #pk_type>::default()), + update_state: IndexMap::default(), + table_name: #table_name, + pk_phantom: std::marker::PhantomData, + }; + + #wt_ident(table) + } + } + } else { + quote! 
{ + pub async fn into_worktable(self, engine: E) -> #wt_ident + where + E: PersistenceEngine< + <<#pk_type as TablePrimaryKey>::Generator as PrimaryKeyGeneratorState>::State, + #pk_type, + #secondary_index_events, + #avt_index_ident, + Config=C + > + Send + + 'static, + C: Clone + PersistenceConfig, + { + let mut page_id = 1; + let data = self.data.into_iter().map(|p| { + let mut data = Data::from_data_page(p); + data.set_page_id(page_id.into()); + page_id += 1; + + std::sync::Arc::new(data) + }) + .collect(); + let data = DataPages::from_data(data) + .with_empty_links(self.data_info.inner.empty_links_list); + let indexes = #index_ident::from_persisted(self.indexes); + + #primary_index_init - #wt_ident( - table, - #task_ident::run_engine(engine) - ) + let table = WorkTable { + data: std::sync::Arc::new(data), + primary_index: std::sync::Arc::new(primary_index), + indexes: std::sync::Arc::new(indexes), + pk_gen: PrimaryKeyGeneratorState::from_state(self.data_info.inner.pk_gen_state), + lock_manager: std::sync::Arc::new(LockMap::<#lock_type, #pk_type>::default()), + update_state: IndexMap::default(), + table_name: #table_name, + pk_phantom: std::marker::PhantomData, + }; + + #wt_ident( + table, + #task_ident::run_engine(engine) + ) + } } } } diff --git a/codegen/src/persist_table/generator/space_file/worktable_impls.rs b/codegen/src/persist_table/generator/space_file/worktable_impls.rs index da9095bf..3e2e4f03 100644 --- a/codegen/src/persist_table/generator/space_file/worktable_impls.rs +++ b/codegen/src/persist_table/generator/space_file/worktable_impls.rs @@ -1,7 +1,7 @@ use proc_macro2::TokenStream; use quote::quote; -use crate::name_generator::WorktableNameGenerator; +use crate::common::name_generator::WorktableNameGenerator; use crate::persist_table::generator::Generator; impl Generator { @@ -21,9 +21,15 @@ impl Generator { } fn gen_worktable_wait_for_ops_fn(&self) -> TokenStream { - quote! 
{ - pub async fn wait_for_ops(&self) { - self.1.wait_for_ops().await + if self.attributes.read_only { + quote! { + pub async fn wait_for_ops(&self) {} + } + } else { + quote! { + pub async fn wait_for_ops(&self) { + self.1.wait_for_ops().await + } } } } @@ -32,10 +38,12 @@ impl Generator { let name_generator = WorktableNameGenerator::from_struct_ident(&self.struct_def.ident); let pk = name_generator.get_primary_key_type_ident(); let literal_name = name_generator.get_work_table_literal_name(); + let version_const = name_generator.get_version_const_ident(); quote! { pub fn space_info_default() -> GeneralPage::Generator as PrimaryKeyGeneratorState>::State>> { let inner = SpaceInfoPage { + version: #version_const, id: 0.into(), page_count: 0, name: #literal_name.to_string(), diff --git a/codegen/src/persist_table/mod.rs b/codegen/src/persist_table/mod.rs index 92c7a36a..b7ba5abb 100644 --- a/codegen/src/persist_table/mod.rs +++ b/codegen/src/persist_table/mod.rs @@ -21,8 +21,16 @@ pub fn expand(input: TokenStream) -> syn::Result { }; let space_file_def = generator.gen_space_file_def(); - let persistence_engine = generator.get_persistence_engine_type(); - let persistence_task = generator.get_persistence_task_type(); + let persistence_engine = if generator.attributes.read_only { + quote! {} + } else { + generator.get_persistence_engine_type() + }; + let persistence_task = if generator.attributes.read_only { + quote! {} + } else { + generator.get_persistence_task_type() + }; Ok(quote! { #space_file_def @@ -30,3 +38,63 @@ pub fn expand(input: TokenStream) -> syn::Result { #persistence_task }) } + +#[cfg(test)] +mod tests { + use quote::quote; + + use crate::persist_table::expand; + + #[test] + fn test_read_only_skips_engine_and_task() { + let input = quote! 
{ + #[derive(Debug)] + #[table(read_only)] + pub struct TestReadOnlyWorkTable(WorkTable); + }; + + let res = expand(input).unwrap(); + let output = res.to_string(); + + assert!( + !output.contains("PersistenceEngine"), + "read_only should not generate PersistenceEngine" + ); + assert!( + !output.contains("PersistenceTask"), + "read_only should not generate PersistenceTask" + ); + assert!( + output.contains("fn into_worktable (self)"), + "read_only should have sync into_worktable without engine param" + ); + assert!( + !output.contains("async fn into_worktable"), + "read_only into_worktable should not be async" + ); + } + + #[test] + fn test_normal_generates_engine_and_task() { + let input = quote! { + #[derive(Debug)] + pub struct TestWorkTable(WorkTable); + }; + + let res = expand(input).unwrap(); + let output = res.to_string(); + + assert!( + output.contains("PersistenceEngine"), + "normal should generate PersistenceEngine" + ); + assert!( + output.contains("PersistenceTask"), + "normal should generate PersistenceTask" + ); + assert!( + output.contains("async fn into_worktable"), + "normal into_worktable should be async" + ); + } +} diff --git a/codegen/src/persist_table/parser.rs b/codegen/src/persist_table/parser.rs index cd8e0aa9..e8f1a4d9 100644 --- a/codegen/src/persist_table/parser.rs +++ b/codegen/src/persist_table/parser.rs @@ -33,7 +33,7 @@ impl Parser { } pub fn parse_attributes(attrs: &Vec) -> PersistTableAttributes { - let mut res = PersistTableAttributes { pk_unsized: false }; + let mut res = PersistTableAttributes { pk_unsized: false, read_only: false }; for attr in attrs { if attr.path().to_token_stream().to_string().as_str() == "table" { @@ -42,6 +42,10 @@ impl Parser { res.pk_unsized = true; return Ok(()); } + if meta.path.is_ident("read_only") { + res.read_only = true; + return Ok(()); + } Ok(()) }) .expect("always ok even on unrecognized attrs"); diff --git a/codegen/src/s3_persistence/mod.rs b/codegen/src/s3_persistence/mod.rs index 
d32ef5c5..453f5acc 100644 --- a/codegen/src/s3_persistence/mod.rs +++ b/codegen/src/s3_persistence/mod.rs @@ -3,7 +3,7 @@ use quote::quote; use syn::parse::{Parse, ParseStream}; use syn::{Ident, Result}; -use crate::name_generator::WorktableNameGenerator; +use crate::common::name_generator::WorktableNameGenerator; struct S3PersistenceInput { table_name: Ident, diff --git a/codegen/src/worktable/generator/mod.rs b/codegen/src/worktable/generator/mod.rs deleted file mode 100644 index 40602754..00000000 --- a/codegen/src/worktable/generator/mod.rs +++ /dev/null @@ -1,36 +0,0 @@ -mod locks; -mod primary_key; -mod queries; -mod row; -mod table; -//mod table_old; -//mod table_index; -mod index; -mod wrapper; - -use proc_macro2::Ident; - -use crate::worktable::model::{Columns, Config, PrimaryKey, Queries}; - -pub struct Generator { - pub name: Ident, - pub is_persist: bool, - pub pk: Option, - pub queries: Option, - pub config: Option, - - pub columns: Columns, -} - -impl Generator { - pub fn new(name: Ident, is_persist: bool, columns: Columns) -> Self { - Self { - name, - is_persist, - pk: None, - queries: None, - config: None, - columns, - } - } -} diff --git a/codegen/src/worktable/mod.rs b/codegen/src/worktable/mod.rs index 90583c66..151c84f0 100644 --- a/codegen/src/worktable/mod.rs +++ b/codegen/src/worktable/mod.rs @@ -1,12 +1,6 @@ use proc_macro2::TokenStream; -use quote::quote; -mod generator; -mod model; -mod parser; - -use crate::worktable::generator::Generator; -pub use parser::Parser; +use crate::common::Parser; pub fn expand(input: TokenStream) -> syn::Result { let mut parser = Parser::new(input); @@ -16,6 +10,7 @@ pub fn expand(input: TokenStream) -> syn::Result { let mut config = None; let name = parser.parse_name()?; + let version = parser.parse_version()?.unwrap_or(1); let is_persist = parser.parse_persist()?; while let Some(ident) = parser.peek_next() { match ident.to_string().as_str() { @@ -35,6 +30,12 @@ pub fn expand(input: TokenStream) -> 
syn::Result { let res = parser.parse_configs()?; config = Some(res) } + "version" => { + return Err(syn::Error::new( + ident.span(), + "version must be specified before columns/indexes/queries/config", + )) + } _ => return Err(syn::Error::new(ident.span(), "Unexpected identifier")), } } @@ -43,39 +44,10 @@ pub fn expand(input: TokenStream) -> syn::Result { if let Some(i) = indexes { columns.indexes = i } - let mut generator = Generator::new(name, is_persist, columns); - generator.queries = queries; - generator.config = config; - let pk_def = generator.gen_primary_key_def()?; - let row_def = generator.gen_row_def(); - let wrapper_def = generator.gen_wrapper_def(); - let locks_def = generator.gen_locks_def(); - let index_def = generator.gen_index_def()?; - let table_def = generator.gen_table_def()?; - let query_types_def = generator.gen_result_types_def()?; - let query_available_def = generator.gen_available_types_def()?; - let query_locks_impls = generator.gen_query_locks_impl()?; - let select_impls = generator.gen_query_select_impl()?; - let update_impls = generator.gen_query_update_impl()?; - let update_in_place_impls = generator.gen_query_in_place_impl()?; - let delete_impls = generator.gen_query_delete_impl()?; - let unsized_impl = generator.gen_unsized_impls(); - - Ok(quote! 
{ - #pk_def - #row_def - #query_available_def - #wrapper_def - #locks_def - #index_def - #table_def - #query_types_def - #query_locks_impls - #select_impls - #update_impls - #update_in_place_impls - #delete_impls - #unsized_impl - }) -} + if is_persist { + crate::generators::persist::expand(name, columns, queries, config, version) + } else { + crate::generators::in_memory::expand_from_parsed(name, columns, queries, config) + } +} \ No newline at end of file diff --git a/codegen/src/worktable/parser/name.rs b/codegen/src/worktable/parser/name.rs deleted file mode 100644 index 9cc449ba..00000000 --- a/codegen/src/worktable/parser/name.rs +++ /dev/null @@ -1,93 +0,0 @@ -use proc_macro2::Ident; -use proc_macro2::TokenTree; -use syn::spanned::Spanned as _; - -use crate::worktable::parser::Parser; - -impl Parser { - pub fn parse_name(&mut self) -> syn::Result { - let ident = self.input_iter.next().ok_or(syn::Error::new( - self.input.span(), - "Expected `name` field in declaration", - ))?; - if let TokenTree::Ident(ident) = ident { - if ident.to_string().as_str() != "name" { - return Err(syn::Error::new( - ident.span(), - "Expected `name` field. `WorkTable` name must be specified", - )); - } - } else { - return Err(syn::Error::new( - ident.span(), - "Expected field name identifier.", - )); - }; - - self.parse_colon()?; - - let name = self - .input_iter - .next() - .ok_or(syn::Error::new(self.input.span(), "Expected token."))?; - let name = if let TokenTree::Ident(name) = name { - name - } else { - return Err(syn::Error::new(name.span(), "Expected identifier.")); - }; - - self.try_parse_comma()?; - - Ok(name) - } -} - -#[cfg(test)] -mod tests { - use quote::quote; - - use crate::worktable::Parser; - - #[test] - fn test_name_parse() { - let tokens = quote! 
{name: TestName,}; - - let mut parser = Parser::new(tokens); - let name = parser.parse_name(); - - assert!(name.is_ok()); - let name = name.unwrap(); - - assert_eq!(name, "TestName"); - } - - #[test] - fn test_empty() { - let tokens = quote! {}; - - let mut parser = Parser::new(tokens); - let name = parser.parse_name(); - - assert!(name.is_err()); - } - - #[test] - fn test_literal_field() { - let tokens = quote! {"nme": TestName,}; - - let mut parser = Parser::new(tokens); - let name = parser.parse_name(); - - assert!(name.is_err()); - } - - #[test] - fn test_wrong_field() { - let tokens = quote! {nme: TestName,}; - - let mut parser = Parser::new(tokens); - let name = parser.parse_name(); - - assert!(name.is_err()); - } -} diff --git a/codegen/src/worktable_version/mod.rs b/codegen/src/worktable_version/mod.rs new file mode 100644 index 00000000..1f64cc93 --- /dev/null +++ b/codegen/src/worktable_version/mod.rs @@ -0,0 +1,161 @@ +use proc_macro2::TokenStream; +use syn::Error; + +use crate::common::Parser; +use crate::generators::read_only; + +pub fn expand(input: TokenStream) -> syn::Result { + let mut parser = Parser::new(input); + let mut columns = None; + let mut indexes = None; + + let name = parser.parse_name()?; + let version = parser.parse_version()?.unwrap_or(1); + + while let Some(ident) = parser.peek_next() { + match ident.to_string().as_str() { + "columns" => columns = Some(parser.parse_columns()?), + "indexes" => indexes = Some(parser.parse_indexes()?), + "queries" => { + return Err(Error::new( + ident.span(), + "worktable_version! does not support queries", + )) + } + "config" => { + return Err(Error::new( + ident.span(), + "worktable_version! 
does not support config", + )) + } + "version" => { + return Err(Error::new( + ident.span(), + "version must be specified before columns/indexes", + )) + } + _ => return Err(Error::new(ident.span(), "Unexpected identifier")), + } + } + + let mut columns = columns.expect("columns must be defined"); + if let Some(i) = indexes { + columns.indexes = i + } + + read_only::expand(name, columns, version) +} + +#[cfg(test)] +mod tests { + use quote::quote; + + use crate::worktable_version::expand; + + #[test] + fn test_basic_version_macro() { + let input = quote! { + name: UserV1, + columns: { + id: u64 primary_key, + name: String, + }, + }; + + let res = expand(input).unwrap(); + let output = res.to_string(); + + assert!( + output.contains("index (read_only)"), + "should generate read_only index attribute" + ); + assert!( + output.contains("table (read_only)"), + "should generate read_only table attribute" + ); + } + + #[test] + fn test_version_with_indexes() { + let input = quote! { + name: UserV2, + columns: { + id: u64 primary_key, + email: String, + }, + indexes: { + email_idx: email unique, + }, + }; + + let res = expand(input).unwrap(); + let output = res.to_string(); + + assert!(output.contains("email_idx"), "should include index field"); + } + + #[test] + fn test_rejects_queries() { + let input = quote! { + name: UserV1, + columns: { + id: u64 primary_key, + }, + queries: { + select: { ById() by id }, + }, + }; + + let res = expand(input); + assert!(res.is_err(), "should reject queries section"); + } + + #[test] + fn test_rejects_config() { + let input = quote! { + name: UserV1, + columns: { + id: u64 primary_key, + }, + config: { + page_size: 8192, + }, + }; + + let res = expand(input); + assert!(res.is_err(), "should reject config section"); + } + + #[test] + fn test_explicit_version() { + let input = quote! 
{ + name: UserV1, + version: 2, + columns: { + id: u64 primary_key, + }, + }; + + let res = expand(input).unwrap(); + let output = res.to_string(); + + assert!( + output.contains("index (read_only)"), + "should generate read_only index attribute" + ); + } + + #[test] + fn test_rejects_version_after_columns() { + let input = quote! { + name: UserV1, + columns: { + id: u64 primary_key, + }, + version: 2, + }; + + let res = expand(input); + assert!(res.is_err(), "should reject version after columns"); + } +} \ No newline at end of file diff --git a/src/features/s3_support.rs b/src/features/s3_support.rs index c67d9103..8a53cf42 100644 --- a/src/features/s3_support.rs +++ b/src/features/s3_support.rs @@ -39,6 +39,10 @@ impl PersistenceConfig for S3DiskConfig { fn table_path(&self) -> &str { self.disk.table_path() } + + fn version(&self) -> u32 { + todo!() + } } #[derive(Debug)] diff --git a/src/lib.rs b/src/lib.rs index 03f3f9f5..c27e34aa 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,7 @@ pub mod in_memory; mod index; pub mod lock; +pub mod migration; mod mem_stat; pub mod persistence; mod primary_key; @@ -17,7 +18,9 @@ pub use row::*; pub use table::*; pub use data_bucket; +pub use worktable_codegen::migration_engine; pub use worktable_codegen::worktable; +pub use worktable_codegen::worktable_version; #[cfg(feature = "s3-support")] pub use worktable_codegen::s3_sync_persistence; @@ -33,7 +36,7 @@ pub mod prelude { pub use crate::persistence::{ AcknowledgeOperation, DeleteOperation, DiskConfig, DiskPersistenceEngine, IndexTableOfContents, InsertOperation, Operation, OperationId, PersistedWorkTable, PersistenceConfig, PersistenceEngine, - PersistenceTask, SpaceData, SpaceDataOps, SpaceIndex, SpaceIndexOps, SpaceIndexUnsized, + PersistenceTask, ReadOnlyPersistenceEngine, SpaceData, SpaceDataOps, SpaceIndex, SpaceIndexOps, SpaceIndexUnsized, SpaceSecondaryIndexOps, UpdateOperation, map_index_pages_to_toc_and_general, map_unsized_index_pages_to_toc_and_general, 
validate_events, }; diff --git a/src/migration/engine.rs b/src/migration/engine.rs new file mode 100644 index 00000000..d537afc0 --- /dev/null +++ b/src/migration/engine.rs @@ -0,0 +1,25 @@ +use rkyv::{Archive, Deserialize, Serialize}; +use rkyv::api::high::HighDeserializer; +use rkyv::rancor::Strategy; +use rkyv::ser::allocator::ArenaHandle; +use rkyv::ser::sharing::Share; +use rkyv::ser::Serializer; +use rkyv::util::AlignedVec; +use tokio::fs::File; + +use crate::prelude::{WT_DATA_EXTENSION, GeneralPage, SpaceInfoPage, parse_page, Persistable}; + +/// Detect version from SpaceInfoPage at page 0 in .wt.data. +pub async fn detect_version(table_path: &str) -> eyre::Result +where + PkGenState: Default + Clone + Archive + Send, + for<'a> PkGenState: Serialize, Share>, rkyv::rancor::Error>>, + ::Archived: Deserialize>, + SpaceInfoPage: Persistable, +{ + let data_file_path = format!("{}/{}", table_path, WT_DATA_EXTENSION); + let mut file = File::open(&data_file_path).await?; + let info: GeneralPage> = + parse_page::<_, 4096>(&mut file, 0).await?; + Ok(info.inner.version) +} diff --git a/src/migration/mod.rs b/src/migration/mod.rs new file mode 100644 index 00000000..73e6599f --- /dev/null +++ b/src/migration/mod.rs @@ -0,0 +1,32 @@ +use rkyv::api::high::HighDeserializer; +use rkyv::rancor::Strategy; +use rkyv::ser::allocator::ArenaHandle; +use rkyv::ser::sharing::Share; +use rkyv::ser::Serializer; +use rkyv::util::AlignedVec; +use rkyv::{Archive, Deserialize, Serialize}; +use tokio::fs::File; + +use crate::prelude::{parse_page, GeneralPage, Persistable, SpaceInfoPage, WT_DATA_EXTENSION}; + +pub trait Migration { + type Context: Default + Send + Sync; + + fn migrate(row: FromRow, ctx: &Self::Context) -> ToRow; +} + +/// Detect version from SpaceInfoPage at page 0 in .wt.data. 
+pub async fn detect_version(table_path: &str) -> eyre::Result +where + PkGenState: Default + Clone + Archive + Send, + for<'a> PkGenState: + Serialize, Share>, rkyv::rancor::Error>>, + ::Archived: + Deserialize>, + SpaceInfoPage: Persistable, +{ + let data_file_path = format!("{}/{}", table_path, WT_DATA_EXTENSION); + let mut file = File::open(&data_file_path).await?; + let info: GeneralPage> = parse_page::<_, 4096>(&mut file, 0).await?; + Ok(info.inner.version) +} diff --git a/src/migration/traits.rs b/src/migration/traits.rs new file mode 100644 index 00000000..e69de29b diff --git a/src/persistence/engine.rs b/src/persistence/engine.rs index eb491489..a654bd6f 100644 --- a/src/persistence/engine.rs +++ b/src/persistence/engine.rs @@ -19,19 +19,22 @@ use crate::prelude::{PrimaryKeyGeneratorState, TablePrimaryKey}; pub struct DiskConfig { pub config_path: String, pub tables_path: String, + pub version: u32, } impl DiskConfig { - pub fn new, S2: Into>(config_path: S1, table_files_dir: S2) -> Self { + pub fn new, S2: Into>(config_path: S1, table_files_dir: S2, version: u32) -> Self { Self { config_path: config_path.into(), tables_path: table_files_dir.into(), + version, } } pub fn new_with_table_name, S2: AsRef>( config_path: S1, table_name_snake_case: S2, + version: u32, ) -> Self { let config_path = config_path.into(); let table_name = table_name_snake_case.as_ref(); @@ -39,6 +42,7 @@ impl DiskConfig { Self { config_path, tables_path, + version, } } } @@ -47,6 +51,10 @@ impl PersistenceConfig for DiskConfig { fn table_path(&self) -> &str { &self.tables_path } + + fn version(&self) -> u32 { + self.version + } } #[derive(Debug)] @@ -112,21 +120,18 @@ where Ok(Self { config: config.clone(), - data: SpaceData::from_table_files_path(config.tables_path.clone()).await?, + data: SpaceData::from_table_files_path(config.tables_path.clone(), config.version).await?, primary_index: SpacePrimaryIndex::primary_from_table_files_path( config.tables_path.clone(), + config.version, 
) .await?, - secondary_indexes: SpaceSecondaryIndexes::from_table_files_path(config.tables_path) + secondary_indexes: SpaceSecondaryIndexes::from_table_files_path(config.tables_path, config.version) .await?, phantom_data: PhantomData, }) } - fn config(&self) -> &DiskConfig { - &self.config - } - async fn apply_operation( &mut self, op: Operation, @@ -205,4 +210,8 @@ where Ok(()) } + + fn config(&self) -> &DiskConfig { + &self.config + } } diff --git a/src/persistence/mod.rs b/src/persistence/mod.rs index 7d889e25..b0f57dd7 100644 --- a/src/persistence/mod.rs +++ b/src/persistence/mod.rs @@ -4,6 +4,7 @@ use crate::persistence::operation::BatchOperation; pub use engine::DiskConfig; pub use engine::DiskPersistenceEngine; +pub use readonly_engine::ReadOnlyPersistenceEngine; pub use operation::{ AcknowledgeOperation, DeleteOperation, InsertOperation, Operation, OperationId, OperationType, UpdateOperation, validate_events, @@ -17,12 +18,15 @@ pub use task::PersistenceTask; mod engine; pub mod operation; +mod readonly_engine; mod space; mod task; // TODO: remove this pub trait PersistenceConfig { fn table_path(&self) -> &str; + + fn version(&self) -> u32; } pub trait PersistedWorkTable: Sized diff --git a/src/persistence/readonly_engine.rs b/src/persistence/readonly_engine.rs new file mode 100644 index 00000000..75e0c1aa --- /dev/null +++ b/src/persistence/readonly_engine.rs @@ -0,0 +1,66 @@ +use std::fmt::Debug; +use std::hash::Hash; + +use crate::TableSecondaryIndexEventsOps; +use crate::persistence::operation::{BatchOperation, Operation}; +use crate::persistence::{PersistenceConfig, PersistenceEngine}; +use crate::prelude::{PrimaryKeyGeneratorState, TablePrimaryKey}; + +#[derive(Debug)] +pub struct ReadOnlyPersistenceEngine { + config: C, +} + +impl ReadOnlyPersistenceEngine { + pub async fn create(config: C) -> eyre::Result { + Ok(Self { config }) + } + + pub fn config_ref(&self) -> &C { + &self.config + } +} + +impl + PersistenceEngine + for ReadOnlyPersistenceEngine 
+where + C: PersistenceConfig + Send, + PrimaryKey: Clone + Debug + Ord + TablePrimaryKey + Send, + ::Generator: PrimaryKeyGeneratorState, + SecondaryIndexEvents: Clone + Debug + Default + TableSecondaryIndexEventsOps + Send, + PrimaryKeyGenState: Clone + Debug + Send, + AvailableIndexes: Clone + Copy + Debug + Eq + Hash + Send, +{ + type Config = C; + + async fn new(config: Self::Config) -> eyre::Result + where + Self: Sized, + { + Ok(Self { config }) + } + + async fn apply_operation( + &mut self, + _op: Operation, + ) -> eyre::Result<()> { + Ok(()) + } + + async fn apply_batch_operation( + &mut self, + _batch_op: BatchOperation< + PrimaryKeyGenState, + PrimaryKey, + SecondaryIndexEvents, + AvailableIndexes, + >, + ) -> eyre::Result<()> { + Ok(()) + } + + fn config(&self) -> &Self::Config { + &self.config + } +} diff --git a/src/persistence/space/data.rs b/src/persistence/space/data.rs index fcae0501..c57f5465 100644 --- a/src/persistence/space/data.rs +++ b/src/persistence/space/data.rs @@ -2,20 +2,20 @@ use std::future::Future; use std::io::SeekFrom; use std::path::Path; +use crate::persistence::space::{open_or_create_file, BatchData}; use crate::persistence::SpaceDataOps; -use crate::persistence::space::{BatchData, open_or_create_file}; use crate::prelude::WT_DATA_EXTENSION; use convert_case::{Case, Casing}; use data_bucket::{ - DataPage, GeneralHeader, GeneralPage, Link, PageType, Persistable, SizeMeasurable, - SpaceInfoPage, parse_data_pages_batch, parse_general_header_by_index, parse_page, persist_page, - persist_pages_batch, update_at, + parse_data_pages_batch, parse_general_header_by_index, parse_page, persist_page, persist_pages_batch, update_at, DataPage, + GeneralHeader, GeneralPage, Link, PageType, Persistable, + SizeMeasurable, SpaceInfoPage, }; use rkyv::api::high::HighDeserializer; use rkyv::rancor::Strategy; -use rkyv::ser::Serializer; use rkyv::ser::allocator::ArenaHandle; use rkyv::ser::sharing::Share; +use rkyv::ser::Serializer; use 
rkyv::util::AlignedVec; use rkyv::{Archive, Deserialize, Serialize}; use tokio::fs::File; @@ -58,7 +58,7 @@ where Deserialize>, SpaceInfoPage: Persistable, { - async fn from_table_files_path + Send>(table_path: S) -> eyre::Result { + async fn from_table_files_path + Send>(table_path: S, version: u32) -> eyre::Result { let path = format!("{}/{}", table_path.as_ref(), WT_DATA_EXTENSION); let mut data_file = if !Path::new(&path).exists() { let name = table_path @@ -70,7 +70,7 @@ where .from_case(Case::Snake) .to_case(Case::Pascal); let mut data_file = open_or_create_file(path).await?; - Self::bootstrap(&mut data_file, name).await?; + Self::bootstrap(&mut data_file, name, version).await?; data_file } else { open_or_create_file(path).await? @@ -89,11 +89,12 @@ where }) } - async fn bootstrap(file: &mut File, table_name: String) -> eyre::Result<()> { + async fn bootstrap(file: &mut File, table_name: String, version: u32) -> eyre::Result<()> { let info = SpaceInfoPage { id: 0.into(), page_count: 0, name: table_name, + version, row_schema: vec![], primary_key_fields: vec![], secondary_index_types: vec![], diff --git a/src/persistence/space/index/mod.rs b/src/persistence/space/index/mod.rs index b317027f..9f9aadad 100644 --- a/src/persistence/space/index/mod.rs +++ b/src/persistence/space/index/mod.rs @@ -63,7 +63,7 @@ where + 'static, ::Archived: Deserialize> + Ord + Eq + Debug, { - pub async fn new>(index_file_path: S, space_id: SpaceId) -> eyre::Result { + pub async fn new>(index_file_path: S, space_id: SpaceId, version: u32) -> eyre::Result { let mut index_file = if !Path::new(index_file_path.as_ref()).exists() { let name = index_file_path .as_ref() @@ -77,7 +77,7 @@ where .from_case(Case::Snake) .to_case(Case::Pascal); let mut index_file = open_or_create_file(index_file_path.as_ref()).await?; - Self::bootstrap(&mut index_file, name).await?; + Self::bootstrap(&mut index_file, name, version).await?; index_file } else { open_or_create_file(index_file_path).await? 
@@ -351,14 +351,16 @@ where { async fn primary_from_table_files_path + Send>( table_path: S, + version: u32, ) -> eyre::Result { let path = format!("{}/primary{}", table_path.as_ref(), WT_INDEX_EXTENSION); - Self::new(path, 0.into()).await + Self::new(path, 0.into(), version).await } async fn secondary_from_table_files_path + Send, S2: AsRef + Send>( table_path: S1, name: S2, + version: u32, ) -> eyre::Result where Self: Sized, @@ -369,14 +371,15 @@ where name.as_ref(), WT_INDEX_EXTENSION ); - Self::new(path, 0.into()).await + Self::new(path, 0.into(), version).await } - async fn bootstrap(file: &mut File, table_name: String) -> eyre::Result<()> { + async fn bootstrap(file: &mut File, table_name: String, version: u32) -> eyre::Result<()> { let info = SpaceInfoPage { id: 0.into(), page_count: 0, name: table_name, + version, row_schema: vec![], primary_key_fields: vec![], secondary_index_types: vec![], diff --git a/src/persistence/space/index/unsized_.rs b/src/persistence/space/index/unsized_.rs index 64228a53..66980997 100644 --- a/src/persistence/space/index/unsized_.rs +++ b/src/persistence/space/index/unsized_.rs @@ -55,8 +55,8 @@ where + 'static, ::Archived: Deserialize> + Ord + Eq + Debug, { - pub async fn new>(index_file_path: S, space_id: SpaceId) -> eyre::Result { - let space_index = SpaceIndex::::new(index_file_path, space_id).await?; + pub async fn new>(index_file_path: S, space_id: SpaceId, version: u32) -> eyre::Result { + let space_index = SpaceIndex::::new(index_file_path, space_id, version).await?; Ok(Self { space_id, table_of_contents: space_index.table_of_contents, @@ -325,14 +325,16 @@ where { async fn primary_from_table_files_path + Send>( table_path: S, + version: u32, ) -> eyre::Result { let path = format!("{}/primary{}", table_path.as_ref(), WT_INDEX_EXTENSION); - Self::new(path, 0.into()).await + Self::new(path, 0.into(), version).await } async fn secondary_from_table_files_path + Send, S2: AsRef + Send>( table_path: S1, name: S2, + version: 
u32, ) -> eyre::Result where Self: Sized, @@ -343,11 +345,11 @@ where name.as_ref(), WT_INDEX_EXTENSION ); - Self::new(path, 0.into()).await + Self::new(path, 0.into(), version).await } - async fn bootstrap(file: &mut File, table_name: String) -> eyre::Result<()> { - SpaceIndex::::bootstrap(file, table_name).await + async fn bootstrap(file: &mut File, table_name: String, version: u32) -> eyre::Result<()> { + SpaceIndex::::bootstrap(file, table_name, version).await } async fn process_change_event( diff --git a/src/persistence/space/mod.rs b/src/persistence/space/mod.rs index 554e71f6..66b4d0e8 100644 --- a/src/persistence/space/mod.rs +++ b/src/persistence/space/mod.rs @@ -24,12 +24,14 @@ pub type BatchChangeEvent = Vec>>; pub trait SpaceDataOps { fn from_table_files_path + Send>( path: S, + version: u32, ) -> impl Future> + Send where Self: Sized; fn bootstrap( file: &mut File, table_name: String, + version: u32, ) -> impl Future> + Send; fn save_data( &mut self, @@ -50,18 +52,21 @@ where { fn primary_from_table_files_path + Send>( path: S, + version: u32, ) -> impl Future> + Send where Self: Sized; fn secondary_from_table_files_path + Send, S2: AsRef + Send>( path: S1, name: S2, + version: u32, ) -> impl Future> + Send where Self: Sized; fn bootstrap( file: &mut File, table_name: String, + version: u32, ) -> impl Future> + Send; fn process_change_event( &mut self, @@ -76,6 +81,7 @@ where pub trait SpaceSecondaryIndexOps { fn from_table_files_path + Send>( path: S, + version: u32, ) -> impl Future> + Send where Self: Sized; diff --git a/tests/data/expected/persist_index_table_of_contents.wt.idx b/tests/data/expected/persist_index_table_of_contents.wt.idx index c8cf8224..9235e590 100644 Binary files a/tests/data/expected/persist_index_table_of_contents.wt.idx and b/tests/data/expected/persist_index_table_of_contents.wt.idx differ diff --git a/tests/data/expected/space_index/indexset/process_create_node.wt.idx 
b/tests/data/expected/space_index/indexset/process_create_node.wt.idx index 965fd837..4fc48140 100644 Binary files a/tests/data/expected/space_index/indexset/process_create_node.wt.idx and b/tests/data/expected/space_index/indexset/process_create_node.wt.idx differ diff --git a/tests/data/expected/space_index/indexset/process_insert_at.wt.idx b/tests/data/expected/space_index/indexset/process_insert_at.wt.idx index 8f038df6..998027e3 100644 Binary files a/tests/data/expected/space_index/indexset/process_insert_at.wt.idx and b/tests/data/expected/space_index/indexset/process_insert_at.wt.idx differ diff --git a/tests/data/expected/space_index/indexset/process_insert_at_big_amount.wt.idx b/tests/data/expected/space_index/indexset/process_insert_at_big_amount.wt.idx index a00aee38..681ca432 100644 Binary files a/tests/data/expected/space_index/indexset/process_insert_at_big_amount.wt.idx and b/tests/data/expected/space_index/indexset/process_insert_at_big_amount.wt.idx differ diff --git a/tests/data/expected/space_index/process_create_node.wt.idx b/tests/data/expected/space_index/process_create_node.wt.idx index 0573a1dd..2c81cac6 100644 Binary files a/tests/data/expected/space_index/process_create_node.wt.idx and b/tests/data/expected/space_index/process_create_node.wt.idx differ diff --git a/tests/data/expected/space_index/process_create_node_after_remove.wt.idx b/tests/data/expected/space_index/process_create_node_after_remove.wt.idx index 84926678..8972871f 100644 Binary files a/tests/data/expected/space_index/process_create_node_after_remove.wt.idx and b/tests/data/expected/space_index/process_create_node_after_remove.wt.idx differ diff --git a/tests/data/expected/space_index/process_create_second_node.wt.idx b/tests/data/expected/space_index/process_create_second_node.wt.idx index 6f2d11cd..e5adb74d 100644 Binary files a/tests/data/expected/space_index/process_create_second_node.wt.idx and b/tests/data/expected/space_index/process_create_second_node.wt.idx 
differ diff --git a/tests/data/expected/space_index/process_insert_at.wt.idx b/tests/data/expected/space_index/process_insert_at.wt.idx index 8f038df6..998027e3 100644 Binary files a/tests/data/expected/space_index/process_insert_at.wt.idx and b/tests/data/expected/space_index/process_insert_at.wt.idx differ diff --git a/tests/data/expected/space_index/process_insert_at_big_amount.wt.idx b/tests/data/expected/space_index/process_insert_at_big_amount.wt.idx index 24b0aba0..b7d05af1 100644 Binary files a/tests/data/expected/space_index/process_insert_at_big_amount.wt.idx and b/tests/data/expected/space_index/process_insert_at_big_amount.wt.idx differ diff --git a/tests/data/expected/space_index/process_insert_at_removed_place.wt.idx b/tests/data/expected/space_index/process_insert_at_removed_place.wt.idx index 1c336b77..c90e8f91 100644 Binary files a/tests/data/expected/space_index/process_insert_at_removed_place.wt.idx and b/tests/data/expected/space_index/process_insert_at_removed_place.wt.idx differ diff --git a/tests/data/expected/space_index/process_insert_at_with_node_id_update.wt.idx b/tests/data/expected/space_index/process_insert_at_with_node_id_update.wt.idx index 47977220..6f3dad2d 100644 Binary files a/tests/data/expected/space_index/process_insert_at_with_node_id_update.wt.idx and b/tests/data/expected/space_index/process_insert_at_with_node_id_update.wt.idx differ diff --git a/tests/data/expected/space_index/process_remove_at.wt.idx b/tests/data/expected/space_index/process_remove_at.wt.idx index 0573a1dd..2c81cac6 100644 Binary files a/tests/data/expected/space_index/process_remove_at.wt.idx and b/tests/data/expected/space_index/process_remove_at.wt.idx differ diff --git a/tests/data/expected/space_index/process_remove_at_node_id.wt.idx b/tests/data/expected/space_index/process_remove_at_node_id.wt.idx index cda5fe8a..8ca971de 100644 Binary files a/tests/data/expected/space_index/process_remove_at_node_id.wt.idx and 
b/tests/data/expected/space_index/process_remove_at_node_id.wt.idx differ diff --git a/tests/data/expected/space_index/process_remove_node.wt.idx b/tests/data/expected/space_index/process_remove_node.wt.idx index 6114c013..4a178159 100644 Binary files a/tests/data/expected/space_index/process_remove_node.wt.idx and b/tests/data/expected/space_index/process_remove_node.wt.idx differ diff --git a/tests/data/expected/space_index/process_split_node.wt.idx b/tests/data/expected/space_index/process_split_node.wt.idx index f6787ec1..cfc83485 100644 Binary files a/tests/data/expected/space_index/process_split_node.wt.idx and b/tests/data/expected/space_index/process_split_node.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/indexset/process_create_node.wt.idx b/tests/data/expected/space_index_unsized/indexset/process_create_node.wt.idx index 4b55de09..d39710d9 100644 Binary files a/tests/data/expected/space_index_unsized/indexset/process_create_node.wt.idx and b/tests/data/expected/space_index_unsized/indexset/process_create_node.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/indexset/process_insert_at.wt.idx b/tests/data/expected/space_index_unsized/indexset/process_insert_at.wt.idx index 54b6055b..f2ce86f8 100644 Binary files a/tests/data/expected/space_index_unsized/indexset/process_insert_at.wt.idx and b/tests/data/expected/space_index_unsized/indexset/process_insert_at.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/indexset/process_insert_at_big_amount.wt.idx b/tests/data/expected/space_index_unsized/indexset/process_insert_at_big_amount.wt.idx index caa290c2..07a32e23 100644 Binary files a/tests/data/expected/space_index_unsized/indexset/process_insert_at_big_amount.wt.idx and b/tests/data/expected/space_index_unsized/indexset/process_insert_at_big_amount.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_create_node.wt.idx 
b/tests/data/expected/space_index_unsized/process_create_node.wt.idx index 4601d2a9..67f1dcdb 100644 Binary files a/tests/data/expected/space_index_unsized/process_create_node.wt.idx and b/tests/data/expected/space_index_unsized/process_create_node.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_create_node_after_remove.wt.idx b/tests/data/expected/space_index_unsized/process_create_node_after_remove.wt.idx index 92928f9c..077bfa42 100644 Binary files a/tests/data/expected/space_index_unsized/process_create_node_after_remove.wt.idx and b/tests/data/expected/space_index_unsized/process_create_node_after_remove.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_create_second_node.wt.idx b/tests/data/expected/space_index_unsized/process_create_second_node.wt.idx index 06bdd5d7..71990622 100644 Binary files a/tests/data/expected/space_index_unsized/process_create_second_node.wt.idx and b/tests/data/expected/space_index_unsized/process_create_second_node.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_insert_at.wt.idx b/tests/data/expected/space_index_unsized/process_insert_at.wt.idx index e6942d81..77f709cc 100644 Binary files a/tests/data/expected/space_index_unsized/process_insert_at.wt.idx and b/tests/data/expected/space_index_unsized/process_insert_at.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_insert_at_big_amount.wt.idx b/tests/data/expected/space_index_unsized/process_insert_at_big_amount.wt.idx index 48cd3566..c8d4efe9 100644 Binary files a/tests/data/expected/space_index_unsized/process_insert_at_big_amount.wt.idx and b/tests/data/expected/space_index_unsized/process_insert_at_big_amount.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_insert_at_removed_place.wt.idx b/tests/data/expected/space_index_unsized/process_insert_at_removed_place.wt.idx index 2edd226e..6fa8d58b 100644 Binary files 
a/tests/data/expected/space_index_unsized/process_insert_at_removed_place.wt.idx and b/tests/data/expected/space_index_unsized/process_insert_at_removed_place.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_insert_at_with_node_id_update.wt.idx b/tests/data/expected/space_index_unsized/process_insert_at_with_node_id_update.wt.idx index 809410fb..bdf2a54e 100644 Binary files a/tests/data/expected/space_index_unsized/process_insert_at_with_node_id_update.wt.idx and b/tests/data/expected/space_index_unsized/process_insert_at_with_node_id_update.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_remove_at.wt.idx b/tests/data/expected/space_index_unsized/process_remove_at.wt.idx index ca1fd08f..912b7b28 100644 Binary files a/tests/data/expected/space_index_unsized/process_remove_at.wt.idx and b/tests/data/expected/space_index_unsized/process_remove_at.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_remove_at_node_id.wt.idx b/tests/data/expected/space_index_unsized/process_remove_at_node_id.wt.idx index bd772e9a..8ddb966c 100644 Binary files a/tests/data/expected/space_index_unsized/process_remove_at_node_id.wt.idx and b/tests/data/expected/space_index_unsized/process_remove_at_node_id.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_remove_node.wt.idx b/tests/data/expected/space_index_unsized/process_remove_node.wt.idx index c2cbcadf..15c10160 100644 Binary files a/tests/data/expected/space_index_unsized/process_remove_node.wt.idx and b/tests/data/expected/space_index_unsized/process_remove_node.wt.idx differ diff --git a/tests/data/expected/space_index_unsized/process_split_node.wt.idx b/tests/data/expected/space_index_unsized/process_split_node.wt.idx index caf696f8..7b62df18 100644 Binary files a/tests/data/expected/space_index_unsized/process_split_node.wt.idx and b/tests/data/expected/space_index_unsized/process_split_node.wt.idx differ diff --git 
a/tests/data/persist_index_table_of_contents.wt.idx b/tests/data/persist_index_table_of_contents.wt.idx index c8cf8224..9235e590 100644 Binary files a/tests/data/persist_index_table_of_contents.wt.idx and b/tests/data/persist_index_table_of_contents.wt.idx differ diff --git a/tests/data/space_index/indexset/process_create_node.wt.idx b/tests/data/space_index/indexset/process_create_node.wt.idx index 965fd837..4fc48140 100644 Binary files a/tests/data/space_index/indexset/process_create_node.wt.idx and b/tests/data/space_index/indexset/process_create_node.wt.idx differ diff --git a/tests/data/space_index/indexset/process_insert_at.wt.idx b/tests/data/space_index/indexset/process_insert_at.wt.idx index 8f038df6..998027e3 100644 Binary files a/tests/data/space_index/indexset/process_insert_at.wt.idx and b/tests/data/space_index/indexset/process_insert_at.wt.idx differ diff --git a/tests/data/space_index/indexset/process_insert_at_big_amount.wt.idx b/tests/data/space_index/indexset/process_insert_at_big_amount.wt.idx index a00aee38..681ca432 100644 Binary files a/tests/data/space_index/indexset/process_insert_at_big_amount.wt.idx and b/tests/data/space_index/indexset/process_insert_at_big_amount.wt.idx differ diff --git a/tests/data/space_index/process_create_node.wt.idx b/tests/data/space_index/process_create_node.wt.idx index 0573a1dd..2c81cac6 100644 Binary files a/tests/data/space_index/process_create_node.wt.idx and b/tests/data/space_index/process_create_node.wt.idx differ diff --git a/tests/data/space_index/process_create_node_after_remove.wt.idx b/tests/data/space_index/process_create_node_after_remove.wt.idx index 84926678..8972871f 100644 Binary files a/tests/data/space_index/process_create_node_after_remove.wt.idx and b/tests/data/space_index/process_create_node_after_remove.wt.idx differ diff --git a/tests/data/space_index/process_create_second_node.wt.idx b/tests/data/space_index/process_create_second_node.wt.idx index 6f2d11cd..e5adb74d 100644 Binary 
files a/tests/data/space_index/process_create_second_node.wt.idx and b/tests/data/space_index/process_create_second_node.wt.idx differ diff --git a/tests/data/space_index/process_insert_at.wt.idx b/tests/data/space_index/process_insert_at.wt.idx index 8f038df6..998027e3 100644 Binary files a/tests/data/space_index/process_insert_at.wt.idx and b/tests/data/space_index/process_insert_at.wt.idx differ diff --git a/tests/data/space_index/process_insert_at_big_amount.wt.idx b/tests/data/space_index/process_insert_at_big_amount.wt.idx index 24b0aba0..b7d05af1 100644 Binary files a/tests/data/space_index/process_insert_at_big_amount.wt.idx and b/tests/data/space_index/process_insert_at_big_amount.wt.idx differ diff --git a/tests/data/space_index/process_insert_at_removed_place.wt.idx b/tests/data/space_index/process_insert_at_removed_place.wt.idx index 1c336b77..c90e8f91 100644 Binary files a/tests/data/space_index/process_insert_at_removed_place.wt.idx and b/tests/data/space_index/process_insert_at_removed_place.wt.idx differ diff --git a/tests/data/space_index/process_insert_at_with_node_id_update.wt.idx b/tests/data/space_index/process_insert_at_with_node_id_update.wt.idx index 47977220..6f3dad2d 100644 Binary files a/tests/data/space_index/process_insert_at_with_node_id_update.wt.idx and b/tests/data/space_index/process_insert_at_with_node_id_update.wt.idx differ diff --git a/tests/data/space_index/process_remove_at.wt.idx b/tests/data/space_index/process_remove_at.wt.idx index 0573a1dd..2c81cac6 100644 Binary files a/tests/data/space_index/process_remove_at.wt.idx and b/tests/data/space_index/process_remove_at.wt.idx differ diff --git a/tests/data/space_index/process_remove_at_node_id.wt.idx b/tests/data/space_index/process_remove_at_node_id.wt.idx index cda5fe8a..8ca971de 100644 Binary files a/tests/data/space_index/process_remove_at_node_id.wt.idx and b/tests/data/space_index/process_remove_at_node_id.wt.idx differ diff --git 
a/tests/data/space_index/process_remove_node.wt.idx b/tests/data/space_index/process_remove_node.wt.idx index 6114c013..4a178159 100644 Binary files a/tests/data/space_index/process_remove_node.wt.idx and b/tests/data/space_index/process_remove_node.wt.idx differ diff --git a/tests/data/space_index/process_split_node.wt.idx b/tests/data/space_index/process_split_node.wt.idx index f6787ec1..cfc83485 100644 Binary files a/tests/data/space_index/process_split_node.wt.idx and b/tests/data/space_index/process_split_node.wt.idx differ diff --git a/tests/data/space_index_unsized/indexset/process_create_node.wt.idx b/tests/data/space_index_unsized/indexset/process_create_node.wt.idx index 4b55de09..d39710d9 100644 Binary files a/tests/data/space_index_unsized/indexset/process_create_node.wt.idx and b/tests/data/space_index_unsized/indexset/process_create_node.wt.idx differ diff --git a/tests/data/space_index_unsized/indexset/process_insert_at.wt.idx b/tests/data/space_index_unsized/indexset/process_insert_at.wt.idx index 54b6055b..f2ce86f8 100644 Binary files a/tests/data/space_index_unsized/indexset/process_insert_at.wt.idx and b/tests/data/space_index_unsized/indexset/process_insert_at.wt.idx differ diff --git a/tests/data/space_index_unsized/indexset/process_insert_at_big_amount.wt.idx b/tests/data/space_index_unsized/indexset/process_insert_at_big_amount.wt.idx index caa290c2..07a32e23 100644 Binary files a/tests/data/space_index_unsized/indexset/process_insert_at_big_amount.wt.idx and b/tests/data/space_index_unsized/indexset/process_insert_at_big_amount.wt.idx differ diff --git a/tests/data/space_index_unsized/process_create_node.wt.idx b/tests/data/space_index_unsized/process_create_node.wt.idx index 4601d2a9..67f1dcdb 100644 Binary files a/tests/data/space_index_unsized/process_create_node.wt.idx and b/tests/data/space_index_unsized/process_create_node.wt.idx differ diff --git a/tests/data/space_index_unsized/process_create_node_after_remove.wt.idx 
b/tests/data/space_index_unsized/process_create_node_after_remove.wt.idx index 92928f9c..077bfa42 100644 Binary files a/tests/data/space_index_unsized/process_create_node_after_remove.wt.idx and b/tests/data/space_index_unsized/process_create_node_after_remove.wt.idx differ diff --git a/tests/data/space_index_unsized/process_create_second_node.wt.idx b/tests/data/space_index_unsized/process_create_second_node.wt.idx index 06bdd5d7..71990622 100644 Binary files a/tests/data/space_index_unsized/process_create_second_node.wt.idx and b/tests/data/space_index_unsized/process_create_second_node.wt.idx differ diff --git a/tests/data/space_index_unsized/process_insert_at.wt.idx b/tests/data/space_index_unsized/process_insert_at.wt.idx index e6942d81..77f709cc 100644 Binary files a/tests/data/space_index_unsized/process_insert_at.wt.idx and b/tests/data/space_index_unsized/process_insert_at.wt.idx differ diff --git a/tests/data/space_index_unsized/process_insert_at_big_amount.wt.idx b/tests/data/space_index_unsized/process_insert_at_big_amount.wt.idx index 48cd3566..c8d4efe9 100644 Binary files a/tests/data/space_index_unsized/process_insert_at_big_amount.wt.idx and b/tests/data/space_index_unsized/process_insert_at_big_amount.wt.idx differ diff --git a/tests/data/space_index_unsized/process_insert_at_removed_place.wt.idx b/tests/data/space_index_unsized/process_insert_at_removed_place.wt.idx index 2edd226e..6fa8d58b 100644 Binary files a/tests/data/space_index_unsized/process_insert_at_removed_place.wt.idx and b/tests/data/space_index_unsized/process_insert_at_removed_place.wt.idx differ diff --git a/tests/data/space_index_unsized/process_insert_at_with_node_id_update.wt.idx b/tests/data/space_index_unsized/process_insert_at_with_node_id_update.wt.idx index 809410fb..bdf2a54e 100644 Binary files a/tests/data/space_index_unsized/process_insert_at_with_node_id_update.wt.idx and b/tests/data/space_index_unsized/process_insert_at_with_node_id_update.wt.idx differ diff --git 
a/tests/data/space_index_unsized/process_remove_at.wt.idx b/tests/data/space_index_unsized/process_remove_at.wt.idx index ca1fd08f..912b7b28 100644 Binary files a/tests/data/space_index_unsized/process_remove_at.wt.idx and b/tests/data/space_index_unsized/process_remove_at.wt.idx differ diff --git a/tests/data/space_index_unsized/process_remove_at_node_id.wt.idx b/tests/data/space_index_unsized/process_remove_at_node_id.wt.idx index bd772e9a..8ddb966c 100644 Binary files a/tests/data/space_index_unsized/process_remove_at_node_id.wt.idx and b/tests/data/space_index_unsized/process_remove_at_node_id.wt.idx differ diff --git a/tests/data/space_index_unsized/process_remove_node.wt.idx b/tests/data/space_index_unsized/process_remove_node.wt.idx index c2cbcadf..15c10160 100644 Binary files a/tests/data/space_index_unsized/process_remove_node.wt.idx and b/tests/data/space_index_unsized/process_remove_node.wt.idx differ diff --git a/tests/data/space_index_unsized/process_split_node.wt.idx b/tests/data/space_index_unsized/process_split_node.wt.idx index caf696f8..7b62df18 100644 Binary files a/tests/data/space_index_unsized/process_split_node.wt.idx and b/tests/data/space_index_unsized/process_split_node.wt.idx differ diff --git a/tests/migration/mod.rs b/tests/migration/mod.rs new file mode 100644 index 00000000..dadfd013 --- /dev/null +++ b/tests/migration/mod.rs @@ -0,0 +1,270 @@ +use crate::remove_dir_if_exists; +use worktable::migration::Migration; +use worktable::prelude::*; +use worktable_codegen::{migration_engine, worktable}; + +mod v1 { + use super::*; + + worktable!( + name: User, + version: 1, + persist: true, + columns: { + id: u64 primary_key autoincrement, + name: String, + }, + ); +} + +mod v2 { + use super::*; + + worktable!( + name: User, + version: 2, + persist: true, + columns: { + id: u64 primary_key autoincrement, + name: String, + email: String, + }, + ); +} + +worktable!( + name: User, + version: 3, + persist: true, + columns: { + id: u64 primary_key 
autoincrement, + name: String, + email: String, + created_at: u64, + }, + indexes: { + name_idx: name, + }, +); + +#[derive(Default)] +pub struct UserMigrationContext { + pub default_email: String, + pub default_created_at: u64, +} + +pub struct UserMigration; + +impl Migration for UserMigration { + type Context = UserMigrationContext; + + fn migrate(row: v1::UserRow, ctx: &Self::Context) -> v2::UserRow { + v2::UserRow { + id: row.id, + name: row.name, + email: ctx.default_email.clone(), + } + } +} + +impl Migration for UserMigration { + type Context = UserMigrationContext; + + fn migrate(row: v2::UserRow, ctx: &Self::Context) -> UserRow { + UserRow { + id: row.id.into(), + name: row.name, + email: row.email, + created_at: ctx.default_created_at, + } + } +} + +migration_engine!( + migration: UserMigration, + current: UserWorkTable, + ctx: UserMigrationContext, + version_tables: { + 1 => v1::UserWorkTable, + 2 => v2::UserWorkTable, + }, +); + +/// v1 → current: create v1 data, migrate to current, verify data +#[test] +fn test_migrate_v1_to_current() { + let runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(2) + .enable_io() + .enable_time() + .build() + .unwrap(); + + runtime.block_on(async { + let src = "tests/data/migration/v1_to_current"; + let dst = "tests/data/migration/v1_to_current/dst"; + remove_dir_if_exists(src.to_string()).await; + + // Write v1 data + { + let config = DiskConfig::new_with_table_name( + src, + v1::UserWorkTable::name_snake_case(), + v1::UserWorkTable::version(), + ); + let engine = v1::UserPersistenceEngine::new(config).await.unwrap(); + let table = v1::UserWorkTable::load(engine).await.unwrap(); + + table + .insert(v1::UserRow { + id: table.get_next_pk().into(), + name: "Alice".to_string(), + }) + .unwrap(); + table + .insert(v1::UserRow { + id: table.get_next_pk().into(), + name: "Bob".to_string(), + }) + .unwrap(); + + table.wait_for_ops().await; + } + + // Verify source data is readable + { + let config = 
DiskConfig::new_with_table_name( + src, + v1::UserWorkTable::name_snake_case(), + v1::UserWorkTable::version(), + ); + let engine = ReadOnlyPersistenceEngine::create(config).await.unwrap(); + let table = v1::UserWorkTable::load(engine).await.unwrap(); + let count = table.count(); + assert_eq!(count, 2, "v1 table should have 2 rows, got {}", count); + } + + let ctx = UserMigrationContext { + default_email: "unknown@example.com".to_string(), + default_created_at: chrono::Utc::now().timestamp() as u64, + }; + + let report = UserMigrationEngine::migrate(src, dst, &ctx).await.unwrap(); + assert_eq!(report.source_version, v1::UserWorkTable::version()); + + { + let config = DiskConfig::new_with_table_name( + dst, + UserWorkTable::name_snake_case(), + UserWorkTable::version(), + ); + let engine = UserPersistenceEngine::new(config).await.unwrap(); + let table = UserWorkTable::load(engine).await.unwrap(); + + let rows = table.select_all().execute().unwrap(); + assert_eq!(rows.len(), 2); + + for row in &rows { + assert_eq!(row.email, ctx.default_email); + assert_eq!(row.created_at, ctx.default_created_at); + } + + let names: Vec<_> = rows.iter().map(|r| r.name.clone()).collect(); + assert!(names.contains(&"Alice".to_string())); + assert!(names.contains(&"Bob".to_string())); + } + }); +} + +#[test] +fn test_migrate_v2_to_current() { + let runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(2) + .enable_io() + .enable_time() + .build() + .unwrap(); + + runtime.block_on(async { + let src = "tests/data/migration/v2_to_current"; + let dst = "tests/data/migration/v2_to_current/dst"; + remove_dir_if_exists(src.to_string()).await; + + { + let config = DiskConfig::new_with_table_name( + src, + v2::UserWorkTable::name_snake_case(), + v2::UserWorkTable::version(), + ); + let engine = v2::UserPersistenceEngine::new(config).await.unwrap(); + let table = v2::UserWorkTable::load(engine).await.unwrap(); + + table + .insert(v2::UserRow { + id: table.get_next_pk().into(), + 
name: "Charlie".to_string(), + email: "charlie@test.com".to_string(), + }) + .unwrap(); + table + .insert(v2::UserRow { + id: table.get_next_pk().into(), + name: "Diana".to_string(), + email: "diana@test.com".to_string(), + }) + .unwrap(); + + table.wait_for_ops().await; + } + + let ctx = UserMigrationContext { + default_email: "unknown@example.com".to_string(), + default_created_at: chrono::Utc::now().timestamp() as u64, + }; + + let report = UserMigrationEngine::migrate(src, dst, &ctx).await.unwrap(); + assert_eq!(report.source_version, v2::UserWorkTable::version()); + + { + let config = DiskConfig::new_with_table_name( + dst, + UserWorkTable::name_snake_case(), + UserWorkTable::version(), + ); + let engine = UserPersistenceEngine::new(config).await.unwrap(); + let table = UserWorkTable::load(engine).await.unwrap(); + + let rows = table.select_all().execute().unwrap(); + assert_eq!(rows.len(), 2); + + let charlie = rows.iter().find(|r| r.name == "Charlie").unwrap(); + assert_eq!(charlie.email, "charlie@test.com"); + assert_eq!(charlie.created_at, ctx.default_created_at); + + let diana = rows.iter().find(|r| r.name == "Diana").unwrap(); + assert_eq!(diana.email, "diana@test.com"); + assert_eq!(diana.created_at, ctx.default_created_at); + } + }); +} + +/// Nonexistent source returns an error +#[test] +fn test_nonexistent_source_error() { + let runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(2) + .enable_io() + .enable_time() + .build() + .unwrap(); + + runtime.block_on(async { + let dst = "tests/data/migration/nonexistent_new"; + remove_dir_if_exists(dst.to_string()).await; + + let ctx = UserMigrationContext::default(); + let result = + UserMigrationEngine::migrate("tests/data/migration/does_not_exist", dst, &ctx).await; + assert!(result.is_err()); + }); +} diff --git a/tests/mod.rs b/tests/mod.rs index f305944f..86ec0ed7 100644 --- a/tests/mod.rs +++ b/tests/mod.rs @@ -1,8 +1,10 @@ use std::io::{BufReader, Read}; use std::path::Path; +mod 
migration; mod persistence; mod worktable; +mod worktable_version; pub fn check_if_files_are_same(got: String, expected: String) -> bool { let got = std::fs::File::open(got).unwrap(); diff --git a/tests/non-existent/test_persist/.wt.data b/tests/non-existent/test_persist/.wt.data index 0eb323ce..e6d55908 100644 Binary files a/tests/non-existent/test_persist/.wt.data and b/tests/non-existent/test_persist/.wt.data differ diff --git a/tests/non-existent/test_persist/another_idx.wt.idx b/tests/non-existent/test_persist/another_idx.wt.idx index aef1dbe6..b6b43e41 100644 Binary files a/tests/non-existent/test_persist/another_idx.wt.idx and b/tests/non-existent/test_persist/another_idx.wt.idx differ diff --git a/tests/non-existent/test_persist/primary.wt.idx b/tests/non-existent/test_persist/primary.wt.idx index aef1dbe6..b6b43e41 100644 Binary files a/tests/non-existent/test_persist/primary.wt.idx and b/tests/non-existent/test_persist/primary.wt.idx differ diff --git a/tests/persistence/concurrent/mod.rs b/tests/persistence/concurrent/mod.rs index 5d8af804..8be816d3 100644 --- a/tests/persistence/concurrent/mod.rs +++ b/tests/persistence/concurrent/mod.rs @@ -58,6 +58,7 @@ fn test_concurrent() { let config = DiskConfig::new_with_table_name( "tests/data/concurrent/test", TestConcurrentWorkTable::name_snake_case(), + TestConcurrentWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() diff --git a/tests/persistence/failure/insert.rs b/tests/persistence/failure/insert.rs index b8bbe384..3ea1e47d 100644 --- a/tests/persistence/failure/insert.rs +++ b/tests/persistence/failure/insert.rs @@ -6,6 +6,7 @@ fn test_insert_two_indexes_first_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/insert_two_first", TwoUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -102,6 +103,7 @@ fn test_insert_two_indexes_second_fail() { let config = DiskConfig::new_with_table_name( 
"tests/data/failure/insert_two_second", TwoUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -200,6 +202,7 @@ fn test_insert_three_indexes_first_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/insert_three_first", ThreeUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -294,6 +297,7 @@ fn test_insert_three_indexes_middle_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/insert_three_middle", ThreeUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -391,6 +395,7 @@ fn test_insert_three_indexes_last_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/insert_three_last", ThreeUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -488,6 +493,7 @@ fn test_insert_primary_duplicate() { let config = DiskConfig::new_with_table_name( "tests/data/failure/insert_primary_dup", PrimaryOnlyWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); diff --git a/tests/persistence/failure/reinsert.rs b/tests/persistence/failure/reinsert.rs index 7de7ab18..7326feff 100644 --- a/tests/persistence/failure/reinsert.rs +++ b/tests/persistence/failure/reinsert.rs @@ -6,6 +6,7 @@ fn test_reinsert_pk_mismatch() { let config = DiskConfig::new_with_table_name( "tests/data/failure/reinsert_pk_mismatch", TwoUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -91,6 +92,7 @@ fn test_reinsert_two_indexes_first_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/reinsert_two_first", TwoUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -193,6 +195,7 @@ fn test_reinsert_two_indexes_second_fail() { let config = 
DiskConfig::new_with_table_name( "tests/data/failure/reinsert_two_second", TwoUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -294,6 +297,7 @@ fn test_reinsert_three_indexes_first_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/reinsert_three_first", ThreeUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -396,6 +400,7 @@ fn test_reinsert_three_indexes_middle_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/reinsert_three_middle", ThreeUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -501,6 +506,7 @@ fn test_reinsert_three_indexes_last_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/reinsert_three_last", ThreeUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); diff --git a/tests/persistence/failure/update.rs b/tests/persistence/failure/update.rs index c5d50495..f7d1a516 100644 --- a/tests/persistence/failure/update.rs +++ b/tests/persistence/failure/update.rs @@ -2,11 +2,13 @@ use super::*; use crate::remove_dir_if_exists; + #[test] fn test_update_unique_secondary_conflict() { let config = DiskConfig::new_with_table_name( "tests/data/failure/update_unique_conflict", TwoUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -99,6 +101,7 @@ fn test_update_pk_based_success() { let config = DiskConfig::new_with_table_name( "tests/data/failure/update_pk_success", TwoUniqueIdxWorkTable::name_snake_case(), + TwoUniqueIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -177,4 +180,4 @@ fn test_update_pk_based_success() { table.wait_for_ops().await; } }); -} \ No newline at end of file +} diff --git a/tests/persistence/failure/update_non_unique.rs b/tests/persistence/failure/update_non_unique.rs index 
1c9c05c1..95fccfa6 100644 --- a/tests/persistence/failure/update_non_unique.rs +++ b/tests/persistence/failure/update_non_unique.rs @@ -2,11 +2,13 @@ use super::*; use crate::remove_dir_if_exists; + #[test] fn test_update_non_unique_middle_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/update_non_unique_middle", MixedIdxWorkTable::name_snake_case(), + MixedIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -108,6 +110,7 @@ fn test_update_non_unique_last_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/update_non_unique_last", MixedIdxWorkTable::name_snake_case(), + MixedIdxWorkTable::version(), ); let runtime = get_runtime(); @@ -209,4 +212,4 @@ fn test_update_non_unique_last_fail() { table.wait_for_ops().await; } }); -} \ No newline at end of file +} diff --git a/tests/persistence/failure/update_unsized.rs b/tests/persistence/failure/update_unsized.rs index 8e0b6a1e..12d9e269 100644 --- a/tests/persistence/failure/update_unsized.rs +++ b/tests/persistence/failure/update_unsized.rs @@ -2,11 +2,13 @@ use super::*; use crate::remove_dir_if_exists; + #[test] fn test_update_unsized_same_size() { let config = DiskConfig::new_with_table_name( "tests/data/failure/update_unsized_same_size", NonUniqueUnsizedWorkTable::name_snake_case(), + NonUniqueUnsizedWorkTable::version(), ); let runtime = get_runtime(); @@ -95,7 +97,9 @@ fn test_update_unsized_same_size() { // Phase 3: Verify { - let engine = NonUniqueUnsizedPersistenceEngine::new(config).await.unwrap(); + let engine = NonUniqueUnsizedPersistenceEngine::new(config) + .await + .unwrap(); let table = NonUniqueUnsizedWorkTable::load(engine).await.unwrap(); assert!(table.select(row1_pk).is_some()); @@ -111,6 +115,7 @@ fn test_update_unsized_larger_all_success() { let config = DiskConfig::new_with_table_name( "tests/data/failure/update_unsized_larger_success", NonUniqueUnsizedWorkTable::name_snake_case(), + NonUniqueUnsizedWorkTable::version(), ); let runtime = 
get_runtime(); @@ -183,7 +188,9 @@ fn test_update_unsized_larger_all_success() { // Phase 3: Verify { - let engine = NonUniqueUnsizedPersistenceEngine::new(config).await.unwrap(); + let engine = NonUniqueUnsizedPersistenceEngine::new(config) + .await + .unwrap(); let table = NonUniqueUnsizedWorkTable::load(engine).await.unwrap(); let row = table.select(row_pk).unwrap(); @@ -198,6 +205,7 @@ fn test_update_unsized_larger_middle_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/update_unsized_larger_middle", NonUniqueUnsizedWorkTable::name_snake_case(), + NonUniqueUnsizedWorkTable::version(), ); let runtime = get_runtime(); @@ -295,7 +303,9 @@ fn test_update_unsized_larger_middle_fail() { // Phase 3: Verify { - let engine = NonUniqueUnsizedPersistenceEngine::new(config).await.unwrap(); + let engine = NonUniqueUnsizedPersistenceEngine::new(config) + .await + .unwrap(); let table = NonUniqueUnsizedWorkTable::load(engine).await.unwrap(); let row2 = table.select(row2_pk).unwrap(); @@ -318,6 +328,7 @@ fn test_update_unsized_larger_last_fail() { let config = DiskConfig::new_with_table_name( "tests/data/failure/update_unsized_larger_last", NonUniqueUnsizedWorkTable::name_snake_case(), + NonUniqueUnsizedWorkTable::version(), ); let runtime = get_runtime(); @@ -399,7 +410,9 @@ fn test_update_unsized_larger_last_fail() { // Phase 3: Verify { - let engine = NonUniqueUnsizedPersistenceEngine::new(config).await.unwrap(); + let engine = NonUniqueUnsizedPersistenceEngine::new(config) + .await + .unwrap(); let table = NonUniqueUnsizedWorkTable::load(engine).await.unwrap(); // Row2: unchanged (failed to update) @@ -414,4 +427,4 @@ fn test_update_unsized_larger_last_fail() { table.wait_for_ops().await; } }); -} \ No newline at end of file +} diff --git a/tests/persistence/mod.rs b/tests/persistence/mod.rs index 9b455cd6..2c0d8e7e 100644 --- a/tests/persistence/mod.rs +++ b/tests/persistence/mod.rs @@ -52,7 +52,7 @@ worktable!( pub async fn get_empty_test_wt() 
-> TestPersistWorkTable { let config = - DiskConfig::new_with_table_name("tests/data", TestPersistWorkTable::name_snake_case()); + DiskConfig::new_with_table_name("tests/data", TestPersistWorkTable::name_snake_case(), TestPersistWorkTable::version()); let engine = TestPersistPersistenceEngine::new(config).await.unwrap(); TestPersistWorkTable::new(engine).await.unwrap() } diff --git a/tests/persistence/read.rs b/tests/persistence/read.rs index 528f41ea..651f873c 100644 --- a/tests/persistence/read.rs +++ b/tests/persistence/read.rs @@ -4,8 +4,8 @@ use worktable::prelude::*; // TODO: Fix naming. use crate::persistence::{ - TEST_PERSIST_INNER_SIZE, TEST_PERSIST_PAGE_SIZE, TestPersistPersistenceEngine, - TestPersistWorkTable, get_empty_test_wt, get_test_wt, + get_empty_test_wt, get_test_wt, TestPersistPersistenceEngine, + TestPersistWorkTable, TEST_PERSIST_INNER_SIZE, TEST_PERSIST_PAGE_SIZE, }; use crate::remove_dir_if_exists; @@ -130,6 +130,7 @@ async fn test_space_parse() { let config = DiskConfig::new_with_table_name( "tests/data/expected", TestPersistWorkTable::name_snake_case(), + TestPersistWorkTable::version(), ); let engine = TestPersistPersistenceEngine::new(config).await.unwrap(); let table = TestPersistWorkTable::load(engine).await.unwrap(); @@ -148,6 +149,7 @@ async fn test_space_parse_no_file() { let config = DiskConfig::new_with_table_name( "tests/non-existent", TestPersistWorkTable::name_snake_case(), + TestPersistWorkTable::version(), ); let engine = TestPersistPersistenceEngine::new(config).await.unwrap(); let table = TestPersistWorkTable::load(engine).await.unwrap(); diff --git a/tests/persistence/s3/mod.rs b/tests/persistence/s3/mod.rs index 8c54b17f..45acfa7e 100644 --- a/tests/persistence/s3/mod.rs +++ b/tests/persistence/s3/mod.rs @@ -31,6 +31,7 @@ fn test_s3_engine_compiles() { disk: DiskConfig::new_with_table_name( "tests/data/s3/compile_test", TestS3WorkTable::name_snake_case(), + TestS3WorkTable::version(), ), s3: S3Config { bucket_name: 
"test".to_string(), diff --git a/tests/persistence/space_index/indexset_compatibility.rs b/tests/persistence/space_index/indexset_compatibility.rs index dcca0b90..c7e373ad 100644 --- a/tests/persistence/space_index/indexset_compatibility.rs +++ b/tests/persistence/space_index/indexset_compatibility.rs @@ -1,7 +1,7 @@ mod sized { use std::fs::copy; - use data_bucket::{INNER_PAGE_SIZE, Link}; + use data_bucket::{Link, INNER_PAGE_SIZE}; use indexset::concurrent::map::BTreeMap; use worktable::prelude::{SpaceIndex, SpaceIndexOps}; @@ -17,6 +17,7 @@ mod sized { let mut space_index = SpaceIndex::::new( "tests/data/space_index/indexset/process_create_node.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -54,6 +55,7 @@ mod sized { let mut space_index = SpaceIndex::::new( "tests/data/space_index/indexset/process_insert_at.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -91,6 +93,7 @@ mod sized { let mut space_index = SpaceIndex::::new( "tests/data/space_index/indexset/process_insert_at_big_amount.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -134,11 +137,11 @@ mod unsized_ { use std::fs::copy; use crate::{check_if_files_are_same, remove_file_if_exists}; - use data_bucket::{INNER_PAGE_SIZE, Link}; + use data_bucket::{Link, INNER_PAGE_SIZE}; use indexset::concurrent::map::BTreeMap; use indexset::core::pair::Pair; - use worktable::UnsizedNode; use worktable::prelude::{SpaceIndexOps, SpaceIndexUnsized}; + use worktable::UnsizedNode; #[tokio::test] async fn test_indexset_node_creation() { @@ -150,6 +153,7 @@ mod unsized_ { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/indexset/process_create_node.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -188,6 +192,7 @@ mod unsized_ { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/indexset/process_insert_at.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -226,6 +231,7 @@ mod unsized_ { let mut space_index = SpaceIndexUnsized::::new( 
"tests/data/space_index_unsized/indexset/process_insert_at_big_amount.wt.idx", 0.into(), + 1, ) .await .unwrap(); diff --git a/tests/persistence/space_index/unsized_write.rs b/tests/persistence/space_index/unsized_write.rs index 0c7acd8c..904a757b 100644 --- a/tests/persistence/space_index/unsized_write.rs +++ b/tests/persistence/space_index/unsized_write.rs @@ -1,6 +1,6 @@ use std::fs::copy; -use data_bucket::{INNER_PAGE_SIZE, Link}; +use data_bucket::{Link, INNER_PAGE_SIZE}; use indexset::cdc::change::ChangeEvent; use indexset::core::pair::Pair; use worktable::prelude::{SpaceIndexOps, SpaceIndexUnsized}; @@ -20,6 +20,7 @@ mod run_first { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_create_node.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -60,6 +61,7 @@ mod run_first { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_create_second_node.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -100,6 +102,7 @@ mod run_first { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_remove_node.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -140,6 +143,7 @@ mod run_first { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_insert_at.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -189,6 +193,7 @@ mod run_first { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_insert_at_big_amount.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -264,6 +269,7 @@ async fn test_space_index_process_remove_at() { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_remove_at.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -313,6 +319,7 @@ async fn test_space_index_process_remove_at_node_id() { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_remove_at_node_id.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -362,6 +369,7 @@ async fn 
test_space_index_process_insert_at_with_node_id_update() { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_insert_at_with_node_id_update.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -412,6 +420,7 @@ async fn test_space_index_process_insert_at_removed_place() { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_insert_at_removed_place.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -508,6 +517,7 @@ async fn test_space_index_process_create_node_after_remove() { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_create_node_after_remove.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -547,6 +557,7 @@ async fn test_space_index_process_split_node() { let mut space_index = SpaceIndexUnsized::::new( "tests/data/space_index_unsized/process_split_node.wt.idx", 0.into(), + 1, ) .await .unwrap(); diff --git a/tests/persistence/space_index/write.rs b/tests/persistence/space_index/write.rs index 4fe2c92c..74a6d060 100644 --- a/tests/persistence/space_index/write.rs +++ b/tests/persistence/space_index/write.rs @@ -1,6 +1,6 @@ use std::fs::copy; -use data_bucket::{INNER_PAGE_SIZE, Link}; +use data_bucket::{Link, INNER_PAGE_SIZE}; use indexset::cdc::change::ChangeEvent; use indexset::core::pair::Pair; use worktable::prelude::{SpaceIndex, SpaceIndexOps}; @@ -18,6 +18,7 @@ mod run_first { let mut space_index = SpaceIndex::::new( "tests/data/space_index/process_create_node.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -58,6 +59,7 @@ mod run_first { let mut space_index = SpaceIndex::::new( "tests/data/space_index/process_create_second_node.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -95,6 +97,7 @@ mod run_first { let mut space_index = SpaceIndex::::new( "tests/data/space_index/process_insert_at.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -144,6 +147,7 @@ mod run_first { let mut space_index = SpaceIndex::::new( 
"tests/data/space_index/process_insert_at_big_amount.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -217,6 +221,7 @@ mod run_first { let mut space_index = SpaceIndex::::new( "tests/data/space_index/process_remove_node.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -258,6 +263,7 @@ async fn test_space_index_process_insert_at_with_node_id_update() { let mut space_index = SpaceIndex::::new( "tests/data/space_index/process_insert_at_with_node_id_update.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -304,6 +310,7 @@ async fn test_space_index_process_remove_at() { let mut space_index = SpaceIndex::::new( "tests/data/space_index/process_remove_at.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -351,6 +358,7 @@ async fn test_space_index_process_remove_at_node_id() { let mut space_index = SpaceIndex::::new( "tests/data/space_index/process_remove_at_node_id.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -400,6 +408,7 @@ async fn test_space_index_process_insert_at_removed_place() { let mut space_index = SpaceIndex::::new( "tests/data/space_index/process_insert_at_removed_place.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -495,6 +504,7 @@ async fn test_space_index_process_create_node_after_remove() { let mut space_index = SpaceIndex::::new( "tests/data/space_index/process_create_node_after_remove.wt.idx", 0.into(), + 1, ) .await .unwrap(); @@ -532,6 +542,7 @@ async fn test_space_index_process_split_node() { let mut space_index = SpaceIndex::::new( "tests/data/space_index/process_split_node.wt.idx", 0.into(), + 1, ) .await .unwrap(); diff --git a/tests/persistence/sync/failure.rs b/tests/persistence/sync/failure.rs index 69e3e0fa..e59feac6 100644 --- a/tests/persistence/sync/failure.rs +++ b/tests/persistence/sync/failure.rs @@ -11,6 +11,7 @@ fn test_failed_update_by_pk_doesnt_corrupt_persistence() { let config = DiskConfig::new_with_table_name( "tests/data/sync/failure_update_pk", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = 
tokio::runtime::Builder::new_multi_thread() @@ -87,6 +88,7 @@ fn test_failed_update_by_unique_index_doesnt_corrupt_persistence() { let config = DiskConfig::new_with_table_name( "tests/data/sync/failure_update_unique", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -163,6 +165,7 @@ fn test_failed_delete_by_pk_doesnt_corrupt_persistence() { let config = DiskConfig::new_with_table_name( "tests/data/sync/failure_delete_pk", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() diff --git a/tests/persistence/sync/failure_multi_index.rs b/tests/persistence/sync/failure_multi_index.rs index dea8f8fb..5b7cd421 100644 --- a/tests/persistence/sync/failure_multi_index.rs +++ b/tests/persistence/sync/failure_multi_index.rs @@ -34,6 +34,7 @@ fn test_multi_index_insert_failure_doesnt_corrupt_persistence() { let config = DiskConfig::new_with_table_name( "tests/data/sync/failure_multi_index_insert", MultiUniqueIdxWorkTable::name_snake_case(), + MultiUniqueIdxWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() diff --git a/tests/persistence/sync/many_strings.rs b/tests/persistence/sync/many_strings.rs index 97303f00..95127cb7 100644 --- a/tests/persistence/sync/many_strings.rs +++ b/tests/persistence/sync/many_strings.rs @@ -1,9 +1,8 @@ +use crate::remove_dir_if_exists; use worktable::prelude::PersistedWorkTable; use worktable::prelude::*; use worktable_codegen::worktable; -use crate::remove_dir_if_exists; - worktable! 
( name: TestSync, persist: true, @@ -24,6 +23,7 @@ fn test_space_update_query_pk_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_primary_and_other_sync/update_query_pk", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -96,6 +96,7 @@ fn test_space_update_query_pk_many_times_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_primary_and_other_sync/update_query_pk_many", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() diff --git a/tests/persistence/sync/mod.rs b/tests/persistence/sync/mod.rs index e12b9a80..5a9e5dc2 100644 --- a/tests/persistence/sync/mod.rs +++ b/tests/persistence/sync/mod.rs @@ -45,6 +45,7 @@ fn test_wait_for_ops_for_empty() { let config = DiskConfig::new_with_table_name( "tests/data/sync/wait", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -71,6 +72,7 @@ fn test_space_insert_sync() { let config = DiskConfig::new_with_table_name( "tests/data/sync/insert", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -112,6 +114,7 @@ fn test_space_insert_many_sync() { let config = DiskConfig::new_with_table_name( "tests/data/sync/insert_many", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -163,6 +166,7 @@ fn test_space_update_full_sync() { let config = DiskConfig::new_with_table_name( "tests/data/sync/update_full", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -214,6 +218,7 @@ fn test_space_update_query_pk_sync() { let config = DiskConfig::new_with_table_name( "tests/data/sync/update_query_pk", 
TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -260,6 +265,7 @@ fn test_space_update_query_unique_sync() { let config = DiskConfig::new_with_table_name( "tests/data/sync/update_query_unique", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -306,6 +312,7 @@ fn test_space_update_query_non_unique_sync() { let config = DiskConfig::new_with_table_name( "tests/data/sync/update_query_non_unique", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -352,6 +359,7 @@ fn test_space_delete_sync() { let config = DiskConfig::new_with_table_name( "tests/data/sync/delete", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -394,6 +402,7 @@ fn test_space_delete_query_sync() { let config = DiskConfig::new_with_table_name( "tests/data/sync/delete_query", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() diff --git a/tests/persistence/sync/option.rs b/tests/persistence/sync/option.rs index a37d0f58..59046b79 100644 --- a/tests/persistence/sync/option.rs +++ b/tests/persistence/sync/option.rs @@ -31,6 +31,7 @@ fn test_option_insert_none_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/insert_none", TestOptionSyncWorkTable::name_snake_case(), + TestOptionSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -74,6 +75,7 @@ fn test_option_insert_some_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/insert_some", TestOptionSyncWorkTable::name_snake_case(), + TestOptionSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -117,6 +119,7 @@ fn 
test_option_update_full_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/update_full", TestOptionSyncWorkTable::name_snake_case(), + TestOptionSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -170,6 +173,7 @@ fn test_option_update_by_id_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/update_by_id", TestOptionSyncWorkTable::name_snake_case(), + TestOptionSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -218,6 +222,7 @@ fn test_option_update_none_to_some_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/none_to_some", TestOptionSyncWorkTable::name_snake_case(), + TestOptionSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -266,6 +271,7 @@ fn test_option_update_some_to_none_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/some_to_none", TestOptionSyncWorkTable::name_snake_case(), + TestOptionSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -314,6 +320,7 @@ fn test_option_update_by_another_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/update_by_another", TestOptionSyncWorkTable::name_snake_case(), + TestOptionSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -362,6 +369,7 @@ fn test_option_update_by_exchange_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/update_by_exchange", TestOptionSyncWorkTable::name_snake_case(), + TestOptionSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -410,6 +418,7 @@ fn test_option_multiple_rows_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/multiple_rows", TestOptionSyncWorkTable::name_snake_case(), + TestOptionSyncWorkTable::version(), ); let runtime = 
tokio::runtime::Builder::new_multi_thread() @@ -490,6 +499,7 @@ fn test_option_indexed_insert_none_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/indexed_insert_none", TestOptionSyncIndexWorkTable::name_snake_case(), + TestOptionSyncIndexWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -535,6 +545,7 @@ fn test_option_indexed_insert_some_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/indexed_insert_some", TestOptionSyncIndexWorkTable::name_snake_case(), + TestOptionSyncIndexWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -580,6 +591,7 @@ fn test_option_indexed_update_none_to_some_by_id_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/indexed_none_to_some", TestOptionSyncIndexWorkTable::name_snake_case(), + TestOptionSyncIndexWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -630,6 +642,7 @@ fn test_option_indexed_update_some_to_none_by_id_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/indexed_some_to_none", TestOptionSyncIndexWorkTable::name_snake_case(), + TestOptionSyncIndexWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -680,6 +693,7 @@ fn test_option_indexed_update_by_another_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/indexed_update_by_another", TestOptionSyncIndexWorkTable::name_snake_case(), + TestOptionSyncIndexWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -730,6 +744,7 @@ fn test_option_indexed_multiple_rows_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/indexed_multiple_rows", TestOptionSyncIndexWorkTable::name_snake_case(), + TestOptionSyncIndexWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -803,6 +818,7 @@ fn 
test_option_indexed_full_row_update_sync() { let config = DiskConfig::new_with_table_name( "tests/data/option_sync/indexed_full_update", TestOptionSyncIndexWorkTable::name_snake_case(), + TestOptionSyncIndexWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() diff --git a/tests/persistence/sync/string_primary_index.rs b/tests/persistence/sync/string_primary_index.rs index 04fa7456..c50b8895 100644 --- a/tests/persistence/sync/string_primary_index.rs +++ b/tests/persistence/sync/string_primary_index.rs @@ -34,6 +34,7 @@ fn test_space_insert_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_primary_sync/insert", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -74,6 +75,7 @@ fn test_space_insert_many_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_primary_sync/insert_many", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -123,6 +125,7 @@ fn test_space_update_full_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_primary_sync/update_full", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -173,6 +176,7 @@ fn test_space_update_query_pk_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_primary_sync/update_query_pk", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -218,6 +222,7 @@ fn test_space_update_query_unique_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_primary_sync/update_query_unique", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -264,6 +269,7 @@ fn 
test_space_update_query_non_unique_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_primary_sync/update_query_non_unique", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -310,6 +316,7 @@ fn test_space_delete_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_primary_sync/delete", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -358,6 +365,7 @@ fn test_space_delete_query_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_primary_sync/delete_query", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() diff --git a/tests/persistence/sync/string_re_read.rs b/tests/persistence/sync/string_re_read.rs index 9e55fc4c..bffeb5f6 100644 --- a/tests/persistence/sync/string_re_read.rs +++ b/tests/persistence/sync/string_re_read.rs @@ -33,6 +33,7 @@ fn test_key() { let config = DiskConfig::new_with_table_name( "tests/data/key/key", StringReReadWorkTable::name_snake_case(), + StringReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -102,6 +103,7 @@ fn test_key_delete_scenario() { let config = DiskConfig::new_with_table_name( "tests/data/key/delete_scenario", StringReReadWorkTable::name_snake_case(), + StringReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -221,6 +223,7 @@ fn test_key_delete() { let config = DiskConfig::new_with_table_name( "tests/data/key/delete", StringReReadWorkTable::name_snake_case(), + StringReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -295,6 +298,7 @@ fn test_key_delete_all() { let config = DiskConfig::new_with_table_name( "tests/data/key/delete_all", StringReReadWorkTable::name_snake_case(), + 
StringReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -372,6 +376,7 @@ fn test_key_delete_all_and_insert() { let config = DiskConfig::new_with_table_name( "tests/data/key/delete_all_and_insert", StringReReadWorkTable::name_snake_case(), + StringReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -468,6 +473,7 @@ fn test_key_delete_by_unique() { let config = DiskConfig::new_with_table_name( "tests/data/key/delete_unique", StringReReadWorkTable::name_snake_case(), + StringReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -545,6 +551,7 @@ fn test_key_delete_by_non_unique() { let config = DiskConfig::new_with_table_name( "tests/data/key/delete_non_unique", StringReReadWorkTable::name_snake_case(), + StringReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -621,6 +628,7 @@ fn test_toc_not_updated_when_index_value_same_but_link_changes() { let config = DiskConfig::new_with_table_name( "tests/data/key/toc_link_bug", StringReReadWorkTable::name_snake_case(), + StringReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -714,7 +722,11 @@ fn test_toc_not_updated_when_index_value_same_but_link_changes() { assert_eq!(table.select_all().execute().unwrap().len(), 3); assert_eq!( - table.select_by_first("same_first".to_string()).execute().unwrap().len(), + table + .select_by_first("same_first".to_string()) + .execute() + .unwrap() + .len(), 3 ); } @@ -726,6 +738,7 @@ fn test_big_amount_reread() { let config = DiskConfig::new_with_table_name( "tests/data/key/big_amount", StringReReadWorkTable::name_snake_case(), + StringReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -790,6 +803,7 @@ fn test_unique_index_same_value_link_changes() { let config = DiskConfig::new_with_table_name( "tests/data/key/unique_link_change", 
StringReReadWorkTable::name_snake_case(), + StringReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -870,8 +884,16 @@ fn test_unique_index_same_value_link_changes() { let table = StringReReadWorkTable::load(engine).await.unwrap(); assert_eq!(table.select_all().execute().unwrap().len(), 2); - assert!(table.select_by_second("unique_second".to_string()).is_some()); - assert!(table.select_by_second("unique_second_2".to_string()).is_some()); + assert!( + table + .select_by_second("unique_second".to_string()) + .is_some() + ); + assert!( + table + .select_by_second("unique_second_2".to_string()) + .is_some() + ); let row1 = table.select_by_second("unique_second".to_string()).unwrap(); assert_eq!(row1.first, "first_updated"); diff --git a/tests/persistence/sync/string_secondary_index.rs b/tests/persistence/sync/string_secondary_index.rs index c8f94a90..c3a142d7 100644 --- a/tests/persistence/sync/string_secondary_index.rs +++ b/tests/persistence/sync/string_secondary_index.rs @@ -34,6 +34,7 @@ fn test_space_insert_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_secondary_sync/insert", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -75,6 +76,7 @@ fn test_space_insert_many_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_secondary_sync/insert_many", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -126,6 +128,7 @@ fn test_space_update_full_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_secondary_sync/update_full", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -184,6 +187,7 @@ fn test_space_update_query_pk_sync() { let config = DiskConfig::new_with_table_name( 
"tests/data/unsized_secondary_sync/update_query_pk", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -238,6 +242,7 @@ fn test_space_update_query_unique_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_secondary_sync/update_query_unique", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -288,6 +293,7 @@ fn test_space_update_query_non_unique_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_secondary_sync/update_query_non_unique", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -345,6 +351,7 @@ fn test_space_delete_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_secondary_sync/delete", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -387,6 +394,7 @@ fn test_space_delete_query_sync() { let config = DiskConfig::new_with_table_name( "tests/data/unsized_secondary_sync/delete_query", TestSyncWorkTable::name_snake_case(), + TestSyncWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() diff --git a/tests/persistence/sync/string_update_timeout.rs b/tests/persistence/sync/string_update_timeout.rs index 4af4b4ce..d54c6dd4 100644 --- a/tests/persistence/sync/string_update_timeout.rs +++ b/tests/persistence/sync/string_update_timeout.rs @@ -44,6 +44,7 @@ fn test_string_update_doesnt_block_persistence() { let config = DiskConfig::new_with_table_name( "tests/data/sync/string_update_timeout", UserWorkTable::name_snake_case(), + UserWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() diff --git a/tests/persistence/sync/uuid_.rs b/tests/persistence/sync/uuid_.rs index 29334dcf..4cc85f1b 100644 --- 
a/tests/persistence/sync/uuid_.rs +++ b/tests/persistence/sync/uuid_.rs @@ -24,6 +24,7 @@ fn test_uuid() { let config = DiskConfig::new_with_table_name( "tests/data/uuid/reread", UuidReReadWorkTable::name_snake_case(), + UuidReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() @@ -87,6 +88,7 @@ fn test_big_amount_reread() { let config = DiskConfig::new_with_table_name( "tests/data/uuid/big_amount", UuidReReadWorkTable::name_snake_case(), + UuidReReadWorkTable::version(), ); let runtime = tokio::runtime::Builder::new_multi_thread() diff --git a/tests/worktable_version/basic.rs b/tests/worktable_version/basic.rs new file mode 100644 index 00000000..ac230f91 --- /dev/null +++ b/tests/worktable_version/basic.rs @@ -0,0 +1,91 @@ +use crate::remove_dir_if_exists; + +use worktable::prelude::*; +use worktable_codegen::{worktable, worktable_version}; + +worktable!( + name: User, + persist: true, + columns: { + id: u64 primary_key autoincrement, + name: String, + email: String, + }, + indexes: { + name_idx: name, + }, +); + +worktable_version!( + name: UserV1, + columns: { + id: u64 primary_key autoincrement, + name: String, + email: String, + }, + indexes: { + name_idx: name, + }, +); + +#[test] +fn test_version_reads_persisted_data() { + let config = DiskConfig::new_with_table_name( + "tests/data/version/basic", + UserWorkTable::name_snake_case(), + UserWorkTable::version(), + ); + + let runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(2) + .enable_io() + .enable_time() + .build() + .unwrap(); + + runtime.block_on(async { + remove_dir_if_exists("tests/data/version/basic".to_string()).await; + + { + let engine = UserPersistenceEngine::new(config.clone()).await.unwrap(); + let table = UserWorkTable::load(engine).await.unwrap(); + + table + .insert(UserRow { + id: table.get_next_pk().into(), + name: "Alice".to_string(), + email: "alice@example.com".to_string(), + }) + .unwrap(); + + table + .insert(UserRow { + id: 
table.get_next_pk().into(), + name: "Bob".to_string(), + email: "bob@example.com".to_string(), + }) + .unwrap(); + + table.wait_for_ops().await + } + + { + let engine = ReadOnlyPersistenceEngine::create(config.clone()) + .await + .unwrap(); + let table = UserV1WorkTable::load(engine).await.unwrap(); + + // Verify count + assert_eq!(table.count(), 2); + + // Verify data via select_all + let rows = table.select_all().execute().unwrap(); + assert_eq!(rows.len(), 2); + + // Check specific values exist + let names: Vec<_> = rows.iter().map(|r| r.name.clone()).collect(); + assert!(names.contains(&"Alice".to_string())); + assert!(names.contains(&"Bob".to_string())); + } + }); +} diff --git a/tests/worktable_version/mod.rs b/tests/worktable_version/mod.rs new file mode 100644 index 00000000..2e83168b --- /dev/null +++ b/tests/worktable_version/mod.rs @@ -0,0 +1 @@ +mod basic; \ No newline at end of file