From 33a68a02574c815c748dd5383c421621e220d68d Mon Sep 17 00:00:00 2001 From: waralexrom <108349432+waralexrom@users.noreply.github.com> Date: Tue, 17 Mar 2026 21:17:40 +0100 Subject: [PATCH 1/2] fix(tesseract): Incorrect ubounded rolling window without granularity (#10508) --- .../postgres/sql-generation.test.ts | 18 ++ .../planners/multi_stage/applied_state.rs | 78 ++++++ .../multi_stage/multi_stage_query_planner.rs | 27 ++- .../cube_bridge/mock_base_tools.rs | 6 +- .../src/test_fixtures/cube_bridge/mod.rs | 1 + .../test_fixtures/cube_bridge/time_series.rs | 228 ++++++++++++++++++ .../yaml_files/common/rolling_window.yaml | 24 ++ .../cubesqlplanner/src/tests/mod.rs | 1 + .../tests/rolling_window_sql_generation.rs | 121 ++++++++++ ..._window_both_unbounded_no_granularity.snap | 10 + ...ndow_leading_unbounded_no_granularity.snap | 11 + ...dow_trailing_unbounded_no_granularity.snap | 11 + ...w_trailing_unbounded_with_granularity.snap | 24 ++ 13 files changed, 556 insertions(+), 4 deletions(-) create mode 100644 rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/time_series.rs create mode 100644 rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/schemas/yaml_files/common/rolling_window.yaml create mode 100644 rust/cubesqlplanner/cubesqlplanner/src/tests/rolling_window_sql_generation.rs create mode 100644 rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_both_unbounded_no_granularity.snap create mode 100644 rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_leading_unbounded_no_granularity.snap create mode 100644 rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_trailing_unbounded_no_granularity.snap create mode 100644 
rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_trailing_unbounded_with_granularity.snap diff --git a/packages/cubejs-schema-compiler/test/integration/postgres/sql-generation.test.ts b/packages/cubejs-schema-compiler/test/integration/postgres/sql-generation.test.ts index 010bc1ae59b9d..97854c4834c38 100644 --- a/packages/cubejs-schema-compiler/test/integration/postgres/sql-generation.test.ts +++ b/packages/cubejs-schema-compiler/test/integration/postgres/sql-generation.test.ts @@ -1696,6 +1696,24 @@ SELECT 1 AS revenue, cast('2024-01-01' AS timestamp) as time UNION ALL visitors__created_at_day: '2017-01-10T00:00:00.000Z', }])); + it('rolling window with unbounded without time dimension', async () => runQueryTest({ + measures: [ + 'visitors.countRollingUnbounded', + ], + timeDimensions: [ + { + dimension: 'visitors.created_at', + dateRange: ['2017-01-05', '2017-01-10'] + } + ], + order: [{ + id: 'visitors.created_at' + }], + timezone: 'America/Los_Angeles' + }, [{ + visitors__count_rolling_unbounded: '6' + }])); + it('two rolling windows with two time dimension granularities', async () => runQueryTest({ measures: [ 'visitors.countRollingUnbounded', diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs index ebf54eac8548a..761498bae8fe3 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs @@ -303,6 +303,84 @@ impl MultiStageAppliedState { false } + /// Replace InDateRange filter with bounded version for rolling window without granularity. + /// Unlike `replace_regular_date_range_filter` which uses time_series CTE references, + /// this keeps parameter-based filters suitable for queries without a time_series CTE. 
+ pub fn replace_date_range_for_rolling_window_without_granularity( + &mut self, + member_name: &String, + trailing: &Option<String>, + leading: &Option<String>, + ) { + let trailing_unbounded = trailing.as_deref() == Some("unbounded"); + let leading_unbounded = leading.as_deref() == Some("unbounded"); + + if !trailing_unbounded && !leading_unbounded { + return; + } + + if trailing_unbounded && leading_unbounded { + // Both unbounded — remove the date range filter entirely + self.time_dimensions_filters.retain(|item| match item { + FilterItem::Item(itm) => { + !(&itm.member_name() == member_name + && matches!(itm.filter_operator(), FilterOperator::InDateRange)) + } + _ => true, + }); + } else if trailing_unbounded { + // Remove lower bound: InDateRange(from, to) → BeforeOrOnDate(to) + self.time_dimensions_filters = self + .time_dimensions_filters + .iter() + .map(|item| match item { + FilterItem::Item(itm) + if &itm.member_name() == member_name + && matches!(itm.filter_operator(), FilterOperator::InDateRange) => + { + let values = itm.values(); + let to_value = if values.len() >= 2 { + vec![values[1].clone()] + } else { + values.clone() + }; + FilterItem::Item(itm.change_operator( + FilterOperator::BeforeOrOnDate, + to_value, + itm.use_raw_values(), + )) + } + other => other.clone(), + }) + .collect(); + } else { + // leading unbounded: remove upper bound: InDateRange(from, to) → AfterOrOnDate(from) + self.time_dimensions_filters = self + .time_dimensions_filters + .iter() + .map(|item| match item { + FilterItem::Item(itm) + if &itm.member_name() == member_name + && matches!(itm.filter_operator(), FilterOperator::InDateRange) => + { + let values = itm.values(); + let from_value = if !values.is_empty() { + vec![values[0].clone()] + } else { + values.clone() + }; + FilterItem::Item(itm.change_operator( + FilterOperator::AfterOrOnDate, + from_value, + itm.use_raw_values(), + )) + } + other => other.clone(), + }) + .collect(); + } + } + pub fn replace_regular_date_range_filter( &mut 
self, member_name: &String, diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/multi_stage_query_planner.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/multi_stage_query_planner.rs index dd85eb489d0a5..92e20e428fece 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/multi_stage_query_planner.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/multi_stage_query_planner.rs @@ -474,9 +474,11 @@ impl MultiStageQueryPlanner { } if time_dimensions.is_empty() { + let base_state = + self.replace_date_range_for_rolling_window(&rolling_window, state.clone()); let rolling_base = self.add_rolling_window_base( member.clone(), - state.clone(), + base_state, ungrouped, descriptions, )?; @@ -698,6 +700,29 @@ impl MultiStageQueryPlanner { } } + /// Adjust date range filters for rolling window when there's no granularity. + /// Without granularity there's no time_series CTE, so we replace InDateRange + /// with BeforeOrOnDate/AfterOrOnDate that use parameters directly. 
+ fn replace_date_range_for_rolling_window( + &self, + rolling_window: &RollingWindow, + state: Rc<MultiStageAppliedState>, + ) -> Rc<MultiStageAppliedState> { + let mut new_state = state.clone_state(); + for filter_item in state.time_dimensions_filters() { + if let FilterItem::Item(filter) = filter_item { + if matches!(filter.filter_operator(), FilterOperator::InDateRange) { + new_state.replace_date_range_for_rolling_window_without_granularity( + &filter.member_name(), + &rolling_window.trailing, + &rolling_window.leading, + ); + } + } + } + Rc::new(new_state) + } + fn make_rolling_base_state( &self, time_dimension: Rc, diff --git a/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mock_base_tools.rs b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mock_base_tools.rs index c6584cf70d2de..c5bc69e4c706c 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mock_base_tools.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mock_base_tools.rs @@ -66,10 +66,10 @@ impl BaseTools for MockBaseTools { fn generate_time_series( &self, - _granularity: String, - _date_range: Vec<String>, + granularity: String, + date_range: Vec<String>, ) -> Result<Vec<Vec<String>>, CubeError> { - todo!("generate_time_series not implemented in mock") + super::time_series::generate_time_series(&granularity, &date_range) } fn generate_custom_time_series( diff --git a/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mod.rs b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mod.rs index c468058ab1294..d18fdba5b5477 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mod.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mod.rs @@ -35,6 +35,7 @@ mod mock_sql_templates_render; mod mock_sql_utils; mod mock_struct_with_sql_member; mod mock_timeshift_definition; +pub mod time_series; pub use base_query_options::{members_from_strings, MockBaseQueryOptions}; pub use mock_base_tools::MockBaseTools; diff --git 
a/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/time_series.rs b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/time_series.rs new file mode 100644 index 0000000000000..39f7a200493bd --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/time_series.rs @@ -0,0 +1,228 @@ +use chrono::{Datelike, Duration, NaiveDate, NaiveDateTime, NaiveTime, Timelike}; +use cubenativeutils::CubeError; + +const TIMESTAMP_PRECISION: usize = 3; + +/// Generates a time series for a given granularity and date range. +/// Matches `timeSeries()` from `@cubejs-backend/shared/src/time.ts`. +pub fn generate_time_series( + granularity: &str, + date_range: &[String], +) -> Result<Vec<Vec<String>>, CubeError> { + if date_range.len() != 2 { + return Err(CubeError::internal( + "date_range must have exactly 2 elements".to_string(), + )); + } + + let start = parse_date(&date_range[0])?; + let end = parse_date(&date_range[1])?; + + let snap = snap_fn(granularity) + .ok_or_else(|| CubeError::user(format!("Unsupported time granularity: {granularity}")))?; + let advance = advance_fn(granularity).unwrap(); + let period_end = period_end_fn(granularity).unwrap(); + + let mut current = snap(start); + let mut result = Vec::new(); + while current <= end { + let to = period_end(current); + result.push(vec![format_from(current), format_to(to)]); + current = advance(current); + } + + Ok(result) +} + +type DateFn = fn(NaiveDateTime) -> NaiveDateTime; + +/// Snap datetime to the start of its granularity period +fn snap_fn(g: &str) -> Option<DateFn> { + Some(match g { + "second" => |dt| dt.with_nanosecond(0).unwrap(), + "minute" => |dt| make(dt.date(), dt.hour(), dt.minute(), 0), + "hour" => |dt| make(dt.date(), dt.hour(), 0, 0), + "day" => |dt| day_start(dt.date()), + "week" => |dt| { + let days_from_mon = dt.date().weekday().num_days_from_monday(); + day_start(dt.date() - Duration::days(days_from_mon as i64)) + }, + "month" => |dt| 
day_start(NaiveDate::from_ymd_opt(dt.year(), dt.month(), 1).unwrap()), + "quarter" => |dt| { + let q_month = (dt.month() - 1) / 3 * 3 + 1; + day_start(NaiveDate::from_ymd_opt(dt.year(), q_month, 1).unwrap()) + }, + "year" => |dt| day_start(NaiveDate::from_ymd_opt(dt.year(), 1, 1).unwrap()), + _ => return None, + }) +} + +/// Advance to the next period +fn advance_fn(g: &str) -> Option<DateFn> { + Some(match g { + "second" => |dt| dt + Duration::seconds(1), + "minute" => |dt| dt + Duration::minutes(1), + "hour" => |dt| dt + Duration::hours(1), + "day" => |dt| dt + Duration::days(1), + "week" => |dt| dt + Duration::weeks(1), + "month" => |dt| add_months(dt, 1), + "quarter" => |dt| add_months(dt, 3), + "year" => |dt| add_months(dt, 12), + _ => return None, + }) +} + +/// Get the end of the current period +fn period_end_fn(g: &str) -> Option<DateFn> { + Some(match g { + "second" => |dt| dt, // same second + "minute" => |dt| make(dt.date(), dt.hour(), dt.minute(), 59), + "hour" => |dt| make(dt.date(), dt.hour(), 59, 59), + "day" => |dt| day_end(dt.date()), + "week" => |dt| day_end(dt.date() + Duration::days(6)), + "month" => |dt| day_end(last_day_of_month(dt.year(), dt.month())), + "quarter" => |dt| { + let last_month = (dt.month() - 1) / 3 * 3 + 3; + day_end(last_day_of_month(dt.year(), last_month)) + }, + "year" => |dt| day_end(NaiveDate::from_ymd_opt(dt.year(), 12, 31).unwrap()), + _ => return None, + }) +} + +fn make(date: NaiveDate, h: u32, m: u32, s: u32) -> NaiveDateTime { + date.and_time(NaiveTime::from_hms_opt(h, m, s).unwrap()) +} + +fn day_start(d: NaiveDate) -> NaiveDateTime { + make(d, 0, 0, 0) +} + +fn day_end(d: NaiveDate) -> NaiveDateTime { + make(d, 23, 59, 59) +} + +fn add_months(dt: NaiveDateTime, months: u32) -> NaiveDateTime { + let total = dt.month0() + months; + let new_year = dt.year() + (total / 12) as i32; + let new_month = total % 12 + 1; + day_start(NaiveDate::from_ymd_opt(new_year, new_month, 1).unwrap()) +} + +fn last_day_of_month(year: i32, month: u32) -> 
NaiveDate { + let (y, m) = if month == 12 { + (year + 1, 1) + } else { + (year, month + 1) + }; + NaiveDate::from_ymd_opt(y, m, 1).unwrap() - Duration::days(1) +} + +fn parse_date(s: &str) -> Result<NaiveDateTime, CubeError> { + NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S%.f") + .or_else(|_| NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S")) + .or_else(|_| { + NaiveDate::parse_from_str(s, "%Y-%m-%d").map(|d| d.and_hms_opt(0, 0, 0).unwrap()) + }) + .map_err(|_| CubeError::internal(format!("Cannot parse date: '{s}'"))) +} + +fn format_from(dt: NaiveDateTime) -> String { + format!( + "{}.{}", + dt.format("%Y-%m-%dT%H:%M:%S"), + "0".repeat(TIMESTAMP_PRECISION) + ) +} + +fn format_to(dt: NaiveDateTime) -> String { + format!( + "{}.{}", + dt.format("%Y-%m-%dT%H:%M:%S"), + "9".repeat(TIMESTAMP_PRECISION) + ) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_day() { + let r = generate_time_series("day", &["2025-10-07".into(), "2025-10-09".into()]).unwrap(); + assert_eq!(r.len(), 3); + assert_eq!( + r[0], + vec!["2025-10-07T00:00:00.000", "2025-10-07T23:59:59.999"] + ); + assert_eq!( + r[2], + vec!["2025-10-09T00:00:00.000", "2025-10-09T23:59:59.999"] + ); + } + + #[test] + fn test_month() { + let r = generate_time_series("month", &["2025-01-15".into(), "2025-03-10".into()]).unwrap(); + assert_eq!(r.len(), 3); + assert_eq!(r[0][0], "2025-01-01T00:00:00.000"); + assert_eq!(r[0][1], "2025-01-31T23:59:59.999"); + assert_eq!(r[1][1], "2025-02-28T23:59:59.999"); + } + + #[test] + fn test_week() { + // 2025-10-07 is Tuesday, snaps to Monday 2025-10-06 + let r = generate_time_series("week", &["2025-10-07".into(), "2025-10-14".into()]).unwrap(); + assert_eq!(r.len(), 2); + assert_eq!(r[0][0], "2025-10-06T00:00:00.000"); + assert_eq!(r[0][1], "2025-10-12T23:59:59.999"); + } + + #[test] + fn test_quarter() { + let r = + generate_time_series("quarter", &["2025-01-15".into(), "2025-07-10".into()]).unwrap(); + assert_eq!(r.len(), 3); + assert_eq!(r[0][0], 
"2025-01-01T00:00:00.000"); + assert_eq!(r[0][1], "2025-03-31T23:59:59.999"); + assert_eq!(r[2][0], "2025-07-01T00:00:00.000"); + } + + #[test] + fn test_year() { + let r = generate_time_series("year", &["2024-06-15".into(), "2025-03-10".into()]).unwrap(); + assert_eq!(r.len(), 2); + assert_eq!( + r[0], + vec!["2024-01-01T00:00:00.000", "2024-12-31T23:59:59.999"] + ); + assert_eq!( + r[1], + vec!["2025-01-01T00:00:00.000", "2025-12-31T23:59:59.999"] + ); + } + + #[test] + fn test_hour() { + let r = generate_time_series( + "hour", + &[ + "2025-10-07T10:30:00.000".into(), + "2025-10-07T12:15:00.000".into(), + ], + ) + .unwrap(); + assert_eq!(r.len(), 3); + assert_eq!(r[0][0], "2025-10-07T10:00:00.000"); + assert_eq!(r[0][1], "2025-10-07T10:59:59.999"); + } + + #[test] + fn test_unsupported() { + assert!( + generate_time_series("millennium", &["2025-01-01".into(), "2025-01-02".into()]) + .is_err() + ); + } +} diff --git a/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/schemas/yaml_files/common/rolling_window.yaml b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/schemas/yaml_files/common/rolling_window.yaml new file mode 100644 index 0000000000000..03d6d227d53ee --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/schemas/yaml_files/common/rolling_window.yaml @@ -0,0 +1,24 @@ +cubes: + - name: test_cube + sql: "SELECT * FROM test_data" + dimensions: + - name: created_at + sql: created_at + type: time + measures: + - name: val + type: sum + sql: val + rolling_window: + trailing: unbounded + - name: val_leading + type: sum + sql: val + rolling_window: + leading: unbounded + - name: val_both + type: sum + sql: val + rolling_window: + trailing: unbounded + leading: unbounded diff --git a/rust/cubesqlplanner/cubesqlplanner/src/tests/mod.rs b/rust/cubesqlplanner/cubesqlplanner/src/tests/mod.rs index ffb988e85dd0b..9dd6fd8735da8 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/tests/mod.rs +++ 
b/rust/cubesqlplanner/cubesqlplanner/src/tests/mod.rs @@ -7,5 +7,6 @@ mod join_hints_collector; mod measure_symbol; mod member_expressions_on_views; mod pre_aggregation_sql_generation; +mod rolling_window_sql_generation; mod subquery_dimensions; mod utils; diff --git a/rust/cubesqlplanner/cubesqlplanner/src/tests/rolling_window_sql_generation.rs b/rust/cubesqlplanner/cubesqlplanner/src/tests/rolling_window_sql_generation.rs new file mode 100644 index 0000000000000..7c9c2e01864ed --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/tests/rolling_window_sql_generation.rs @@ -0,0 +1,121 @@ +use crate::test_fixtures::cube_bridge::MockSchema; +use crate::test_fixtures::test_utils::TestContext; +use indoc::indoc; + +fn create_context() -> TestContext { + let schema = MockSchema::from_yaml_file("common/rolling_window.yaml"); + TestContext::new(schema).unwrap() +} + +#[test] +fn test_rolling_window_trailing_unbounded_no_granularity() { + let test_context = create_context(); + + let query_yaml = indoc! {r#" + measures: + - test_cube.val + time_dimensions: + - dimension: test_cube.created_at + dateRange: + - "2025-10-07" + - "2025-10-08" + "#}; + + let sql = test_context + .build_sql(query_yaml) + .expect("Should generate SQL for trailing unbounded"); + + assert!( + !sql.contains(">= $_0_$"), + "Trailing unbounded should not have a lower time bound (>=), got: {sql}" + ); + assert!( + !sql.contains("time_series"), + "Without granularity should not reference time_series CTE, got: {sql}" + ); + + insta::assert_snapshot!(sql); +} + +#[test] +fn test_rolling_window_leading_unbounded_no_granularity() { + let test_context = create_context(); + + let query_yaml = indoc! 
{r#" + measures: + - test_cube.val_leading + time_dimensions: + - dimension: test_cube.created_at + dateRange: + - "2025-10-07" + - "2025-10-08" + "#}; + + let sql = test_context + .build_sql(query_yaml) + .expect("Should generate SQL for leading unbounded"); + + assert!( + !sql.contains("<= $_1_$"), + "Leading unbounded should not have an upper time bound (<=), got: {sql}" + ); + + insta::assert_snapshot!(sql); +} + +#[test] +fn test_rolling_window_both_unbounded_no_granularity() { + let test_context = create_context(); + + let query_yaml = indoc! {r#" + measures: + - test_cube.val_both + time_dimensions: + - dimension: test_cube.created_at + dateRange: + - "2025-10-07" + - "2025-10-08" + "#}; + + let sql = test_context + .build_sql(query_yaml) + .expect("Should generate SQL for both unbounded"); + + assert!( + !sql.contains(">= $_0_$"), + "Both unbounded should not have a lower time bound (>=), got: {sql}" + ); + assert!( + !sql.contains("<= $_1_$"), + "Both unbounded should not have an upper time bound (<=), got: {sql}" + ); + + insta::assert_snapshot!(sql); +} + +#[test] +fn test_rolling_window_trailing_unbounded_with_granularity() { + let test_context = create_context(); + + let query_yaml = indoc! 
{r#" + measures: + - test_cube.val + time_dimensions: + - dimension: test_cube.created_at + granularity: day + dateRange: + - "2025-10-07" + - "2025-10-08" + "#}; + + let sql = test_context + .build_sql(query_yaml) + .expect("Should generate SQL for trailing unbounded with granularity"); + + assert!( + !sql.contains(">= \"time_series\".\"date_from\""), + "JOIN should not have lower bound with trailing unbounded, got: {sql}" + ); + + insta::assert_snapshot!(sql); +} diff --git a/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_both_unbounded_no_granularity.snap b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_both_unbounded_no_granularity.snap new file mode 100644 index 0000000000000..b5fcbbcea160b --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_both_unbounded_no_granularity.snap @@ -0,0 +1,10 @@ +--- +source: cubesqlplanner/src/tests/rolling_window_sql_generation.rs +expression: sql +--- + WITH +cte_0 AS ( SELECT sum("test_cube".val) "test_cube__val_both" + FROM test_data AS "test_cube") +SELECT "fk_aggregate"."test_cube__val_both" "test_cube__val_both" +FROM (SELECT * +FROM cte_0 AS "cte_0") AS "fk_aggregate" diff --git a/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_leading_unbounded_no_granularity.snap b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_leading_unbounded_no_granularity.snap new file mode 100644 index 0000000000000..679afd7b539ec --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_leading_unbounded_no_granularity.snap @@ -0,0 +1,11 @@ +--- +source: 
cubesqlplanner/src/tests/rolling_window_sql_generation.rs +expression: sql +--- + WITH +cte_0 AS ( SELECT sum("test_cube".val) "test_cube__val_leading" + FROM test_data AS "test_cube" + WHERE ("test_cube".created_at >= $_0_$::timestamptz)) +SELECT "fk_aggregate"."test_cube__val_leading" "test_cube__val_leading" +FROM (SELECT * +FROM cte_0 AS "cte_0") AS "fk_aggregate" diff --git a/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_trailing_unbounded_no_granularity.snap b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_trailing_unbounded_no_granularity.snap new file mode 100644 index 0000000000000..49366e389fcc7 --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_trailing_unbounded_no_granularity.snap @@ -0,0 +1,11 @@ +--- +source: cubesqlplanner/src/tests/rolling_window_sql_generation.rs +expression: sql +--- + WITH +cte_0 AS ( SELECT sum("test_cube".val) "test_cube__val" + FROM test_data AS "test_cube" + WHERE ("test_cube".created_at <= $_0_$::timestamptz)) +SELECT "fk_aggregate"."test_cube__val" "test_cube__val" +FROM (SELECT * +FROM cte_0 AS "cte_0") AS "fk_aggregate" diff --git a/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_trailing_unbounded_with_granularity.snap b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_trailing_unbounded_with_granularity.snap new file mode 100644 index 0000000000000..c84953ac38730 --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__rolling_window_sql_generation__rolling_window_trailing_unbounded_with_granularity.snap @@ -0,0 +1,24 @@ +--- +source: 
cubesqlplanner/src/tests/rolling_window_sql_generation.rs +expression: sql +--- + WITH +time_series AS ( SELECT date_from::timestamp AS "date_from", + date_to::timestamp AS "date_to" + FROM( + VALUES ('2025-10-07T00:00:00.000', '2025-10-07T23:59:59.999'), ('2025-10-08T00:00:00.000', '2025-10-08T23:59:59.999')) AS dates (date_from, date_to)), +cte_1 AS ( SELECT date_trunc('day', ("test_cube".created_at::timestamptz AT TIME ZONE 'UTC')) "test_cube__created_at_day", sum("test_cube".val) "test_cube__val" + FROM test_data AS "test_cube" + WHERE (("test_cube".created_at::timestamptz AT TIME ZONE 'UTC') <= (SELECT max("date_to") "value" + FROM time_series)) + GROUP BY 1 + ORDER BY 1 ASC), +cte_2 AS ( SELECT "time_series"."date_from" "test_cube__created_at_day", sum("rolling_source"."test_cube__val") "test_cube__val" + FROM time_series AS "time_series" + LEFT JOIN cte_1 AS "rolling_source" ON "rolling_source"."test_cube__created_at_day" <= "time_series"."date_to" + GROUP BY 1 + ORDER BY 1 ASC) +SELECT "fk_aggregate"."test_cube__created_at_day" "test_cube__created_at_day", "fk_aggregate"."test_cube__val" "test_cube__val" +FROM (SELECT * +FROM cte_2 AS "cte_2") AS "fk_aggregate" +ORDER BY 1 ASC From 244c04b962bacb165c46142d67b10c37efcef54c Mon Sep 17 00:00:00 2001 From: waralexrom <108349432+waralexrom@users.noreply.github.com> Date: Tue, 17 Mar 2026 21:17:57 +0100 Subject: [PATCH 2/2] chore(tesseract): Tests for segments in views (#10509) --- .../postgres/segments-in-view.test.ts | 133 ++++++++++++++++++ .../test_fixtures/cube_bridge/mock_schema.rs | 7 +- .../yaml_files/common/segments_in_view.yaml | 48 +++++++ .../src/tests/common_sql_generation.rs | 42 ++++++ ...gment_with_subquery_dimension_in_view.snap | 12 ++ ...uery_dimension_in_view_with_dimension.snap | 14 ++ 6 files changed, 255 insertions(+), 1 deletion(-) create mode 100644 packages/cubejs-schema-compiler/test/integration/postgres/segments-in-view.test.ts create mode 100644 
rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/schemas/yaml_files/common/segments_in_view.yaml create mode 100644 rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__common_sql_generation__segment_with_subquery_dimension_in_view.snap create mode 100644 rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__common_sql_generation__segment_with_subquery_dimension_in_view_with_dimension.snap diff --git a/packages/cubejs-schema-compiler/test/integration/postgres/segments-in-view.test.ts b/packages/cubejs-schema-compiler/test/integration/postgres/segments-in-view.test.ts new file mode 100644 index 0000000000000..ff51b645ab361 --- /dev/null +++ b/packages/cubejs-schema-compiler/test/integration/postgres/segments-in-view.test.ts @@ -0,0 +1,133 @@ +import { PostgresQuery } from '../../../src/adapter/PostgresQuery'; +import { prepareJsCompiler } from '../../unit/PrepareCompiler'; +import { dbRunner } from './PostgresDBRunner'; + +describe('Segments in View with SubQuery Dimensions', () => { + jest.setTimeout(200000); + + const { compiler, joinGraph, cubeEvaluator } = prepareJsCompiler(` +cube(\`Accounts\`, { + sql: \` + SELECT 1 AS id, 'US' AS region UNION ALL + SELECT 2 AS id, 'US' AS region UNION ALL + SELECT 3 AS id, 'EU' AS region UNION ALL + SELECT 4 AS id, 'EU' AS region UNION ALL + SELECT 5 AS id, 'AP' AS region + \`, + + joins: { + Tickets: { + relationship: \`one_to_many\`, + sql: \`\${CUBE}.id = \${Tickets}.account_id\`, + }, + }, + + dimensions: { + id: { + sql: \`id\`, + type: \`number\`, + primaryKey: true, + public: true, + }, + + region: { + sql: \`\${CUBE}.region\`, + type: \`string\`, + }, + + ticketCount: { + sql: \`\${Tickets.count}\`, + type: \`number\`, + subQuery: true, + }, + }, + + segments: { + hasNoTickets: { + sql: \`(\${ticketCount} = 0)\`, + }, + }, + + measures: { + count: { + type: \`count\`, + }, + }, +}); + +cube(\`Tickets\`, { + sql: \` + SELECT 1 AS id, 1 AS account_id UNION ALL + 
SELECT 2 AS id, 1 AS account_id UNION ALL + SELECT 3 AS id, 3 AS account_id UNION ALL + SELECT 4 AS id, 5 AS account_id + \`, + + dimensions: { + id: { + sql: \`id\`, + type: \`number\`, + primaryKey: true, + }, + + accountId: { + sql: \`\${CUBE}.account_id\`, + type: \`number\`, + }, + }, + + measures: { + count: { + type: \`count\`, + }, + }, +}); + +view(\`accountOverview\`, { + cubes: [ + { + join_path: Accounts, + includes: [ + \`hasNoTickets\`, + \`count\`, + \`region\`, + ], + }, + ], +}); + `); + + async function runQueryTest(q, expectedResult) { + await compiler.compile(); + const query = new PostgresQuery({ joinGraph, cubeEvaluator, compiler }, q); + + console.log(query.buildSqlAndParams()); + + const res = await dbRunner.testQuery(query.buildSqlAndParams()); + console.log(JSON.stringify(res)); + + expect(res).toEqual( + expectedResult + ); + } + + it('segment with subquery dimension in view', async () => runQueryTest({ + measures: ['accountOverview.count'], + segments: ['accountOverview.hasNoTickets'], + }, [{ + account_overview__count: '2', + }])); + + it('segment with subquery dimension in view with dimension', async () => runQueryTest({ + measures: ['accountOverview.count'], + segments: ['accountOverview.hasNoTickets'], + dimensions: ['accountOverview.region'], + order: [{ id: 'accountOverview.region' }], + }, [{ + account_overview__region: 'EU', + account_overview__count: '1', + }, { + account_overview__region: 'US', + account_overview__count: '1', + }])); +}); diff --git a/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mock_schema.rs b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mock_schema.rs index 440c452209f90..18cf7a103ce7a 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mock_schema.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/cube_bridge/mock_schema.rs @@ -546,11 +546,16 @@ impl MockViewBuilder { ); } + let original_type = &measure.static_data().measure_type; 
+ let view_type = match original_type.as_str() { + "number" | "string" | "time" | "boolean" => original_type.clone(), + _ => "number".to_string(), + }; all_measures.insert( view_name, Rc::new( MockMeasureDefinition::builder() - .measure_type(measure.static_data().measure_type.clone()) + .measure_type(view_type) .sql(view_member_sql) .build(), ), diff --git a/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/schemas/yaml_files/common/segments_in_view.yaml b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/schemas/yaml_files/common/segments_in_view.yaml new file mode 100644 index 0000000000000..71f70c7dc1c8a --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/test_fixtures/schemas/yaml_files/common/segments_in_view.yaml @@ -0,0 +1,48 @@ +cubes: + - name: Accounts + sql: "SELECT * FROM accounts" + joins: + - name: Tickets + relationship: one_to_many + sql: "{CUBE}.id = {Tickets.accountId}" + dimensions: + - name: id + type: number + sql: id + primary_key: true + - name: region + type: string + sql: "{CUBE}.region" + - name: ticketCount + type: number + sql: "{Tickets.count}" + sub_query: true + segments: + - name: hasNoTickets + sql: "({ticketCount} = 0)" + measures: + - name: count + type: count + + - name: Tickets + sql: "SELECT * FROM tickets" + dimensions: + - name: id + type: number + sql: id + primary_key: true + - name: accountId + type: number + sql: "{CUBE}.account_id" + measures: + - name: count + type: count + +views: + - name: accountOverview + cubes: + - join_path: Accounts + includes: + - hasNoTickets + - count + - region diff --git a/rust/cubesqlplanner/cubesqlplanner/src/tests/common_sql_generation.rs b/rust/cubesqlplanner/cubesqlplanner/src/tests/common_sql_generation.rs index 8ea65268a9481..f2a603282fda5 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/tests/common_sql_generation.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/tests/common_sql_generation.rs @@ -318,3 +318,45 @@ fn test_query_level_join_hints() { "SQL should NOT use 
A->F join, got: {sql}" ); } + +#[test] +fn test_segment_with_subquery_dimension_in_view() { + let schema = MockSchema::from_yaml_file("common/segments_in_view.yaml"); + let test_context = TestContext::new(schema).unwrap(); + + let query_yaml = indoc! {" + measures: + - accountOverview.count + segments: + - accountOverview.hasNoTickets + "}; + + let sql = test_context + .build_sql(query_yaml) + .expect("Should generate SQL for segment with subquery dimension in view"); + + insta::assert_snapshot!(sql); +} + +#[test] +fn test_segment_with_subquery_dimension_in_view_with_dimension() { + let schema = MockSchema::from_yaml_file("common/segments_in_view.yaml"); + let test_context = TestContext::new(schema).unwrap(); + + let query_yaml = indoc! {" + measures: + - accountOverview.count + segments: + - accountOverview.hasNoTickets + dimensions: + - accountOverview.region + order: + - id: accountOverview.region + "}; + + let sql = test_context + .build_sql(query_yaml) + .expect("Should generate SQL for segment with subquery dimension in view with dimension"); + + insta::assert_snapshot!(sql); +} diff --git a/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__common_sql_generation__segment_with_subquery_dimension_in_view.snap b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__common_sql_generation__segment_with_subquery_dimension_in_view.snap new file mode 100644 index 0000000000000..686585b75c566 --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__common_sql_generation__segment_with_subquery_dimension_in_view.snap @@ -0,0 +1,12 @@ +--- +source: cubesqlplanner/src/tests/common_sql_generation.rs +expression: sql +--- +SELECT count("accounts".id) "account_overview__count" +FROM accounts AS "accounts" +LEFT JOIN (SELECT "accounts".id "accounts__id", count("tickets".id) "ticket_count" +FROM accounts AS "accounts" +LEFT JOIN tickets AS "tickets" ON "accounts".id = 
"tickets".account_id +GROUP BY 1 +ORDER BY 2 DESC) AS "Accounts_ticketCount_subquery" ON (("Accounts_ticketCount_subquery"."accounts__id" = "accounts".id)) +WHERE (("Accounts_ticketCount_subquery"."ticket_count" = 0)) diff --git a/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__common_sql_generation__segment_with_subquery_dimension_in_view_with_dimension.snap b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__common_sql_generation__segment_with_subquery_dimension_in_view_with_dimension.snap new file mode 100644 index 0000000000000..938bd69402d08 --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/tests/snapshots/cubesqlplanner__tests__common_sql_generation__segment_with_subquery_dimension_in_view_with_dimension.snap @@ -0,0 +1,14 @@ +--- +source: cubesqlplanner/src/tests/common_sql_generation.rs +expression: sql +--- +SELECT "accounts".region "account_overview__region", count("accounts".id) "account_overview__count" +FROM accounts AS "accounts" +LEFT JOIN (SELECT "accounts".id "accounts__id", count("tickets".id) "ticket_count" +FROM accounts AS "accounts" +LEFT JOIN tickets AS "tickets" ON "accounts".id = "tickets".account_id +GROUP BY 1 +ORDER BY 2 DESC) AS "Accounts_ticketCount_subquery" ON (("Accounts_ticketCount_subquery"."accounts__id" = "accounts".id)) +WHERE (("Accounts_ticketCount_subquery"."ticket_count" = 0)) +GROUP BY 1 +ORDER BY 1 ASC