diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java
index adb3e8793b8dfb..4f033638436676 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java
@@ -1269,7 +1269,16 @@ private void normalizeSelectColumns(TNormalizedOlapScanNode normalizedOlapScanNo
                 .flatMap(tupleId -> normalizer.getDescriptorTable().getTupleDesc(tupleId).getSlots().stream())
                 .collect(Collectors.toList());
         List<Pair<SlotId, String>> selectColumns = slots.stream()
-                .map(slot -> Pair.of(slot.getId(), slot.getColumn().getName()))
+                .map(slot -> {
+                    // For variant subcolumns, use the materialized column name (e.g. "data.int_1")
+                    // to distinguish different subcolumns of the same variant column in cache digest.
+                    List<String> subColPath = slot.getSubColLables();
+                    String colName = slot.getColumn().getName();
+                    if (subColPath != null && !subColPath.isEmpty()) {
+                        colName = colName + "." + String.join(".", subColPath);
+                    }
+                    return Pair.of(slot.getId(), colName);
+                })
                 .collect(Collectors.toList());
         for (Column partitionColumn : olapTable.getPartitionInfo().getPartitionColumns()) {
             boolean selectPartitionColumn = false;
diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java
index 5648484f49d544..5e61879718801e 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java
@@ -110,7 +110,14 @@ protected void runBeforeAll() throws Exception {
                 + "distributed by hash(k1) buckets 3\n"
                 + "properties('replication_num' = '1')";
 
-        createTables(nonPart, part1, part2, multiLeveParts);
+        String variantTable = "create table db1.variant_tbl("
+                + " k1 int,\n"
+                + " data variant)\n"
+                + "DUPLICATE KEY(k1)\n"
+                + "distributed by hash(k1) buckets 3\n"
+                + "properties('replication_num' = '1')";
+
+        createTables(nonPart, part1, part2, multiLeveParts, variantTable);
 
         connectContext.getSessionVariable().setDisableNereidsRules("PRUNE_EMPTY_PARTITION");
         connectContext.getSessionVariable().setEnableQueryCache(true);
@@ -358,6 +365,22 @@ public void phasesDistinctAgg() {
         Assertions.assertEquals(fourPhaseAggPlans, threePhaseAggPlans);
     }
 
+    @Test
+    public void testVariantSubColumnDigest() throws Exception {
+        // Different variant subcolumns should produce different digests
+        String digest1 = getDigest(
+                "select cast(data['int_1'] as int), count(*) from db1.variant_tbl group by cast(data['int_1'] as int)");
+        String digest2 = getDigest(
+                "select cast(data['int_nested'] as int), count(*) from db1.variant_tbl group by cast(data['int_nested'] as int)");
+        Assertions.assertNotEquals(digest1, digest2,
+                "Queries on different variant subcolumns must have different cache digests");
+
+        // Same variant subcolumn with different aliases should produce same digest
+        String digest3 = getDigest(
+                "select cast(data['int_1'] as int) as a, count(*) as cnt from db1.variant_tbl group by cast(data['int_1'] as int)");
+        Assertions.assertEquals(digest1, digest3);
+    }
+
     private String getDigest(String sql) throws Exception {
        return Hex.encodeHexString(getQueryCacheParam(sql).digest);
    }