[UT] Add more cases to cover nested mv create (#62996)

Signed-off-by: shuming.li <ming.moriarty@gmail.com>
This commit is contained in:
shuming.li 2025-10-10 18:27:18 +08:00 committed by GitHub
parent 1785077e7b
commit 6f69933e11
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 247 additions and 52 deletions

View File

@ -91,7 +91,6 @@ import mockit.MockUp;
import mockit.Mocked;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
@ -100,7 +99,6 @@ import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
import java.time.LocalDateTime;
import java.util.Arrays;
@ -119,20 +117,13 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
public class CreateMaterializedViewTest extends MVTestBase {
private static final Logger LOG = LogManager.getLogger(CreateMaterializedViewTest.class);
public String name;
@TempDir
public static File temp;
private static ConnectContext connectContext;
private static Database testDb;
private static GlobalStateMgr currentState;
@BeforeAll
public static void beforeClass() throws Exception {
ConnectorPlanTestBase.doInit(newFolder(temp, "junit").toURI().toString());
// set default config for async mvs
UtFrameUtils.setDefaultConfigForAsyncMVTest(connectContext);
Config.default_mv_refresh_immediate = true;
@ -333,10 +324,20 @@ public class CreateMaterializedViewTest extends MVTestBase {
testDb = currentState.getLocalMetastore().getDb("test");
UtFrameUtils.setUpForPersistTest();
try {
ConnectorPlanTestBase.mockAllCatalogs(connectContext, newFolder(temp, "junit").toURI().toString());
} catch (Exception e) {
//
}
}
@AfterAll
public static void afterClass() throws Exception {
@BeforeEach
public void setup(TestInfo testInfo) throws Exception {
Optional<Method> testMethod = testInfo.getTestMethod();
if (testMethod.isPresent()) {
this.name = testMethod.get().getName();
}
super.before();
}
private static void dropMv(String mvName) throws Exception {
@ -5797,23 +5798,6 @@ public class CreateMaterializedViewTest extends MVTestBase {
starRocksAssert.refreshMV(connectContext, "mv_table_with_external_table");
}
/**
 * Creates (if needed) the nested sub-directory {@code subDirs} under {@code root}
 * and returns it.
 *
 * @param root    parent directory to create the folders under
 * @param subDirs path segments joined with '/' to form the nested folder path
 * @return the created (or already existing) directory
 * @throws IOException if the directory does not exist and could not be created
 */
private static File newFolder(File root, String... subDirs) throws IOException {
    String subFolder = String.join("/", subDirs);
    File result = new File(root, subFolder);
    // mkdirs() returns false when the directory already exists, so only fail if
    // the target is still not a directory afterwards. Report the actual target
    // path (result), not the parent (root), so the error message is actionable.
    if (!result.mkdirs() && !result.isDirectory()) {
        throw new IOException("Couldn't create folders " + result);
    }
    return result;
}
/**
 * Records the current test method's name into {@link #name} before each test.
 * Leaves {@code name} untouched when JUnit supplies no test method.
 */
@BeforeEach
public void setup(TestInfo testInfo) {
    // getTestMethod() may be empty (e.g. for dynamic tests); only record when present.
    testInfo.getTestMethod().ifPresent(method -> this.name = method.getName());
}
@Test
public void testCreateMVWithFixedLengthChar1() throws Exception {
starRocksAssert.withTable("CREATE TABLE tt1(dt date, val int, col1 char(8), col2 varchar(8));");
@ -5861,4 +5845,68 @@ public class CreateMaterializedViewTest extends MVTestBase {
Assertions.assertEquals(1048576, scalarType0.getLength());
Config.transform_type_prefer_string_for_varchar = false;
}
@Test
// Nested-MV creation over internal (OLAP) tables: mv2's partition expression
// date_trunc('month', k1) is applied on top of mv1, which itself is
// month-partitioned on k1 from base table tbl1. Both creations are expected
// to succeed (withMaterializedView would surface a failure otherwise —
// TODO confirm against StarRocksAssert semantics).
public void testPartitionByDateTruncWithNestedMV1() throws Exception {
{
// First level: async-refresh MV month-partitioned on tbl1.k1.
String sql = "create materialized view mv1 " +
"partition by date_trunc('month', k1) " +
"distributed by hash(k2) buckets 10 " +
"refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " +
"PROPERTIES (\n" +
"\"replication_num\" = \"1\"\n" +
") " +
"as select k1, k2 from tbl1;";
starRocksAssert.withMaterializedView(sql);
}
{
// Second level: identical partition expression, but the base is mv1 itself.
String sql = "create materialized view mv2 " +
"partition by date_trunc('month', k1) " +
"distributed by hash(k2) buckets 10 " +
"refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " +
"PROPERTIES (\n" +
"\"replication_num\" = \"1\"\n" +
") " +
"as select k1, k2 from mv1;";
starRocksAssert.withMaterializedView(sql);
}
}
@Test
// Nested-MV creation over an external (Iceberg) base table: mv1 is
// month-partitioned on the DATE column of iceberg0.partitioned_db.t2, and
// mv2 nests the same date_trunc partition expression on top of mv1.
public void testPartitionByDateTruncWithNestedMV2() throws Exception {
{
// First level: deferred-manual MV over the mocked Iceberg table t2
// (its 'date' column is of DATE type in the mock metadata).
String sql = "create materialized view mv1 " +
"partition by date_trunc('month', date) " +
"distributed by random " +
"REFRESH DEFERRED MANUAL " +
"PROPERTIES (\n" +
"'replication_num' = '1'\n" +
") \n" +
"as select v1.date, v1.id from iceberg0.partitioned_db.t2 as v1; ";
starRocksAssert.withMaterializedView(sql);
}
{
// Second level: same partition expression with mv1 as the base table.
String sql = "create materialized view mv2 " +
"partition by date_trunc('month', date) " +
"distributed by random " +
"REFRESH DEFERRED MANUAL " +
"PROPERTIES (\n" +
"'replication_num' = '1'\n" +
") \n" +
"as select v1.date, v1.id from mv1 as v1; ";
starRocksAssert.withMaterializedView(sql);
}
}
@Test
// MV creation where the select list mixes constant expressions (a string
// literal and an integer literal) with the partition column: verifies that
// partition-expression resolution still finds 'date' among non-column outputs.
public void testPartitionByDateTruncWithNestedMV3() throws Exception {
String sql = "create materialized view mv1 " +
"partition by date_trunc('month', date) " +
"distributed by random " +
"REFRESH DEFERRED MANUAL " +
"PROPERTIES (\n" +
"'replication_num' = '1'\n" +
") \n" +
"as select 'This is a test',123,v1.date, v1.id from iceberg0.partitioned_db.t2 as v1; ";
starRocksAssert.withMaterializedView(sql);
}
}

View File

@ -74,6 +74,8 @@ public class MockIcebergMetadata implements ConnectorMetadata {
public static final String MOCKED_UNPARTITIONED_TABLE_NAME0 = "t0";
public static final String MOCKED_PARTITIONED_TABLE_NAME1 = "t1";
// date partition table
public static final String MOCKED_PARTITIONED_TABLE_NAME2 = "t2";
// string partition table
public static final String MOCKED_STRING_PARTITIONED_TABLE_NAME1 = "part_tbl1";
@ -95,7 +97,9 @@ public class MockIcebergMetadata implements ConnectorMetadata {
public static final String MOCKED_PARTITIONED_EVOLUTION_DATE_MONTH_IDENTITY_TABLE_NAME = "t0_date_month_identity_evolution";
private static final List<String> PARTITION_TABLE_NAMES = ImmutableList.of(MOCKED_PARTITIONED_TABLE_NAME1,
MOCKED_STRING_PARTITIONED_TABLE_NAME1, MOCKED_STRING_PARTITIONED_TABLE_NAME2,
MOCKED_PARTITIONED_TABLE_NAME2,
MOCKED_STRING_PARTITIONED_TABLE_NAME1,
MOCKED_STRING_PARTITIONED_TABLE_NAME2,
MOCKED_STRING_PARTITIONED_TABLE_NAME3);
private static final List<String> PARTITION_TRANSFORM_TABLE_NAMES =
@ -183,6 +187,9 @@ public class MockIcebergMetadata implements ConnectorMetadata {
if (tblName.equals(MOCKED_PARTITIONED_TABLE_NAME1)) {
icebergTableInfoMap.put(tblName, new IcebergTableInfo(icebergTable, PARTITION_NAMES_0,
100, columnStatisticMap));
} else if (tblName.equals(MOCKED_PARTITIONED_TABLE_NAME2)) {
icebergTableInfoMap.put(tblName, new IcebergTableInfo(icebergTable, PARTITION_NAMES_0,
100, columnStatisticMap));
} else {
icebergTableInfoMap.put(tblName, new IcebergTableInfo(icebergTable, PARTITION_NAMES_1,
100, columnStatisticMap));
@ -214,6 +221,10 @@ public class MockIcebergMetadata implements ConnectorMetadata {
return ImmutableList.of(new Column("id", Type.INT, true),
new Column("data", Type.STRING, true),
new Column("date", Type.STRING, true));
} else if (tblName.equals(MOCKED_PARTITIONED_TABLE_NAME2)) {
return ImmutableList.of(new Column("id", Type.INT, true),
new Column("data", Type.STRING, true),
new Column("date", Type.DATE, true));
} else {
return Arrays.asList(new Column("a", Type.VARCHAR), new Column("b", Type.VARCHAR),
new Column("c", Type.INT), new Column("d", Type.VARCHAR));
@ -231,6 +242,10 @@ public class MockIcebergMetadata implements ConnectorMetadata {
return new Schema(required(3, "id", Types.IntegerType.get()),
required(4, "data", Types.StringType.get()),
required(5, "date", Types.StringType.get()));
} else if (tblName.equals(MOCKED_PARTITIONED_TABLE_NAME2)) {
return new Schema(required(3, "id", Types.IntegerType.get()),
required(4, "data", Types.StringType.get()),
required(5, "date", Types.DateType.get()));
} else {
return new Schema(required(3, "a", Types.StringType.get()),
required(4, "b", Types.StringType.get()),
@ -259,7 +274,13 @@ public class MockIcebergMetadata implements ConnectorMetadata {
new File(getStarRocksHome() + "/" + MOCKED_PARTITIONED_DB_NAME + "/"
+ MOCKED_PARTITIONED_TABLE_NAME1), MOCKED_PARTITIONED_TABLE_NAME1,
schema, spec, 1);
} else if (tblName.equals(MOCKED_PARTITIONED_TABLE_NAME2)) {
PartitionSpec spec =
PartitionSpec.builderFor(schema).identity("date").build();
return TestTables.create(
new File(getStarRocksHome() + "/" + MOCKED_PARTITIONED_DB_NAME + "/"
+ MOCKED_PARTITIONED_TABLE_NAME2), MOCKED_PARTITIONED_TABLE_NAME2,
schema, spec, 1);
} else {
PartitionSpec spec =
PartitionSpec.builderFor(schema).identity("d").build();

View File

@ -27,8 +27,6 @@ import org.junit.jupiter.api.MethodOrderer.MethodName;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import static com.starrocks.sql.plan.ConnectorPlanTestBase.MOCK_PAIMON_CATALOG_NAME;
@ -114,13 +112,4 @@ public class PartitionBasedMvRefreshProcessorPaimonTest extends MVTestBase {
TaskRun taskRun = TaskRunBuilder.newBuilder(task).build();
initAndExecuteTaskRun(taskRun);
}
/**
 * Creates (if needed) the nested sub-directory {@code subDirs} under {@code root}
 * and returns it.
 *
 * @param root    parent directory to create the folders under
 * @param subDirs path segments joined with '/' to form the nested folder path
 * @return the created (or already existing) directory
 * @throws IOException if the directory does not exist and could not be created
 */
private static File newFolder(File root, String... subDirs) throws IOException {
    String subFolder = String.join("/", subDirs);
    File result = new File(root, subFolder);
    // mkdirs() returns false when the directory already exists, so only fail if
    // the target is still not a directory afterwards. Report the actual target
    // path (result), not the parent (root), so the error message is actionable.
    if (!result.mkdirs() && !result.isDirectory()) {
        throw new IOException("Couldn't create folders " + result);
    }
    return result;
}
}

View File

@ -37,8 +37,6 @@ import org.junit.jupiter.api.MethodOrderer.MethodName;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -380,13 +378,4 @@ public class PartitionBasedMvRefreshTest extends MVTestBase {
starRocksAssert.dropTable("join_base_t2");
starRocksAssert.dropMaterializedView("join_mv1");
}
/**
 * Creates (if needed) the nested sub-directory {@code subDirs} under {@code root}
 * and returns it.
 *
 * @param root    parent directory to create the folders under
 * @param subDirs path segments joined with '/' to form the nested folder path
 * @return the created (or already existing) directory
 * @throws IOException if the directory does not exist and could not be created
 */
private static File newFolder(File root, String... subDirs) throws IOException {
    String subFolder = String.join("/", subDirs);
    File result = new File(root, subFolder);
    // mkdirs() returns false when the directory already exists, so only fail if
    // the target is still not a directory afterwards. Report the actual target
    // path (result), not the parent (root), so the error message is actionable.
    if (!result.mkdirs() && !result.isDirectory()) {
        throw new IOException("Couldn't create folders " + result);
    }
    return result;
}
}

View File

@ -94,6 +94,7 @@ import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
@ -799,4 +800,13 @@ public abstract class MVTestBase extends StarRocksTestBase {
public static void enableMVRewriteConsiderDataLayout() {
Config.mv_rewrite_consider_data_layout_mode = "enable";
}
/**
 * Creates (if needed) the nested sub-directory {@code subDirs} under {@code root}
 * and returns it. Shared helper for tests that need per-test scratch folders.
 *
 * @param root    parent directory to create the folders under
 * @param subDirs path segments joined with '/' to form the nested folder path
 * @return the created (or already existing) directory
 * @throws IOException if the directory does not exist and could not be created
 */
public static File newFolder(File root, String... subDirs) throws IOException {
    String subFolder = String.join("/", subDirs);
    File result = new File(root, subFolder);
    // mkdirs() returns false when the directory already exists (callers such as
    // beforeClass invoke this twice with the same path), so only fail if the
    // target is still not a directory afterwards. Report the actual target
    // path (result), not the parent (root), so the error message is actionable.
    if (!result.mkdirs() && !result.isDirectory()) {
        throw new IOException("Couldn't create folders " + result);
    }
    return result;
}
}

View File

@ -0,0 +1,85 @@
-- name: test_create_mv_with_iceberg
create external catalog mv_iceberg_${uuid0}
properties
(
"type" = "iceberg",
"iceberg.catalog.type" = "hive",
"hive.metastore.uris" = "${iceberg_catalog_hive_metastore_uris}"
);
-- result:
-- !result
set catalog mv_iceberg_${uuid0};
-- result:
-- !result
create database mv_iceberg_db_${uuid0};
-- result:
-- !result
use mv_iceberg_db_${uuid0};
-- result:
-- !result
CREATE TABLE t1 (
num int,
dt date
)
PARTITION BY (dt);
-- result:
-- !result
INSERT INTO t1 VALUES
(1,"2020-06-15"),(2,"2020-06-18"),(3,"2020-06-21"),(4,"2020-06-24"),
(1,"2020-07-02"),(2,"2020-07-05"),(3,"2020-07-08"),(4,"2020-07-11"),
(1,"2020-07-16"),(2,"2020-07-19"),(3,"2020-07-22"),(4,"2020-07-25"),
(2,"2020-06-15"),(3,"2020-06-18"),(4,"2020-06-21"),(5,"2020-06-24"),
(2,"2020-07-02"),(3,"2020-07-05"),(4,"2020-07-08"),(5,"2020-07-11");
-- result:
-- !result
set catalog default_catalog;
-- result:
-- !result
create database db_${uuid0};
-- result:
-- !result
use db_${uuid0};
-- result:
-- !result
set new_planner_optimize_timeout=10000;
-- result:
-- !result
CREATE MATERIALIZED VIEW test_mv1
PARTITION BY date_trunc('month', dt)
REFRESH DEFERRED MANUAL
PROPERTIES ("replication_num" = "1")
AS SELECT v1.num, v1.dt from mv_iceberg_${uuid0}.mv_iceberg_db_${uuid0}.t1 as v1;
-- result:
-- !result
REFRESH MATERIALIZED VIEW test_mv1 WITH SYNC MODE;
select count(1) from test_mv1;
-- result:
20
-- !result
CREATE MATERIALIZED VIEW test_mv2
PARTITION BY date_trunc('month', dt)
REFRESH DEFERRED MANUAL
PROPERTIES ("replication_num" = "1")
AS SELECT v1.num, v1.dt from test_mv1 as v1;
-- result:
-- !result
REFRESH MATERIALIZED VIEW test_mv2 WITH SYNC MODE;
select count(1) from test_mv2;
-- result:
20
-- !result
drop materialized view default_catalog.db_${uuid0}.test_mv1;
-- result:
-- !result
drop materialized view default_catalog.db_${uuid0}.test_mv2;
-- result:
-- !result
drop table mv_iceberg_${uuid0}.mv_iceberg_db_${uuid0}.t1 force;
-- result:
-- !result
drop database default_catalog.db_${uuid0} force;
-- result:
-- !result
drop database mv_iceberg_${uuid0}.mv_iceberg_db_${uuid0} force;
-- result:
-- !result

View File

@ -0,0 +1,53 @@
-- name: test_create_mv_with_iceberg
create external catalog mv_iceberg_${uuid0}
properties
(
"type" = "iceberg",
"iceberg.catalog.type" = "hive",
"hive.metastore.uris" = "${iceberg_catalog_hive_metastore_uris}"
);
-- create iceberg table
set catalog mv_iceberg_${uuid0};
create database mv_iceberg_db_${uuid0};
use mv_iceberg_db_${uuid0};
CREATE TABLE t1 (
num int,
dt date
)
PARTITION BY (dt);
INSERT INTO t1 VALUES
(1,"2020-06-15"),(2,"2020-06-18"),(3,"2020-06-21"),(4,"2020-06-24"),
(1,"2020-07-02"),(2,"2020-07-05"),(3,"2020-07-08"),(4,"2020-07-11"),
(1,"2020-07-16"),(2,"2020-07-19"),(3,"2020-07-22"),(4,"2020-07-25"),
(2,"2020-06-15"),(3,"2020-06-18"),(4,"2020-06-21"),(5,"2020-06-24"),
(2,"2020-07-02"),(3,"2020-07-05"),(4,"2020-07-08"),(5,"2020-07-11");
-- create mv
set catalog default_catalog;
create database db_${uuid0};
use db_${uuid0};
set new_planner_optimize_timeout=10000;
CREATE MATERIALIZED VIEW test_mv1
PARTITION BY date_trunc('month', dt)
REFRESH DEFERRED MANUAL
PROPERTIES ("replication_num" = "1")
AS SELECT v1.num, v1.dt from mv_iceberg_${uuid0}.mv_iceberg_db_${uuid0}.t1 as v1;
REFRESH MATERIALIZED VIEW test_mv1 WITH SYNC MODE;
select count(1) from test_mv1;
CREATE MATERIALIZED VIEW test_mv2
PARTITION BY date_trunc('month', dt)
REFRESH DEFERRED MANUAL
PROPERTIES ("replication_num" = "1")
AS SELECT v1.num, v1.dt from test_mv1 as v1;
REFRESH MATERIALIZED VIEW test_mv2 WITH SYNC MODE;
select count(1) from test_mv2;
drop materialized view default_catalog.db_${uuid0}.test_mv1;
drop materialized view default_catalog.db_${uuid0}.test_mv2;
drop table mv_iceberg_${uuid0}.mv_iceberg_db_${uuid0}.t1 force;
drop database default_catalog.db_${uuid0} force;
drop database mv_iceberg_${uuid0}.mv_iceberg_db_${uuid0} force;