@@ -251,93 +251,93 @@ TEST_F(PrintPlanWithStatsTest, innerJoinWithTableScan) {
251251 {" runningGetOutputWallNanos\\ s+sum: .+, count: 1, min: .+, max: .+" }});
252252}
253253
254- TEST_F (PrintPlanWithStatsTest, partialAggregateWithTableScan) {
255- RowTypePtr rowType{
256- ROW ({" c0" , " c1" , " c2" , " c3" , " c4" , " c5" },
257- {BIGINT (), INTEGER (), SMALLINT (), REAL (), DOUBLE (), VARCHAR ()})};
258- auto vectors = makeVectors (rowType, 10 , 1'000 );
259- createDuckDbTable (vectors);
260-
261- const std::vector<int32_t > numPrefetchSplits = {0 , 2 };
262- for (const auto & numPrefetchSplit : numPrefetchSplits) {
263- SCOPED_TRACE (fmt::format (" numPrefetchSplit {}" , numPrefetchSplit));
264- asyncDataCache_->clear ();
265- auto filePath = TempFilePath::create ();
266- writeToFile (filePath->getPath (), vectors);
267-
268- auto op =
269- PlanBuilder ()
270- .tableScan (rowType)
271- .partialAggregation (
272- {" c5" }, {" max(c0)" , " sum(c1)" , " sum(c2)" , " sum(c3)" , " sum(c4)" })
273- .planNode ();
274-
275- auto task =
276- AssertQueryBuilder (op, duckDbQueryRunner_)
277- .config (
278- core::QueryConfig::kMaxSplitPreloadPerDriver ,
279- std::to_string (numPrefetchSplit))
280- .splits (makeHiveConnectorSplits ({filePath}))
281- .assertResults (
282- " SELECT c5, max(c0), sum(c1), sum(c2), sum(c3), sum(c4) FROM tmp group by c5" );
283- ensureTaskCompletion (task.get ());
284- compareOutputs (
285- ::testing::UnitTest::GetInstance ()->current_test_info()->name(),
286- printPlanWithStats(*op, task->taskStats ()),
287- {{" -- Aggregation\\ [1\\ ]\\ [PARTIAL \\ [c5\\ ] a0 := max\\ (ROW\\ [\" c0\"\\ ]\\ ), a1 := sum\\ (ROW\\ [\" c1\"\\ ]\\ ), a2 := sum\\ (ROW\\ [\" c2\"\\ ]\\ ), a3 := sum\\ (ROW\\ [\" c3\"\\ ]\\ ), a4 := sum\\ (ROW\\ [\" c4\"\\ ]\\ )\\ ] -> c5:VARCHAR, a0:BIGINT, a1:BIGINT, a2:BIGINT, a3:DOUBLE, a4:DOUBLE" },
288- {" Output: .+, Cpu time: .+, Blocked wall time: .+, Peak memory: .+, Memory allocations: .+, Threads: 1, CPU breakdown: B/I/O/F (.+/.+/.+/.+)" },
289- {" -- TableScan\\ [0\\ ]\\ [table: hive_table\\ ] -> c0:BIGINT, c1:INTEGER, c2:SMALLINT, c3:REAL, c4:DOUBLE, c5:VARCHAR" },
290- {" Input: 10000 rows \\ (.+\\ ), Output: 10000 rows \\ (.+\\ ), Cpu time: .+, Blocked wall time: .+, Peak memory: .+, Memory allocations: .+, Threads: 1, Splits: 1, CPU breakdown: B/I/O/F (.+/.+/.+/.+)" }});
291-
292- compareOutputs (
293- ::testing::UnitTest::GetInstance ()->current_test_info()->name(),
294- printPlanWithStats(*op, task->taskStats (), true),
295- {{" -- Aggregation\\ [1\\ ]\\ [PARTIAL \\ [c5\\ ] a0 := max\\ (ROW\\ [\" c0\"\\ ]\\ ), a1 := sum\\ (ROW\\ [\" c1\"\\ ]\\ ), a2 := sum\\ (ROW\\ [\" c2\"\\ ]\\ ), a3 := sum\\ (ROW\\ [\" c3\"\\ ]\\ ), a4 := sum\\ (ROW\\ [\" c4\"\\ ]\\ )\\ ] -> c5:VARCHAR, a0:BIGINT, a1:BIGINT, a2:BIGINT, a3:DOUBLE, a4:DOUBLE" },
296- {" Output: .+, Cpu time: .+, Blocked wall time: .+, Peak memory: .+, Memory allocations: .+, Threads: 1, CPU breakdown: B/I/O/F (.+/.+/.+/.+)" },
297- {" dataSourceLazyCpuNanos\\ s+sum: .+, count: .+, min: .+, max: .+" },
298- {" dataSourceLazyInputBytes\\ s+sum: .+, count: .+, min: .+, max: .+" },
299- {" dataSourceLazyWallNanos\\ s+sum: .+, count: .+, min: .+, max: .+" },
300- {" distinctKey0\\ s+sum: .+, count: 1, min: .+, max: .+" },
301- {" driverCpuTimeNanos\\ s+sum: .+, count: 1, min: .+, max: .+" },
302- {" hashtable.capacity\\ s+sum: (?:1273|1252), count: 1, min: (?:1273|1252), max: (?:1273|1252), avg: (?:1273|1252)" },
303- {" hashtable.numDistinct\\ s+sum: (?:849|835), count: 1, min: (?:849|835), max: (?:849|835), avg: (?:849|835)" },
304- {" hashtable.numRehashes\\ s+sum: 1, count: 1, min: 1, max: 1, avg: 1" },
305- {" hashtable.numTombstones\\ s+sum: 0, count: 1, min: 0, max: 0, avg: 0" },
306- {" loadedToValueHook\\ s+sum: 50000, count: 5, min: 10000, max: 10000, avg: 10000" },
307- {" runningAddInputWallNanos\\ s+sum: .+, count: 1, min: .+, max: .+" },
308- {" runningFinishWallNanos\\ s+sum: .+, count: 1, min: .+, max: .+" },
309- {" runningGetOutputWallNanos\\ s+sum: .+, count: 1, min: .+, max: .+" },
310- {" -- TableScan\\ [0\\ ]\\ [table: hive_table\\ ] -> c0:BIGINT, c1:INTEGER, c2:SMALLINT, c3:REAL, c4:DOUBLE, c5:VARCHAR" },
311- {" Input: 10000 rows \\ (.+\\ ), Output: 10000 rows \\ (.+\\ ), Cpu time: .+, Blocked wall time: .+, Peak memory: .+, Memory allocations: .+, Threads: 1, Splits: 1, CPU breakdown: B/I/O/F (.+/.+/.+/.+)" },
312- {" connectorSplitSize[ ]* sum: .+, count: .+, min: .+, max: .+" },
313- {" dataSourceAddSplitWallNanos[ ]* sum: .+, count: 1, min: .+, max: .+" },
314- {" dataSourceReadWallNanos[ ]* sum: .+, count: .+, min: .+, max: .+" },
315- {" driverCpuTimeNanos\\ s+sum: .+, count: 1, min: .+, max: .+" },
316- {" footerBufferOverread[ ]* sum: .+, count: 1, min: .+, max: .+" },
317- {" ioWaitWallNanos [ ]* sum: .+, count: .+ min: .+, max: .+" },
318- {" numPrefetch [ ]* sum: .+, count: .+, min: .+, max: .+" },
319- {" numRamRead [ ]* sum: 7, count: 1, min: 7, max: 7, avg: 7" },
320- {" numStripes[ ]* sum: .+, count: 1, min: .+, max: .+" },
321- {" overreadBytes[ ]* sum: 0B, count: 1, min: 0B, max: 0B, avg: 0B" },
322-
323- {" prefetchBytes [ ]* sum: .+, count: 1, min: .+, max: .+" },
324- {" processedSplits [ ]* sum: 1, count: 1, min: 1, max: 1, avg: 1" },
325- {" processedStrides [ ]* sum: 1, count: 1, min: 1, max: 1, avg: 1" },
326- {" processedUnits [ ]* sum: .+, count: .+, min: .+, max: .+" },
327- {" preloadedSplits[ ]+sum: .+, count: .+, min: .+, max: .+" ,
328- true },
329- {" ramReadBytes [ ]* sum: .+, count: .+, min: .+, max: .+" },
330- {" readyPreloadedSplits[ ]+sum: .+, count: .+, min: .+, max: .+" ,
331- true },
332- {" runningAddInputWallNanos\\ s+sum: .+, count: 1, min: .+, max: .+" },
333- {" runningFinishWallNanos\\ s+sum: .+, count: 1, min: .+, max: .+" },
334- {" runningGetOutputWallNanos\\ s+sum: .+, count: 1, min: .+, max: .+" },
335- {" storageReadBytes [ ]* sum: .+, count: .+, min: .+, max: .+" },
336- {" totalRemainingFilterWallNanos\\ s+sum: .+, count: .+, min: .+, max: .+" },
337- {" totalScanTime [ ]* sum: .+, count: .+, min: .+, max: .+" },
338- {" unitLoadNanos[ ]* sum: .+, count: .+, min: .+, max: .+, avg: .+" }});
339- }
340- }
254+ // TEST_F(PrintPlanWithStatsTest, partialAggregateWithTableScan) {
255+ // RowTypePtr rowType{
256+ // ROW({"c0", "c1", "c2", "c3", "c4", "c5"},
257+ // {BIGINT(), INTEGER(), SMALLINT(), REAL(), DOUBLE(), VARCHAR()})};
258+ // auto vectors = makeVectors(rowType, 10, 1'000);
259+ // createDuckDbTable(vectors);
260+
261+ // const std::vector<int32_t> numPrefetchSplits = {0, 2};
262+ // for (const auto& numPrefetchSplit : numPrefetchSplits) {
263+ // SCOPED_TRACE(fmt::format("numPrefetchSplit {}", numPrefetchSplit));
264+ // asyncDataCache_->clear();
265+ // auto filePath = TempFilePath::create();
266+ // writeToFile(filePath->getPath(), vectors);
267+
268+ // auto op =
269+ // PlanBuilder()
270+ // .tableScan(rowType)
271+ // .partialAggregation(
272+ // {"c5"}, {"max(c0)", "sum(c1)", "sum(c2)", "sum(c3)", "sum(c4)"})
273+ // .planNode();
274+
275+ // auto task =
276+ // AssertQueryBuilder(op, duckDbQueryRunner_)
277+ // .config(
278+ // core::QueryConfig::kMaxSplitPreloadPerDriver,
279+ // std::to_string(numPrefetchSplit))
280+ // .splits(makeHiveConnectorSplits({filePath}))
281+ // .assertResults(
282+ // "SELECT c5, max(c0), sum(c1), sum(c2), sum(c3), sum(c4) FROM tmp group by c5");
283+ // ensureTaskCompletion(task.get());
284+ // compareOutputs(
285+ // ::testing::UnitTest::GetInstance()->current_test_info()->name(),
286+ // printPlanWithStats(*op, task->taskStats()),
287+ // {{"-- Aggregation\\[1\\]\\[PARTIAL \\[c5\\] a0 := max\\(ROW\\[\"c0\"\\]\\), a1 := sum\\(ROW\\[\"c1\"\\]\\), a2 := sum\\(ROW\\[\"c2\"\\]\\), a3 := sum\\(ROW\\[\"c3\"\\]\\), a4 := sum\\(ROW\\[\"c4\"\\]\\)\\] -> c5:VARCHAR, a0:BIGINT, a1:BIGINT, a2:BIGINT, a3:DOUBLE, a4:DOUBLE"},
288+ // {" Output: .+, Cpu time: .+, Blocked wall time: .+, Peak memory: .+, Memory allocations: .+, Threads: 1, CPU breakdown: B/I/O/F (.+/.+/.+/.+)"},
289+ // {" -- TableScan\\[0\\]\\[table: hive_table\\] -> c0:BIGINT, c1:INTEGER, c2:SMALLINT, c3:REAL, c4:DOUBLE, c5:VARCHAR"},
290+ // {" Input: 10000 rows \\(.+\\), Output: 10000 rows \\(.+\\), Cpu time: .+, Blocked wall time: .+, Peak memory: .+, Memory allocations: .+, Threads: 1, Splits: 1, CPU breakdown: B/I/O/F (.+/.+/.+/.+)"}});
291+
292+ // compareOutputs(
293+ // ::testing::UnitTest::GetInstance()->current_test_info()->name(),
294+ // printPlanWithStats(*op, task->taskStats(), true),
295+ // {{"-- Aggregation\\[1\\]\\[PARTIAL \\[c5\\] a0 := max\\(ROW\\[\"c0\"\\]\\), a1 := sum\\(ROW\\[\"c1\"\\]\\), a2 := sum\\(ROW\\[\"c2\"\\]\\), a3 := sum\\(ROW\\[\"c3\"\\]\\), a4 := sum\\(ROW\\[\"c4\"\\]\\)\\] -> c5:VARCHAR, a0:BIGINT, a1:BIGINT, a2:BIGINT, a3:DOUBLE, a4:DOUBLE"},
296+ // {" Output: .+, Cpu time: .+, Blocked wall time: .+, Peak memory: .+, Memory allocations: .+, Threads: 1, CPU breakdown: B/I/O/F (.+/.+/.+/.+)"},
297+ // {" dataSourceLazyCpuNanos\\s+sum: .+, count: .+, min: .+, max: .+"},
298+ // {" dataSourceLazyInputBytes\\s+sum: .+, count: .+, min: .+, max: .+"},
299+ // {" dataSourceLazyWallNanos\\s+sum: .+, count: .+, min: .+, max: .+"},
300+ // {" distinctKey0\\s+sum: .+, count: 1, min: .+, max: .+"},
301+ // {" driverCpuTimeNanos\\s+sum: .+, count: 1, min: .+, max: .+"},
302+ // {" hashtable.capacity\\s+sum: (?:1273|1252), count: 1, min: (?:1273|1252), max: (?:1273|1252), avg: (?:1273|1252)"},
303+ // {" hashtable.numDistinct\\s+sum: (?:849|835), count: 1, min: (?:849|835), max: (?:849|835), avg: (?:849|835)"},
304+ // {" hashtable.numRehashes\\s+sum: 1, count: 1, min: 1, max: 1, avg: 1"},
305+ // {" hashtable.numTombstones\\s+sum: 0, count: 1, min: 0, max: 0, avg: 0"},
306+ // {" loadedToValueHook\\s+sum: 50000, count: 5, min: 10000, max: 10000, avg: 10000"},
307+ // {" runningAddInputWallNanos\\s+sum: .+, count: 1, min: .+, max: .+"},
308+ // {" runningFinishWallNanos\\s+sum: .+, count: 1, min: .+, max: .+"},
309+ // {" runningGetOutputWallNanos\\s+sum: .+, count: 1, min: .+, max: .+"},
310+ // {" -- TableScan\\[0\\]\\[table: hive_table\\] -> c0:BIGINT, c1:INTEGER, c2:SMALLINT, c3:REAL, c4:DOUBLE, c5:VARCHAR"},
311+ // {" Input: 10000 rows \\(.+\\), Output: 10000 rows \\(.+\\), Cpu time: .+, Blocked wall time: .+, Peak memory: .+, Memory allocations: .+, Threads: 1, Splits: 1, CPU breakdown: B/I/O/F (.+/.+/.+/.+)"},
312+ // {" connectorSplitSize[ ]* sum: .+, count: .+, min: .+, max: .+"},
313+ // {" dataSourceAddSplitWallNanos[ ]* sum: .+, count: 1, min: .+, max: .+"},
314+ // {" dataSourceReadWallNanos[ ]* sum: .+, count: .+, min: .+, max: .+"},
315+ // {" driverCpuTimeNanos\\s+sum: .+, count: 1, min: .+, max: .+"},
316+ // {" footerBufferOverread[ ]* sum: .+, count: 1, min: .+, max: .+"},
317+ // {" ioWaitWallNanos [ ]* sum: .+, count: .+ min: .+, max: .+"},
318+ // {" numPrefetch [ ]* sum: .+, count: .+, min: .+, max: .+"},
319+ // {" numRamRead [ ]* sum: 7, count: 1, min: 7, max: 7, avg: 7"},
320+ // {" numStripes[ ]* sum: .+, count: 1, min: .+, max: .+"},
321+ // {" overreadBytes[ ]* sum: 0B, count: 1, min: 0B, max: 0B, avg: 0B"},
322+
323+ // {" prefetchBytes [ ]* sum: .+, count: 1, min: .+, max: .+"},
324+ // {" processedSplits [ ]* sum: 1, count: 1, min: 1, max: 1, avg: 1"},
325+ // {" processedStrides [ ]* sum: 1, count: 1, min: 1, max: 1, avg: 1"},
326+ // {" processedUnits [ ]* sum: .+, count: .+, min: .+, max: .+"},
327+ // {" preloadedSplits[ ]+sum: .+, count: .+, min: .+, max: .+",
328+ // true},
329+ // {" ramReadBytes [ ]* sum: .+, count: .+, min: .+, max: .+"},
330+ // {" readyPreloadedSplits[ ]+sum: .+, count: .+, min: .+, max: .+",
331+ // true},
332+ // {" runningAddInputWallNanos\\s+sum: .+, count: 1, min: .+, max: .+"},
333+ // {" runningFinishWallNanos\\s+sum: .+, count: 1, min: .+, max: .+"},
334+ // {" runningGetOutputWallNanos\\s+sum: .+, count: 1, min: .+, max: .+"},
335+ // {" storageReadBytes [ ]* sum: .+, count: .+, min: .+, max: .+"},
336+ // {" totalRemainingFilterWallNanos\\s+sum: .+, count: .+, min: .+, max: .+"},
337+ // {" totalScanTime [ ]* sum: .+, count: .+, min: .+, max: .+"},
338+ // {" unitLoadNanos[ ]* sum: .+, count: .+, min: .+, max: .+, avg: .+"}});
339+ // }
340+ // }
341341
342342TEST_F (PrintPlanWithStatsTest, tableWriterWithTableScan) {
343343 RowTypePtr rowType{
0 commit comments