diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 98febd914c2724..8c5fc5e3a98e42 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,6 +1,63 @@ -## Proposed changes +### What problem does this PR solve? + + + Issue Number: close #xxx - + +Related PR: #xxx + +Problem Summary: + +### Check List (For Committer) + +- Test + + - [ ] Regression test + - [ ] Unit Test + - [ ] Manual test (add detailed scripts or steps below) + - [ ] No need to test or manual test. Explain why: + - [ ] This is a refactor/code format and no logic has been changed. + - [ ] Previous test can cover this change. + - [ ] No colde files have been changed. + - [ ] Other reason + +- Behavior changed: + + - [ ] No. + - [ ] Yes. + +- Does this need documentation? + + - [ ] No. + - [ ] Yes. + +- Release note + + + + None + +### Check List (For Reviewer who merge this PR) + +- [ ] Confirm the release note +- [ ] Confirm test cases +- [ ] Confirm document +- [ ] Add branch pick label diff --git a/.github/workflows/auto-cherry-pick.yml b/.github/workflows/auto-cherry-pick.yml index 4ee2614f0c952f..7d97e498ba3263 100644 --- a/.github/workflows/auto-cherry-pick.yml +++ b/.github/workflows/auto-cherry-pick.yml @@ -45,7 +45,7 @@ jobs: pip install PyGithub - name: Check SHA run: | - expected_sha="1941de05514e15c216067778e0287b4c3ebcd6f6042ee189a12257bfd0cdd9f764e18c7dae5de868e9b7128ce3be98dc8f78252932cee7d55552fc0cf8b69496" + expected_sha="80b7c6087f2a3e4f4c7f035a52e8e7b05ce00f27aa5c1bd52179df685c912447f94a96145fd3204a3958d8ed9777de5a5183b120e99e0e95bbca0366d69b0ac0" calculated_sha=$(sha512sum tools/auto-pick-script.py | awk '{ print $1 }') if [ "$calculated_sha" != "$expected_sha" ]; then echo "SHA mismatch! Expected: $expected_sha, but got: $calculated_sha" diff --git a/be/CMakeLists.txt b/be/CMakeLists.txt index d617aa173d93f8..1d79048f96511c 100644 --- a/be/CMakeLists.txt +++ b/be/CMakeLists.txt @@ -342,6 +342,10 @@ if (ENABLE_INJECTION_POINT) set(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -DENABLE_INJECTION_POINT") endif() +if (ENABLE_CACHE_LOCK_DEBUG) + set(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -DENABLE_CACHE_LOCK_DEBUG") +endif() + # Enable memory tracker, which allows BE to limit the memory of tasks such as query, load, # and compaction,and observe the memory of BE through be_ip:http_port/MemTracker. 
# Adding the option `USE_MEM_TRACKER=OFF sh build.sh` when compiling can turn off the memory tracker, @@ -782,6 +786,7 @@ install(DIRECTORY DESTINATION ${OUTPUT_DIR}/conf) install(FILES ${BASE_DIR}/../bin/start_be.sh ${BASE_DIR}/../bin/stop_be.sh + ${BASE_DIR}/../tools/jeprof PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_WRITE GROUP_EXECUTE WORLD_READ WORLD_EXECUTE diff --git a/be/src/cloud/cloud_base_compaction.cpp b/be/src/cloud/cloud_base_compaction.cpp index f431eaf850bbd1..88d83000e95dfa 100644 --- a/be/src/cloud/cloud_base_compaction.cpp +++ b/be/src/cloud/cloud_base_compaction.cpp @@ -124,7 +124,8 @@ Status CloudBaseCompaction::prepare_compact() { for (auto& rs : _input_rowsets) { _input_row_num += rs->num_rows(); _input_segments += rs->num_segments(); - _input_rowsets_size += rs->data_disk_size(); + _input_rowsets_data_size += rs->data_disk_size(); + _input_rowsets_total_size += rs->total_disk_size(); } LOG_INFO("start CloudBaseCompaction, tablet_id={}, range=[{}-{}]", _tablet->tablet_id(), _input_rowsets.front()->start_version(), _input_rowsets.back()->end_version()) @@ -132,7 +133,9 @@ Status CloudBaseCompaction::prepare_compact() { .tag("input_rowsets", _input_rowsets.size()) .tag("input_rows", _input_row_num) .tag("input_segments", _input_segments) - .tag("input_data_size", _input_rowsets_size); + .tag("input_rowsets_data_size", _input_rowsets_data_size) + .tag("input_rowsets_index_size", _input_rowsets_index_size) + .tag("input_rowsets_total_size", _input_rowsets_total_size); return st; } @@ -270,17 +273,21 @@ Status CloudBaseCompaction::execute_compact() { .tag("input_rowsets", _input_rowsets.size()) .tag("input_rows", _input_row_num) .tag("input_segments", _input_segments) - .tag("input_data_size", _input_rowsets_size) + .tag("input_rowsets_data_size", _input_rowsets_data_size) + .tag("input_rowsets_index_size", _input_rowsets_index_size) + .tag("input_rowsets_total", _input_rowsets_total_size) .tag("output_rows", _output_rowset->num_rows()) .tag("output_segments", _output_rowset->num_segments()) - .tag("output_data_size", _output_rowset->data_disk_size()); + .tag("output_rowset_data_size", _output_rowset->data_disk_size()) + .tag("output_rowset_index_size", _output_rowset->index_disk_size()) + .tag("output_rowset_total_size", _output_rowset->total_disk_size()); //_compaction_succeed = true; _state = CompactionState::SUCCESS; DorisMetrics::instance()->base_compaction_deltas_total->increment(_input_rowsets.size()); - DorisMetrics::instance()->base_compaction_bytes_total->increment(_input_rowsets_size); - base_output_size << _output_rowset->data_disk_size(); + DorisMetrics::instance()->base_compaction_bytes_total->increment(_input_rowsets_total_size); + base_output_size << _output_rowset->total_disk_size(); return Status::OK(); } @@ -302,8 +309,8 @@ Status CloudBaseCompaction::modify_rowsets() { compaction_job->set_output_cumulative_point(cloud_tablet()->cumulative_layer_point()); compaction_job->set_num_input_rows(_input_row_num); compaction_job->set_num_output_rows(_output_rowset->num_rows()); - compaction_job->set_size_input_rowsets(_input_rowsets_size); - compaction_job->set_size_output_rowsets(_output_rowset->data_disk_size()); + compaction_job->set_size_input_rowsets(_input_rowsets_total_size); + compaction_job->set_size_output_rowsets(_output_rowset->total_disk_size()); compaction_job->set_num_input_segments(_input_segments); compaction_job->set_num_output_segments(_output_rowset->num_segments()); 
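Note on the compaction hunks above and in the next few files: the single `_input_rowsets_size` counter is split into separate data, index, and total counters, and compaction-job and metric sizes switch from `data_disk_size()` to `total_disk_size()`. A minimal sketch of the accounting convention, assuming `total_disk_size()` equals segment data plus inverted-index size (the `check_table_size_correctness` change later in this diff enforces exactly that relation); `RowsetMetaView` is a hypothetical stand-in for `RowsetMeta`:

```cpp
// Sketch of the size-accounting convention used by the compaction changes.
#include <cstdint>
#include <vector>

struct RowsetMetaView {                 // hypothetical stand-in for RowsetMeta
    int64_t data_disk_size;             // segment files
    int64_t index_disk_size;            // inverted index files
    int64_t total_disk_size() const { return data_disk_size + index_disk_size; }
};

struct InputSizes {
    int64_t data = 0, index = 0, total = 0;
};

InputSizes accumulate_input_sizes(const std::vector<RowsetMetaView>& input_rowsets) {
    InputSizes s;
    for (const auto& rs : input_rowsets) {
        s.data += rs.data_disk_size;      // what the old _input_rowsets_size held
        s.index += rs.index_disk_size;    // newly tracked
        s.total += rs.total_disk_size();  // what metrics and compaction jobs now report
    }
    return s;
}
```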
compaction_job->set_num_input_rowsets(_input_rowsets.size()); diff --git a/be/src/cloud/cloud_cumulative_compaction.cpp b/be/src/cloud/cloud_cumulative_compaction.cpp index aad1bd7bfe7d2d..8eb92577693487 100644 --- a/be/src/cloud/cloud_cumulative_compaction.cpp +++ b/be/src/cloud/cloud_cumulative_compaction.cpp @@ -164,7 +164,9 @@ Status CloudCumulativeCompaction::prepare_compact() { for (auto& rs : _input_rowsets) { _input_row_num += rs->num_rows(); _input_segments += rs->num_segments(); - _input_rowsets_size += rs->data_disk_size(); + _input_rowsets_data_size += rs->data_disk_size(); + _input_rowsets_index_size += rs->index_disk_size(); + _input_rowsets_total_size += rs->total_disk_size(); } LOG_INFO("start CloudCumulativeCompaction, tablet_id={}, range=[{}-{}]", _tablet->tablet_id(), _input_rowsets.front()->start_version(), _input_rowsets.back()->end_version()) @@ -172,7 +174,9 @@ Status CloudCumulativeCompaction::prepare_compact() { .tag("input_rowsets", _input_rowsets.size()) .tag("input_rows", _input_row_num) .tag("input_segments", _input_segments) - .tag("input_data_size", _input_rowsets_size) + .tag("input_rowsets_data_size", _input_rowsets_data_size) + .tag("input_rowsets_index_size", _input_rowsets_index_size) + .tag("input_rowsets_total_size", _input_rowsets_total_size) .tag("tablet_max_version", cloud_tablet()->max_version_unlocked()) .tag("cumulative_point", cloud_tablet()->cumulative_layer_point()) .tag("num_rowsets", cloud_tablet()->fetch_add_approximate_num_rowsets(0)) @@ -201,10 +205,14 @@ Status CloudCumulativeCompaction::execute_compact() { .tag("input_rowsets", _input_rowsets.size()) .tag("input_rows", _input_row_num) .tag("input_segments", _input_segments) - .tag("input_data_size", _input_rowsets_size) + .tag("input_rowsets_data_size", _input_rowsets_data_size) + .tag("input_rowsets_index_size", _input_rowsets_index_size) + .tag("input_rowsets_total_size", _input_rowsets_total_size) .tag("output_rows", _output_rowset->num_rows()) .tag("output_segments", _output_rowset->num_segments()) - .tag("output_data_size", _output_rowset->data_disk_size()) + .tag("output_rowset_data_size", _output_rowset->data_disk_size()) + .tag("output_rowset_index_size", _output_rowset->index_disk_size()) + .tag("output_rowset_total_size", _output_rowset->total_disk_size()) .tag("tablet_max_version", _tablet->max_version_unlocked()) .tag("cumulative_point", cloud_tablet()->cumulative_layer_point()) .tag("num_rowsets", cloud_tablet()->fetch_add_approximate_num_rowsets(0)) @@ -213,8 +221,9 @@ Status CloudCumulativeCompaction::execute_compact() { _state = CompactionState::SUCCESS; DorisMetrics::instance()->cumulative_compaction_deltas_total->increment(_input_rowsets.size()); - DorisMetrics::instance()->cumulative_compaction_bytes_total->increment(_input_rowsets_size); - cumu_output_size << _output_rowset->data_disk_size(); + DorisMetrics::instance()->cumulative_compaction_bytes_total->increment( + _input_rowsets_total_size); + cumu_output_size << _output_rowset->total_disk_size(); return Status::OK(); } @@ -243,8 +252,8 @@ Status CloudCumulativeCompaction::modify_rowsets() { compaction_job->set_output_cumulative_point(new_cumulative_point); compaction_job->set_num_input_rows(_input_row_num); compaction_job->set_num_output_rows(_output_rowset->num_rows()); - compaction_job->set_size_input_rowsets(_input_rowsets_size); - compaction_job->set_size_output_rowsets(_output_rowset->data_disk_size()); + compaction_job->set_size_input_rowsets(_input_rowsets_total_size); + 
compaction_job->set_size_output_rowsets(_output_rowset->total_disk_size()); compaction_job->set_num_input_segments(_input_segments); compaction_job->set_num_output_segments(_output_rowset->num_segments()); compaction_job->set_num_input_rowsets(_input_rowsets.size()); @@ -351,7 +360,8 @@ Status CloudCumulativeCompaction::modify_rowsets() { stats.num_rows(), stats.data_size()); } } - if (_tablet->keys_type() == KeysType::UNIQUE_KEYS && + if (config::enable_delete_bitmap_merge_on_compaction && + _tablet->keys_type() == KeysType::UNIQUE_KEYS && _tablet->enable_unique_key_merge_on_write() && _input_rowsets.size() != 1) { process_old_version_delete_bitmap(); } diff --git a/be/src/cloud/cloud_cumulative_compaction_policy.cpp b/be/src/cloud/cloud_cumulative_compaction_policy.cpp index f9af469e56f60a..5a9879387b2327 100644 --- a/be/src/cloud/cloud_cumulative_compaction_policy.cpp +++ b/be/src/cloud/cloud_cumulative_compaction_policy.cpp @@ -209,7 +209,7 @@ int64_t CloudSizeBasedCumulativeCompactionPolicy::new_cumulative_point( // if rowsets have no delete version, check output_rowset total disk size satisfies promotion size. return output_rowset->start_version() == last_cumulative_point && (last_delete_version.first != -1 || - output_rowset->data_disk_size() >= cloud_promotion_size(tablet) || + output_rowset->total_disk_size() >= cloud_promotion_size(tablet) || satisfy_promotion_version) ? output_rowset->end_version() + 1 : last_cumulative_point; diff --git a/be/src/cloud/cloud_delete_bitmap_action.cpp b/be/src/cloud/cloud_delete_bitmap_action.cpp index 60db5896dfab8a..672574a5aa8901 100644 --- a/be/src/cloud/cloud_delete_bitmap_action.cpp +++ b/be/src/cloud/cloud_delete_bitmap_action.cpp @@ -95,6 +95,8 @@ Status CloudDeleteBitmapAction::_handle_show_delete_bitmap_count(HttpRequest* re auto count = tablet->tablet_meta()->delete_bitmap().get_delete_bitmap_count(); auto cardinality = tablet->tablet_meta()->delete_bitmap().cardinality(); auto size = tablet->tablet_meta()->delete_bitmap().get_size(); + LOG(INFO) << "show_delete_bitmap_count,tablet_id=" << tablet_id << ",count=" << count + << ",cardinality=" << cardinality << ",size=" << size; rapidjson::Document root; root.SetObject(); diff --git a/be/src/cloud/cloud_full_compaction.cpp b/be/src/cloud/cloud_full_compaction.cpp index 2e11891045c250..c27b728c93d29b 100644 --- a/be/src/cloud/cloud_full_compaction.cpp +++ b/be/src/cloud/cloud_full_compaction.cpp @@ -98,7 +98,9 @@ Status CloudFullCompaction::prepare_compact() { for (auto& rs : _input_rowsets) { _input_row_num += rs->num_rows(); _input_segments += rs->num_segments(); - _input_rowsets_size += rs->data_disk_size(); + _input_rowsets_data_size += rs->data_disk_size(); + _input_rowsets_index_size += rs->index_disk_size(); + _input_rowsets_total_size += rs->total_disk_size(); } LOG_INFO("start CloudFullCompaction, tablet_id={}, range=[{}-{}]", _tablet->tablet_id(), _input_rowsets.front()->start_version(), _input_rowsets.back()->end_version()) @@ -106,7 +108,9 @@ Status CloudFullCompaction::prepare_compact() { .tag("input_rowsets", _input_rowsets.size()) .tag("input_rows", _input_row_num) .tag("input_segments", _input_segments) - .tag("input_data_size", _input_rowsets_size); + .tag("input_rowsets_data_size", _input_rowsets_data_size) + .tag("input_rowsets_index_size", _input_rowsets_index_size) + .tag("input_rowsets_total_size", _input_rowsets_total_size); return st; } @@ -162,16 +166,20 @@ Status CloudFullCompaction::execute_compact() { .tag("input_rowsets", _input_rowsets.size()) 
.tag("input_rows", _input_row_num) .tag("input_segments", _input_segments) - .tag("input_data_size", _input_rowsets_size) + .tag("input_rowsets_data_size", _input_rowsets_data_size) + .tag("input_rowsets_index_size", _input_rowsets_index_size) + .tag("input_rowsets_total_size", _input_rowsets_total_size) .tag("output_rows", _output_rowset->num_rows()) .tag("output_segments", _output_rowset->num_segments()) - .tag("output_data_size", _output_rowset->data_disk_size()); + .tag("output_rowset_data_size", _output_rowset->data_disk_size()) + .tag("output_rowset_index_size", _output_rowset->index_disk_size()) + .tag("output_rowset_total_size", _output_rowset->total_disk_size()); _state = CompactionState::SUCCESS; DorisMetrics::instance()->full_compaction_deltas_total->increment(_input_rowsets.size()); - DorisMetrics::instance()->full_compaction_bytes_total->increment(_input_rowsets_size); - full_output_size << _output_rowset->data_disk_size(); + DorisMetrics::instance()->full_compaction_bytes_total->increment(_input_rowsets_total_size); + full_output_size << _output_rowset->total_disk_size(); return Status::OK(); } @@ -193,8 +201,12 @@ Status CloudFullCompaction::modify_rowsets() { compaction_job->set_output_cumulative_point(_output_rowset->end_version() + 1); compaction_job->set_num_input_rows(_input_row_num); compaction_job->set_num_output_rows(_output_rowset->num_rows()); - compaction_job->set_size_input_rowsets(_input_rowsets_size); - compaction_job->set_size_output_rowsets(_output_rowset->data_disk_size()); + compaction_job->set_size_input_rowsets(_input_rowsets_total_size); + compaction_job->set_size_output_rowsets(_output_rowset->total_disk_size()); + DBUG_EXECUTE_IF("CloudFullCompaction::modify_rowsets.wrong_compaction_data_size", { + compaction_job->set_size_input_rowsets(1); + compaction_job->set_size_output_rowsets(10000001); + }) compaction_job->set_num_input_segments(_input_segments); compaction_job->set_num_output_segments(_output_rowset->num_segments()); compaction_job->set_num_input_rowsets(_input_rowsets.size()); @@ -341,7 +353,7 @@ Status CloudFullCompaction::_cloud_full_compaction_update_delete_bitmap(int64_t .tag("input_rowsets", _input_rowsets.size()) .tag("input_rows", _input_row_num) .tag("input_segments", _input_segments) - .tag("input_data_size", _input_rowsets_size) + .tag("input_rowsets_total_size", _input_rowsets_total_size) .tag("update_bitmap_size", delete_bitmap->delete_bitmap.size()); _tablet->tablet_meta()->delete_bitmap().merge(*delete_bitmap); return Status::OK(); diff --git a/be/src/cloud/cloud_meta_mgr.cpp b/be/src/cloud/cloud_meta_mgr.cpp index 02497f6a044b91..57f3c7f80098d8 100644 --- a/be/src/cloud/cloud_meta_mgr.cpp +++ b/be/src/cloud/cloud_meta_mgr.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -51,6 +52,7 @@ #include "olap/olap_common.h" #include "olap/rowset/rowset.h" #include "olap/rowset/rowset_factory.h" +#include "olap/rowset/rowset_fwd.h" #include "olap/storage_engine.h" #include "olap/tablet_meta.h" #include "runtime/client_cache.h" @@ -750,6 +752,7 @@ Status CloudMetaMgr::commit_rowset(const RowsetMeta& rs_meta, Status ret_st; TEST_INJECTION_POINT_RETURN_WITH_VALUE("CloudMetaMgr::commit_rowset", ret_st); } + check_table_size_correctness(rs_meta); CreateRowsetRequest req; CreateRowsetResponse resp; req.set_cloud_unique_id(config::cloud_unique_id); @@ -1051,7 +1054,8 @@ Status CloudMetaMgr::update_delete_bitmap(const CloudTablet& tablet, int64_t loc Status 
CloudMetaMgr::update_delete_bitmap_without_lock(const CloudTablet& tablet, DeleteBitmap* delete_bitmap) { - VLOG_DEBUG << "update_delete_bitmap_without_lock , tablet_id: " << tablet.tablet_id(); + LOG(INFO) << "update_delete_bitmap_without_lock , tablet_id: " << tablet.tablet_id() + << ",delete_bitmap size:" << delete_bitmap->delete_bitmap.size(); UpdateDeleteBitmapRequest req; UpdateDeleteBitmapResponse res; req.set_cloud_unique_id(config::cloud_unique_id); @@ -1124,4 +1128,124 @@ Status CloudMetaMgr::remove_old_version_delete_bitmap( return st; } +void CloudMetaMgr::check_table_size_correctness(const RowsetMeta& rs_meta) { + if (!config::enable_table_size_correctness_check) { + return; + } + int64_t total_segment_size = get_segment_file_size(rs_meta); + int64_t total_inverted_index_size = get_inverted_index_file_szie(rs_meta); + if (rs_meta.data_disk_size() != total_segment_size || + rs_meta.index_disk_size() != total_inverted_index_size || + rs_meta.data_disk_size() + rs_meta.index_disk_size() != rs_meta.total_disk_size()) { + LOG(WARNING) << "[Cloud table table size check failed]:" + << " tablet id: " << rs_meta.tablet_id() + << ", rowset id:" << rs_meta.rowset_id() + << ", rowset data disk size:" << rs_meta.data_disk_size() + << ", rowset real data disk size:" << total_segment_size + << ", rowset index disk size:" << rs_meta.index_disk_size() + << ", rowset real index disk size:" << total_inverted_index_size + << ", rowset total disk size:" << rs_meta.total_disk_size() + << ", rowset segment path:" + << StorageResource().remote_segment_path(rs_meta.tablet_id(), + rs_meta.rowset_id().to_string(), 0); + DCHECK(false); + } +} + +int64_t CloudMetaMgr::get_segment_file_size(const RowsetMeta& rs_meta) { + int64_t total_segment_size = 0; + const auto fs = const_cast(rs_meta).fs(); + if (!fs) { + LOG(WARNING) << "get fs failed, resource_id={}" << rs_meta.resource_id(); + } + for (int64_t seg_id = 0; seg_id < rs_meta.num_segments(); seg_id++) { + std::string segment_path = StorageResource().remote_segment_path( + rs_meta.tablet_id(), rs_meta.rowset_id().to_string(), seg_id); + int64_t segment_file_size = 0; + auto st = fs->file_size(segment_path, &segment_file_size); + if (!st.ok()) { + segment_file_size = 0; + if (st.is()) { + LOG(INFO) << "cloud table size correctness check get segment size 0 because " + "file not exist! msg:" + << st.msg() << ", segment path:" << segment_path; + } else { + LOG(WARNING) << "cloud table size correctness check get segment size failed! 
msg:" + << st.msg() << ", segment path:" << segment_path; + } + } + total_segment_size += segment_file_size; + } + return total_segment_size; +} + +int64_t CloudMetaMgr::get_inverted_index_file_szie(const RowsetMeta& rs_meta) { + int64_t total_inverted_index_size = 0; + const auto fs = const_cast(rs_meta).fs(); + if (!fs) { + LOG(WARNING) << "get fs failed, resource_id={}" << rs_meta.resource_id(); + } + if (rs_meta.tablet_schema()->get_inverted_index_storage_format() == + InvertedIndexStorageFormatPB::V1) { + auto indices = rs_meta.tablet_schema()->indexes(); + for (auto& index : indices) { + // only get file_size for inverted index + if (index.index_type() != IndexType::INVERTED) { + continue; + } + for (int seg_id = 0; seg_id < rs_meta.num_segments(); ++seg_id) { + std::string segment_path = StorageResource().remote_segment_path( + rs_meta.tablet_id(), rs_meta.rowset_id().to_string(), seg_id); + int64_t file_size = 0; + + std::string inverted_index_file_path = + InvertedIndexDescriptor::get_index_file_path_v1( + InvertedIndexDescriptor::get_index_file_path_prefix(segment_path), + index.index_id(), index.get_index_suffix()); + auto st = fs->file_size(inverted_index_file_path, &file_size); + if (!st.ok()) { + file_size = 0; + if (st.is()) { + LOG(INFO) << "cloud table size correctness check get inverted index v1 " + "0 because file not exist! msg:" + << st.msg() + << ", inverted index path:" << inverted_index_file_path; + } else { + LOG(WARNING) + << "cloud table size correctness check get inverted index v1 " + "size failed! msg:" + << st.msg() << ", inverted index path:" << inverted_index_file_path; + } + } + total_inverted_index_size += file_size; + } + } + } else { + for (int seg_id = 0; seg_id < rs_meta.num_segments(); ++seg_id) { + int64_t file_size = 0; + std::string segment_path = StorageResource().remote_segment_path( + rs_meta.tablet_id(), rs_meta.rowset_id().to_string(), seg_id); + + std::string inverted_index_file_path = InvertedIndexDescriptor::get_index_file_path_v2( + InvertedIndexDescriptor::get_index_file_path_prefix(segment_path)); + auto st = fs->file_size(inverted_index_file_path, &file_size); + if (!st.ok()) { + file_size = 0; + if (st.is()) { + LOG(INFO) << "cloud table size correctness check get inverted index v2 " + "0 because file not exist! msg:" + << st.msg() << ", inverted index path:" << inverted_index_file_path; + } else { + LOG(WARNING) << "cloud table size correctness check get inverted index v2 " + "size failed! 
msg:" + << st.msg() + << ", inverted index path:" << inverted_index_file_path; + } + } + total_inverted_index_size += file_size; + } + } + return total_inverted_index_size; +} + } // namespace doris::cloud diff --git a/be/src/cloud/cloud_meta_mgr.h b/be/src/cloud/cloud_meta_mgr.h index 79cdb3fd3d1f8c..a48381f056e446 100644 --- a/be/src/cloud/cloud_meta_mgr.h +++ b/be/src/cloud/cloud_meta_mgr.h @@ -113,6 +113,9 @@ class CloudMetaMgr { Status sync_tablet_delete_bitmap(CloudTablet* tablet, int64_t old_max_version, std::ranges::range auto&& rs_metas, const TabletStatsPB& stats, const TabletIndexPB& idx, DeleteBitmap* delete_bitmap); + void check_table_size_correctness(const RowsetMeta& rs_meta); + int64_t get_segment_file_size(const RowsetMeta& rs_meta); + int64_t get_inverted_index_file_szie(const RowsetMeta& rs_meta); }; } // namespace cloud diff --git a/be/src/cloud/cloud_rowset_builder.cpp b/be/src/cloud/cloud_rowset_builder.cpp index 192da0f17efa82..2e6764b33aa79c 100644 --- a/be/src/cloud/cloud_rowset_builder.cpp +++ b/be/src/cloud/cloud_rowset_builder.cpp @@ -106,7 +106,7 @@ void CloudRowsetBuilder::update_tablet_stats() { tablet->fetch_add_approximate_num_rowsets(1); tablet->fetch_add_approximate_num_segments(_rowset->num_segments()); tablet->fetch_add_approximate_num_rows(_rowset->num_rows()); - tablet->fetch_add_approximate_data_size(_rowset->data_disk_size()); + tablet->fetch_add_approximate_data_size(_rowset->total_disk_size()); tablet->fetch_add_approximate_cumu_num_rowsets(1); tablet->fetch_add_approximate_cumu_num_deltas(_rowset->num_segments()); tablet->write_count.fetch_add(1, std::memory_order_relaxed); diff --git a/be/src/cloud/cloud_rowset_writer.cpp b/be/src/cloud/cloud_rowset_writer.cpp index 642077b7e983ec..ebc411697ee4b1 100644 --- a/be/src/cloud/cloud_rowset_writer.cpp +++ b/be/src/cloud/cloud_rowset_writer.cpp @@ -115,13 +115,14 @@ Status CloudRowsetWriter::build(RowsetSharedPtr& rowset) { } else { _rowset_meta->add_segments_file_size(seg_file_size.value()); } - - if (auto idx_files_info = _idx_files_info.get_inverted_files_info(_segment_start_id); - !idx_files_info.has_value()) [[unlikely]] { - LOG(ERROR) << "expected inverted index files info, but none presents: " - << idx_files_info.error(); - } else { - _rowset_meta->add_inverted_index_files_info(idx_files_info.value()); + if (rowset_schema->has_inverted_index()) { + if (auto idx_files_info = _idx_files.inverted_index_file_info(_segment_start_id); + !idx_files_info.has_value()) [[unlikely]] { + LOG(ERROR) << "expected inverted index files info, but none presents: " + << idx_files_info.error(); + } else { + _rowset_meta->add_inverted_index_files_info(idx_files_info.value()); + } } RETURN_NOT_OK_STATUS_WITH_WARN(RowsetFactory::create_rowset(rowset_schema, _context.tablet_path, diff --git a/be/src/cloud/cloud_schema_change_job.cpp b/be/src/cloud/cloud_schema_change_job.cpp index b7e3be93e853bb..896804578d7db9 100644 --- a/be/src/cloud/cloud_schema_change_job.cpp +++ b/be/src/cloud/cloud_schema_change_job.cpp @@ -344,7 +344,7 @@ Status CloudSchemaChangeJob::_convert_historical_rowsets(const SchemaChangeParam sc_job->add_txn_ids(rs->txn_id()); sc_job->add_output_versions(rs->end_version()); num_output_rows += rs->num_rows(); - size_output_rowsets += rs->data_disk_size(); + size_output_rowsets += rs->total_disk_size(); num_output_segments += rs->num_segments(); } sc_job->set_num_output_rows(num_output_rows); diff --git a/be/src/cloud/cloud_tablet.cpp b/be/src/cloud/cloud_tablet.cpp index c046259b0da71c..d3b131d055d35c 
100644 --- a/be/src/cloud/cloud_tablet.cpp +++ b/be/src/cloud/cloud_tablet.cpp @@ -412,7 +412,7 @@ int CloudTablet::delete_expired_stale_rowsets() { void CloudTablet::update_base_size(const Rowset& rs) { // Define base rowset as the rowset of version [2-x] if (rs.start_version() == 2) { - _base_size = rs.data_disk_size(); + _base_size = rs.total_disk_size(); } } @@ -433,7 +433,7 @@ void CloudTablet::recycle_cached_data(const std::vector& rowset // TODO: Segment::file_cache_key auto file_key = Segment::file_cache_key(rs->rowset_id().to_string(), seg_id); auto* file_cache = io::FileCacheFactory::instance()->get_by_path(file_key); - file_cache->remove_if_cached(file_key); + file_cache->remove_if_cached_async(file_key); } } } diff --git a/be/src/common/config.cpp b/be/src/common/config.cpp index 31170b731f4e75..d8308c7eb97d8e 100644 --- a/be/src/common/config.cpp +++ b/be/src/common/config.cpp @@ -540,7 +540,6 @@ DEFINE_mInt32(streaming_load_rpc_max_alive_time_sec, "1200"); DEFINE_Int32(tablet_writer_open_rpc_timeout_sec, "60"); // You can ignore brpc error '[E1011]The server is overcrowded' when writing data. DEFINE_mBool(tablet_writer_ignore_eovercrowded, "true"); -DEFINE_mBool(exchange_sink_ignore_eovercrowded, "true"); DEFINE_mInt32(slave_replica_writer_rpc_timeout_sec, "60"); // Whether to enable stream load record function, the default is false. // False: disable stream load record @@ -903,7 +902,8 @@ DEFINE_mInt64(small_column_size_buffer, "100"); // Perform the always_true check at intervals determined by runtime_filter_sampling_frequency DEFINE_mInt32(runtime_filter_sampling_frequency, "64"); - +DEFINE_mInt32(execution_max_rpc_timeout_sec, "3600"); +DEFINE_mBool(execution_ignore_eovercrowded, "true"); // cooldown task configs DEFINE_Int32(cooldown_thread_num, "5"); DEFINE_mInt64(generate_cooldown_task_interval_sec, "20"); @@ -925,6 +925,9 @@ DEFINE_mBool(enable_query_like_bloom_filter, "true"); DEFINE_Int32(doris_remote_scanner_thread_pool_thread_num, "48"); // number of s3 scanner thread pool queue size DEFINE_Int32(doris_remote_scanner_thread_pool_queue_size, "102400"); +DEFINE_mInt64(block_cache_wait_timeout_ms, "1000"); +DEFINE_mInt64(cache_lock_long_tail_threshold, "1000"); +DEFINE_Int64(file_cache_recycle_keys_size, "1000000"); // limit the queue of pending batches which will be sent by a single nodechannel DEFINE_mInt64(nodechannel_pending_queue_max_bytes, "67108864"); @@ -1039,7 +1042,7 @@ DEFINE_Int32(inverted_index_read_buffer_size, "4096"); // tree depth for bkd index DEFINE_Int32(max_depth_in_bkd_tree, "32"); // index compaction -DEFINE_mBool(inverted_index_compaction_enable, "false"); +DEFINE_mBool(inverted_index_compaction_enable, "true"); // Only for debug, do not use in production DEFINE_mBool(debug_inverted_index_compaction, "false"); // index by RAM directory @@ -1352,6 +1355,10 @@ DEFINE_mInt32(check_score_rounds_num, "1000"); DEFINE_Int32(query_cache_size, "512"); +DEFINE_mBool(enable_delete_bitmap_merge_on_compaction, "false"); +// Enable validation to check the correctness of table size. +DEFINE_Bool(enable_table_size_correctness_check, "false"); + // clang-format off #ifdef BE_TEST // test s3 diff --git a/be/src/common/config.h b/be/src/common/config.h index 585c4dc45ccef9..f827e0f7dae7ea 100644 --- a/be/src/common/config.h +++ b/be/src/common/config.h @@ -587,7 +587,6 @@ DECLARE_mInt32(streaming_load_rpc_max_alive_time_sec); DECLARE_Int32(tablet_writer_open_rpc_timeout_sec); // You can ignore brpc error '[E1011]The server is overcrowded' when writing data. 
DECLARE_mBool(tablet_writer_ignore_eovercrowded); -DECLARE_mBool(exchange_sink_ignore_eovercrowded); DECLARE_mInt32(slave_replica_writer_rpc_timeout_sec); // Whether to enable stream load record function, the default is false. // False: disable stream load record @@ -958,6 +957,8 @@ DECLARE_mInt64(big_column_size_buffer); DECLARE_mInt64(small_column_size_buffer); DECLARE_mInt32(runtime_filter_sampling_frequency); +DECLARE_mInt32(execution_max_rpc_timeout_sec); +DECLARE_mBool(execution_ignore_eovercrowded); // cooldown task configs DECLARE_Int32(cooldown_thread_num); @@ -984,6 +985,9 @@ DECLARE_mInt64(nodechannel_pending_queue_max_bytes); // The batch size for sending data by brpc streaming client DECLARE_mInt64(brpc_streaming_client_batch_bytes); +DECLARE_mInt64(block_cache_wait_timeout_ms); +DECLARE_mInt64(cache_lock_long_tail_threshold); +DECLARE_Int64(file_cache_recycle_keys_size); DECLARE_Bool(enable_brpc_builtin_services); @@ -1438,6 +1442,10 @@ DECLARE_mInt32(check_score_rounds_num); // MB DECLARE_Int32(query_cache_size); +DECLARE_mBool(enable_delete_bitmap_merge_on_compaction); +// Enable validation to check the correctness of table size. +DECLARE_Bool(enable_table_size_correctness_check); + #ifdef BE_TEST // test s3 DECLARE_String(test_s3_resource); diff --git a/be/src/exec/tablet_info.cpp b/be/src/exec/tablet_info.cpp index 0816a1ac698657..f1c0ad60e06455 100644 --- a/be/src/exec/tablet_info.cpp +++ b/be/src/exec/tablet_info.cpp @@ -788,6 +788,7 @@ Status VOlapTablePartitionParam::replace_partitions( // add new partitions with new id. _partitions.emplace_back(part); + VLOG_NOTICE << "params add new partition " << part->id; // replace items in _partition_maps if (_is_in_partition) { diff --git a/be/src/exprs/runtime_filter.cpp b/be/src/exprs/runtime_filter.cpp index 84a964f5c3865c..bd4cd3353b8068 100644 --- a/be/src/exprs/runtime_filter.cpp +++ b/be/src/exprs/runtime_filter.cpp @@ -1146,8 +1146,11 @@ Status IRuntimeFilter::send_filter_size(RuntimeState* state, uint64_t local_filt request->set_filter_size(local_filter_size); request->set_filter_id(_filter_id); - callback->cntl_->set_timeout_ms(std::min(3600, state->execution_timeout()) * 1000); - callback->cntl_->ignore_eovercrowded(); + + callback->cntl_->set_timeout_ms(get_execution_rpc_timeout_ms(state->execution_timeout())); + if (config::execution_ignore_eovercrowded) { + callback->cntl_->ignore_eovercrowded(); + } stub->send_filter_size(closure->cntl_.get(), closure->request_.get(), closure->response_.get(), closure.get()); @@ -1184,8 +1187,12 @@ Status IRuntimeFilter::push_to_remote(const TNetworkAddress* addr) { merge_filter_request->set_is_pipeline(true); auto column_type = _wrapper->column_type(); RETURN_IF_CATCH_EXCEPTION(merge_filter_request->set_column_type(to_proto(column_type))); - merge_filter_callback->cntl_->set_timeout_ms(wait_time_ms()); - merge_filter_callback->cntl_->ignore_eovercrowded(); + + merge_filter_callback->cntl_->set_timeout_ms( + get_execution_rpc_timeout_ms(_state->execution_timeout)); + if (config::execution_ignore_eovercrowded) { + merge_filter_callback->cntl_->ignore_eovercrowded(); + } if (get_ignored()) { merge_filter_request->set_filter_type(PFilterType::UNKNOW_FILTER); diff --git a/be/src/http/action/jeprofile_actions.cpp b/be/src/http/action/jeprofile_actions.cpp index f805d61d5b0b87..47399c575a3f6d 100644 --- a/be/src/http/action/jeprofile_actions.cpp +++ b/be/src/http/action/jeprofile_actions.cpp @@ -18,69 +18,101 @@ #include "http/action/jeprofile_actions.h" #include -#include #include 
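The runtime-filter hunks below replace the hard-coded `std::min(3600, state->execution_timeout()) * 1000` timeout with a call to `get_execution_rpc_timeout_ms()` and make `ignore_eovercrowded()` conditional on `config::execution_ignore_eovercrowded`. The helper itself is not part of this diff; given the replaced expression and the new `execution_max_rpc_timeout_sec` config, a plausible shape is sketched here (an assumption, not the actual implementation):

```cpp
// Hypothetical sketch of get_execution_rpc_timeout_ms(); the real helper lives
// elsewhere in the codebase. It caps the per-RPC timeout by the configurable
// execution_max_rpc_timeout_sec instead of a hard-coded 3600 seconds.
#include <algorithm>
#include <cstdint>

namespace config {
inline int32_t execution_max_rpc_timeout_sec = 3600;  // default from config.cpp above
}

inline int64_t get_execution_rpc_timeout_ms(int32_t execution_timeout_sec) {
    return int64_t{std::min(config::execution_max_rpc_timeout_sec, execution_timeout_sec)} * 1000;
}
```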
-#include -#include -#include -#include #include -#include "common/config.h" -#include "common/object_pool.h" #include "http/ev_http_server.h" #include "http/http_channel.h" #include "http/http_handler.h" #include "http/http_handler_with_auth.h" -#include "http/http_method.h" -#include "io/fs/local_file_system.h" +#include "http/http_headers.h" +#include "http/http_request.h" +#include "runtime/memory/heap_profiler.h" namespace doris { -class HttpRequest; -static std::mutex kJeprofileActionMutex; -class JeHeapAction : public HttpHandlerWithAuth { -public: - JeHeapAction(ExecEnv* exec_env) : HttpHandlerWithAuth(exec_env) {} - virtual ~JeHeapAction() = default; +const static std::string HEADER_JSON = "application/json"; - virtual void handle(HttpRequest* req) override; -}; - -void JeHeapAction::handle(HttpRequest* req) { - std::lock_guard lock(kJeprofileActionMutex); -#ifndef USE_JEMALLOC - std::string str = "jemalloc heap dump is not available without setting USE_JEMALLOC"; - HttpChannel::send_reply(req, str); +static bool compile_check(HttpRequest* req) { +#if defined(ADDRESS_SANITIZER) || defined(LEAK_SANITIZER) || defined(THREAD_SANITIZER) + HttpChannel::send_reply( + req, HttpStatus::INTERNAL_SERVER_ERROR, + "Jemalloc heap dump is not available with ASAN(address sanitizer) builds.\n"); + return false; +#elif !defined(USE_JEMALLOC) + HttpChannel::send_reply(req, HttpStatus::INTERNAL_SERVER_ERROR, + "jemalloc heap dump is not available without setting USE_JEMALLOC.\n"); + return false; #else - std::stringstream tmp_jeprof_file_name; - std::time_t now = std::time(nullptr); - // Build a temporary file name that is hopefully unique. - tmp_jeprof_file_name << config::jeprofile_dir << "/jeheap_dump." << now << "." << getpid() - << "." << rand() << ".heap"; - const std::string& tmp_file_name_str = tmp_jeprof_file_name.str(); - const char* file_name_ptr = tmp_file_name_str.c_str(); - int result = jemallctl("prof.dump", nullptr, nullptr, &file_name_ptr, sizeof(const char*)); - std::stringstream response; - if (result == 0) { - response << "Jemalloc heap dump success, dump file path: " << tmp_jeprof_file_name.str() - << "\n"; - } else { - response << "Jemalloc heap dump failed, je_mallctl return: " << result << "\n"; - } - HttpChannel::send_reply(req, response.str()); + return true; #endif } -Status JeprofileActions::setup(doris::ExecEnv* exec_env, doris::EvHttpServer* http_server, - doris::ObjectPool& pool) { - if (!config::jeprofile_dir.empty()) { - RETURN_IF_ERROR(io::global_local_filesystem()->create_directory(config::jeprofile_dir)); +void SetJeHeapProfileActiveActions::handle(HttpRequest* req) { + req->add_output_header(HttpHeaders::CONTENT_TYPE, HEADER_JSON.c_str()); + if (compile_check(req)) { + if (req->param("prof_value") == "true") { + HeapProfiler::instance()->heap_profiler_start(); + HttpChannel::send_reply( + req, HttpStatus::OK, + "heap profiler started\nJemalloc will only track and sample the memory " + "allocated and freed after the heap profiler started, it cannot analyze the " + "memory allocated and freed before. Therefore, dumping the heap profile " + "immediately after start heap profiler may prompt `No nodes to print`. 
If you " + "want to analyze the memory that has been allocated in the past, you can only " + "restart the BE process and start heap profiler immediately.\n"); + } else { + HeapProfiler::instance()->heap_profiler_stop(); + HttpChannel::send_reply(req, HttpStatus::OK, "heap profiler stoped\n"); + } + } +} + +void DumpJeHeapProfileToDotActions::handle(HttpRequest* req) { + req->add_output_header(HttpHeaders::CONTENT_TYPE, HEADER_JSON.c_str()); + if (compile_check(req)) { + if (!HeapProfiler::instance()->check_heap_profiler()) { + HttpChannel::send_reply( + req, HttpStatus::INTERNAL_SERVER_ERROR, + "`curl http://be_host:be_webport/jeheap/prof/true` to start heap profiler\n"); + } + std::string dot = HeapProfiler::instance()->dump_heap_profile_to_dot(); + if (dot.empty()) { + HttpChannel::send_reply(req, HttpStatus::INTERNAL_SERVER_ERROR, + "dump heap profile to dot failed, see be.INFO\n"); + } else { + dot += "\n-------------------------------------------------------\n"; + dot += "Copy the text after `digraph` in the above output to " + "http://www.webgraphviz.com to generate a dot graph.\n" + "after start heap profiler, if there is no operation, will print `No nodes to " + "print`." + "If there are many errors: `addr2line: Dwarf Error`," + "or other FAQ, reference doc: " + "https://doris.apache.org/community/developer-guide/debug-tool/#4-qa\n"; + HttpChannel::send_reply(req, HttpStatus::OK, dot); + } + } +} + +void DumpJeHeapProfileActions::handle(HttpRequest* req) { + req->add_output_header(HttpHeaders::CONTENT_TYPE, HEADER_JSON.c_str()); + if (compile_check(req)) { + if (!HeapProfiler::instance()->check_heap_profiler()) { + HttpChannel::send_reply( + req, HttpStatus::INTERNAL_SERVER_ERROR, + "`curl http://be_host:be_webport/jeheap/prof/true` to start heap profiler\n"); + } + std::string profile_file_name = HeapProfiler::instance()->dump_heap_profile(); + if (profile_file_name.empty()) { + HttpChannel::send_reply(req, HttpStatus::INTERNAL_SERVER_ERROR, + "jemalloc heap dump failed\n"); + } else { + HttpChannel::send_reply(req, HttpStatus::OK, + fmt::format("jemalloc heap dump success, dump file path: {}\n", + profile_file_name)); + } } - http_server->register_handler(HttpMethod::GET, "/jeheap/dump", - pool.add(new JeHeapAction(exec_env))); - return Status::OK(); } } // namespace doris diff --git a/be/src/http/action/jeprofile_actions.h b/be/src/http/action/jeprofile_actions.h index 2ebeb3c9ffdc92..f1336ac4691d57 100644 --- a/be/src/http/action/jeprofile_actions.h +++ b/be/src/http/action/jeprofile_actions.h @@ -15,17 +15,35 @@ // specific language governing permissions and limitations // under the License. 
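The three handlers above delegate to a `HeapProfiler` singleton whose implementation is not included in this diff; the removed `JeHeapAction` code hints at what it wraps, namely jemalloc's control interface. A rough sketch of that core, assuming jemalloc is built with profiling support (only the method intent mirrors the calls in the handlers; the class and names below are illustrative, not the actual `HeapProfiler`):

```cpp
// Illustrative jemalloc-based heap profiler core; not the HeapProfiler used above.
#include <jemalloc/jemalloc.h>
#include <string>

class HeapProfilerSketch {
public:
    // Toggle jemalloc allocation sampling ("prof.active"), roughly what
    // heap_profiler_start()/heap_profiler_stop() control.
    static bool set_active(bool active) {
        return mallctl("prof.active", nullptr, nullptr, &active, sizeof(active)) == 0;
    }

    // Dump the current profile to `path` ("prof.dump"), as the removed
    // JeHeapAction did via jemallctl().
    static bool dump(const std::string& path) {
        const char* file = path.c_str();
        return mallctl("prof.dump", nullptr, nullptr, &file, sizeof(const char*)) == 0;
    }
};
```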
-#ifndef DORIS_JEPROFILE_ACTIONS_H -#define DORIS_JEPROFILE_ACTIONS_H -#include "common/status.h" +#pragma once + +#include "http/http_handler.h" +#include "http/http_handler_with_auth.h" + namespace doris { -class EvHttpServer; + +class HttpRequest; class ExecEnv; -class ObjectPool; -class JeprofileActions { + +class SetJeHeapProfileActiveActions final : public HttpHandlerWithAuth { +public: + SetJeHeapProfileActiveActions(ExecEnv* exec_env) : HttpHandlerWithAuth(exec_env) {} + ~SetJeHeapProfileActiveActions() override = default; + void handle(HttpRequest* req) override; +}; + +class DumpJeHeapProfileToDotActions final : public HttpHandlerWithAuth { +public: + DumpJeHeapProfileToDotActions(ExecEnv* exec_env) : HttpHandlerWithAuth(exec_env) {} + ~DumpJeHeapProfileToDotActions() override = default; + void handle(HttpRequest* req) override; +}; + +class DumpJeHeapProfileActions final : public HttpHandlerWithAuth { public: - static Status setup(ExecEnv* exec_env, EvHttpServer* http_server, ObjectPool& pool); + DumpJeHeapProfileActions(ExecEnv* exec_env) : HttpHandlerWithAuth(exec_env) {} + ~DumpJeHeapProfileActions() override = default; + void handle(HttpRequest* req) override; }; } // namespace doris -#endif //DORIS_JEPROFILE_ACTIONS_H diff --git a/be/src/io/cache/block_file_cache.cpp b/be/src/io/cache/block_file_cache.cpp index cd502d16547f9b..f2f1f22365297b 100644 --- a/be/src/io/cache/block_file_cache.cpp +++ b/be/src/io/cache/block_file_cache.cpp @@ -54,6 +54,8 @@ BlockFileCache::BlockFileCache(const std::string& cache_base_path, _max_query_cache_size(cache_settings.max_query_cache_size) { _cur_cache_size_metrics = std::make_shared>(_cache_base_path.c_str(), "file_cache_cache_size", 0); + _cache_capacity_metrics = std::make_shared>( + _cache_base_path.c_str(), "file_cache_capacity", _capacity); _cur_ttl_cache_size_metrics = std::make_shared>( _cache_base_path.c_str(), "file_cache_ttl_cache_size", 0); _cur_normal_queue_element_count_metrics = std::make_shared>( @@ -117,6 +119,8 @@ BlockFileCache::BlockFileCache(const std::string& cache_base_path, _ttl_queue = LRUQueue(std::numeric_limits::max(), std::numeric_limits::max(), std::numeric_limits::max()); + _recycle_keys = std::make_shared>( + config::file_cache_recycle_keys_size); if (cache_settings.storage == "memory") { _storage = std::make_unique(); _cache_base_path = "memory"; @@ -161,8 +165,7 @@ FileCacheType BlockFileCache::string_to_cache_type(const std::string& str) { BlockFileCache::QueryFileCacheContextHolderPtr BlockFileCache::get_query_context_holder( const TUniqueId& query_id) { - std::lock_guard cache_lock(_mutex); - + SCOPED_CACHE_LOCK(_mutex); if (!config::enable_file_cache_query_limit) { return {}; } @@ -180,7 +183,7 @@ BlockFileCache::QueryFileCacheContextPtr BlockFileCache::get_query_context( } void BlockFileCache::remove_query_context(const TUniqueId& query_id) { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); const auto& query_iter = _query_map.find(query_id); if (query_iter != _query_map.end() && query_iter->second.use_count() <= 1) { @@ -225,7 +228,7 @@ void BlockFileCache::QueryFileCacheContext::reserve(const UInt128Wrapper& hash, } Status BlockFileCache::initialize() { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); return initialize_unlocked(cache_lock); } @@ -436,7 +439,7 @@ std::string BlockFileCache::clear_file_cache_async() { int64_t num_cells_to_delete = 0; int64_t num_files_all = 0; { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); if 
(!_async_clear_file_cache) { for (auto& [_, offset_to_cell] : _files) { ++num_files_all; @@ -672,7 +675,7 @@ FileBlocksHolder BlockFileCache::get_or_set(const UInt128Wrapper& hash, size_t o CacheContext& context) { FileBlock::Range range(offset, offset + size - 1); - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); if (auto iter = _key_to_time.find(hash); context.cache_type == FileCacheType::INDEX && iter != _key_to_time.end()) { context.cache_type = FileCacheType::TTL; @@ -749,7 +752,7 @@ BlockFileCache::FileBlockCell* BlockFileCache::add_cell(const UInt128Wrapper& ha } size_t BlockFileCache::try_release() { - std::lock_guard l(_mutex); + SCOPED_CACHE_LOCK(_mutex); std::vector trash; for (auto& [hash, blocks] : _files) { for (auto& [offset, cell] : blocks) { @@ -761,7 +764,7 @@ size_t BlockFileCache::try_release() { for (auto& cell : trash) { FileBlockSPtr file_block = cell->file_block; std::lock_guard lc(cell->file_block->_mutex); - remove(file_block, l, lc); + remove(file_block, cache_lock, lc); } LOG(INFO) << "Released " << trash.size() << " blocks in file cache " << _cache_base_path; return trash.size(); @@ -811,6 +814,18 @@ void BlockFileCache::remove_file_blocks(std::vector& to_evict, std::for_each(to_evict.begin(), to_evict.end(), remove_file_block_if); } +void BlockFileCache::remove_file_blocks_async(std::vector& to_evict, + std::lock_guard& cache_lock) { + auto remove_file_block_if = [&](FileBlockCell* cell) { + FileBlockSPtr file_block = cell->file_block; + if (file_block) { + std::lock_guard block_lock(file_block->_mutex); + remove(file_block, cache_lock, block_lock, /*sync*/ false); + } + }; + std::for_each(to_evict.begin(), to_evict.end(), remove_file_block_if); +} + void BlockFileCache::remove_file_blocks_and_clean_time_maps( std::vector& to_evict, std::lock_guard& cache_lock) { auto remove_file_block_and_clean_time_maps_if = [&](FileBlockCell* cell) { @@ -1096,7 +1111,7 @@ bool BlockFileCache::remove_if_ttl_file_unlock(const UInt128Wrapper& file_key, b } void BlockFileCache::remove_if_cached(const UInt128Wrapper& file_key) { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); bool is_ttl_file = remove_if_ttl_file_unlock(file_key, true, cache_lock); if (!is_ttl_file) { auto iter = _files.find(file_key); @@ -1112,6 +1127,23 @@ void BlockFileCache::remove_if_cached(const UInt128Wrapper& file_key) { } } +void BlockFileCache::remove_if_cached_async(const UInt128Wrapper& file_key) { + SCOPED_CACHE_LOCK(_mutex); + bool is_ttl_file = remove_if_ttl_file_unlock(file_key, true, cache_lock); + if (!is_ttl_file) { + auto iter = _files.find(file_key); + std::vector to_remove; + if (iter != _files.end()) { + for (auto& [_, cell] : iter->second) { + if (cell.releasable()) { + to_remove.push_back(&cell); + } + } + } + remove_file_blocks_async(to_remove, cache_lock); + } +} + std::vector BlockFileCache::get_other_cache_type(FileCacheType cur_cache_type) { switch (cur_cache_type) { case FileCacheType::INDEX: @@ -1262,7 +1294,7 @@ bool BlockFileCache::try_reserve_for_lru(const UInt128Wrapper& hash, template requires IsXLock && IsXLock -void BlockFileCache::remove(FileBlockSPtr file_block, T& cache_lock, U& block_lock) { +void BlockFileCache::remove(FileBlockSPtr file_block, T& cache_lock, U& block_lock, bool sync) { auto hash = file_block->get_hash_value(); auto offset = file_block->offset(); auto type = file_block->cache_type(); @@ -1282,9 +1314,24 @@ void BlockFileCache::remove(FileBlockSPtr file_block, T& cache_lock, U& block_lo key.offset = offset; 
key.meta.type = type; key.meta.expiration_time = expiration_time; - Status st = _storage->remove(key); - if (!st.ok()) { - LOG_WARNING("").error(st); + if (sync) { + Status st = _storage->remove(key); + if (!st.ok()) { + LOG_WARNING("").error(st); + } + } else { + // the file will be deleted in the bottom half + // so there will be a window that the file is not in the cache but still in the storage + // but it's ok, because the rowset is stale already + // in case something unexpected happen, set the _recycle_keys queue to zero to fallback + bool ret = _recycle_keys->push(key); + if (!ret) { + LOG_WARNING("Failed to push recycle key to queue, do it synchronously"); + Status st = _storage->remove(key); + if (!st.ok()) { + LOG_WARNING("").error(st); + } + } } } _cur_cache_size -= file_block->range().size(); @@ -1299,8 +1346,18 @@ void BlockFileCache::remove(FileBlockSPtr file_block, T& cache_lock, U& block_lo *_num_removed_blocks << 1; } +void BlockFileCache::recycle_stale_rowset_async_bottom_half() { + FileCacheKey key; + while (_recycle_keys->pop(key)) { + Status st = _storage->remove(key); + if (!st.ok()) { + LOG_WARNING("").error(st); + } + } +} + size_t BlockFileCache::get_used_cache_size(FileCacheType cache_type) const { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); return get_used_cache_size_unlocked(cache_type, cache_lock); } @@ -1310,7 +1367,7 @@ size_t BlockFileCache::get_used_cache_size_unlocked(FileCacheType cache_type, } size_t BlockFileCache::get_available_cache_size(FileCacheType cache_type) const { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); return get_available_cache_size_unlocked(cache_type, cache_lock); } @@ -1321,7 +1378,7 @@ size_t BlockFileCache::get_available_cache_size_unlocked( } size_t BlockFileCache::get_file_blocks_num(FileCacheType cache_type) const { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); return get_file_blocks_num_unlocked(cache_type, cache_lock); } @@ -1405,7 +1462,7 @@ std::string BlockFileCache::LRUQueue::to_string( } std::string BlockFileCache::dump_structure(const UInt128Wrapper& hash) { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); return dump_structure_unlocked(hash, cache_lock); } @@ -1423,7 +1480,7 @@ std::string BlockFileCache::dump_structure_unlocked(const UInt128Wrapper& hash, } std::string BlockFileCache::dump_single_cache_type(const UInt128Wrapper& hash, size_t offset) { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); return dump_single_cache_type_unlocked(hash, offset, cache_lock); } @@ -1486,7 +1543,7 @@ std::string BlockFileCache::reset_capacity(size_t new_capacity) { ss << "finish reset_capacity, path=" << _cache_base_path; auto start_time = steady_clock::time_point(); { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); if (new_capacity < _capacity && new_capacity < _cur_cache_size) { int64_t need_remove_size = _cur_cache_size - new_capacity; auto remove_blocks = [&](LRUQueue& queue) -> int64_t { @@ -1527,6 +1584,7 @@ std::string BlockFileCache::reset_capacity(size_t new_capacity) { } old_capacity = _capacity; _capacity = new_capacity; + _cache_capacity_metrics->set_value(_capacity); } auto use_time = duration_cast(steady_clock::time_point() - start_time); LOG(INFO) << "Finish tag deleted block. 
path=" << _cache_base_path @@ -1594,10 +1652,11 @@ void BlockFileCache::run_background_operation() { break; } } + recycle_stale_rowset_async_bottom_half(); recycle_deleted_blocks(); // gc int64_t cur_time = UnixSeconds(); - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); while (!_time_to_key.empty()) { auto begin = _time_to_key.begin(); if (cur_time < begin->first) { @@ -1643,7 +1702,7 @@ void BlockFileCache::run_background_operation() { void BlockFileCache::modify_expiration_time(const UInt128Wrapper& hash, uint64_t new_expiration_time) { - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); // 1. If new_expiration_time is equal to zero if (new_expiration_time == 0) { remove_if_ttl_file_unlock(hash, false, cache_lock); @@ -1708,7 +1767,7 @@ BlockFileCache::get_hot_blocks_meta(const UInt128Wrapper& hash) const { int64_t cur_time = std::chrono::duration_cast( std::chrono::steady_clock::now().time_since_epoch()) .count(); - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); std::vector> blocks_meta; if (auto iter = _files.find(hash); iter != _files.end()) { for (auto& pair : _files.find(hash)->second) { @@ -1777,7 +1836,7 @@ std::string BlockFileCache::clear_file_cache_directly() { using namespace std::chrono; std::stringstream ss; auto start = steady_clock::now(); - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); LOG_INFO("start clear_file_cache_directly").tag("path", _cache_base_path); std::string clear_msg; @@ -1815,7 +1874,7 @@ std::string BlockFileCache::clear_file_cache_directly() { std::map BlockFileCache::get_blocks_by_key(const UInt128Wrapper& hash) { std::map offset_to_block; - std::lock_guard cache_lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); if (_files.contains(hash)) { for (auto& [offset, cell] : _files[hash]) { if (cell.file_block->state() == FileBlock::State::DOWNLOADED) { @@ -1830,7 +1889,7 @@ std::map BlockFileCache::get_blocks_by_key(const UInt128W } void BlockFileCache::update_ttl_atime(const UInt128Wrapper& hash) { - std::lock_guard lock(_mutex); + SCOPED_CACHE_LOCK(_mutex); if (auto iter = _files.find(hash); iter != _files.end()) { for (auto& [_, cell] : iter->second) { cell.update_atime(); @@ -1868,5 +1927,5 @@ std::map BlockFileCache::get_stats() { template void BlockFileCache::remove(FileBlockSPtr file_block, std::lock_guard& cache_lock, - std::lock_guard& block_lock); + std::lock_guard& block_lock, bool sync); } // namespace doris::io diff --git a/be/src/io/cache/block_file_cache.h b/be/src/io/cache/block_file_cache.h index ac30e2411fa81b..4bedc725692653 100644 --- a/be/src/io/cache/block_file_cache.h +++ b/be/src/io/cache/block_file_cache.h @@ -19,6 +19,7 @@ #include +#include #include #include #include @@ -27,15 +28,51 @@ #include "io/cache/file_block.h" #include "io/cache/file_cache_common.h" #include "io/cache/file_cache_storage.h" +#include "util/threadpool.h" namespace doris::io { +// Note: the cache_lock is scoped, so do not add do...while(0) here. +#ifdef ENABLE_CACHE_LOCK_DEBUG +#define SCOPED_CACHE_LOCK(MUTEX) \ + std::chrono::time_point start_time = \ + std::chrono::steady_clock::now(); \ + std::lock_guard cache_lock(MUTEX); \ + std::chrono::time_point acq_time = \ + std::chrono::steady_clock::now(); \ + auto duration = \ + std::chrono::duration_cast(acq_time - start_time).count(); \ + if (duration > config::cache_lock_long_tail_threshold) \ + LOG(WARNING) << "Lock wait time " << std::to_string(duration) << "ms. 
" \ + << get_stack_trace_by_boost() << std::endl; \ + LockScopedTimer cache_lock_timer; +#else +#define SCOPED_CACHE_LOCK(MUTEX) std::lock_guard cache_lock(MUTEX); +#endif + template concept IsXLock = std::same_as> || std::same_as>; class FSFileCacheStorage; +class LockScopedTimer { +public: + LockScopedTimer() : start_(std::chrono::steady_clock::now()) {} + + ~LockScopedTimer() { + auto end = std::chrono::steady_clock::now(); + auto duration = std::chrono::duration_cast(end - start_).count(); + if (duration > 500) { + LOG(WARNING) << "Lock held time " << std::to_string(duration) << "ms. " + << get_stack_trace_by_boost(); + } + } + +private: + std::chrono::time_point start_; +}; + // The BlockFileCache is responsible for the management of the blocks // The current strategies are lru and ttl. class BlockFileCache { @@ -119,6 +156,7 @@ class BlockFileCache { // remove all blocks that belong to the key void remove_if_cached(const UInt128Wrapper& key); + void remove_if_cached_async(const UInt128Wrapper& key); // modify the expiration time about the key void modify_expiration_time(const UInt128Wrapper& key, uint64_t new_expiration_time); @@ -320,7 +358,7 @@ class BlockFileCache { template requires IsXLock && IsXLock - void remove(FileBlockSPtr file_block, T& cache_lock, U& segment_lock); + void remove(FileBlockSPtr file_block, T& cache_lock, U& segment_lock, bool sync = true); FileBlocks get_impl(const UInt128Wrapper& hash, const CacheContext& context, const FileBlock::Range& range, std::lock_guard& cache_lock); @@ -402,12 +440,17 @@ class BlockFileCache { void remove_file_blocks(std::vector&, std::lock_guard&); + void remove_file_blocks_async(std::vector&, std::lock_guard&); + void remove_file_blocks_and_clean_time_maps(std::vector&, std::lock_guard&); void find_evict_candidates(LRUQueue& queue, size_t size, size_t cur_cache_size, size_t& removed_size, std::vector& to_evict, std::lock_guard& cache_lock, bool is_ttl); + + void recycle_stale_rowset_async_bottom_half(); + // info std::string _cache_base_path; size_t _capacity = 0; @@ -446,7 +489,11 @@ class BlockFileCache { LRUQueue _disposable_queue; LRUQueue _ttl_queue; + // keys for async remove + std::shared_ptr> _recycle_keys; + // metrics + std::shared_ptr> _cache_capacity_metrics; std::shared_ptr> _cur_cache_size_metrics; std::shared_ptr> _cur_ttl_cache_size_metrics; std::shared_ptr> _cur_ttl_cache_lru_queue_cache_size_metrics; diff --git a/be/src/io/cache/block_file_cache_profile.cpp b/be/src/io/cache/block_file_cache_profile.cpp index 68e6c1433deaf8..1759d37f9e4314 100644 --- a/be/src/io/cache/block_file_cache_profile.cpp +++ b/be/src/io/cache/block_file_cache_profile.cpp @@ -34,9 +34,9 @@ std::shared_ptr FileCacheProfile::report() { } void FileCacheProfile::update(FileCacheStatistics* stats) { - { - std::lock_guard lock(_mtx); - if (!_profile) { + if (_profile == nullptr) { + std::lock_guard lock(_mtx); + if (_profile == nullptr) { _profile = std::make_shared(); _file_cache_metric = std::make_shared(this); _file_cache_metric->register_entity(); diff --git a/be/src/io/cache/file_block.cpp b/be/src/io/cache/file_block.cpp index b015cbd61110d2..4576b9dbba892f 100644 --- a/be/src/io/cache/file_block.cpp +++ b/be/src/io/cache/file_block.cpp @@ -144,7 +144,7 @@ Status FileBlock::append(Slice data) { Status FileBlock::finalize() { if (_downloaded_size != 0 && _downloaded_size != _block_range.size()) { - std::lock_guard cache_lock(_mgr->_mutex); + SCOPED_CACHE_LOCK(_mgr->_mutex); size_t old_size = _block_range.size(); _block_range.right = 
_block_range.left + _downloaded_size - 1; size_t new_size = _block_range.size(); @@ -179,7 +179,7 @@ Status FileBlock::change_cache_type_between_ttl_and_others(FileCacheType new_typ } Status FileBlock::change_cache_type_between_normal_and_index(FileCacheType new_type) { - std::lock_guard cache_lock(_mgr->_mutex); + SCOPED_CACHE_LOCK(_mgr->_mutex); std::lock_guard block_lock(_mutex); bool expr = (new_type != FileCacheType::TTL && _key.meta.type != FileCacheType::TTL); if (!expr) { @@ -223,7 +223,7 @@ FileBlock::State FileBlock::wait() { if (_download_state == State::DOWNLOADING) { DCHECK(_downloader_id != 0 && _downloader_id != get_caller_id()); - _cv.wait_for(block_lock, std::chrono::seconds(1)); + _cv.wait_for(block_lock, std::chrono::milliseconds(config::block_cache_wait_timeout_ms)); } return _download_state; @@ -278,14 +278,24 @@ FileBlocksHolder::~FileBlocksHolder() { auto& file_block = *current_file_block_it; BlockFileCache* _mgr = file_block->_mgr; { - std::lock_guard cache_lock(_mgr->_mutex); - std::lock_guard block_lock(file_block->_mutex); - file_block->complete_unlocked(block_lock); - if (file_block.use_count() == 2) { - DCHECK(file_block->state_unlock(block_lock) != FileBlock::State::DOWNLOADING); - // one in cache, one in here - if (file_block->state_unlock(block_lock) == FileBlock::State::EMPTY) { - _mgr->remove(file_block, cache_lock, block_lock); + bool should_remove = false; + { + std::lock_guard block_lock(file_block->_mutex); + file_block->complete_unlocked(block_lock); + if (file_block.use_count() == 2 && + file_block->state_unlock(block_lock) == FileBlock::State::EMPTY) { + should_remove = true; + } + } + if (should_remove) { + SCOPED_CACHE_LOCK(_mgr->_mutex); + std::lock_guard block_lock(file_block->_mutex); + if (file_block.use_count() == 2) { + DCHECK(file_block->state_unlock(block_lock) != FileBlock::State::DOWNLOADING); + // one in cache, one in here + if (file_block->state_unlock(block_lock) == FileBlock::State::EMPTY) { + _mgr->remove(file_block, cache_lock, block_lock); + } } } } diff --git a/be/src/io/cache/fs_file_cache_storage.cpp b/be/src/io/cache/fs_file_cache_storage.cpp index ecdf04c88304f0..bacd0820c66099 100644 --- a/be/src/io/cache/fs_file_cache_storage.cpp +++ b/be/src/io/cache/fs_file_cache_storage.cpp @@ -471,7 +471,8 @@ void FSFileCacheStorage::load_cache_info_into_memory(BlockFileCache* _mgr) const std::vector batch_load_buffer; batch_load_buffer.reserve(scan_length); auto add_cell_batch_func = [&]() { - std::lock_guard cache_lock(_mgr->_mutex); + SCOPED_CACHE_LOCK(_mgr->_mutex); + auto f = [&](const BatchLoadArgs& args) { // in async load mode, a cell may be added twice. 
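A practical note on the `SCOPED_CACHE_LOCK` macro introduced in `block_file_cache.h` above: besides timing lock waits (warning when acquisition exceeds `cache_lock_long_tail_threshold`) and lock hold times (via `LockScopedTimer`), it implicitly declares a guard named `cache_lock` in the caller's scope, which is why `try_release()` now passes `cache_lock` instead of its old local `l`. A minimal usage sketch (the fallback define only keeps the sketch compilable on its own; `remove_file_blocks` in the comment is the real helper from the diff):

```cpp
#include <mutex>

// SCOPED_CACHE_LOCK comes from be/src/io/cache/block_file_cache.h (shown above);
// this fallback exists only so the sketch compiles standalone.
#ifndef SCOPED_CACHE_LOCK
#define SCOPED_CACHE_LOCK(MUTEX) std::lock_guard<std::mutex> cache_lock(MUTEX);
#endif

void example_locked_operation(std::mutex& cache_mutex) {
    SCOPED_CACHE_LOCK(cache_mutex);  // declares a guard named `cache_lock` in this scope
    // ... mutate cache state while holding the lock ...
    // Helpers that require proof of locking take the guard by reference, e.g.
    // remove_file_blocks(to_evict, cache_lock);
}
```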
if (_mgr->_files.contains(args.hash) && _mgr->_files[args.hash].contains(args.offset)) { diff --git a/be/src/io/fs/local_file_reader.cpp b/be/src/io/fs/local_file_reader.cpp index b4f144a633048e..4a41fa479d9808 100644 --- a/be/src/io/fs/local_file_reader.cpp +++ b/be/src/io/fs/local_file_reader.cpp @@ -34,11 +34,13 @@ #include "common/compiler_util.h" // IWYU pragma: keep #include "cpp/sync_point.h" #include "io/fs/err_utils.h" +#include "olap/data_dir.h" #include "olap/olap_common.h" #include "olap/options.h" #include "runtime/thread_context.h" #include "runtime/workload_management/io_throttle.h" #include "util/async_io.h" +#include "util/debug_points.h" #include "util/doris_metrics.h" namespace doris { @@ -139,6 +141,15 @@ Status LocalFileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_ while (bytes_req != 0) { auto res = SYNC_POINT_HOOK_RETURN_VALUE(::pread(_fd, to, bytes_req, offset), "LocalFileReader::pread", _fd, to); + DBUG_EXECUTE_IF("LocalFileReader::read_at_impl.io_error", { + auto sub_path = dp->param("sub_path", ""); + if ((sub_path.empty() && _path.filename().compare(kTestFilePath)) || + (!sub_path.empty() && _path.native().find(sub_path) != std::string::npos)) { + res = -1; + errno = EIO; + LOG(WARNING) << Status::IOError("debug read io error: {}", _path.native()); + } + }); if (UNLIKELY(-1 == res && errno != EINTR)) { return localfs_error(errno, fmt::format("failed to read {}", _path.native())); } diff --git a/be/src/io/fs/local_file_system.cpp b/be/src/io/fs/local_file_system.cpp index 4b44027abbbf2d..0107ed57dc8fb1 100644 --- a/be/src/io/fs/local_file_system.cpp +++ b/be/src/io/fs/local_file_system.cpp @@ -62,9 +62,13 @@ Status LocalFileSystem::create_file_impl(const Path& file, FileWriterPtr* writer int fd = ::open(file.c_str(), O_TRUNC | O_WRONLY | O_CREAT | O_CLOEXEC, 0666); DBUG_EXECUTE_IF("LocalFileSystem.create_file_impl.open_file_failed", { // spare '.testfile' to make bad disk checker happy - if (file.filename().compare(kTestFilePath)) { + auto sub_path = dp->param("sub_path", ""); + if ((sub_path.empty() && file.filename().compare(kTestFilePath)) || + (!sub_path.empty() && file.native().find(sub_path) != std::string::npos)) { ::close(fd); fd = -1; + errno = EIO; + LOG(WARNING) << Status::IOError("debug open io error: {}", file.native()); } }); if (-1 == fd) { @@ -85,6 +89,17 @@ Status LocalFileSystem::open_file_impl(const Path& file, FileReaderSPtr* reader, } int fd = -1; RETRY_ON_EINTR(fd, open(file.c_str(), O_RDONLY)); + DBUG_EXECUTE_IF("LocalFileSystem.create_file_impl.open_file_failed", { + // spare '.testfile' to make bad disk checker happy + auto sub_path = dp->param("sub_path", ""); + if ((sub_path.empty() && file.filename().compare(kTestFilePath)) || + (!sub_path.empty() && file.native().find(sub_path) != std::string::npos)) { + ::close(fd); + fd = -1; + errno = EIO; + LOG(WARNING) << Status::IOError("debug open io error: {}", file.native()); + } + }); if (fd < 0) { return localfs_error(errno, fmt::format("failed to open {}", file.native())); } diff --git a/be/src/io/fs/local_file_writer.cpp b/be/src/io/fs/local_file_writer.cpp index 7301ceae588a0b..c65dee2535e79d 100644 --- a/be/src/io/fs/local_file_writer.cpp +++ b/be/src/io/fs/local_file_writer.cpp @@ -147,6 +147,15 @@ Status LocalFileWriter::appendv(const Slice* data, size_t data_cnt) { RETRY_ON_EINTR(res, SYNC_POINT_HOOK_RETURN_VALUE( ::writev(_fd, iov.data() + completed_iov, iov_count), "LocalFileWriter::writev", _fd)); + DBUG_EXECUTE_IF("LocalFileWriter::appendv.io_error", { + auto 
sub_path = dp->param("sub_path", ""); + if ((sub_path.empty() && _path.filename().compare(kTestFilePath)) || + (!sub_path.empty() && _path.native().find(sub_path) != std::string::npos)) { + res = -1; + errno = EIO; + LOG(WARNING) << Status::IOError("debug write io error: {}", _path.native()); + } + }); if (UNLIKELY(res < 0)) { return localfs_error(errno, fmt::format("failed to write {}", _path.native())); } diff --git a/be/src/io/hdfs_builder.cpp b/be/src/io/hdfs_builder.cpp index 99ee89596ed9ac..59ca46e86944df 100644 --- a/be/src/io/hdfs_builder.cpp +++ b/be/src/io/hdfs_builder.cpp @@ -20,17 +20,18 @@ #include #include +#include #include -#include #include #include -#include "agent/utils.h" #include "common/config.h" #include "common/logging.h" +#ifdef USE_HADOOP_HDFS +#include "hadoop_hdfs/hdfs.h" +#endif #include "io/fs/hdfs.h" #include "util/string_util.h" -#include "util/uid_util.h" namespace doris { diff --git a/be/src/io/hdfs_util.cpp b/be/src/io/hdfs_util.cpp index 6c1bbf80a1526f..62546c9bbd4ffb 100644 --- a/be/src/io/hdfs_util.cpp +++ b/be/src/io/hdfs_util.cpp @@ -17,10 +17,13 @@ #include "io/hdfs_util.h" +#include +#include #include #include #include +#include #include "common/logging.h" #include "io/fs/err_utils.h" @@ -30,7 +33,7 @@ namespace doris::io { namespace { -Status create_hdfs_fs(const THdfsParams& hdfs_params, const std::string& fs_name, hdfsFS* fs) { +Status _create_hdfs_fs(const THdfsParams& hdfs_params, const std::string& fs_name, hdfsFS* fs) { HDFSCommonBuilder builder; RETURN_IF_ERROR(create_hdfs_builder(hdfs_params, fs_name, &builder)); hdfsFS hdfs_fs = hdfsBuilderConnect(builder.get()); @@ -41,6 +44,38 @@ Status create_hdfs_fs(const THdfsParams& hdfs_params, const std::string& fs_name return Status::OK(); } +// https://brpc.apache.org/docs/server/basics/ +// According to the brpc doc, JNI code checks stack layout and cannot be run in +// bthreads so create a pthread for creating hdfs connection if necessary. +Status create_hdfs_fs(const THdfsParams& hdfs_params, const std::string& fs_name, hdfsFS* fs) { + bool is_pthread = bthread_self() == 0; + LOG(INFO) << "create hfdfs fs, is_pthread=" << is_pthread << " fs_name=" << fs_name; + if (is_pthread) { // running in pthread + return _create_hdfs_fs(hdfs_params, fs_name, fs); + } + + // running in bthread, switch to a pthread and wait + Status st; + auto btx = bthread::butex_create(); + *(int*)btx = 0; + std::thread t([&] { + st = _create_hdfs_fs(hdfs_params, fs_name, fs); + *(int*)btx = 1; + bthread::butex_wake_all(btx); + }); + std::unique_ptr> defer((int*)0x01, [&t, &btx](...) { + if (t.joinable()) t.join(); + bthread::butex_destroy(btx); + }); + timespec tmout {.tv_sec = std::chrono::system_clock::now().time_since_epoch().count() + 60}; + if (int ret = bthread::butex_wait(btx, 1, &tmout); ret != 0) { + std::string msg = "failed to wait _create_hdfs_fs fs_name=" + fs_name; + LOG(WARNING) << msg << " error=" << std::strerror(errno); + st = Status::Error(msg); + } + return st; +} + uint64_t hdfs_hash_code(const THdfsParams& hdfs_params, const std::string& fs_name) { uint64_t hash_code = 0; // The specified fsname is used first. 
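Reviewer note on the hdfs_util.cpp hunk above: the patch explains that JNI stack checks forbid running hdfsBuilderConnect on a bthread, so the connect is offloaded to a pthread and the caller waits on a butex. Below is a minimal sketch of that offload pattern, assuming brpc's butex primitives behave as the patch uses them; `run_off_bthread` and `blocking_connect` are illustrative stand-ins (not Doris APIs), and the header paths mirror what the patch appears to include.

    // Sketch only: run a blocking, JNI-backed call on a dedicated pthread when the
    // caller is a bthread, and wait for completion via a butex (mirrors the patch).
    #include <bthread/bthread.h>
    #include <bthread/butex.h>
    #include <ctime>
    #include <functional>
    #include <thread>

    #include "common/status.h"

    doris::Status run_off_bthread(std::function<doris::Status()> blocking_connect) {
        if (bthread_self() == 0) {
            // Already on a plain pthread: JNI-backed code (libhdfs) can run directly.
            return blocking_connect();
        }
        doris::Status st;
        void* btx = bthread::butex_create();
        *(int*)btx = 0;
        std::thread worker([&] {
            st = blocking_connect();       // runs on a real pthread stack
            *(int*)btx = 1;                // publish completion
            bthread::butex_wake_all(btx);  // wake the waiting bthread
        });
        // Block this bthread (not the whole pthread worker) until the helper wakes
        // us or the absolute deadline passes; assumed CLOCK_REALTIME semantics.
        timespec deadline {.tv_sec = ::time(nullptr) + 60, .tv_nsec = 0};
        (void)bthread::butex_wait(btx, /*expected_value=*/0, &deadline);
        worker.join();  // join before returning so `st` is fully written
        bthread::butex_destroy(btx);
        return st;
    }

Waiting on the butex lets other bthreads keep running on the same worker while the (possibly slow) HDFS connect finishes, and joining the helper thread before returning keeps the captured status valid.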
diff --git a/be/src/olap/base_compaction.cpp b/be/src/olap/base_compaction.cpp index 8be29383c1e9b1..8b9cbd75ed33b8 100644 --- a/be/src/olap/base_compaction.cpp +++ b/be/src/olap/base_compaction.cpp @@ -80,7 +80,7 @@ Status BaseCompaction::execute_compact() { tablet()->set_last_base_compaction_success_time(UnixMillis()); DorisMetrics::instance()->base_compaction_deltas_total->increment(_input_rowsets.size()); - DorisMetrics::instance()->base_compaction_bytes_total->increment(_input_rowsets_size); + DorisMetrics::instance()->base_compaction_bytes_total->increment(_input_rowsets_total_size); return Status::OK(); } diff --git a/be/src/olap/compaction.cpp b/be/src/olap/compaction.cpp index dee06a8a79b20c..a76a5d8679d74d 100644 --- a/be/src/olap/compaction.cpp +++ b/be/src/olap/compaction.cpp @@ -188,6 +188,7 @@ Status Compaction::merge_input_rowsets() { Status res; { SCOPED_TIMER(_merge_rowsets_latency_timer); + // 1. Merge segment files and write bkd inverted index if (_is_vertical) { res = Merger::vertical_merge_rowsets(_tablet, compaction_type(), *_cur_tablet_schema, input_rs_readers, _output_rs_writer.get(), @@ -200,17 +201,19 @@ Status Compaction::merge_input_rowsets() { res = Merger::vmerge_rowsets(_tablet, compaction_type(), *_cur_tablet_schema, input_rs_readers, _output_rs_writer.get(), &_stats); } - } - - _tablet->last_compaction_status = res; - if (!res.ok()) { - return res; + _tablet->last_compaction_status = res; + if (!res.ok()) { + return res; + } + // 2. Merge the remaining inverted index files of the string type + RETURN_IF_ERROR(do_inverted_index_compaction()); } COUNTER_UPDATE(_merged_rows_counter, _stats.merged_rows); COUNTER_UPDATE(_filtered_rows_counter, _stats.filtered_rows); + // 3. In the `build`, `_close_file_writers` is called to close the inverted index file writer and write the final compound index file. RETURN_NOT_OK_STATUS_WITH_WARN(_output_rs_writer->build(_output_rowset), fmt::format("rowset writer build failed. 
output_version: {}", _output_version.to_string())); @@ -254,10 +257,10 @@ int64_t Compaction::get_avg_segment_rows() { if (meta->compaction_policy() == CUMULATIVE_TIME_SERIES_POLICY) { int64_t compaction_goal_size_mbytes = meta->time_series_compaction_goal_size_mbytes(); return (compaction_goal_size_mbytes * 1024 * 1024 * 2) / - (_input_rowsets_size / (_input_row_num + 1) + 1); + (_input_rowsets_data_size / (_input_row_num + 1) + 1); } return config::vertical_compaction_max_segment_size / - (_input_rowsets_size / (_input_row_num + 1) + 1); + (_input_rowsets_data_size / (_input_row_num + 1) + 1); } CompactionMixin::CompactionMixin(StorageEngine& engine, TabletSharedPtr tablet, @@ -302,9 +305,9 @@ Status CompactionMixin::do_compact_ordered_rowsets() { // build output rowset RowsetMetaSharedPtr rowset_meta = std::make_shared(); rowset_meta->set_num_rows(_input_row_num); - rowset_meta->set_total_disk_size(_input_rowsets_size); - rowset_meta->set_data_disk_size(_input_rowsets_size); - rowset_meta->set_index_disk_size(_input_index_size); + rowset_meta->set_total_disk_size(_input_rowsets_data_size + _input_rowsets_index_size); + rowset_meta->set_data_disk_size(_input_rowsets_data_size); + rowset_meta->set_index_disk_size(_input_rowsets_index_size); rowset_meta->set_empty(_input_row_num == 0); rowset_meta->set_num_segments(_input_num_segments); rowset_meta->set_segments_overlap(NONOVERLAPPING); @@ -317,12 +320,13 @@ Status CompactionMixin::do_compact_ordered_rowsets() { void CompactionMixin::build_basic_info() { for (auto& rowset : _input_rowsets) { - _input_rowsets_size += rowset->data_disk_size(); - _input_index_size += rowset->index_disk_size(); + _input_rowsets_data_size += rowset->data_disk_size(); + _input_rowsets_index_size += rowset->index_disk_size(); + _input_rowsets_total_size += rowset->total_disk_size(); _input_row_num += rowset->num_rows(); _input_num_segments += rowset->num_segments(); } - COUNTER_UPDATE(_input_rowsets_data_size_counter, _input_rowsets_size); + COUNTER_UPDATE(_input_rowsets_data_size_counter, _input_rowsets_data_size); COUNTER_UPDATE(_input_row_num_counter, _input_row_num); COUNTER_UPDATE(_input_segments_num_counter, _input_num_segments); @@ -441,8 +445,12 @@ Status CompactionMixin::execute_compact_impl(int64_t permits) { << ", disk=" << tablet()->data_dir()->path() << ", segments=" << _input_num_segments << ", input_row_num=" << _input_row_num << ", output_row_num=" << _output_rowset->num_rows() - << ", input_rowset_size=" << _input_rowsets_size - << ", output_rowset_size=" << _output_rowset->data_disk_size() + << ", input_rowsets_data_size=" << _input_rowsets_data_size + << ", input_rowsets_index_size=" << _input_rowsets_index_size + << ", input_rowsets_total_size=" << _input_rowsets_total_size + << ", output_rowset_data_size=" << _output_rowset->data_disk_size() + << ", output_rowset_index_size=" << _output_rowset->index_disk_size() + << ", output_rowset_total_size=" << _output_rowset->total_disk_size() << ". elapsed time=" << watch.get_elapse_second() << "s."; _state = CompactionState::SUCCESS; return Status::OK(); @@ -456,8 +464,6 @@ Status CompactionMixin::execute_compact_impl(int64_t permits) { RETURN_IF_ERROR(merge_input_rowsets()); - RETURN_IF_ERROR(do_inverted_index_compaction()); - RETURN_IF_ERROR(modify_rowsets()); auto* cumu_policy = tablet()->cumulative_compaction_policy(); @@ -466,8 +472,8 @@ Status CompactionMixin::execute_compact_impl(int64_t permits) { << ". 
tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version << ", current_max_version=" << tablet()->max_version().second << ", disk=" << tablet()->data_dir()->path() << ", segments=" << _input_num_segments - << ", input_rowset_size=" << _input_rowsets_size - << ", output_rowset_size=" << _output_rowset->data_disk_size() + << ", input_data_size=" << _input_rowsets_data_size + << ", output_rowset_size=" << _output_rowset->total_disk_size() << ", input_row_num=" << _input_row_num << ", output_row_num=" << _output_rowset->num_rows() << ", filtered_row_num=" << _stats.filtered_rows @@ -613,58 +619,9 @@ Status Compaction::do_inverted_index_compaction() { // dest index files // format: rowsetId_segmentId - std::vector> inverted_index_file_writers( - dest_segment_num); - - // Some columns have already been indexed - // key: seg_id, value: inverted index file size - std::unordered_map compacted_idx_file_size; - for (int seg_id = 0; seg_id < dest_segment_num; ++seg_id) { - std::string index_path_prefix { - InvertedIndexDescriptor::get_index_file_path_prefix(ctx.segment_path(seg_id))}; - auto inverted_index_file_reader = std::make_unique( - ctx.fs(), index_path_prefix, - _cur_tablet_schema->get_inverted_index_storage_format()); - bool open_idx_file_cache = false; - auto st = inverted_index_file_reader->init(config::inverted_index_read_buffer_size, - open_idx_file_cache); - if (st.ok()) { - auto index_not_need_to_compact = - DORIS_TRY(inverted_index_file_reader->get_all_directories()); - // V1: each index is a separate file - // V2: all indexes are in a single file - if (_cur_tablet_schema->get_inverted_index_storage_format() != - doris::InvertedIndexStorageFormatPB::V1) { - int64_t fsize = 0; - st = ctx.fs()->file_size( - InvertedIndexDescriptor::get_index_file_path_v2(index_path_prefix), &fsize); - if (!st.ok()) { - LOG(ERROR) << "file size error in index compaction, error:" << st.msg(); - return st; - } - compacted_idx_file_size[seg_id] = fsize; - } - auto inverted_index_file_writer = std::make_unique( - ctx.fs(), index_path_prefix, ctx.rowset_id.to_string(), seg_id, - _cur_tablet_schema->get_inverted_index_storage_format()); - RETURN_IF_ERROR(inverted_index_file_writer->initialize(index_not_need_to_compact)); - inverted_index_file_writers[seg_id] = std::move(inverted_index_file_writer); - } else if (st.is()) { - auto inverted_index_file_writer = std::make_unique( - ctx.fs(), index_path_prefix, ctx.rowset_id.to_string(), seg_id, - _cur_tablet_schema->get_inverted_index_storage_format()); - inverted_index_file_writers[seg_id] = std::move(inverted_index_file_writer); - // no index file - compacted_idx_file_size[seg_id] = 0; - } else { - LOG(ERROR) << "inverted_index_file_reader init failed in index compaction, error:" - << st; - return st; - } - } - for (const auto& writer : inverted_index_file_writers) { - writer->set_file_writer_opts(ctx.get_file_writer_options()); - } + auto& inverted_index_file_writers = dynamic_cast(_output_rs_writer.get()) + ->inverted_index_file_writers(); + DCHECK_EQ(inverted_index_file_writers.size(), dest_segment_num); // use tmp file dir to store index files auto tmp_file_dir = ExecEnv::GetInstance()->get_tmp_file_dirs()->get_tmp_file_dir(); @@ -690,29 +647,6 @@ Status Compaction::do_inverted_index_compaction() { auto col = _cur_tablet_schema->column_by_uid(column_uniq_id); const auto* index_meta = _cur_tablet_schema->get_inverted_index(col); - // if index properties are different, index compaction maybe needs to be skipped. 
- bool is_continue = false; - std::optional> first_properties; - for (const auto& rowset : _input_rowsets) { - const auto* tablet_index = rowset->tablet_schema()->get_inverted_index(col); - const auto& properties = tablet_index->properties(); - if (!first_properties.has_value()) { - first_properties = properties; - } else { - if (properties != first_properties.value()) { - error_handler(index_meta->index_id(), column_uniq_id); - status = Status::Error( - "if index properties are different, index compaction needs to be " - "skipped."); - is_continue = true; - break; - } - } - } - if (is_continue) { - continue; - } - std::vector dest_index_dirs(dest_segment_num); try { std::vector> src_idx_dirs(src_segment_num); @@ -737,40 +671,12 @@ Status Compaction::do_inverted_index_compaction() { } } - std::vector all_inverted_index_file_info(dest_segment_num); - uint64_t inverted_index_file_size = 0; - for (int seg_id = 0; seg_id < dest_segment_num; ++seg_id) { - auto inverted_index_file_writer = inverted_index_file_writers[seg_id].get(); - if (Status st = inverted_index_file_writer->close(); !st.ok()) { - status = Status::Error(st.msg()); - } else { - inverted_index_file_size += inverted_index_file_writer->get_index_file_total_size(); - inverted_index_file_size -= compacted_idx_file_size[seg_id]; - } - all_inverted_index_file_info[seg_id] = inverted_index_file_writer->get_index_file_info(); - } // check index compaction status. If status is not ok, we should return error and end this compaction round. if (!status.ok()) { return status; } - - // index compaction should update total disk size and index disk size - _output_rowset->rowset_meta()->set_data_disk_size(_output_rowset->data_disk_size() + - inverted_index_file_size); - _output_rowset->rowset_meta()->set_total_disk_size(_output_rowset->data_disk_size() + - inverted_index_file_size); - _output_rowset->rowset_meta()->set_index_disk_size(_output_rowset->index_disk_size() + - inverted_index_file_size); - - _output_rowset->rowset_meta()->update_inverted_index_files_info(all_inverted_index_file_info); - COUNTER_UPDATE(_output_rowset_data_size_counter, _output_rowset->data_disk_size()); - LOG(INFO) << "succeed to do index compaction" - << ". tablet=" << _tablet->tablet_id() << ", input row number=" << _input_row_num - << ", output row number=" << _output_rowset->num_rows() - << ", input_rowset_size=" << _input_rowsets_size - << ", output_rowset_size=" << _output_rowset->data_disk_size() - << ", inverted index file size=" << inverted_index_file_size + << ". tablet=" << _tablet->tablet_id() << ". elapsed time=" << inverted_watch.get_elapse_second() << "s."; return Status::OK(); @@ -795,6 +701,31 @@ void Compaction::construct_index_compaction_columns(RowsetWriterContext& ctx) { if (!field_is_slice_type(_cur_tablet_schema->column_by_uid(col_unique_id).type())) { continue; } + + // if index properties are different, index compaction maybe needs to be skipped. 
+ bool is_continue = false; + std::optional> first_properties; + for (const auto& rowset : _input_rowsets) { + const auto* tablet_index = + rowset->tablet_schema()->get_inverted_index(col_unique_id, ""); + // no inverted index or index id is different from current index id + if (tablet_index == nullptr || tablet_index->index_id() != index.index_id()) { + is_continue = true; + break; + } + const auto& properties = tablet_index->properties(); + if (!first_properties.has_value()) { + first_properties = properties; + } else { + if (properties != first_properties.value()) { + is_continue = true; + break; + } + } + } + if (is_continue) { + continue; + } auto has_inverted_index = [&](const RowsetSharedPtr& src_rs) { auto* rowset = static_cast(src_rs.get()); if (rowset->is_skip_index_compaction(col_unique_id)) { @@ -887,9 +818,7 @@ Status CompactionMixin::construct_output_rowset_writer(RowsetWriterContext& ctx) if (config::inverted_index_compaction_enable && (((_tablet->keys_type() == KeysType::UNIQUE_KEYS && _tablet->enable_unique_key_merge_on_write()) || - _tablet->keys_type() == KeysType::DUP_KEYS)) && - _cur_tablet_schema->get_inverted_index_storage_format() == - InvertedIndexStorageFormatPB::V1) { + _tablet->keys_type() == KeysType::DUP_KEYS))) { construct_index_compaction_columns(ctx); } ctx.version = _output_version; @@ -1156,8 +1085,6 @@ Status CloudCompactionMixin::execute_compact_impl(int64_t permits) { RETURN_IF_ERROR(merge_input_rowsets()); - RETURN_IF_ERROR(do_inverted_index_compaction()); - RETURN_IF_ERROR(_engine.meta_mgr().commit_rowset(*_output_rowset->rowset_meta().get())); // 4. modify rowsets in memory @@ -1184,9 +1111,7 @@ Status CloudCompactionMixin::construct_output_rowset_writer(RowsetWriterContext& if (config::inverted_index_compaction_enable && (((_tablet->keys_type() == KeysType::UNIQUE_KEYS && _tablet->enable_unique_key_merge_on_write()) || - _tablet->keys_type() == KeysType::DUP_KEYS)) && - _cur_tablet_schema->get_inverted_index_storage_format() == - InvertedIndexStorageFormatPB::V1) { + _tablet->keys_type() == KeysType::DUP_KEYS))) { construct_index_compaction_columns(ctx); } diff --git a/be/src/olap/compaction.h b/be/src/olap/compaction.h index 08afe840280ecf..06ef4268529247 100644 --- a/be/src/olap/compaction.h +++ b/be/src/olap/compaction.h @@ -67,6 +67,7 @@ class Compaction { protected: Status merge_input_rowsets(); + // merge inverted index files Status do_inverted_index_compaction(); void construct_index_compaction_columns(RowsetWriterContext& ctx); @@ -89,10 +90,11 @@ class Compaction { BaseTabletSPtr _tablet; std::vector _input_rowsets; - int64_t _input_rowsets_size {0}; + int64_t _input_rowsets_data_size {0}; + int64_t _input_rowsets_index_size {0}; + int64_t _input_rowsets_total_size {0}; int64_t _input_row_num {0}; int64_t _input_num_segments {0}; - int64_t _input_index_size {0}; Merger::Statistics _stats; diff --git a/be/src/olap/cumulative_compaction.cpp b/be/src/olap/cumulative_compaction.cpp index b762468b3455a4..b961c694ede4d0 100644 --- a/be/src/olap/cumulative_compaction.cpp +++ b/be/src/olap/cumulative_compaction.cpp @@ -125,7 +125,8 @@ Status CumulativeCompaction::execute_compact() { tablet()->set_last_cumu_compaction_success_time(UnixMillis()); } DorisMetrics::instance()->cumulative_compaction_deltas_total->increment(_input_rowsets.size()); - DorisMetrics::instance()->cumulative_compaction_bytes_total->increment(_input_rowsets_size); + DorisMetrics::instance()->cumulative_compaction_bytes_total->increment( + _input_rowsets_total_size); return 
Status::OK(); } diff --git a/be/src/olap/delete_handler.cpp b/be/src/olap/delete_handler.cpp index 4d5b1ce9add3e0..80fc440ce36a6d 100644 --- a/be/src/olap/delete_handler.cpp +++ b/be/src/olap/delete_handler.cpp @@ -346,6 +346,8 @@ Status DeleteHandler::parse_condition(const std::string& condition_str, TConditi } template + requires(std::is_same_v or + std::is_same_v) Status DeleteHandler::_parse_column_pred(TabletSchemaSPtr complete_schema, TabletSchemaSPtr delete_pred_related_schema, const RepeatedPtrField& sub_pred_list, @@ -353,10 +355,13 @@ Status DeleteHandler::_parse_column_pred(TabletSchemaSPtr complete_schema, for (const auto& sub_predicate : sub_pred_list) { TCondition condition; RETURN_IF_ERROR(parse_condition(sub_predicate, &condition)); - int32_t col_unique_id; - if constexpr (std::is_same_v) { - col_unique_id = sub_predicate.col_unique_id; - } else { + int32_t col_unique_id = -1; + if constexpr (std::is_same_v) { + if (sub_predicate.has_column_unique_id()) [[likely]] { + col_unique_id = sub_predicate.column_unique_id(); + } + } + if (col_unique_id < 0) { const auto& column = *DORIS_TRY(delete_pred_related_schema->column(condition.column_name)); col_unique_id = column.unique_id(); diff --git a/be/src/olap/delete_handler.h b/be/src/olap/delete_handler.h index cc585c0abcf9f6..77de62d31d988e 100644 --- a/be/src/olap/delete_handler.h +++ b/be/src/olap/delete_handler.h @@ -21,6 +21,7 @@ #include #include +#include #include "common/factory_creator.h" #include "common/status.h" @@ -115,6 +116,8 @@ class DeleteHandler { private: template + requires(std::is_same_v or + std::is_same_v) Status _parse_column_pred( TabletSchemaSPtr complete_schema, TabletSchemaSPtr delete_pred_related_schema, const ::google::protobuf::RepeatedPtrField& sub_pred_list, diff --git a/be/src/olap/olap_common.h b/be/src/olap/olap_common.h index c1d3038050fbd4..d3bd0f0a3a2436 100644 --- a/be/src/olap/olap_common.h +++ b/be/src/olap/olap_common.h @@ -305,24 +305,22 @@ struct OlapReaderStatistics { // block_load_ns // block_init_ns // block_init_seek_ns - // block_conditions_filtered_ns - // first_read_ns - // block_first_read_seek_ns + // generate_row_ranges_ns + // predicate_column_read_ns + // predicate_column_read_seek_ns // lazy_read_ns // block_lazy_read_seek_ns int64_t block_init_ns = 0; int64_t block_init_seek_num = 0; int64_t block_init_seek_ns = 0; - int64_t first_read_ns = 0; - int64_t second_read_ns = 0; - int64_t block_first_read_seek_num = 0; - int64_t block_first_read_seek_ns = 0; + int64_t predicate_column_read_ns = 0; + int64_t non_predicate_read_ns = 0; + int64_t predicate_column_read_seek_num = 0; + int64_t predicate_column_read_seek_ns = 0; int64_t lazy_read_ns = 0; int64_t block_lazy_read_seek_num = 0; int64_t block_lazy_read_seek_ns = 0; - int64_t block_convert_ns = 0; - int64_t raw_rows_read = 0; int64_t rows_vec_cond_filtered = 0; @@ -351,11 +349,10 @@ struct OlapReaderStatistics { int64_t rows_del_by_bitmap = 0; // the number of rows filtered by various column indexes. 
int64_t rows_conditions_filtered = 0; - int64_t block_conditions_filtered_ns = 0; - int64_t block_conditions_filtered_bf_ns = 0; - int64_t block_conditions_filtered_zonemap_ns = 0; - int64_t block_conditions_filtered_zonemap_rp_ns = 0; - int64_t block_conditions_filtered_dict_ns = 0; + int64_t generate_row_ranges_ns = 0; + int64_t generate_row_ranges_by_bf_ns = 0; + int64_t generate_row_ranges_by_zonemap_ns = 0; + int64_t generate_row_ranges_by_dict_ns = 0; int64_t index_load_ns = 0; @@ -372,7 +369,6 @@ struct OlapReaderStatistics { int64_t inverted_index_query_cache_miss = 0; int64_t inverted_index_query_null_bitmap_timer = 0; int64_t inverted_index_query_bitmap_copy_timer = 0; - int64_t inverted_index_query_bitmap_op_timer = 0; int64_t inverted_index_searcher_open_timer = 0; int64_t inverted_index_searcher_search_timer = 0; int64_t inverted_index_searcher_cache_hit = 0; diff --git a/be/src/olap/rowset/beta_rowset.cpp b/be/src/olap/rowset/beta_rowset.cpp index 209aca7fb03b4c..4b51dcc3530476 100644 --- a/be/src/olap/rowset/beta_rowset.cpp +++ b/be/src/olap/rowset/beta_rowset.cpp @@ -183,8 +183,9 @@ Status BetaRowset::load_segment(int64_t seg_id, segment_v2::SegmentSharedPtr* se .file_size = _rowset_meta->segment_file_size(seg_id), }; - auto s = segment_v2::Segment::open(fs, seg_path, seg_id, rowset_id(), _schema, reader_options, - segment, _rowset_meta->inverted_index_file_info(seg_id)); + auto s = segment_v2::Segment::open(fs, seg_path, _rowset_meta->tablet_id(), seg_id, rowset_id(), + _schema, reader_options, segment, + _rowset_meta->inverted_index_file_info(seg_id)); if (!s.ok()) { LOG(WARNING) << "failed to open segment. " << seg_path << " under rowset " << rowset_id() << " : " << s.to_string(); @@ -497,7 +498,7 @@ Status BetaRowset::upload_to(const StorageResource& dest_fs, const RowsetId& new auto st = dest_fs.fs->batch_upload(local_paths, dest_paths); if (st.ok()) { DorisMetrics::instance()->upload_rowset_count->increment(1); - DorisMetrics::instance()->upload_total_byte->increment(data_disk_size()); + DorisMetrics::instance()->upload_total_byte->increment(total_disk_size()); } else { DorisMetrics::instance()->upload_fail_count->increment(1); } @@ -543,8 +544,8 @@ Status BetaRowset::check_current_rowset_segment() { .file_size = _rowset_meta->segment_file_size(seg_id), }; - auto s = segment_v2::Segment::open(fs, seg_path, seg_id, rowset_id(), _schema, - reader_options, &segment, + auto s = segment_v2::Segment::open(fs, seg_path, _rowset_meta->tablet_id(), seg_id, + rowset_id(), _schema, reader_options, &segment, _rowset_meta->inverted_index_file_info(seg_id)); if (!s.ok()) { LOG(WARNING) << "segment can not be opened. 
file=" << seg_path; diff --git a/be/src/olap/rowset/beta_rowset_writer.cpp b/be/src/olap/rowset/beta_rowset_writer.cpp index 5d1b80f8cd7b23..548b1950b81762 100644 --- a/be/src/olap/rowset/beta_rowset_writer.cpp +++ b/be/src/olap/rowset/beta_rowset_writer.cpp @@ -81,7 +81,7 @@ void build_rowset_meta_with_spec_field(RowsetMeta& rowset_meta, const RowsetMeta& spec_rowset_meta) { rowset_meta.set_num_rows(spec_rowset_meta.num_rows()); rowset_meta.set_total_disk_size(spec_rowset_meta.total_disk_size()); - rowset_meta.set_data_disk_size(spec_rowset_meta.total_disk_size()); + rowset_meta.set_data_disk_size(spec_rowset_meta.data_disk_size()); rowset_meta.set_index_disk_size(spec_rowset_meta.index_disk_size()); // TODO write zonemap to meta rowset_meta.set_empty(spec_rowset_meta.num_rows() == 0); @@ -189,13 +189,67 @@ Result> SegmentFileCollection::segments_file_size(int seg_id return ResultError(st); } +InvertedIndexFileCollection::~InvertedIndexFileCollection() = default; + +Status InvertedIndexFileCollection::add(int seg_id, InvertedIndexFileWriterPtr&& index_writer) { + std::lock_guard lock(_lock); + if (_inverted_index_file_writers.find(seg_id) != _inverted_index_file_writers.end()) + [[unlikely]] { + DCHECK(false); + return Status::InternalError("The seg_id already exists, seg_id is {}", seg_id); + } + _inverted_index_file_writers.emplace(seg_id, std::move(index_writer)); + return Status::OK(); +} + +Status InvertedIndexFileCollection::close() { + std::lock_guard lock(_lock); + for (auto&& [id, writer] : _inverted_index_file_writers) { + RETURN_IF_ERROR(writer->close()); + _total_size += writer->get_index_file_total_size(); + } + + return Status::OK(); +} + +Result> +InvertedIndexFileCollection::inverted_index_file_info(int seg_id_offset) { + std::lock_guard lock(_lock); + + Status st; + std::vector idx_file_info(_inverted_index_file_writers.size()); + bool succ = std::all_of( + _inverted_index_file_writers.begin(), _inverted_index_file_writers.end(), + [&](auto&& it) { + auto&& [seg_id, writer] = it; + + int idx = seg_id - seg_id_offset; + if (idx >= idx_file_info.size()) [[unlikely]] { + auto err_msg = + fmt::format("invalid seg_id={} num_file_writers={} seg_id_offset={}", + seg_id, idx_file_info.size(), seg_id_offset); + DCHECK(false) << err_msg; + st = Status::InternalError(err_msg); + return false; + } + idx_file_info[idx] = _inverted_index_file_writers[seg_id]->get_index_file_info(); + return true; + }); + + if (succ) { + return idx_file_info; + } + + return ResultError(st); +} + BaseBetaRowsetWriter::BaseBetaRowsetWriter() : _num_segment(0), _segment_start_id(0), _num_rows_written(0), _total_data_size(0), _total_index_size(0), - _segment_creator(_context, _seg_files, _idx_files_info) {} + _segment_creator(_context, _seg_files, _idx_files) {} BetaRowsetWriter::BetaRowsetWriter(StorageEngine& engine) : _engine(engine), _segcompaction_worker(std::make_shared(this)) {} @@ -282,8 +336,7 @@ Status BaseBetaRowsetWriter::_generate_delete_bitmap(int32_t segment_id) { LOG(INFO) << "[Memtable Flush] construct delete bitmap tablet: " << _context.tablet->tablet_id() << ", rowset_ids: " << _context.mow_context->rowset_ids.size() << ", cur max_version: " << _context.mow_context->max_version - << ", transaction_id: " << _context.mow_context->txn_id << ", delete_bitmap_count: " - << _context.tablet->tablet_meta()->delete_bitmap().get_delete_bitmap_count() + << ", transaction_id: " << _context.mow_context->txn_id << ", cost: " << watch.get_elapse_time_us() << "(us), total rows: " << total_rows; return 
Status::OK(); } @@ -315,7 +368,8 @@ Status BetaRowsetWriter::_load_noncompacted_segment(segment_v2::SegmentSharedPtr .is_doris_table = true, .cache_base_path {}, }; - auto s = segment_v2::Segment::open(io::global_local_filesystem(), path, segment_id, rowset_id(), + auto s = segment_v2::Segment::open(io::global_local_filesystem(), path, + _rowset_meta->tablet_id(), segment_id, rowset_id(), _context.tablet_schema, reader_options, &segment); if (!s.ok()) { LOG(WARNING) << "failed to open segment. " << path << ":" << s; @@ -728,7 +782,6 @@ Status BetaRowsetWriter::_close_file_writers() { Status BetaRowsetWriter::build(RowsetSharedPtr& rowset) { RETURN_IF_ERROR(_close_file_writers()); - const auto total_segment_num = _num_segment - _segcompacted_point + 1 + _num_segcompacted; RETURN_NOT_OK_STATUS_WITH_WARN(_check_segment_number_limit(total_segment_num), "too many segments when build new rowset"); @@ -748,12 +801,15 @@ Status BetaRowsetWriter::build(RowsetSharedPtr& rowset) { : _context.tablet_schema; _rowset_meta->set_tablet_schema(rowset_schema); - if (auto idx_files_info = _idx_files_info.get_inverted_files_info(_segment_start_id); - !idx_files_info.has_value()) [[unlikely]] { - LOG(ERROR) << "expected inverted index files info, but none presents: " - << idx_files_info.error(); - } else { - _rowset_meta->add_inverted_index_files_info(idx_files_info.value()); + // If segment compaction occurs, the idx file info will become inaccurate. + if (rowset_schema->has_inverted_index() && _num_segcompacted == 0) { + if (auto idx_files_info = _idx_files.inverted_index_file_info(_segment_start_id); + !idx_files_info.has_value()) [[unlikely]] { + LOG(ERROR) << "expected inverted index files info, but none presents: " + << idx_files_info.error(); + } else { + _rowset_meta->add_inverted_index_files_info(idx_files_info.value()); + } } RETURN_NOT_OK_STATUS_WITH_WARN(RowsetFactory::create_rowset(rowset_schema, _context.tablet_path, @@ -830,7 +886,8 @@ Status BaseBetaRowsetWriter::_build_rowset_meta(RowsetMeta* rowset_meta, bool ch rowset_meta->set_num_segments(segment_num); rowset_meta->set_num_rows(num_rows_written + _num_rows_written); - rowset_meta->set_total_disk_size(total_data_size + _total_data_size); + rowset_meta->set_total_disk_size(total_data_size + _total_data_size + total_index_size + + _total_index_size); rowset_meta->set_data_disk_size(total_data_size + _total_data_size); rowset_meta->set_index_disk_size(total_index_size + _total_index_size); rowset_meta->set_segments_key_bounds(segments_encoded_key_bounds); @@ -891,6 +948,14 @@ Status BaseBetaRowsetWriter::create_file_writer(uint32_t segment_id, io::FileWri fmt::format("failed to create file = {}, file type = {}", segment_path, file_type)); } +Status BaseBetaRowsetWriter::create_inverted_index_file_writer( + uint32_t segment_id, InvertedIndexFileWriterPtr* index_file_writer) { + RETURN_IF_ERROR(RowsetWriter::create_inverted_index_file_writer(segment_id, index_file_writer)); + // used for inverted index format v1 + (*index_file_writer)->set_file_writer_opts(_context.get_file_writer_options()); + return Status::OK(); +} + Status BetaRowsetWriter::_create_segment_writer_for_segcompaction( std::unique_ptr* writer, int64_t begin, int64_t end) { DCHECK(begin >= 0 && end >= 0); @@ -899,6 +964,22 @@ Status BetaRowsetWriter::_create_segment_writer_for_segcompaction( io::FileWriterPtr file_writer; RETURN_IF_ERROR(_create_file_writer(path, file_writer)); + InvertedIndexFileWriterPtr index_file_writer; + if (_context.tablet_schema->has_inverted_index()) { + 
io::FileWriterPtr idx_file_writer; + if (_context.tablet_schema->get_inverted_index_storage_format() != + InvertedIndexStorageFormatPB::V1) { + std::string prefix = + std::string {InvertedIndexDescriptor::get_index_file_path_prefix(path)}; + std::string index_path = InvertedIndexDescriptor::get_index_file_path_v2(prefix); + RETURN_IF_ERROR(_create_file_writer(index_path, idx_file_writer)); + } + index_file_writer = std::make_unique( + _context.fs(), path, _context.rowset_id.to_string(), _num_segcompacted, + _context.tablet_schema->get_inverted_index_storage_format(), + std::move(idx_file_writer)); + } + segment_v2::SegmentWriterOptions writer_options; writer_options.enable_unique_key_merge_on_write = _context.enable_unique_key_merge_on_write; writer_options.rowset_ctx = &_context; @@ -907,15 +988,14 @@ Status BetaRowsetWriter::_create_segment_writer_for_segcompaction( writer_options.max_rows_per_segment = _context.max_rows_per_segment; writer_options.mow_ctx = _context.mow_context; - *writer = std::make_unique(file_writer.get(), _num_segcompacted, - _context.tablet_schema, _context.tablet, - _context.data_dir, writer_options); + *writer = std::make_unique( + file_writer.get(), _num_segcompacted, _context.tablet_schema, _context.tablet, + _context.data_dir, writer_options, index_file_writer.get()); if (auto& seg_writer = _segcompaction_worker->get_file_writer(); seg_writer != nullptr && seg_writer->state() != io::FileWriter::State::CLOSED) { RETURN_IF_ERROR(_segcompaction_worker->get_file_writer()->close()); } _segcompaction_worker->get_file_writer().reset(file_writer.release()); - return Status::OK(); } @@ -1005,11 +1085,13 @@ Status BetaRowsetWriter::flush_segment_writer_for_segcompaction( return Status::Error("failed to finalize segment: {}", s.to_string()); } + int64_t inverted_index_file_size = 0; + RETURN_IF_ERROR((*writer)->close_inverted_index(&inverted_index_file_size)); SegmentStatistics segstat; segstat.row_num = row_num; - segstat.data_size = segment_size + (*writer)->get_inverted_index_total_size(); - segstat.index_size = index_size + (*writer)->get_inverted_index_total_size(); + segstat.data_size = segment_size; + segstat.index_size = inverted_index_file_size; segstat.key_bounds = key_bounds; { std::lock_guard lock(_segid_statistics_map_mutex); diff --git a/be/src/olap/rowset/beta_rowset_writer.h b/be/src/olap/rowset/beta_rowset_writer.h index 82e4c9409b4853..4539959fab506b 100644 --- a/be/src/olap/rowset/beta_rowset_writer.h +++ b/be/src/olap/rowset/beta_rowset_writer.h @@ -42,6 +42,7 @@ #include "olap/rowset/rowset_writer.h" #include "olap/rowset/rowset_writer_context.h" #include "olap/rowset/segment_creator.h" +#include "segment_v2/inverted_index_file_writer.h" #include "segment_v2/segment.h" #include "util/spinlock.h" @@ -84,58 +85,33 @@ class SegmentFileCollection { bool _closed {false}; }; -// Collect the size of the inverted index files -class InvertedIndexFilesInfo { +class InvertedIndexFileCollection { public: + ~InvertedIndexFileCollection(); + + // `seg_id` -> inverted index file writer + Status add(int seg_id, InvertedIndexFileWriterPtr&& writer); + + // Close all file writers + // If the inverted index file writer is not closed, an error will be thrown during destruction + Status close(); + // Get inverted index file info in segment id order. - // Return the info of inverted index files from seg_id_offset to the last one. 
- Result> get_inverted_files_info(int seg_id_offset) { - std::lock_guard lock(_lock); - - Status st; - std::vector inverted_files_info(_inverted_index_files_info.size()); - bool succ = std::all_of( - _inverted_index_files_info.begin(), _inverted_index_files_info.end(), - [&](auto&& it) { - auto&& [seg_id, info] = it; - - int idx = seg_id - seg_id_offset; - if (idx >= inverted_files_info.size()) [[unlikely]] { - auto err_msg = fmt::format( - "invalid seg_id={} num_inverted_files_info={} seg_id_offset={}", - seg_id, inverted_files_info.size(), seg_id_offset); - DCHECK(false) << err_msg; - st = Status::InternalError(err_msg); - return false; - } - - auto& finfo = inverted_files_info[idx]; - if (finfo.has_index_size() || finfo.index_info_size() > 0) [[unlikely]] { - // File size should not been set - auto err_msg = fmt::format("duplicate seg_id={}", seg_id); - DCHECK(false) << err_msg; - st = Status::InternalError(err_msg); - return false; - } - finfo = info; - return true; - }); - - if (succ) { - return inverted_files_info; - } - - return ResultError(st); - } + // `seg_id_offset` is the offset of the segment id relative to the subscript of `_inverted_index_file_writers`, + // for more details, see `Tablet::create_transient_rowset_writer`. + Result> inverted_index_file_info(int seg_id_offset); - void add_file_info(int seg_id, InvertedIndexFileInfo file_info) { - std::lock_guard lock(_lock); - _inverted_index_files_info.emplace(seg_id, file_info); + // return all inverted index file writers + std::unordered_map& get_file_writers() { + return _inverted_index_file_writers; } + int64_t get_total_index_size() const { return _total_size; } + private: - std::unordered_map _inverted_index_files_info; mutable SpinLock _lock; + std::unordered_map _inverted_index_file_writers; + int64_t _total_size = 0; }; class BaseBetaRowsetWriter : public RowsetWriter { @@ -156,6 +132,9 @@ class BaseBetaRowsetWriter : public RowsetWriter { Status create_file_writer(uint32_t segment_id, io::FileWriterPtr& writer, FileType file_type = FileType::SEGMENT_FILE) override; + Status create_inverted_index_file_writer(uint32_t segment_id, + InvertedIndexFileWriterPtr* writer) override; + Status add_segment(uint32_t segment_id, const SegmentStatistics& segstat, TabletSchemaSPtr flush_schema) override; @@ -215,7 +194,9 @@ class BaseBetaRowsetWriter : public RowsetWriter { return _seg_files.get_file_writers(); } - InvertedIndexFilesInfo& get_inverted_index_files_info() { return _idx_files_info; } + std::unordered_map& inverted_index_file_writers() { + return this->_idx_files.get_file_writers(); + } private: void update_rowset_schema(TabletSchemaSPtr flush_schema); @@ -235,6 +216,15 @@ class BaseBetaRowsetWriter : public RowsetWriter { std::lock_guard l(_segid_statistics_map_mutex); return std::accumulate(_segment_num_rows.begin(), _segment_num_rows.end(), uint64_t(0)); } + // Only during vertical compaction is this method called + // Some index files are written during normal compaction and some files are written during index compaction. + // After all index writes are completed, call this method to write the final compound index file. 
+ Status _close_inverted_index_file_writers() { + RETURN_NOT_OK_STATUS_WITH_WARN(_idx_files.close(), + "failed to close index file when build new rowset"); + this->_total_index_size += _idx_files.get_total_index_size(); + return Status::OK(); + } std::atomic _num_segment; // number of consecutive flushed segments roaring::Roaring _segment_set; // bitmap set to record flushed segment id @@ -242,6 +232,7 @@ class BaseBetaRowsetWriter : public RowsetWriter { int32_t _segment_start_id; // basic write start from 0, partial update may be different SegmentFileCollection _seg_files; + InvertedIndexFileCollection _idx_files; // record rows number of every segment already written, using for rowid // conversion when compaction in unique key with MoW model @@ -269,9 +260,6 @@ class BaseBetaRowsetWriter : public RowsetWriter { int64_t _delete_bitmap_ns = 0; int64_t _segment_writer_ns = 0; - - // map - InvertedIndexFilesInfo _idx_files_info; }; class SegcompactionWorker; diff --git a/be/src/olap/rowset/beta_rowset_writer_v2.cpp b/be/src/olap/rowset/beta_rowset_writer_v2.cpp index 0d0ad435b9efd1..cb5dd5a5ee272d 100644 --- a/be/src/olap/rowset/beta_rowset_writer_v2.cpp +++ b/be/src/olap/rowset/beta_rowset_writer_v2.cpp @@ -58,7 +58,7 @@ namespace doris { using namespace ErrorCode; BetaRowsetWriterV2::BetaRowsetWriterV2(const std::vector>& streams) - : _segment_creator(_context, _seg_files, _idx_files_info), _streams(streams) {} + : _segment_creator(_context, _seg_files, _idx_files), _streams(streams) {} BetaRowsetWriterV2::~BetaRowsetWriterV2() = default; diff --git a/be/src/olap/rowset/beta_rowset_writer_v2.h b/be/src/olap/rowset/beta_rowset_writer_v2.h index a9e41e603cef63..78ec4a7dce703c 100644 --- a/be/src/olap/rowset/beta_rowset_writer_v2.h +++ b/be/src/olap/rowset/beta_rowset_writer_v2.h @@ -154,11 +154,10 @@ class BetaRowsetWriterV2 : public RowsetWriter { std::vector _segments_encoded_key_bounds; SegmentFileCollection _seg_files; + InvertedIndexFileCollection _idx_files; SegmentCreator _segment_creator; - InvertedIndexFilesInfo _idx_files_info; - fmt::memory_buffer vlog_buffer; std::vector> _streams; diff --git a/be/src/olap/rowset/rowset.h b/be/src/olap/rowset/rowset.h index 24e660cd2f7210..e1a2347f6aeaa8 100644 --- a/be/src/olap/rowset/rowset.h +++ b/be/src/olap/rowset/rowset.h @@ -149,7 +149,8 @@ class Rowset : public std::enable_shared_from_this { int64_t start_version() const { return rowset_meta()->version().first; } int64_t end_version() const { return rowset_meta()->version().second; } size_t index_disk_size() const { return rowset_meta()->index_disk_size(); } - size_t data_disk_size() const { return rowset_meta()->total_disk_size(); } + size_t data_disk_size() const { return rowset_meta()->data_disk_size(); } + size_t total_disk_size() const { return rowset_meta()->total_disk_size(); } bool empty() const { return rowset_meta()->empty(); } bool zero_num_rows() const { return rowset_meta()->num_rows() == 0; } size_t num_rows() const { return rowset_meta()->num_rows(); } diff --git a/be/src/olap/rowset/rowset_meta.cpp b/be/src/olap/rowset/rowset_meta.cpp index 1843fb8a41ee08..6bed5e800ede4d 100644 --- a/be/src/olap/rowset/rowset_meta.cpp +++ b/be/src/olap/rowset/rowset_meta.cpp @@ -226,6 +226,7 @@ void RowsetMeta::merge_rowset_meta(const RowsetMeta& other) { set_data_disk_size(data_disk_size() + other.data_disk_size()); set_total_disk_size(total_disk_size() + other.total_disk_size()); set_index_disk_size(index_disk_size() + other.index_disk_size()); + set_total_disk_size(data_disk_size() + 
index_disk_size()); for (auto&& key_bound : other.get_segments_key_bounds()) { add_segment_key_bounds(key_bound); } @@ -273,20 +274,14 @@ InvertedIndexFileInfo RowsetMeta::inverted_index_file_info(int seg_id) { } void RowsetMeta::add_inverted_index_files_info( - const std::vector& idx_file_info) { + const std::vector& idx_file_info) { _rowset_meta_pb.set_enable_inverted_index_file_info(true); for (auto finfo : idx_file_info) { auto* new_file_info = _rowset_meta_pb.add_inverted_index_file_info(); - *new_file_info = finfo; + *new_file_info = *finfo; } } -void RowsetMeta::update_inverted_index_files_info( - const std::vector& idx_file_info) { - _rowset_meta_pb.clear_inverted_index_file_info(); - add_inverted_index_files_info(idx_file_info); -} - bool operator==(const RowsetMeta& a, const RowsetMeta& b) { if (a._rowset_id != b._rowset_id) return false; if (a._is_removed_from_rowset_meta != b._is_removed_from_rowset_meta) return false; diff --git a/be/src/olap/rowset/rowset_meta.h b/be/src/olap/rowset/rowset_meta.h index 164d42cbb16230..46121aeae2be6d 100644 --- a/be/src/olap/rowset/rowset_meta.h +++ b/be/src/olap/rowset/rowset_meta.h @@ -364,9 +364,8 @@ class RowsetMeta : public MetadataAdder { return _rowset_meta_pb.inverted_index_file_info(); } - void add_inverted_index_files_info(const std::vector& idx_file_info); - - void update_inverted_index_files_info(const std::vector& idx_file_info); + void add_inverted_index_files_info( + const std::vector& idx_file_info); int64_t get_metadata_size() const override; diff --git a/be/src/olap/rowset/rowset_reader_context.h b/be/src/olap/rowset/rowset_reader_context.h index 43a84acea02f79..fd4fe7a18234f1 100644 --- a/be/src/olap/rowset/rowset_reader_context.h +++ b/be/src/olap/rowset/rowset_reader_context.h @@ -77,14 +77,12 @@ struct RowsetReaderContext { const DeleteBitmap* delete_bitmap = nullptr; bool record_rowids = false; RowIdConversion* rowid_conversion; - bool is_vertical_compaction = false; bool is_key_column_group = false; const std::set* output_columns = nullptr; RowsetId rowset_id; // slots that cast may be eliminated in storage layer std::map target_cast_type_for_variants; int64_t ttl_seconds = 0; - size_t topn_limit = 0; }; } // namespace doris diff --git a/be/src/olap/rowset/rowset_writer.h b/be/src/olap/rowset/rowset_writer.h index 6861b8ab7e2ce6..ad42982488b316 100644 --- a/be/src/olap/rowset/rowset_writer.h +++ b/be/src/olap/rowset/rowset_writer.h @@ -31,6 +31,7 @@ #include "olap/column_mapping.h" #include "olap/rowset/rowset.h" #include "olap/rowset/rowset_writer_context.h" +#include "olap/rowset/segment_v2/inverted_index_file_writer.h" #include "olap/tablet_fwd.h" #include "olap/tablet_schema.h" #include "vec/core/block.h" @@ -95,6 +96,24 @@ class RowsetWriter { return Status::NotSupported("RowsetWriter does not support create_file_writer"); } + virtual Status create_inverted_index_file_writer( + uint32_t segment_id, InvertedIndexFileWriterPtr* index_file_writer) { + // Create file writer for the inverted index format v2. 
+ io::FileWriterPtr idx_file_v2_ptr; + if (_context.tablet_schema->get_inverted_index_storage_format() != + InvertedIndexStorageFormatPB::V1) { + RETURN_IF_ERROR( + create_file_writer(segment_id, idx_file_v2_ptr, FileType::INVERTED_INDEX_FILE)); + } + std::string segment_prefix {InvertedIndexDescriptor::get_index_file_path_prefix( + _context.segment_path(segment_id))}; + *index_file_writer = std::make_unique( + _context.fs(), segment_prefix, _context.rowset_id.to_string(), segment_id, + _context.tablet_schema->get_inverted_index_storage_format(), + std::move(idx_file_v2_ptr)); + return Status::OK(); + } + // explicit flush all buffered rows into segment file. // note that `add_row` could also trigger flush when certain conditions are met virtual Status flush() = 0; diff --git a/be/src/olap/rowset/segment_creator.cpp b/be/src/olap/rowset/segment_creator.cpp index 1afd3215db42f6..e0eb7534123a86 100644 --- a/be/src/olap/rowset/segment_creator.cpp +++ b/be/src/olap/rowset/segment_creator.cpp @@ -53,8 +53,8 @@ namespace doris { using namespace ErrorCode; SegmentFlusher::SegmentFlusher(RowsetWriterContext& context, SegmentFileCollection& seg_files, - InvertedIndexFilesInfo& idx_files_info) - : _context(context), _seg_files(seg_files), _idx_files_info(idx_files_info) {} + InvertedIndexFileCollection& idx_files) + : _context(context), _seg_files(seg_files), _idx_files(idx_files) {} SegmentFlusher::~SegmentFlusher() = default; @@ -140,13 +140,10 @@ Status SegmentFlusher::_create_segment_writer(std::unique_ptrcreate(segment_id, segment_file_writer)); - io::FileWriterPtr inverted_file_writer; - if (_context.tablet_schema->has_inverted_index() && - _context.tablet_schema->get_inverted_index_storage_format() >= - InvertedIndexStorageFormatPB::V2 && - _context.memtable_on_sink_support_index_v2) { - RETURN_IF_ERROR(_context.file_writer_creator->create(segment_id, inverted_file_writer, - FileType::INVERTED_INDEX_FILE)); + InvertedIndexFileWriterPtr inverted_index_file_writer; + if (_context.tablet_schema->has_inverted_index()) { + RETURN_IF_ERROR( + _context.file_writer_creator->create(segment_id, &inverted_index_file_writer)); } segment_v2::SegmentWriterOptions writer_options; @@ -161,8 +158,11 @@ Status SegmentFlusher::_create_segment_writer(std::unique_ptr( segment_file_writer.get(), segment_id, _context.tablet_schema, _context.tablet, - _context.data_dir, writer_options, std::move(inverted_file_writer)); + _context.data_dir, writer_options, inverted_index_file_writer.get()); RETURN_IF_ERROR(_seg_files.add(segment_id, std::move(segment_file_writer))); + if (_context.tablet_schema->has_inverted_index()) { + RETURN_IF_ERROR(_idx_files.add(segment_id, std::move(inverted_index_file_writer))); + } auto s = writer->init(); if (!s.ok()) { LOG(WARNING) << "failed to init segment writer: " << s.to_string(); @@ -178,13 +178,10 @@ Status SegmentFlusher::_create_segment_writer( io::FileWriterPtr segment_file_writer; RETURN_IF_ERROR(_context.file_writer_creator->create(segment_id, segment_file_writer)); - io::FileWriterPtr inverted_file_writer; - if (_context.tablet_schema->has_inverted_index() && - _context.tablet_schema->get_inverted_index_storage_format() >= - InvertedIndexStorageFormatPB::V2 && - _context.memtable_on_sink_support_index_v2) { - RETURN_IF_ERROR(_context.file_writer_creator->create(segment_id, inverted_file_writer, - FileType::INVERTED_INDEX_FILE)); + InvertedIndexFileWriterPtr inverted_index_file_writer; + if (_context.tablet_schema->has_inverted_index()) { + RETURN_IF_ERROR( + 
_context.file_writer_creator->create(segment_id, &inverted_index_file_writer)); } segment_v2::VerticalSegmentWriterOptions writer_options; @@ -198,8 +195,11 @@ Status SegmentFlusher::_create_segment_writer( writer = std::make_unique( segment_file_writer.get(), segment_id, _context.tablet_schema, _context.tablet, - _context.data_dir, writer_options, std::move(inverted_file_writer)); + _context.data_dir, writer_options, inverted_index_file_writer.get()); RETURN_IF_ERROR(_seg_files.add(segment_id, std::move(segment_file_writer))); + if (_context.tablet_schema->has_inverted_index()) { + RETURN_IF_ERROR(_idx_files.add(segment_id, std::move(inverted_index_file_writer))); + } auto s = writer->init(); if (!s.ok()) { LOG(WARNING) << "failed to init segment writer: " << s.to_string(); @@ -225,12 +225,16 @@ Status SegmentFlusher::_flush_segment_writer( if (row_num == 0) { return Status::OK(); } - uint64_t segment_size; - uint64_t index_size; - Status s = writer->finalize(&segment_size, &index_size); + uint64_t segment_file_size; + uint64_t common_index_size; + Status s = writer->finalize(&segment_file_size, &common_index_size); if (!s.ok()) { return Status::Error(s.code(), "failed to finalize segment: {}", s.to_string()); } + + int64_t inverted_index_file_size = 0; + RETURN_IF_ERROR(writer->close_inverted_index(&inverted_index_file_size)); + VLOG_DEBUG << "tablet_id:" << _context.tablet_id << " flushing filename: " << writer->data_dir_path() << " rowset_id:" << _context.rowset_id; @@ -245,17 +249,20 @@ Status SegmentFlusher::_flush_segment_writer( uint32_t segment_id = writer->segment_id(); SegmentStatistics segstat; segstat.row_num = row_num; - segstat.data_size = segment_size + writer->get_inverted_index_total_size(); - segstat.index_size = index_size + writer->get_inverted_index_total_size(); + segstat.data_size = segment_file_size; + segstat.index_size = inverted_index_file_size; segstat.key_bounds = key_bounds; + LOG(INFO) << "tablet_id:" << _context.tablet_id + << ", flushing rowset_dir: " << _context.tablet_path + << ", rowset_id:" << _context.rowset_id << ", data size:" << segstat.data_size + << ", index size:" << segstat.index_size; - _idx_files_info.add_file_info(segment_id, writer->get_inverted_index_file_info()); writer.reset(); RETURN_IF_ERROR(_context.segment_collector->add(segment_id, segstat, flush_schema)); if (flush_size) { - *flush_size = segment_size + index_size; + *flush_size = segment_file_size; } return Status::OK(); } @@ -271,12 +278,16 @@ Status SegmentFlusher::_flush_segment_writer(std::unique_ptrfinalize(&segment_size, &index_size); + uint64_t segment_file_size; + uint64_t common_index_size; + Status s = writer->finalize(&segment_file_size, &common_index_size); if (!s.ok()) { return Status::Error(s.code(), "failed to finalize segment: {}", s.to_string()); } + + int64_t inverted_index_file_size = 0; + RETURN_IF_ERROR(writer->close_inverted_index(&inverted_index_file_size)); + VLOG_DEBUG << "tablet_id:" << _context.tablet_id << " flushing rowset_dir: " << _context.tablet_path << " rowset_id:" << _context.rowset_id; @@ -291,17 +302,20 @@ Status SegmentFlusher::_flush_segment_writer(std::unique_ptrget_segment_id(); SegmentStatistics segstat; segstat.row_num = row_num; - segstat.data_size = segment_size + writer->get_inverted_index_total_size(); - segstat.index_size = index_size + writer->get_inverted_index_total_size(); + segstat.data_size = segment_file_size; + segstat.index_size = inverted_index_file_size; segstat.key_bounds = key_bounds; + LOG(INFO) << "tablet_id:" << 
_context.tablet_id + << ", flushing rowset_dir: " << _context.tablet_path + << ", rowset_id:" << _context.rowset_id << ", data size:" << segstat.data_size + << ", index size:" << segstat.index_size; - _idx_files_info.add_file_info(segment_id, writer->get_inverted_index_file_info()); writer.reset(); RETURN_IF_ERROR(_context.segment_collector->add(segment_id, segstat, flush_schema)); if (flush_size) { - *flush_size = segment_size + index_size; + *flush_size = segment_file_size; } return Status::OK(); } @@ -330,8 +344,8 @@ int64_t SegmentFlusher::Writer::max_row_to_add(size_t row_avg_size_in_bytes) { } SegmentCreator::SegmentCreator(RowsetWriterContext& context, SegmentFileCollection& seg_files, - InvertedIndexFilesInfo& idx_files_info) - : _segment_flusher(context, seg_files, idx_files_info) {} + InvertedIndexFileCollection& idx_files) + : _segment_flusher(context, seg_files, idx_files) {} Status SegmentCreator::add_block(const vectorized::Block* block) { if (block->rows() == 0) { diff --git a/be/src/olap/rowset/segment_creator.h b/be/src/olap/rowset/segment_creator.h index c862fce87a43bd..f8afd5798927d4 100644 --- a/be/src/olap/rowset/segment_creator.h +++ b/be/src/olap/rowset/segment_creator.h @@ -29,6 +29,7 @@ #include "io/fs/file_reader_writer_fwd.h" #include "olap/olap_common.h" #include "olap/rowset/rowset_writer_context.h" +#include "olap/rowset/segment_v2/inverted_index_file_writer.h" #include "olap/tablet_fwd.h" #include "util/spinlock.h" #include "vec/core/block.h" @@ -46,7 +47,7 @@ class VerticalSegmentWriter; struct SegmentStatistics; class BetaRowsetWriter; class SegmentFileCollection; -class InvertedIndexFilesInfo; +class InvertedIndexFileCollection; class FileWriterCreator { public: @@ -54,9 +55,12 @@ class FileWriterCreator { virtual Status create(uint32_t segment_id, io::FileWriterPtr& file_writer, FileType file_type = FileType::SEGMENT_FILE) = 0; + + virtual Status create(uint32_t segment_id, InvertedIndexFileWriterPtr* file_writer) = 0; }; template + requires std::is_base_of_v class FileWriterCreatorT : public FileWriterCreator { public: explicit FileWriterCreatorT(T* t) : _t(t) {} @@ -66,6 +70,10 @@ class FileWriterCreatorT : public FileWriterCreator { return _t->create_file_writer(segment_id, file_writer, file_type); } + Status create(uint32_t segment_id, InvertedIndexFileWriterPtr* file_writer) override { + return _t->create_inverted_index_file_writer(segment_id, file_writer); + } + private: T* _t = nullptr; }; @@ -79,6 +87,7 @@ class SegmentCollector { }; template + requires std::is_base_of_v class SegmentCollectorT : public SegmentCollector { public: explicit SegmentCollectorT(T* t) : _t(t) {} @@ -95,7 +104,7 @@ class SegmentCollectorT : public SegmentCollector { class SegmentFlusher { public: SegmentFlusher(RowsetWriterContext& context, SegmentFileCollection& seg_files, - InvertedIndexFilesInfo& idx_files_info); + InvertedIndexFileCollection& idx_files); ~SegmentFlusher(); @@ -164,7 +173,7 @@ class SegmentFlusher { private: RowsetWriterContext& _context; SegmentFileCollection& _seg_files; - InvertedIndexFilesInfo& _idx_files_info; + InvertedIndexFileCollection& _idx_files; // written rows by add_block/add_row std::atomic _num_rows_written = 0; @@ -177,7 +186,7 @@ class SegmentFlusher { class SegmentCreator { public: SegmentCreator(RowsetWriterContext& context, SegmentFileCollection& seg_files, - InvertedIndexFilesInfo& idx_files_info); + InvertedIndexFileCollection& idx_files); ~SegmentCreator() = default; diff --git 
a/be/src/olap/rowset/segment_v2/inverted_index_file_writer.cpp b/be/src/olap/rowset/segment_v2/inverted_index_file_writer.cpp index 7a784a55b862d0..70c1e55d1e8da8 100644 --- a/be/src/olap/rowset/segment_v2/inverted_index_file_writer.cpp +++ b/be/src/olap/rowset/segment_v2/inverted_index_file_writer.cpp @@ -122,6 +122,8 @@ int64_t InvertedIndexFileWriter::headerLength() { } Status InvertedIndexFileWriter::close() { + DCHECK(!_closed) << debug_string(); + _closed = true; if (_indices_dirs.empty()) { return Status::OK(); } @@ -370,14 +372,10 @@ int64_t InvertedIndexFileWriter::write_v2() { out_dir->set_file_writer_opts(_opts); std::unique_ptr compound_file_output; - // idx v2 writer != nullptr means memtable on sink node now - if (_idx_v2_writer != nullptr) { - compound_file_output = std::unique_ptr( - out_dir->createOutputV2(_idx_v2_writer.get())); - } else { - compound_file_output = std::unique_ptr( - out_dir->createOutput(index_path.filename().c_str())); - } + + DCHECK(_idx_v2_writer != nullptr) << "inverted index file writer v2 is nullptr"; + compound_file_output = std::unique_ptr( + out_dir->createOutputV2(_idx_v2_writer.get())); // Write the version number compound_file_output->writeInt(InvertedIndexStorageFormatPB::V2); diff --git a/be/src/olap/rowset/segment_v2/inverted_index_file_writer.h b/be/src/olap/rowset/segment_v2/inverted_index_file_writer.h index 2aceb671d809a7..ccd6953cdd7abd 100644 --- a/be/src/olap/rowset/segment_v2/inverted_index_file_writer.h +++ b/be/src/olap/rowset/segment_v2/inverted_index_file_writer.h @@ -38,6 +38,9 @@ class DorisFSDirectory; using InvertedIndexDirectoryMap = std::map, std::unique_ptr>; +class InvertedIndexFileWriter; +using InvertedIndexFileWriterPtr = std::unique_ptr; + class FileInfo { public: std::string filename; @@ -65,8 +68,14 @@ class InvertedIndexFileWriter { int64_t write_v1(); Status close(); int64_t headerLength(); - InvertedIndexFileInfo get_index_file_info() const { return _file_info; } - int64_t get_index_file_total_size() const { return _total_file_size; } + const InvertedIndexFileInfo* get_index_file_info() const { + DCHECK(_closed) << debug_string(); + return &_file_info; + } + int64_t get_index_file_total_size() const { + DCHECK(_closed) << debug_string(); + return _total_file_size; + } const io::FileSystemSPtr& get_fs() const { return _fs; } void sort_files(std::vector& file_infos); void copyFile(const char* fileName, lucene::store::Directory* dir, @@ -75,6 +84,20 @@ class InvertedIndexFileWriter { void set_file_writer_opts(const io::FileWriterOptions& opts) { _opts = opts; } + std::string debug_string() const { + std::stringstream indices_dirs; + for (const auto& [index, dir] : _indices_dirs) { + indices_dirs << "index id is: " << index.first << " , index suffix is: " << index.second + << " , index dir is: " << dir->toString(); + } + return fmt::format( + "inverted index file writer debug string: index storage format is: {}, index path " + "prefix is: {}, rowset id is: {}, seg id is: {}, closed is: {}, total file size " + "is: {}, index dirs is: {}", + _storage_format, _index_path_prefix, _rowset_id, _seg_id, _closed, _total_file_size, + indices_dirs.str()); + } + private: InvertedIndexDirectoryMap _indices_dirs; const io::FileSystemSPtr _fs; @@ -82,14 +105,18 @@ class InvertedIndexFileWriter { std::string _rowset_id; int64_t _seg_id; InvertedIndexStorageFormatPB _storage_format; - // v1: all file size - // v2: file size - int64_t _total_file_size = 0; + // write to disk or stream - io::FileWriterPtr _idx_v2_writer; + 
io::FileWriterPtr _idx_v2_writer = nullptr; io::FileWriterOptions _opts; + // v1: all file size + // v2: file size + int64_t _total_file_size = 0; InvertedIndexFileInfo _file_info; + + // only once + bool _closed = false; }; } // namespace segment_v2 } // namespace doris diff --git a/be/src/olap/rowset/segment_v2/segment.cpp b/be/src/olap/rowset/segment_v2/segment.cpp index 68fe3190b817a1..469d0b9cf21ba3 100644 --- a/be/src/olap/rowset/segment_v2/segment.cpp +++ b/be/src/olap/rowset/segment_v2/segment.cpp @@ -86,10 +86,30 @@ std::string file_cache_key_str(const std::string& seg_path) { return file_cache_key_from_path(seg_path).to_string(); } -Status Segment::open(io::FileSystemSPtr fs, const std::string& path, uint32_t segment_id, - RowsetId rowset_id, TabletSchemaSPtr tablet_schema, +Status Segment::open(io::FileSystemSPtr fs, const std::string& path, int64_t tablet_id, + uint32_t segment_id, RowsetId rowset_id, TabletSchemaSPtr tablet_schema, const io::FileReaderOptions& reader_options, std::shared_ptr* output, InvertedIndexFileInfo idx_file_info) { + auto s = _open(fs, path, segment_id, rowset_id, tablet_schema, reader_options, output, + idx_file_info); + if (!s.ok()) { + if (!config::is_cloud_mode()) { + auto res = ExecEnv::get_tablet(tablet_id); + TabletSharedPtr tablet = + res.has_value() ? std::dynamic_pointer_cast(res.value()) : nullptr; + if (tablet) { + tablet->report_error(s); + } + } + } + + return s; +} + +Status Segment::_open(io::FileSystemSPtr fs, const std::string& path, uint32_t segment_id, + RowsetId rowset_id, TabletSchemaSPtr tablet_schema, + const io::FileReaderOptions& reader_options, std::shared_ptr* output, + InvertedIndexFileInfo idx_file_info) { io::FileReaderSPtr file_reader; RETURN_IF_ERROR(fs->open_file(path, &file_reader, &reader_options)); std::shared_ptr segment( diff --git a/be/src/olap/rowset/segment_v2/segment.h b/be/src/olap/rowset/segment_v2/segment.h index 13c8c86424f173..24f4230bc24719 100644 --- a/be/src/olap/rowset/segment_v2/segment.h +++ b/be/src/olap/rowset/segment_v2/segment.h @@ -80,8 +80,8 @@ using SegmentSharedPtr = std::shared_ptr; // change finished, client should disable all cached Segment for old TabletSchema. 
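// Illustrative sketch (not from this patch): the intended lifecycle of
// InvertedIndexFileWriter after the changes above. close() may be called
// exactly once (DCHECK(!_closed)), and get_index_file_total_size() /
// get_index_file_info() are only valid afterwards (DCHECK(_closed)). The
// helper name below is hypothetical; it mirrors what
// SegmentWriter::close_inverted_index() does later in this patch.
Status close_index_and_collect_size(InvertedIndexFileWriter* idx_writer,
                                    int64_t* index_size_out) {
    RETURN_IF_ERROR(idx_writer->close());  // first and only close, writes the compound index file
    *index_size_out = idx_writer->get_index_file_total_size();  // safe only after close()
    return Status::OK();
}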
class Segment : public std::enable_shared_from_this, public MetadataAdder { public: - static Status open(io::FileSystemSPtr fs, const std::string& path, uint32_t segment_id, - RowsetId rowset_id, TabletSchemaSPtr tablet_schema, + static Status open(io::FileSystemSPtr fs, const std::string& path, int64_t tablet_id, + uint32_t segment_id, RowsetId rowset_id, TabletSchemaSPtr tablet_schema, const io::FileReaderOptions& reader_options, std::shared_ptr* output, InvertedIndexFileInfo idx_file_info = {}); @@ -214,6 +214,10 @@ class Segment : public std::enable_shared_from_this, public MetadataAdd DISALLOW_COPY_AND_ASSIGN(Segment); Segment(uint32_t segment_id, RowsetId rowset_id, TabletSchemaSPtr tablet_schema, InvertedIndexFileInfo idx_file_info = InvertedIndexFileInfo()); + static Status _open(io::FileSystemSPtr fs, const std::string& path, uint32_t segment_id, + RowsetId rowset_id, TabletSchemaSPtr tablet_schema, + const io::FileReaderOptions& reader_options, + std::shared_ptr* output, InvertedIndexFileInfo idx_file_info); // open segment file and read the minimum amount of necessary information (footer) Status _open(); Status _parse_footer(SegmentFooterPB* footer); diff --git a/be/src/olap/rowset/segment_v2/segment_iterator.cpp b/be/src/olap/rowset/segment_v2/segment_iterator.cpp index 985cdc16e68f31..8f10eae03aeba1 100644 --- a/be/src/olap/rowset/segment_v2/segment_iterator.cpp +++ b/be/src/olap/rowset/segment_v2/segment_iterator.cpp @@ -497,7 +497,7 @@ Status SegmentIterator::_prepare_seek(const StorageReadOptions::KeyRange& key_ra } Status SegmentIterator::_get_row_ranges_by_column_conditions() { - SCOPED_RAW_TIMER(&_opts.stats->block_conditions_filtered_ns); + SCOPED_RAW_TIMER(&_opts.stats->generate_row_ranges_ns); if (_row_bitmap.isEmpty()) { return Status::OK(); } @@ -565,7 +565,7 @@ Status SegmentIterator::_get_row_ranges_from_conditions(RowRanges* condition_row size_t pre_size = 0; { - SCOPED_RAW_TIMER(&_opts.stats->block_conditions_filtered_bf_ns); + SCOPED_RAW_TIMER(&_opts.stats->generate_row_ranges_by_bf_ns); // first filter data by bloom filter index // bloom filter index only use CondColumn RowRanges bf_row_ranges = RowRanges::create_single(num_rows()); @@ -588,7 +588,7 @@ Status SegmentIterator::_get_row_ranges_from_conditions(RowRanges* condition_row } { - SCOPED_RAW_TIMER(&_opts.stats->block_conditions_filtered_zonemap_ns); + SCOPED_RAW_TIMER(&_opts.stats->generate_row_ranges_by_zonemap_ns); RowRanges zone_map_row_ranges = RowRanges::create_single(num_rows()); // second filter data by zone map for (const auto& cid : cids) { @@ -652,7 +652,7 @@ Status SegmentIterator::_get_row_ranges_from_conditions(RowRanges* condition_row } { - SCOPED_RAW_TIMER(&_opts.stats->block_conditions_filtered_dict_ns); + SCOPED_RAW_TIMER(&_opts.stats->generate_row_ranges_by_dict_ns); /// Low cardinality optimization is currently not very stable, so to prevent data corruption, /// we are temporarily disabling its use in data compaction. 
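// Illustrative sketch (not from this patch): what the renamed counters in the
// segment iterator's row-range generation measure. The candidate row set
// starts as the whole segment and is narrowed per index type, one timer per
// stage. intersect_with() is a hypothetical stand-in for the RowRanges
// intersection used there; stats is assumed to be the reader's
// OlapReaderStatistics.
void time_row_range_generation(OlapReaderStatistics* stats, RowRanges* ranges,
                               const RowRanges& bf_ranges,
                               const RowRanges& zone_map_ranges,
                               const RowRanges& dict_ranges) {
    SCOPED_RAW_TIMER(&stats->generate_row_ranges_ns);  // whole stage
    {
        SCOPED_RAW_TIMER(&stats->generate_row_ranges_by_bf_ns);  // bloom filter index
        intersect_with(ranges, bf_ranges);
    }
    {
        SCOPED_RAW_TIMER(&stats->generate_row_ranges_by_zonemap_ns);  // zone map index
        intersect_with(ranges, zone_map_ranges);
    }
    {
        SCOPED_RAW_TIMER(&stats->generate_row_ranges_by_dict_ns);  // dict filtering
        intersect_with(ranges, dict_ranges);
    }
}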
if (_opts.io_ctx.reader_type == ReaderType::READER_QUERY) { @@ -1402,7 +1402,7 @@ Status SegmentIterator::_vec_init_lazy_materialization() { if (!_is_common_expr_column[cid]) { _non_predicate_columns.push_back(cid); } else { - _second_read_column_ids.push_back(cid); + _non_predicate_column_ids.push_back(cid); } } } @@ -1412,13 +1412,13 @@ Status SegmentIterator::_vec_init_lazy_materialization() { if (_lazy_materialization_read) { // insert pred cid to first_read_columns for (auto cid : pred_column_ids) { - _first_read_column_ids.push_back(cid); + _predicate_column_ids.push_back(cid); } } else if (!_is_need_vec_eval && !_is_need_short_eval && !_is_need_expr_eval) { // no pred exists, just read and output column for (int i = 0; i < _schema->num_column_ids(); i++) { auto cid = _schema->column_id(i); - _first_read_column_ids.push_back(cid); + _predicate_column_ids.push_back(cid); } } else { if (_is_need_vec_eval || _is_need_short_eval) { @@ -1430,18 +1430,18 @@ Status SegmentIterator::_vec_init_lazy_materialization() { _short_cir_pred_column_ids.end()); pred_id_set.insert(_vec_pred_column_ids.begin(), _vec_pred_column_ids.end()); - DCHECK(_second_read_column_ids.empty()); - // _second_read_column_ids must be empty. Otherwise _lazy_materialization_read must not false. + DCHECK(_non_predicate_column_ids.empty()); + // _non_predicate_column_ids must be empty. Otherwise _lazy_materialization_read must not false. for (int i = 0; i < _schema->num_column_ids(); i++) { auto cid = _schema->column_id(i); if (pred_id_set.find(cid) != pred_id_set.end()) { - _first_read_column_ids.push_back(cid); + _predicate_column_ids.push_back(cid); } // In the past, if schema columns > pred columns, the _lazy_materialization_read maybe == false, but // we make sure using _lazy_materialization_read= true now, so these logic may never happens. I comment // these lines and we could delete them in the future to make the code more clear. // else if (non_pred_set.find(cid) != non_pred_set.end()) { - // _first_read_column_ids.push_back(cid); + // _predicate_column_ids.push_back(cid); // // when _lazy_materialization_read = false, non-predicate column should also be filtered by sel idx, so we regard it as pred columns // _is_pred_column[cid] = true; // } @@ -1449,7 +1449,7 @@ Status SegmentIterator::_vec_init_lazy_materialization() { } else if (_is_need_expr_eval) { DCHECK(!_is_need_vec_eval && !_is_need_short_eval); for (auto cid : _common_expr_columns) { - _first_read_column_ids.push_back(cid); + _predicate_column_ids.push_back(cid); } } } @@ -1635,7 +1635,7 @@ void SegmentIterator::_output_non_pred_columns(vectorized::Block* block) { * 1. Reads a batch of rowids (up to the specified limit), and checks if they are continuous. * Continuous here means that the rowids form an unbroken sequence (e.g., 1, 2, 3, 4...). * - * 2. For each column that needs to be read (identified by _first_read_column_ids): + * 2. For each column that needs to be read (identified by _predicate_column_ids): * - If the rowids are continuous, the function uses seek_to_ordinal and next_batch * for efficient reading. 
* - If the rowids are not continuous, the function processes them in smaller batches @@ -1648,13 +1648,13 @@ void SegmentIterator::_output_non_pred_columns(vectorized::Block* block) { */ Status SegmentIterator::_read_columns_by_index(uint32_t nrows_read_limit, uint32_t& nrows_read, bool set_block_rowid) { - SCOPED_RAW_TIMER(&_opts.stats->first_read_ns); + SCOPED_RAW_TIMER(&_opts.stats->predicate_column_read_ns); nrows_read = _range_iter->read_batch_rowids(_block_rowids.data(), nrows_read_limit); bool is_continuous = (nrows_read > 1) && (_block_rowids[nrows_read - 1] - _block_rowids[0] == nrows_read - 1); - for (auto cid : _first_read_column_ids) { + for (auto cid : _predicate_column_ids) { auto& column = _current_return_columns[cid]; if (_no_need_read_key_data(cid, column, nrows_read)) { continue; @@ -1679,9 +1679,9 @@ Status SegmentIterator::_read_columns_by_index(uint32_t nrows_read_limit, uint32 if (is_continuous) { size_t rows_read = nrows_read; - _opts.stats->block_first_read_seek_num += 1; + _opts.stats->predicate_column_read_seek_num += 1; if (_opts.runtime_state && _opts.runtime_state->enable_profile()) { - SCOPED_RAW_TIMER(&_opts.stats->block_first_read_seek_ns); + SCOPED_RAW_TIMER(&_opts.stats->predicate_column_read_seek_ns); RETURN_IF_ERROR(_column_iterators[cid]->seek_to_ordinal(_block_rowids[0])); } else { RETURN_IF_ERROR(_column_iterators[cid]->seek_to_ordinal(_block_rowids[0])); @@ -1703,9 +1703,9 @@ Status SegmentIterator::_read_columns_by_index(uint32_t nrows_read_limit, uint32 if (batch_continuous) { size_t rows_read = current_batch_size; - _opts.stats->block_first_read_seek_num += 1; + _opts.stats->predicate_column_read_seek_num += 1; if (_opts.runtime_state && _opts.runtime_state->enable_profile()) { - SCOPED_RAW_TIMER(&_opts.stats->block_first_read_seek_ns); + SCOPED_RAW_TIMER(&_opts.stats->predicate_column_read_seek_ns); RETURN_IF_ERROR( _column_iterators[cid]->seek_to_ordinal(_block_rowids[processed])); } else { @@ -2068,8 +2068,8 @@ Status SegmentIterator::_next_batch_internal(vectorized::Block* block) { RETURN_IF_ERROR(_read_columns_by_index( nrows_read_limit, _current_batch_rows_read, _lazy_materialization_read || _opts.record_rowids || _is_need_expr_eval)); - if (std::find(_first_read_column_ids.begin(), _first_read_column_ids.end(), - _schema->version_col_idx()) != _first_read_column_ids.end()) { + if (std::find(_predicate_column_ids.begin(), _predicate_column_ids.end(), + _schema->version_col_idx()) != _predicate_column_ids.end()) { _replace_version_col(_current_batch_rows_read); } @@ -2094,7 +2094,7 @@ Status SegmentIterator::_next_batch_internal(vectorized::Block* block) { if (_non_predicate_columns.empty()) { return Status::InternalError("_non_predicate_columns is empty"); } - RETURN_IF_ERROR(_convert_to_expected_type(_first_read_column_ids)); + RETURN_IF_ERROR(_convert_to_expected_type(_predicate_column_ids)); RETURN_IF_ERROR(_convert_to_expected_type(_non_predicate_columns)); _output_non_pred_columns(block); } else { @@ -2115,27 +2115,28 @@ Status SegmentIterator::_next_batch_internal(vectorized::Block* block) { if (selected_size > 0) { // step 3.1: output short circuit and predicate column - // when lazy materialization enables, _first_read_column_ids = distinct(_short_cir_pred_column_ids + _vec_pred_column_ids) + // when lazy materialization enables, _predicate_column_ids = distinct(_short_cir_pred_column_ids + _vec_pred_column_ids) // see _vec_init_lazy_materialization // todo(wb) need to tell input columnids from output columnids - 
RETURN_IF_ERROR(_output_column_by_sel_idx(block, _first_read_column_ids, + RETURN_IF_ERROR(_output_column_by_sel_idx(block, _predicate_column_ids, _sel_rowid_idx.data(), selected_size)); // step 3.2: read remaining expr column and evaluate it. if (_is_need_expr_eval) { // The predicate column contains the remaining expr column, no need second read. - if (!_second_read_column_ids.empty()) { - SCOPED_RAW_TIMER(&_opts.stats->second_read_ns); + if (!_non_predicate_column_ids.empty()) { + SCOPED_RAW_TIMER(&_opts.stats->non_predicate_read_ns); RETURN_IF_ERROR(_read_columns_by_rowids( - _second_read_column_ids, _block_rowids, _sel_rowid_idx.data(), + _non_predicate_column_ids, _block_rowids, _sel_rowid_idx.data(), selected_size, &_current_return_columns)); - if (std::find(_second_read_column_ids.begin(), - _second_read_column_ids.end(), _schema->version_col_idx()) != - _second_read_column_ids.end()) { + if (std::find(_non_predicate_column_ids.begin(), + _non_predicate_column_ids.end(), + _schema->version_col_idx()) != + _non_predicate_column_ids.end()) { _replace_version_col(selected_size); } - RETURN_IF_ERROR(_convert_to_expected_type(_second_read_column_ids)); - for (auto cid : _second_read_column_ids) { + RETURN_IF_ERROR(_convert_to_expected_type(_non_predicate_column_ids)); + for (auto cid : _non_predicate_column_ids) { auto loc = _schema_block_id_map[cid]; block->replace_by_position(loc, std::move(_current_return_columns[cid])); @@ -2168,17 +2169,17 @@ Status SegmentIterator::_next_batch_internal(vectorized::Block* block) { } } } else if (_is_need_expr_eval) { - RETURN_IF_ERROR(_convert_to_expected_type(_second_read_column_ids)); - for (auto cid : _second_read_column_ids) { + RETURN_IF_ERROR(_convert_to_expected_type(_non_predicate_column_ids)); + for (auto cid : _non_predicate_column_ids) { auto loc = _schema_block_id_map[cid]; block->replace_by_position(loc, std::move(_current_return_columns[cid])); } } } else if (_is_need_expr_eval) { - DCHECK(!_first_read_column_ids.empty()); - RETURN_IF_ERROR(_convert_to_expected_type(_first_read_column_ids)); + DCHECK(!_predicate_column_ids.empty()); + RETURN_IF_ERROR(_convert_to_expected_type(_predicate_column_ids)); // first read all rows are insert block, initialize sel_rowid_idx to all rows. 
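// Illustrative sketch (not from this patch): the read flow behind the renamed
// members. Under lazy materialization, predicate columns are read and
// filtered first; only the surviving row ids are used to fetch the remaining
// non-predicate columns. The helpers below are hypothetical stand-ins for the
// private SegmentIterator methods (_read_columns_by_index, predicate
// evaluation, _read_columns_by_rowids).
Status two_phase_read_sketch(const std::vector<ColumnId>& predicate_column_ids,
                             const std::vector<ColumnId>& non_predicate_column_ids) {
    // Phase 1: read predicate columns for the whole batch (timed by
    // predicate_column_read_ns / predicate_column_read_seek_*), then evaluate
    // predicates to produce the selected row ids.
    std::vector<uint32_t> selected_row_ids;
    RETURN_IF_ERROR(read_columns(predicate_column_ids));
    RETURN_IF_ERROR(evaluate_predicates(&selected_row_ids));
    // Phase 2: read non-predicate columns only for the selected rows (timed by
    // non_predicate_read_ns), avoiding IO for filtered-out rows.
    RETURN_IF_ERROR(read_columns_by_rowids(non_predicate_column_ids, selected_row_ids));
    return Status::OK();
}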
- for (auto cid : _first_read_column_ids) { + for (auto cid : _predicate_column_ids) { auto loc = _schema_block_id_map[cid]; block->replace_by_position(loc, std::move(_current_return_columns[cid])); } diff --git a/be/src/olap/rowset/segment_v2/segment_iterator.h b/be/src/olap/rowset/segment_v2/segment_iterator.h index c2e2139e8ad411..5626d15180c295 100644 --- a/be/src/olap/rowset/segment_v2/segment_iterator.h +++ b/be/src/olap/rowset/segment_v2/segment_iterator.h @@ -431,8 +431,8 @@ class SegmentIterator : public RowwiseIterator { // first, read predicate columns by various index // second, read non-predicate columns // so we need a field to stand for columns first time to read - std::vector _first_read_column_ids; - std::vector _second_read_column_ids; + std::vector _predicate_column_ids; + std::vector _non_predicate_column_ids; std::vector _columns_to_filter; std::vector _converted_column_ids; std::vector _schema_block_id_map; // map from schema column id to column idx in Block diff --git a/be/src/olap/rowset/segment_v2/segment_writer.cpp b/be/src/olap/rowset/segment_v2/segment_writer.cpp index 4301303dac9237..7f6c06fb057902 100644 --- a/be/src/olap/rowset/segment_v2/segment_writer.cpp +++ b/be/src/olap/rowset/segment_v2/segment_writer.cpp @@ -85,13 +85,14 @@ inline std::string segment_mem_tracker_name(uint32_t segment_id) { SegmentWriter::SegmentWriter(io::FileWriter* file_writer, uint32_t segment_id, TabletSchemaSPtr tablet_schema, BaseTabletSPtr tablet, DataDir* data_dir, const SegmentWriterOptions& opts, - io::FileWriterPtr inverted_file_writer) + InvertedIndexFileWriter* inverted_file_writer) : _segment_id(segment_id), _tablet_schema(std::move(tablet_schema)), _tablet(std::move(tablet)), _data_dir(data_dir), _opts(opts), _file_writer(file_writer), + _inverted_index_file_writer(inverted_file_writer), _mem_tracker(std::make_unique(segment_mem_tracker_name(segment_id))), _mow_context(std::move(opts.mow_ctx)) { CHECK_NOTNULL(file_writer); @@ -132,17 +133,6 @@ SegmentWriter::SegmentWriter(io::FileWriter* file_writer, uint32_t segment_id, } } } - if (_tablet_schema->has_inverted_index()) { - _inverted_index_file_writer = std::make_unique( - _opts.rowset_ctx->fs(), - std::string {InvertedIndexDescriptor::get_index_file_path_prefix( - file_writer->path().c_str())}, - _opts.rowset_ctx->rowset_id.to_string(), segment_id, - _tablet_schema->get_inverted_index_storage_format(), - std::move(inverted_file_writer)); - _inverted_index_file_writer->set_file_writer_opts( - _opts.rowset_ctx->get_file_writer_options()); - } } SegmentWriter::~SegmentWriter() { @@ -224,11 +214,12 @@ Status SegmentWriter::_create_column_writer(uint32_t cid, const TabletColumn& co opts.need_bloom_filter = false; opts.need_bitmap_index = false; } - opts.inverted_index_file_writer = _inverted_index_file_writer.get(); + opts.inverted_index_file_writer = _inverted_index_file_writer; for (const auto* index : opts.indexes) { if (!skip_inverted_index && index->index_type() == IndexType::INVERTED) { opts.inverted_index = index; opts.need_inverted_index = true; + DCHECK(_inverted_index_file_writer != nullptr); // TODO support multiple inverted index break; } @@ -1025,10 +1016,6 @@ Status SegmentWriter::finalize_footer(uint64_t* segment_file_size) { if (*segment_file_size == 0) { return Status::Corruption("Bad segment, file size = 0"); } - if (_inverted_index_file_writer != nullptr) { - RETURN_IF_ERROR(_inverted_index_file_writer->close()); - _inverted_index_file_info = _inverted_index_file_writer->get_index_file_info(); - } return 
Status::OK(); } @@ -1269,13 +1256,6 @@ Status SegmentWriter::_generate_short_key_index( return Status::OK(); } -int64_t SegmentWriter::get_inverted_index_total_size() { - if (_inverted_index_file_writer != nullptr) { - return _inverted_index_file_writer->get_index_file_total_size(); - } - return 0; -} - inline bool SegmentWriter::_is_mow() { return _tablet_schema->keys_type() == UNIQUE_KEYS && _opts.enable_unique_key_merge_on_write; } diff --git a/be/src/olap/rowset/segment_v2/segment_writer.h b/be/src/olap/rowset/segment_v2/segment_writer.h index bde087e0ed0d9e..9a8af131087f92 100644 --- a/be/src/olap/rowset/segment_v2/segment_writer.h +++ b/be/src/olap/rowset/segment_v2/segment_writer.h @@ -34,6 +34,7 @@ #include "gutil/strings/substitute.h" #include "olap/olap_define.h" #include "olap/rowset/segment_v2/column_writer.h" +#include "olap/rowset/segment_v2/inverted_index_file_writer.h" #include "olap/tablet.h" #include "olap/tablet_schema.h" #include "util/faststring.h" @@ -61,7 +62,6 @@ class FileWriter; } // namespace io namespace segment_v2 { -class InvertedIndexFileWriter; extern const char* k_segment_magic; extern const uint32_t k_segment_magic_length; @@ -84,7 +84,7 @@ class SegmentWriter { explicit SegmentWriter(io::FileWriter* file_writer, uint32_t segment_id, TabletSchemaSPtr tablet_schema, BaseTabletSPtr tablet, DataDir* data_dir, const SegmentWriterOptions& opts, - io::FileWriterPtr inverted_file_writer = nullptr); + InvertedIndexFileWriter* inverted_file_writer); ~SegmentWriter(); Status init(); @@ -113,9 +113,6 @@ class SegmentWriter { uint64_t estimate_segment_size(); - InvertedIndexFileInfo get_inverted_index_file_info() const { return _inverted_index_file_info; } - int64_t get_inverted_index_total_size(); - uint32_t num_rows_written() const { return _num_rows_written; } // for partial update @@ -147,6 +144,17 @@ class SegmentWriter { void set_mow_context(std::shared_ptr mow_context); + Status close_inverted_index(int64_t* inverted_index_file_size) { + // no inverted index + if (_inverted_index_file_writer == nullptr) { + *inverted_index_file_size = 0; + return Status::OK(); + } + RETURN_IF_ERROR(_inverted_index_file_writer->close()); + *inverted_index_file_size = _inverted_index_file_writer->get_index_file_total_size(); + return Status::OK(); + } + private: DISALLOW_COPY_AND_ASSIGN(SegmentWriter); Status _create_column_writer(uint32_t cid, const TabletColumn& column, @@ -202,13 +210,15 @@ class SegmentWriter { // Not owned. owned by RowsetWriter or SegmentFlusher io::FileWriter* _file_writer = nullptr; - std::unique_ptr _inverted_index_file_writer; + // Not owned. 
owned by RowsetWriter or SegmentFlusher + InvertedIndexFileWriter* _inverted_index_file_writer = nullptr; + SegmentFooterPB _footer; // for mow tables with cluster key, the sort key is the cluster keys not unique keys // for other tables, the sort key is the keys size_t _num_sort_key_columns; size_t _num_short_key_columns; - InvertedIndexFileInfo _inverted_index_file_info; + std::unique_ptr _short_key_index_builder; std::unique_ptr _primary_key_index_builder; std::vector> _column_writers; diff --git a/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp b/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp index ce033cdd0022d0..1b7416c5d2dffc 100644 --- a/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp +++ b/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp @@ -90,13 +90,14 @@ VerticalSegmentWriter::VerticalSegmentWriter(io::FileWriter* file_writer, uint32 TabletSchemaSPtr tablet_schema, BaseTabletSPtr tablet, DataDir* data_dir, const VerticalSegmentWriterOptions& opts, - io::FileWriterPtr inverted_file_writer) + InvertedIndexFileWriter* inverted_file_writer) : _segment_id(segment_id), _tablet_schema(std::move(tablet_schema)), _tablet(std::move(tablet)), _data_dir(data_dir), _opts(opts), _file_writer(file_writer), + _inverted_index_file_writer(inverted_file_writer), _mem_tracker(std::make_unique( vertical_segment_writer_mem_tracker_name(segment_id))), _mow_context(std::move(opts.mow_ctx)) { @@ -138,17 +139,6 @@ VerticalSegmentWriter::VerticalSegmentWriter(io::FileWriter* file_writer, uint32 } } } - if (_tablet_schema->has_inverted_index()) { - _inverted_index_file_writer = std::make_unique( - _opts.rowset_ctx->fs(), - std::string {InvertedIndexDescriptor::get_index_file_path_prefix( - _opts.rowset_ctx->segment_path(segment_id))}, - _opts.rowset_ctx->rowset_id.to_string(), segment_id, - _tablet_schema->get_inverted_index_storage_format(), - std::move(inverted_file_writer)); - _inverted_index_file_writer->set_file_writer_opts( - _opts.rowset_ctx->get_file_writer_options()); - } } VerticalSegmentWriter::~VerticalSegmentWriter() { @@ -222,11 +212,12 @@ Status VerticalSegmentWriter::_create_column_writer(uint32_t cid, const TabletCo if (!skip_inverted_index && index->index_type() == IndexType::INVERTED) { opts.inverted_index = index; opts.need_inverted_index = true; + DCHECK(_inverted_index_file_writer != nullptr); // TODO support multiple inverted index break; } } - opts.inverted_index_file_writer = _inverted_index_file_writer.get(); + opts.inverted_index_file_writer = _inverted_index_file_writer; #define CHECK_FIELD_TYPE(TYPE, type_name) \ if (column.type() == FieldType::OLAP_FIELD_TYPE_##TYPE) { \ @@ -1386,9 +1377,6 @@ Status VerticalSegmentWriter::finalize_columns_index(uint64_t* index_size) { *index_size = _file_writer->bytes_appended() - index_start; } - if (_inverted_index_file_writer != nullptr) { - _inverted_index_file_info = _inverted_index_file_writer->get_index_file_info(); - } // reset all column writers and data_conveter clear(); @@ -1463,9 +1451,6 @@ Status VerticalSegmentWriter::_write_inverted_index() { for (auto& column_writer : _column_writers) { RETURN_IF_ERROR(column_writer->write_inverted_index()); } - if (_inverted_index_file_writer != nullptr) { - RETURN_IF_ERROR(_inverted_index_file_writer->close()); - } return Status::OK(); } @@ -1552,13 +1537,6 @@ void VerticalSegmentWriter::_set_max_key(const Slice& key) { _max_key.append(key.get_data(), key.get_size()); } -int64_t VerticalSegmentWriter::get_inverted_index_total_size() { - if 
(_inverted_index_file_writer != nullptr) { - return _inverted_index_file_writer->get_index_file_total_size(); - } - return 0; -} - inline bool VerticalSegmentWriter::_is_mow() { return _tablet_schema->keys_type() == UNIQUE_KEYS && _opts.enable_unique_key_merge_on_write; } diff --git a/be/src/olap/rowset/segment_v2/vertical_segment_writer.h b/be/src/olap/rowset/segment_v2/vertical_segment_writer.h index 881a6cee5b41e1..951e9c2e2838c3 100644 --- a/be/src/olap/rowset/segment_v2/vertical_segment_writer.h +++ b/be/src/olap/rowset/segment_v2/vertical_segment_writer.h @@ -34,6 +34,7 @@ #include "gutil/strings/substitute.h" #include "olap/olap_define.h" #include "olap/rowset/segment_v2/column_writer.h" +#include "olap/rowset/segment_v2/inverted_index_file_writer.h" #include "olap/tablet.h" #include "olap/tablet_schema.h" #include "util/faststring.h" @@ -82,7 +83,7 @@ class VerticalSegmentWriter { explicit VerticalSegmentWriter(io::FileWriter* file_writer, uint32_t segment_id, TabletSchemaSPtr tablet_schema, BaseTabletSPtr tablet, DataDir* data_dir, const VerticalSegmentWriterOptions& opts, - io::FileWriterPtr inverted_file_writer = nullptr); + InvertedIndexFileWriter* inverted_file_writer); ~VerticalSegmentWriter(); VerticalSegmentWriter(const VerticalSegmentWriter&) = delete; @@ -99,9 +100,7 @@ class VerticalSegmentWriter { [[nodiscard]] std::string data_dir_path() const { return _data_dir == nullptr ? "" : _data_dir->path(); } - [[nodiscard]] InvertedIndexFileInfo get_inverted_index_file_info() const { - return _inverted_index_file_info; - } + [[nodiscard]] uint32_t num_rows_written() const { return _num_rows_written; } // for partial update @@ -122,10 +121,19 @@ class VerticalSegmentWriter { TabletSchemaSPtr flush_schema() const { return _flush_schema; }; - int64_t get_inverted_index_total_size(); - void clear(); + Status close_inverted_index(int64_t* inverted_index_file_size) { + // no inverted index + if (_inverted_index_file_writer == nullptr) { + *inverted_index_file_size = 0; + return Status::OK(); + } + RETURN_IF_ERROR(_inverted_index_file_writer->close()); + *inverted_index_file_size = _inverted_index_file_writer->get_index_file_total_size(); + return Status::OK(); + } + private: void _init_column_meta(ColumnMetaPB* meta, uint32_t column_id, const TabletColumn& column); Status _create_column_writer(uint32_t cid, const TabletColumn& column, @@ -213,14 +221,15 @@ class VerticalSegmentWriter { // Not owned. owned by RowsetWriter io::FileWriter* _file_writer = nullptr; - std::unique_ptr _inverted_index_file_writer; + // Not owned. 
owned by RowsetWriter or SegmentFlusher + InvertedIndexFileWriter* _inverted_index_file_writer = nullptr; SegmentFooterPB _footer; // for mow tables with cluster key, the sort key is the cluster keys not unique keys // for other tables, the sort key is the keys size_t _num_sort_key_columns; size_t _num_short_key_columns; - InvertedIndexFileInfo _inverted_index_file_info; + std::unique_ptr _short_key_index_builder; std::unique_ptr _primary_key_index_builder; std::vector> _column_writers; diff --git a/be/src/olap/rowset/vertical_beta_rowset_writer.cpp b/be/src/olap/rowset/vertical_beta_rowset_writer.cpp index ced0fb880c41fb..46070f8dccd7ce 100644 --- a/be/src/olap/rowset/vertical_beta_rowset_writer.cpp +++ b/be/src/olap/rowset/vertical_beta_rowset_writer.cpp @@ -138,7 +138,6 @@ Status VerticalBetaRowsetWriter::_flush_columns(segment_v2::SegmentWriter* se this->_segment_num_rows.resize(_cur_writer_idx + 1); this->_segment_num_rows[_cur_writer_idx] = _segment_writers[_cur_writer_idx]->row_count(); } - this->_total_index_size += static_cast(index_size); return Status::OK(); } @@ -164,26 +163,28 @@ Status VerticalBetaRowsetWriter::_create_segment_writer( int seg_id = this->_num_segment.fetch_add(1, std::memory_order_relaxed); - io::FileWriterPtr file_writer; - io::FileWriterOptions opts = this->_context.get_file_writer_options(); + io::FileWriterPtr segment_file_writer; + RETURN_IF_ERROR(BaseBetaRowsetWriter::create_file_writer(seg_id, segment_file_writer)); + DCHECK(segment_file_writer != nullptr); - auto path = context.segment_path(seg_id); - auto& fs = context.fs_ref(); - Status st = fs.create_file(path, &file_writer, &opts); - if (!st.ok()) { - LOG(WARNING) << "failed to create writable file. path=" << path << ", err: " << st; - return st; + InvertedIndexFileWriterPtr inverted_index_file_writer; + if (context.tablet_schema->has_inverted_index()) { + RETURN_IF_ERROR(RowsetWriter::create_inverted_index_file_writer( + seg_id, &inverted_index_file_writer)); } - DCHECK(file_writer != nullptr); segment_v2::SegmentWriterOptions writer_options; writer_options.enable_unique_key_merge_on_write = context.enable_unique_key_merge_on_write; writer_options.rowset_ctx = &context; writer_options.max_rows_per_segment = context.max_rows_per_segment; - *writer = std::make_unique(file_writer.get(), seg_id, - context.tablet_schema, context.tablet, - context.data_dir, writer_options); - RETURN_IF_ERROR(this->_seg_files.add(seg_id, std::move(file_writer))); + *writer = std::make_unique( + segment_file_writer.get(), seg_id, context.tablet_schema, context.tablet, + context.data_dir, writer_options, inverted_index_file_writer.get()); + + RETURN_IF_ERROR(this->_seg_files.add(seg_id, std::move(segment_file_writer))); + if (context.tablet_schema->has_inverted_index()) { + RETURN_IF_ERROR(this->_idx_files.add(seg_id, std::move(inverted_index_file_writer))); + } auto s = (*writer)->init(column_ids, is_key); if (!s.ok()) { @@ -205,10 +206,7 @@ Status VerticalBetaRowsetWriter::final_flush() { LOG(WARNING) << "Fail to finalize segment footer, " << st; return st; } - this->_total_data_size += segment_size + segment_writer->get_inverted_index_total_size(); - this->_total_index_size += segment_writer->get_inverted_index_total_size(); - this->_idx_files_info.add_file_info(segment_writer->get_segment_id(), - segment_writer->get_inverted_index_file_info()); + this->_total_data_size += segment_size; segment_writer.reset(); } return Status::OK(); @@ -217,6 +215,7 @@ Status VerticalBetaRowsetWriter::final_flush() { template requires 
std::is_base_of_v Status VerticalBetaRowsetWriter::_close_file_writers() { + RETURN_IF_ERROR(BaseBetaRowsetWriter::_close_inverted_index_file_writers()); return this->_seg_files.close(); } diff --git a/be/src/olap/single_replica_compaction.cpp b/be/src/olap/single_replica_compaction.cpp index ef93ab25caeac9..7470afe0ef62c7 100644 --- a/be/src/olap/single_replica_compaction.cpp +++ b/be/src/olap/single_replica_compaction.cpp @@ -149,11 +149,15 @@ Status SingleReplicaCompaction::_do_single_replica_compaction_impl() { LOG(INFO) << "succeed to do single replica compaction" << ". tablet=" << _tablet->tablet_id() << ", output_version=" << _output_version << ", current_max_version=" << current_max_version - << ", input_rowset_size=" << _input_rowsets_size + << ", input_rowsets_data_size=" << _input_rowsets_data_size + << ", input_rowsets_index_size=" << _input_rowsets_index_size + << ", input_rowsets_total_size=" << _input_rowsets_total_size << ", input_row_num=" << _input_row_num << ", input_segments_num=" << _input_num_segments - << ", _input_index_size=" << _input_index_size + << ", _input_index_size=" << _input_rowsets_index_size << ", output_rowset_data_size=" << _output_rowset->data_disk_size() + << ", output_rowset_index_size=" << _output_rowset->index_disk_size() + << ", output_rowset_total_size=" << _output_rowset->total_disk_size() << ", output_row_num=" << _output_rowset->num_rows() << ", output_segments_num=" << _output_rowset->num_segments(); return Status::OK(); @@ -264,10 +268,11 @@ bool SingleReplicaCompaction::_find_rowset_to_fetch(const std::vector& return false; } for (auto& rowset : _input_rowsets) { - _input_rowsets_size += rowset->data_disk_size(); + _input_rowsets_data_size += rowset->data_disk_size(); _input_row_num += rowset->num_rows(); _input_num_segments += rowset->num_segments(); - _input_index_size += rowset->index_disk_size(); + _input_rowsets_index_size += rowset->index_disk_size(); + _input_rowsets_total_size += rowset->data_disk_size() + rowset->index_disk_size(); } _output_version = *proper_version; } diff --git a/be/src/olap/tablet.cpp b/be/src/olap/tablet.cpp index 9dfb7940dcc916..7c69ba54831ce9 100644 --- a/be/src/olap/tablet.cpp +++ b/be/src/olap/tablet.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -35,6 +36,7 @@ #include #include #include +#include #include #include "common/compiler_util.h" // IWYU pragma: keep @@ -86,6 +88,7 @@ #include "olap/rowset/beta_rowset.h" #include "olap/rowset/rowset.h" #include "olap/rowset/rowset_factory.h" +#include "olap/rowset/rowset_fwd.h" #include "olap/rowset/rowset_meta.h" #include "olap/rowset/rowset_meta_manager.h" #include "olap/rowset/rowset_writer.h" @@ -329,6 +332,7 @@ Status Tablet::init() { // should save tablet meta to remote meta store // if it's a primary replica void Tablet::save_meta() { + check_table_size_correctness(); auto res = _tablet_meta->save_meta(_data_dir); CHECK_EQ(res, Status::OK()) << "fail to save tablet_meta. 
res=" << res << ", root=" << _data_dir->path(); @@ -1694,6 +1698,19 @@ void Tablet::build_tablet_report_info(TTabletInfo* tablet_info, } } +void Tablet::report_error(const Status& st) { + if (st.is()) { + ++_io_error_times; + } else if (st.is()) { + _io_error_times = config::max_tablet_io_errors + 1; + } else if (st.is()) { + check_tablet_path_exists(); + if (!_is_tablet_path_exists.load(std::memory_order_relaxed)) { + _io_error_times = config::max_tablet_io_errors + 1; + } + } +} + Status Tablet::prepare_compaction_and_calculate_permits( CompactionType compaction_type, const TabletSharedPtr& tablet, std::shared_ptr& compaction, int64_t& permits) { @@ -2034,8 +2051,8 @@ Status Tablet::_cooldown_data(RowsetSharedPtr rowset) { LOG(INFO) << "Upload rowset " << old_rowset->version() << " " << new_rowset_id.to_string() << " to " << storage_resource.fs->root_path().native() << ", tablet_id=" << tablet_id() << ", duration=" << duration.count() - << ", capacity=" << old_rowset->data_disk_size() - << ", tp=" << old_rowset->data_disk_size() / duration.count() + << ", capacity=" << old_rowset->total_disk_size() + << ", tp=" << old_rowset->total_disk_size() / duration.count() << ", old rowset_id=" << old_rowset->rowset_id().to_string(); // gen a new rowset @@ -2414,7 +2431,7 @@ RowsetSharedPtr Tablet::need_cooldown(int64_t* cooldown_timestamp, size_t* file_ // current time or it's datatime is less than current time if (newest_cooldown_time != 0 && newest_cooldown_time < UnixSeconds()) { *cooldown_timestamp = newest_cooldown_time; - *file_size = rowset->data_disk_size(); + *file_size = rowset->total_disk_size(); VLOG_DEBUG << "tablet need cooldown, tablet id: " << tablet_id() << " file_size: " << *file_size; return rowset; @@ -2724,4 +2741,120 @@ void Tablet::clear_cache() { } } +void Tablet::check_table_size_correctness() { + if (!config::enable_table_size_correctness_check) { + return; + } + const std::vector& all_rs_metas = _tablet_meta->all_rs_metas(); + for (const auto& rs_meta : all_rs_metas) { + int64_t total_segment_size = get_segment_file_size(rs_meta); + int64_t total_inverted_index_size = get_inverted_index_file_szie(rs_meta); + if (rs_meta->data_disk_size() != total_segment_size || + rs_meta->index_disk_size() != total_inverted_index_size || + rs_meta->data_disk_size() + rs_meta->index_disk_size() != rs_meta->total_disk_size()) { + LOG(WARNING) << "[Local table table size check failed]:" + << " tablet id: " << rs_meta->tablet_id() + << ", rowset id:" << rs_meta->rowset_id() + << ", rowset data disk size:" << rs_meta->data_disk_size() + << ", rowset real data disk size:" << total_segment_size + << ", rowset index disk size:" << rs_meta->index_disk_size() + << ", rowset real index disk size:" << total_inverted_index_size + << ", rowset total disk size:" << rs_meta->total_disk_size() + << ", rowset segment path:" + << StorageResource().remote_segment_path( + rs_meta->tablet_id(), rs_meta->rowset_id().to_string(), 0); + DCHECK(false); + } + } +} + +std::string Tablet::get_segment_path(const RowsetMetaSharedPtr& rs_meta, int64_t seg_id) { + std::string segment_path; + if (rs_meta->is_local()) { + segment_path = local_segment_path(_tablet_path, rs_meta->rowset_id().to_string(), seg_id); + } else { + segment_path = rs_meta->remote_storage_resource().value()->remote_segment_path( + rs_meta->tablet_id(), rs_meta->rowset_id().to_string(), seg_id); + } + return segment_path; +} + +int64_t Tablet::get_segment_file_size(const RowsetMetaSharedPtr& rs_meta) { + const auto& fs = rs_meta->fs(); + if (!fs) { 
+ LOG(WARNING) << "get fs failed, resource_id={}" << rs_meta->resource_id(); + } + int64_t total_segment_size = 0; + for (int64_t seg_id = 0; seg_id < rs_meta->num_segments(); seg_id++) { + std::string segment_path = get_segment_path(rs_meta, seg_id); + int64_t segment_file_size = 0; + auto st = fs->file_size(segment_path, &segment_file_size); + if (!st.ok()) { + segment_file_size = 0; + LOG(WARNING) << "table size correctness check get segment size failed! msg:" + << st.to_string() << ", segment path:" << segment_path; + } + total_segment_size += segment_file_size; + } + return total_segment_size; +} + +int64_t Tablet::get_inverted_index_file_szie(const RowsetMetaSharedPtr& rs_meta) { + const auto& fs = rs_meta->fs(); + if (!fs) { + LOG(WARNING) << "get fs failed, resource_id={}" << rs_meta->resource_id(); + } + int64_t total_inverted_index_size = 0; + + if (rs_meta->tablet_schema()->get_inverted_index_storage_format() == + InvertedIndexStorageFormatPB::V1) { + auto indices = rs_meta->tablet_schema()->indexes(); + for (auto& index : indices) { + // only get file_size for inverted index + if (index.index_type() != IndexType::INVERTED) { + continue; + } + for (int seg_id = 0; seg_id < rs_meta->num_segments(); ++seg_id) { + std::string segment_path = get_segment_path(rs_meta, seg_id); + int64_t file_size = 0; + + std::string inverted_index_file_path = + InvertedIndexDescriptor::get_index_file_path_v1( + InvertedIndexDescriptor::get_index_file_path_prefix(segment_path), + index.index_id(), index.get_index_suffix()); + auto st = fs->file_size(inverted_index_file_path, &file_size); + if (!st.ok()) { + file_size = 0; + LOG(WARNING) << " tablet id: " << get_tablet_info().tablet_id + << ", rowset id:" << rs_meta->rowset_id() + << ", table size correctness check get inverted index v1 " + "size failed! msg:" + << st.to_string() + << ", inverted index path:" << inverted_index_file_path; + } + total_inverted_index_size += file_size; + } + } + } else { + for (int seg_id = 0; seg_id < rs_meta->num_segments(); ++seg_id) { + int64_t file_size = 0; + std::string segment_path = get_segment_path(rs_meta, seg_id); + std::string inverted_index_file_path = InvertedIndexDescriptor::get_index_file_path_v2( + InvertedIndexDescriptor::get_index_file_path_prefix(segment_path)); + auto st = fs->file_size(inverted_index_file_path, &file_size); + if (!st.ok()) { + file_size = 0; + LOG(WARNING) << " tablet id: " << get_tablet_info().tablet_id + << ", rowset id:" << rs_meta->rowset_id() + << ", table size correctness check get inverted index v2 " + "size failed! 
msg:" + << st.to_string() + << ", inverted index path:" << inverted_index_file_path; + } + total_inverted_index_size += file_size; + } + } + return total_inverted_index_size; +} + } // namespace doris diff --git a/be/src/olap/tablet.h b/be/src/olap/tablet.h index 2b4daa5a4c35ac..e181af3d4d3a3d 100644 --- a/be/src/olap/tablet.h +++ b/be/src/olap/tablet.h @@ -451,13 +451,7 @@ class Tablet final : public BaseTablet { void gc_binlogs(int64_t version); Status ingest_binlog_metas(RowsetBinlogMetasPB* metas_pb); - inline void report_error(const Status& st) { - if (st.is()) { - ++_io_error_times; - } else if (st.is()) { - _io_error_times = config::max_tablet_io_errors + 1; - } - } + void report_error(const Status& st); inline int64_t get_io_error_times() const { return _io_error_times; } @@ -540,6 +534,10 @@ class Tablet final : public BaseTablet { //////////////////////////////////////////////////////////////////////////// void _clear_cache_by_rowset(const BetaRowsetSharedPtr& rowset); + void check_table_size_correctness(); + std::string get_segment_path(const RowsetMetaSharedPtr& rs_meta, int64_t seg_id); + int64_t get_segment_file_size(const RowsetMetaSharedPtr& rs_meta); + int64_t get_inverted_index_file_szie(const RowsetMetaSharedPtr& rs_meta); public: static const int64_t K_INVALID_CUMULATIVE_POINT = -1; diff --git a/be/src/olap/tablet_manager.cpp b/be/src/olap/tablet_manager.cpp index 64eb408c9e3dbd..b853401855ce94 100644 --- a/be/src/olap/tablet_manager.cpp +++ b/be/src/olap/tablet_manager.cpp @@ -101,7 +101,9 @@ TabletManager::TabletManager(StorageEngine& engine, int32_t tablet_map_lock_shar } TabletManager::~TabletManager() { +#ifndef BE_TEST DEREGISTER_HOOK_METRIC(tablet_meta_mem_consumption); +#endif } Status TabletManager::_add_tablet_unlocked(TTabletId tablet_id, const TabletSharedPtr& tablet, diff --git a/be/src/olap/tablet_meta.cpp b/be/src/olap/tablet_meta.cpp index 97e74211504d58..91f3b7dd8169bf 100644 --- a/be/src/olap/tablet_meta.cpp +++ b/be/src/olap/tablet_meta.cpp @@ -1189,6 +1189,9 @@ void DeleteBitmap::add_to_remove_queue( } void DeleteBitmap::remove_stale_delete_bitmap_from_queue(const std::vector& vector) { + if (!config::enable_delete_bitmap_merge_on_compaction) { + return; + } std::shared_lock l(stale_delete_bitmap_lock); // std::vector> to_delete; diff --git a/be/src/olap/tablet_meta.h b/be/src/olap/tablet_meta.h index 3c87fecb83cbd7..d56e529e42bf4b 100644 --- a/be/src/olap/tablet_meta.h +++ b/be/src/olap/tablet_meta.h @@ -119,6 +119,11 @@ class TabletMeta : public MetadataAdder { TabletMeta(const TabletMeta& tablet_meta); TabletMeta(TabletMeta&& tablet_meta) = delete; +// UT +#ifdef BE_TEST + TabletMeta(TabletSchemaSPtr tablet_schema) : _schema(tablet_schema) {} +#endif + // Function create_from_file is used to be compatible with previous tablet_meta. // Previous tablet_meta is a physical file in tablet dir, which is not stored in rocksdb. 
Status create_from_file(const std::string& file_path); @@ -637,7 +642,7 @@ inline size_t TabletMeta::num_rows() const { inline size_t TabletMeta::tablet_footprint() const { size_t total_size = 0; for (auto& rs : _rs_metas) { - total_size += rs->data_disk_size(); + total_size += rs->total_disk_size(); } return total_size; } @@ -646,7 +651,7 @@ inline size_t TabletMeta::tablet_local_size() const { size_t total_size = 0; for (auto& rs : _rs_metas) { if (rs->is_local()) { - total_size += rs->data_disk_size(); + total_size += rs->total_disk_size(); } } return total_size; @@ -656,7 +661,7 @@ inline size_t TabletMeta::tablet_remote_size() const { size_t total_size = 0; for (auto& rs : _rs_metas) { if (!rs->is_local()) { - total_size += rs->data_disk_size(); + total_size += rs->total_disk_size(); } } return total_size; diff --git a/be/src/olap/tablet_reader.h b/be/src/olap/tablet_reader.h index 87af3bb08eb36e..dd9d39d9decee0 100644 --- a/be/src/olap/tablet_reader.h +++ b/be/src/olap/tablet_reader.h @@ -167,7 +167,7 @@ class TabletReader { // used for compaction to record row ids bool record_rowids = false; - RowIdConversion* rowid_conversion; + RowIdConversion* rowid_conversion = nullptr; std::vector topn_filter_source_node_ids; int topn_filter_target_node_id = -1; // used for special optimization for query : ORDER BY key LIMIT n diff --git a/be/src/olap/task/engine_checksum_task.cpp b/be/src/olap/task/engine_checksum_task.cpp index d0c4b0e45f468e..05ecfc0401b6d0 100644 --- a/be/src/olap/task/engine_checksum_task.cpp +++ b/be/src/olap/task/engine_checksum_task.cpp @@ -93,7 +93,7 @@ Status EngineChecksumTask::_compute_checksum() { } size_t input_size = 0; for (const auto& rowset : input_rowsets) { - input_size += rowset->data_disk_size(); + input_size += rowset->total_disk_size(); } auto res = reader.init(reader_params); diff --git a/be/src/olap/task/index_builder.cpp b/be/src/olap/task/index_builder.cpp index 38a52d1d2118aa..09cbdeadb3f3c9 100644 --- a/be/src/olap/task/index_builder.cpp +++ b/be/src/olap/task/index_builder.cpp @@ -207,13 +207,12 @@ Status IndexBuilder::update_inverted_index_info() { InvertedIndexStorageFormatPB::V1) { if (_is_drop_op) { VLOG_DEBUG << "data_disk_size:" << input_rowset_meta->data_disk_size() - << " total_disk_size:" << input_rowset_meta->data_disk_size() + << " total_disk_size:" << input_rowset_meta->total_disk_size() << " index_disk_size:" << input_rowset_meta->index_disk_size() << " drop_index_size:" << drop_index_size; rowset_meta->set_total_disk_size(input_rowset_meta->total_disk_size() - drop_index_size); - rowset_meta->set_data_disk_size(input_rowset_meta->data_disk_size() - - drop_index_size); + rowset_meta->set_data_disk_size(input_rowset_meta->data_disk_size()); rowset_meta->set_index_disk_size(input_rowset_meta->index_disk_size() - drop_index_size); } else { @@ -238,7 +237,7 @@ Status IndexBuilder::update_inverted_index_info() { } rowset_meta->set_total_disk_size(input_rowset_meta->total_disk_size() - total_index_size); - rowset_meta->set_data_disk_size(input_rowset_meta->data_disk_size() - total_index_size); + rowset_meta->set_data_disk_size(input_rowset_meta->data_disk_size()); rowset_meta->set_index_disk_size(input_rowset_meta->index_disk_size() - total_index_size); } @@ -292,10 +291,20 @@ Status IndexBuilder::handle_single_rowset(RowsetMetaSharedPtr output_rowset_meta _tablet->tablet_path(), output_rowset_meta->rowset_id().to_string(), seg_ptr->id()))}; + std::string index_path = + InvertedIndexDescriptor::get_index_file_path_v2(index_path_prefix); + 
io::FileWriterPtr file_writer; + Status st = fs->create_file(index_path, &file_writer); + if (!st.ok()) { + LOG(WARNING) << "failed to create writable file. path=" << index_path + << ", err: " << st; + return st; + } auto inverted_index_file_writer = std::make_unique( fs, std::move(index_path_prefix), output_rowset_meta->rowset_id().to_string(), seg_ptr->id(), - output_rowset_schema->get_inverted_index_storage_format()); + output_rowset_schema->get_inverted_index_storage_format(), + std::move(file_writer)); RETURN_IF_ERROR(inverted_index_file_writer->initialize(dirs)); // create inverted index writer for (auto& index_meta : _dropped_inverted_indexes) { @@ -313,8 +322,7 @@ Status IndexBuilder::handle_single_rowset(RowsetMetaSharedPtr output_rowset_meta inverted_index_size += inverted_index_writer->get_index_file_total_size(); } _inverted_index_file_writers.clear(); - output_rowset_meta->set_data_disk_size(output_rowset_meta->data_disk_size() + - inverted_index_size); + output_rowset_meta->set_data_disk_size(output_rowset_meta->data_disk_size()); output_rowset_meta->set_total_disk_size(output_rowset_meta->total_disk_size() + inverted_index_size); output_rowset_meta->set_index_disk_size(output_rowset_meta->index_disk_size() + @@ -346,10 +354,20 @@ Status IndexBuilder::handle_single_rowset(RowsetMetaSharedPtr output_rowset_meta << seg_ptr->id() << " cannot be found"; continue; } + std::string index_path = + InvertedIndexDescriptor::get_index_file_path_v2(index_path_prefix); + io::FileWriterPtr file_writer; + Status st = fs->create_file(index_path, &file_writer); + if (!st.ok()) { + LOG(WARNING) << "failed to create writable file. path=" << index_path + << ", err: " << st; + return st; + } auto dirs = DORIS_TRY(idx_file_reader_iter->second->get_all_directories()); inverted_index_file_writer = std::make_unique( fs, index_path_prefix, output_rowset_meta->rowset_id().to_string(), - seg_ptr->id(), output_rowset_schema->get_inverted_index_storage_format()); + seg_ptr->id(), output_rowset_schema->get_inverted_index_storage_format(), + std::move(file_writer)); RETURN_IF_ERROR(inverted_index_file_writer->initialize(dirs)); } else { inverted_index_file_writer = std::make_unique( @@ -469,8 +487,7 @@ Status IndexBuilder::handle_single_rowset(RowsetMetaSharedPtr output_rowset_meta } _inverted_index_builders.clear(); _inverted_index_file_writers.clear(); - output_rowset_meta->set_data_disk_size(output_rowset_meta->data_disk_size() + - inverted_index_size); + output_rowset_meta->set_data_disk_size(output_rowset_meta->data_disk_size()); output_rowset_meta->set_total_disk_size(output_rowset_meta->total_disk_size() + inverted_index_size); output_rowset_meta->set_index_disk_size(output_rowset_meta->index_disk_size() + @@ -555,6 +572,13 @@ Status IndexBuilder::_add_nullable(const std::string& column_name, return Status::Error( "CLuceneError occured: {}", e.what()); } + // we should refresh nullmap for array + for (int row_id = 0; row_id < num_rows; row_id++) { + if (null_map && null_map[row_id] == 1) { + RETURN_IF_ERROR( + _inverted_index_builders[index_writer_sign]->add_array_nulls(row_id)); + } + } return Status::OK(); } diff --git a/be/src/pipeline/common/join_utils.h b/be/src/pipeline/common/join_utils.h index 5be3e4af2f374b..52c56abde1afef 100644 --- a/be/src/pipeline/common/join_utils.h +++ b/be/src/pipeline/common/join_utils.h @@ -80,51 +80,22 @@ struct JoinDataVariants { method_variant.emplace(); break; case HashKeyType::int8_key: - if (nullable) { - method_variant.emplace>( - get_key_sizes(data_types)); 
- } else { - method_variant.emplace(); - } + method_variant.emplace(); break; case HashKeyType::int16_key: - if (nullable) { - method_variant.emplace>( - get_key_sizes(data_types)); - } else { - method_variant.emplace(); - } + method_variant.emplace(); break; case HashKeyType::int32_key: - if (nullable) { - method_variant.emplace>( - get_key_sizes(data_types)); - } else { - method_variant.emplace(); - } + method_variant.emplace(); break; case HashKeyType::int64_key: - if (nullable) { - method_variant.emplace>( - get_key_sizes(data_types)); - } else { - method_variant.emplace(); - } + method_variant.emplace(); break; case HashKeyType::int128_key: - if (nullable) { - method_variant.emplace>( - get_key_sizes(data_types)); - } else { - method_variant.emplace(); - } + method_variant.emplace(); break; case HashKeyType::int256_key: - if (nullable) { - method_variant.emplace(); - } else { - method_variant.emplace(); - } + method_variant.emplace(); break; case HashKeyType::string_key: method_variant.emplace(); diff --git a/be/src/pipeline/dependency.h b/be/src/pipeline/dependency.h index a035d57a8379ea..c6100fe0d8b7cc 100644 --- a/be/src/pipeline/dependency.h +++ b/be/src/pipeline/dependency.h @@ -606,8 +606,9 @@ struct HashJoinSharedState : public JoinSharedState { ENABLE_FACTORY_CREATOR(HashJoinSharedState) // mark the join column whether support null eq std::vector is_null_safe_eq_join; + // mark the build hash table whether it needs to store null value - std::vector store_null_in_hash_table; + std::vector serialize_null_into_key; std::shared_ptr arena = std::make_shared(); // maybe share hash table with other fragment instances diff --git a/be/src/pipeline/exec/aggregation_sink_operator.cpp b/be/src/pipeline/exec/aggregation_sink_operator.cpp index 5fb14c025850b4..ccf24d0cb1e21c 100644 --- a/be/src/pipeline/exec/aggregation_sink_operator.cpp +++ b/be/src/pipeline/exec/aggregation_sink_operator.cpp @@ -63,17 +63,13 @@ Status AggSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo& info) { Base::profile(), "MemoryUsageSerializeKeyArena", TUnit::BYTES, 1); _build_timer = ADD_TIMER(Base::profile(), "BuildTime"); - _serialize_key_timer = ADD_TIMER(Base::profile(), "SerializeKeyTime"); - _exec_timer = ADD_TIMER(Base::profile(), "ExecTime"); _merge_timer = ADD_TIMER(Base::profile(), "MergeTime"); _expr_timer = ADD_TIMER(Base::profile(), "ExprTime"); - _serialize_data_timer = ADD_TIMER(Base::profile(), "SerializeDataTime"); _deserialize_data_timer = ADD_TIMER(Base::profile(), "DeserializeAndMergeTime"); _hash_table_compute_timer = ADD_TIMER(Base::profile(), "HashTableComputeTime"); _hash_table_limit_compute_timer = ADD_TIMER(Base::profile(), "DoLimitComputeTime"); _hash_table_emplace_timer = ADD_TIMER(Base::profile(), "HashTableEmplaceTime"); _hash_table_input_counter = ADD_COUNTER(Base::profile(), "HashTableInputCount", TUnit::UNIT); - _max_row_size_counter = ADD_COUNTER(Base::profile(), "MaxRowSizeInBytes", TUnit::UNIT); return Status::OK(); } diff --git a/be/src/pipeline/exec/aggregation_sink_operator.h b/be/src/pipeline/exec/aggregation_sink_operator.h index 8271f1451b4320..21ee640613789e 100644 --- a/be/src/pipeline/exec/aggregation_sink_operator.h +++ b/be/src/pipeline/exec/aggregation_sink_operator.h @@ -102,11 +102,8 @@ class AggSinkLocalState : public PipelineXSinkLocalState { RuntimeProfile::Counter* _hash_table_input_counter = nullptr; RuntimeProfile::Counter* _build_timer = nullptr; RuntimeProfile::Counter* _expr_timer = nullptr; - RuntimeProfile::Counter* _serialize_key_timer = 
nullptr; RuntimeProfile::Counter* _merge_timer = nullptr; - RuntimeProfile::Counter* _serialize_data_timer = nullptr; RuntimeProfile::Counter* _deserialize_data_timer = nullptr; - RuntimeProfile::Counter* _max_row_size_counter = nullptr; RuntimeProfile::Counter* _hash_table_memory_usage = nullptr; RuntimeProfile::Counter* _hash_table_size_counter = nullptr; RuntimeProfile::Counter* _serialize_key_arena_memory_usage = nullptr; @@ -152,7 +149,6 @@ class AggSinkOperatorX final : public DataSinkOperatorX { : DataDistribution(ExchangeType::HASH_SHUFFLE, _partition_exprs); } bool require_data_distribution() const override { return _is_colocate; } - bool require_shuffled_data_distribution() const override { return !_probe_expr_ctxs.empty(); } size_t get_revocable_mem_size(RuntimeState* state) const; AggregatedDataVariants* get_agg_data(RuntimeState* state) { diff --git a/be/src/pipeline/exec/aggregation_source_operator.cpp b/be/src/pipeline/exec/aggregation_source_operator.cpp index 6d4cd291079cb6..9feb3493068f97 100644 --- a/be/src/pipeline/exec/aggregation_source_operator.cpp +++ b/be/src/pipeline/exec/aggregation_source_operator.cpp @@ -30,20 +30,18 @@ namespace doris::pipeline { AggLocalState::AggLocalState(RuntimeState* state, OperatorXBase* parent) : Base(state, parent), _get_results_timer(nullptr), - _serialize_result_timer(nullptr), _hash_table_iterate_timer(nullptr), _insert_keys_to_column_timer(nullptr), - _serialize_data_timer(nullptr) {} + _insert_values_to_column_timer(nullptr) {} Status AggLocalState::init(RuntimeState* state, LocalStateInfo& info) { RETURN_IF_ERROR(Base::init(state, info)); SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); _get_results_timer = ADD_TIMER(profile(), "GetResultsTime"); - _serialize_result_timer = ADD_TIMER(profile(), "SerializeResultTime"); _hash_table_iterate_timer = ADD_TIMER(profile(), "HashTableIterateTime"); _insert_keys_to_column_timer = ADD_TIMER(profile(), "InsertKeysToColumnTime"); - _serialize_data_timer = ADD_TIMER(profile(), "SerializeDataTime"); + _insert_values_to_column_timer = ADD_TIMER(profile(), "InsertValuesToColumnTime"); _merge_timer = ADD_TIMER(Base::profile(), "MergeTime"); _deserialize_data_timer = ADD_TIMER(Base::profile(), "DeserializeAndMergeTime"); @@ -58,7 +56,7 @@ Status AggLocalState::init(RuntimeState* state, LocalStateInfo& info) { std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); } else { - _executor.get_result = std::bind(&AggLocalState::_serialize_without_key, this, + _executor.get_result = std::bind(&AggLocalState::_get_results_without_key, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); } @@ -69,8 +67,8 @@ Status AggLocalState::init(RuntimeState* state, LocalStateInfo& info) { std::placeholders::_2, std::placeholders::_3); } else { _executor.get_result = std::bind( - &AggLocalState::_serialize_with_serialized_key_result, this, - std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); + &AggLocalState::_get_results_with_serialized_key, this, std::placeholders::_1, + std::placeholders::_2, std::placeholders::_3); } } @@ -94,18 +92,9 @@ Status AggLocalState::_create_agg_status(vectorized::AggregateDataPtr data) { return Status::OK(); } -Status AggLocalState::_destroy_agg_status(vectorized::AggregateDataPtr data) { - auto& shared_state = *Base::_shared_state; - for (int i = 0; i < shared_state.aggregate_evaluators.size(); ++i) { - shared_state.aggregate_evaluators[i]->function()->destroy( - data + shared_state.offsets_of_aggregate_states[i]); - 
} - return Status::OK(); -} - -Status AggLocalState::_serialize_with_serialized_key_result(RuntimeState* state, - vectorized::Block* block, bool* eos) { - SCOPED_TIMER(_serialize_result_timer); +Status AggLocalState::_get_results_with_serialized_key(RuntimeState* state, + vectorized::Block* block, bool* eos) { + SCOPED_TIMER(_get_results_timer); auto& shared_state = *_shared_state; size_t key_size = _shared_state->probe_expr_ctxs.size(); size_t agg_size = _shared_state->aggregate_evaluators.size(); @@ -125,7 +114,6 @@ Status AggLocalState::_serialize_with_serialized_key_result(RuntimeState* state, } } - SCOPED_TIMER(_get_results_timer); std::visit( vectorized::Overload { [&](std::monostate& arg) -> void { @@ -181,7 +169,7 @@ Status AggLocalState::_serialize_with_serialized_key_result(RuntimeState* state, } { - SCOPED_TIMER(_serialize_data_timer); + SCOPED_TIMER(_insert_values_to_column_timer); for (size_t i = 0; i < shared_state.aggregate_evaluators.size(); ++i) { value_data_types[i] = shared_state.aggregate_evaluators[i] ->function() @@ -333,13 +321,13 @@ Status AggLocalState::_get_with_serialized_key_result(RuntimeState* state, vecto return Status::OK(); } -Status AggLocalState::_serialize_without_key(RuntimeState* state, vectorized::Block* block, - bool* eos) { +Status AggLocalState::_get_results_without_key(RuntimeState* state, vectorized::Block* block, + bool* eos) { + SCOPED_TIMER(_get_results_timer); auto& shared_state = *_shared_state; // 1. `child(0)->rows_returned() == 0` mean not data from child // in level two aggregation node should return NULL result // level one aggregation node set `eos = true` return directly - SCOPED_TIMER(_serialize_result_timer); if (UNLIKELY(_shared_state->input_num_rows == 0)) { *eos = true; return Status::OK(); @@ -573,17 +561,6 @@ template Status AggSourceOperatorX::merge_with_serialized_key_helper( template Status AggSourceOperatorX::merge_with_serialized_key_helper( RuntimeState* state, vectorized::Block* block); -size_t AggLocalState::_get_hash_table_size() { - return std::visit( - vectorized::Overload {[&](std::monostate& arg) -> size_t { - throw doris::Exception(ErrorCode::INTERNAL_ERROR, - "uninited hash table"); - return 0; - }, - [&](auto& agg_method) { return agg_method.hash_table->size(); }}, - _shared_state->agg_data->method_variant); -} - void AggLocalState::_emplace_into_hash_table(vectorized::AggregateDataPtr* places, vectorized::ColumnRawPtrs& key_columns, size_t num_rows) { diff --git a/be/src/pipeline/exec/aggregation_source_operator.h b/be/src/pipeline/exec/aggregation_source_operator.h index 473a051ae3574d..6de2bf93dbc758 100644 --- a/be/src/pipeline/exec/aggregation_source_operator.h +++ b/be/src/pipeline/exec/aggregation_source_operator.h @@ -47,13 +47,12 @@ class AggLocalState final : public PipelineXLocalState { friend class AggSourceOperatorX; Status _get_without_key_result(RuntimeState* state, vectorized::Block* block, bool* eos); - Status _serialize_without_key(RuntimeState* state, vectorized::Block* block, bool* eos); + Status _get_results_without_key(RuntimeState* state, vectorized::Block* block, bool* eos); Status _get_with_serialized_key_result(RuntimeState* state, vectorized::Block* block, bool* eos); - Status _serialize_with_serialized_key_result(RuntimeState* state, vectorized::Block* block, - bool* eos); + Status _get_results_with_serialized_key(RuntimeState* state, vectorized::Block* block, + bool* eos); Status _create_agg_status(vectorized::AggregateDataPtr data); - Status 
_destroy_agg_status(vectorized::AggregateDataPtr data); void _make_nullable_output_key(vectorized::Block* block) { if (block->rows() != 0) { auto& shared_state = *Base ::_shared_state; @@ -68,16 +67,14 @@ class AggLocalState final : public PipelineXLocalState { vectorized::ColumnRawPtrs& key_columns, size_t num_rows); void _emplace_into_hash_table(vectorized::AggregateDataPtr* places, vectorized::ColumnRawPtrs& key_columns, size_t num_rows); - size_t _get_hash_table_size(); vectorized::PODArray _places; std::vector _deserialize_buffer; RuntimeProfile::Counter* _get_results_timer = nullptr; - RuntimeProfile::Counter* _serialize_result_timer = nullptr; RuntimeProfile::Counter* _hash_table_iterate_timer = nullptr; RuntimeProfile::Counter* _insert_keys_to_column_timer = nullptr; - RuntimeProfile::Counter* _serialize_data_timer = nullptr; + RuntimeProfile::Counter* _insert_values_to_column_timer = nullptr; RuntimeProfile::Counter* _hash_table_compute_timer = nullptr; RuntimeProfile::Counter* _hash_table_emplace_timer = nullptr; diff --git a/be/src/pipeline/exec/analytic_sink_operator.cpp b/be/src/pipeline/exec/analytic_sink_operator.cpp index afe9aeab8fdb84..abde34a1d0255b 100644 --- a/be/src/pipeline/exec/analytic_sink_operator.cpp +++ b/be/src/pipeline/exec/analytic_sink_operator.cpp @@ -30,8 +30,10 @@ Status AnalyticSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo& inf RETURN_IF_ERROR(PipelineXSinkLocalState::init(state, info)); SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); - _blocks_memory_usage = ADD_COUNTER_WITH_LEVEL(_profile, "MemoryUsageBlocks", TUnit::BYTES, 1); - _evaluation_timer = ADD_TIMER(profile(), "EvaluationTime"); + _evaluation_timer = ADD_TIMER(profile(), "GetPartitionBoundTime"); + _compute_agg_data_timer = ADD_TIMER(profile(), "ComputeAggDataTime"); + _compute_partition_by_timer = ADD_TIMER(profile(), "ComputePartitionByTime"); + _compute_order_by_timer = ADD_TIMER(profile(), "ComputeOrderByTime"); return Status::OK(); } @@ -288,35 +290,41 @@ Status AnalyticSinkOperatorX::sink(doris::RuntimeState* state, vectorized::Block } } - for (size_t i = 0; i < _agg_functions_size; - ++i) { //insert _agg_input_columns, execute calculate for its - for (size_t j = 0; j < local_state._agg_expr_ctxs[i].size(); ++j) { - RETURN_IF_ERROR(_insert_range_column( - input_block, local_state._agg_expr_ctxs[i][j], - local_state._shared_state->agg_input_columns[i][j].get(), block_rows)); + { + SCOPED_TIMER(local_state._compute_agg_data_timer); + for (size_t i = 0; i < _agg_functions_size; + ++i) { //insert _agg_input_columns, execute calculate for its + for (size_t j = 0; j < local_state._agg_expr_ctxs[i].size(); ++j) { + RETURN_IF_ERROR(_insert_range_column( + input_block, local_state._agg_expr_ctxs[i][j], + local_state._shared_state->agg_input_columns[i][j].get(), block_rows)); + } } } - //record column idx in block - for (size_t i = 0; i < local_state._shared_state->partition_by_eq_expr_ctxs.size(); ++i) { - int result_col_id = -1; - RETURN_IF_ERROR(local_state._shared_state->partition_by_eq_expr_ctxs[i]->execute( - input_block, &result_col_id)); - DCHECK_GE(result_col_id, 0); - local_state._shared_state->partition_by_column_idxs[i] = result_col_id; + { + SCOPED_TIMER(local_state._compute_partition_by_timer); + for (size_t i = 0; i < local_state._shared_state->partition_by_eq_expr_ctxs.size(); ++i) { + int result_col_id = -1; + RETURN_IF_ERROR(local_state._shared_state->partition_by_eq_expr_ctxs[i]->execute( + input_block, &result_col_id)); + DCHECK_GE(result_col_id, 
0); + local_state._shared_state->partition_by_column_idxs[i] = result_col_id; + } } - for (size_t i = 0; i < local_state._shared_state->order_by_eq_expr_ctxs.size(); ++i) { - int result_col_id = -1; - RETURN_IF_ERROR(local_state._shared_state->order_by_eq_expr_ctxs[i]->execute( - input_block, &result_col_id)); - DCHECK_GE(result_col_id, 0); - local_state._shared_state->ordey_by_column_idxs[i] = result_col_id; + { + SCOPED_TIMER(local_state._compute_order_by_timer); + for (size_t i = 0; i < local_state._shared_state->order_by_eq_expr_ctxs.size(); ++i) { + int result_col_id = -1; + RETURN_IF_ERROR(local_state._shared_state->order_by_eq_expr_ctxs[i]->execute( + input_block, &result_col_id)); + DCHECK_GE(result_col_id, 0); + local_state._shared_state->ordey_by_column_idxs[i] = result_col_id; + } } - int64_t block_mem_usage = input_block->allocated_bytes(); - COUNTER_UPDATE(local_state._memory_used_counter, block_mem_usage); + COUNTER_UPDATE(local_state._memory_used_counter, input_block->allocated_bytes()); COUNTER_SET(local_state._peak_memory_usage_counter, local_state._memory_used_counter->value()); - COUNTER_UPDATE(local_state._blocks_memory_usage, block_mem_usage); //TODO: if need improvement, the is a tips to maintain a free queue, //so the memory could reuse, no need to new/delete again; diff --git a/be/src/pipeline/exec/analytic_sink_operator.h b/be/src/pipeline/exec/analytic_sink_operator.h index 1a0a671cf9fcaa..e04b220ee351e7 100644 --- a/be/src/pipeline/exec/analytic_sink_operator.h +++ b/be/src/pipeline/exec/analytic_sink_operator.h @@ -58,7 +58,9 @@ class AnalyticSinkLocalState : public PipelineXSinkLocalState _agg_expr_ctxs; }; @@ -88,9 +90,6 @@ class AnalyticSinkOperatorX final : public DataSinkOperatorXAddHighWaterMarkCounter("MemoryUsageBlocks", TUnit::BYTES, "", 1); - _evaluation_timer = ADD_TIMER(profile(), "EvaluationTime"); + _evaluation_timer = ADD_TIMER(profile(), "GetPartitionBoundTime"); + _execute_timer = ADD_TIMER(profile(), "ExecuteTime"); + _get_next_timer = ADD_TIMER(profile(), "GetNextTime"); + _get_result_timer = ADD_TIMER(profile(), "GetResultsTime"); return Status::OK(); } @@ -233,12 +236,6 @@ Status AnalyticLocalState::open(RuntimeState* state) { std::placeholders::_1); } } - _executor.insert_result = - std::bind(&AnalyticLocalState::_insert_result_info, this, std::placeholders::_1); - _executor.execute = - std::bind(&AnalyticLocalState::_execute_for_win_func, this, std::placeholders::_1, - std::placeholders::_2, std::placeholders::_3, std::placeholders::_4); - _create_agg_status(); return Status::OK(); } @@ -282,6 +279,7 @@ void AnalyticLocalState::_destroy_agg_status() { void AnalyticLocalState::_execute_for_win_func(int64_t partition_start, int64_t partition_end, int64_t frame_start, int64_t frame_end) { + SCOPED_TIMER(_execute_timer); for (size_t i = 0; i < _agg_functions_size; ++i) { std::vector agg_columns; for (int j = 0; j < _shared_state->agg_input_columns[i].size(); ++j) { @@ -300,6 +298,7 @@ void AnalyticLocalState::_execute_for_win_func(int64_t partition_start, int64_t } void AnalyticLocalState::_insert_result_info(int64_t current_block_rows) { + SCOPED_TIMER(_get_result_timer); int64_t current_block_row_pos = _shared_state->input_block_first_row_positions[_output_block_index]; int64_t get_result_start = _shared_state->current_row_position - current_block_row_pos; @@ -344,6 +343,7 @@ void AnalyticLocalState::_insert_result_info(int64_t current_block_rows) { } Status AnalyticLocalState::_get_next_for_rows(size_t current_block_rows) { + 
SCOPED_TIMER(_get_next_timer); while (_shared_state->current_row_position < _shared_state->partition_by_end.pos && _window_end_position < current_block_rows) { int64_t range_start, range_end; @@ -367,31 +367,33 @@ Status AnalyticLocalState::_get_next_for_rows(size_t current_block_rows) { // Make sure range_start <= range_end range_start = std::min(range_start, range_end); } - _executor.execute(_partition_by_start.pos, _shared_state->partition_by_end.pos, range_start, - range_end); - _executor.insert_result(current_block_rows); + _execute_for_win_func(_partition_by_start.pos, _shared_state->partition_by_end.pos, + range_start, range_end); + _insert_result_info(current_block_rows); } return Status::OK(); } Status AnalyticLocalState::_get_next_for_partition(size_t current_block_rows) { + SCOPED_TIMER(_get_next_timer); if (_next_partition) { - _executor.execute(_partition_by_start.pos, _shared_state->partition_by_end.pos, - _partition_by_start.pos, _shared_state->partition_by_end.pos); + _execute_for_win_func(_partition_by_start.pos, _shared_state->partition_by_end.pos, + _partition_by_start.pos, _shared_state->partition_by_end.pos); } - _executor.insert_result(current_block_rows); + _insert_result_info(current_block_rows); return Status::OK(); } Status AnalyticLocalState::_get_next_for_range(size_t current_block_rows) { + SCOPED_TIMER(_get_next_timer); while (_shared_state->current_row_position < _shared_state->partition_by_end.pos && _window_end_position < current_block_rows) { if (_shared_state->current_row_position >= _order_by_end.pos) { _update_order_by_range(); - _executor.execute(_partition_by_start.pos, _shared_state->partition_by_end.pos, - _order_by_start.pos, _order_by_end.pos); + _execute_for_win_func(_partition_by_start.pos, _shared_state->partition_by_end.pos, + _order_by_start.pos, _order_by_end.pos); } - _executor.insert_result(current_block_rows); + _insert_result_info(current_block_rows); } return Status::OK(); } @@ -536,7 +538,7 @@ Status AnalyticSourceOperatorX::get_block(RuntimeState* state, vectorized::Block local_state.init_result_columns(); size_t current_block_rows = local_state._shared_state->input_blocks[local_state._output_block_index].rows(); - static_cast(local_state._executor.get_next(current_block_rows)); + RETURN_IF_ERROR(local_state._executor.get_next(current_block_rows)); if (local_state._window_end_position == current_block_rows) { break; } diff --git a/be/src/pipeline/exec/analytic_source_operator.h b/be/src/pipeline/exec/analytic_source_operator.h index 0080ad5e03c8b0..8f44b77f567e55 100644 --- a/be/src/pipeline/exec/analytic_source_operator.h +++ b/be/src/pipeline/exec/analytic_source_operator.h @@ -96,17 +96,15 @@ class AnalyticLocalState final : public PipelineXLocalState std::vector _agg_functions; RuntimeProfile::Counter* _evaluation_timer = nullptr; + RuntimeProfile::Counter* _execute_timer = nullptr; + RuntimeProfile::Counter* _get_next_timer = nullptr; + RuntimeProfile::Counter* _get_result_timer = nullptr; RuntimeProfile::HighWaterMarkCounter* _blocks_memory_usage = nullptr; - using vectorized_execute = std::function; using vectorized_get_next = std::function; - using vectorized_get_result = std::function; struct executor { - vectorized_execute execute; vectorized_get_next get_next; - vectorized_get_result insert_result; }; executor _executor; diff --git a/be/src/pipeline/exec/cache_source_operator.cpp b/be/src/pipeline/exec/cache_source_operator.cpp index 2e9b21976f841a..cace8465fc2d46 100644 --- a/be/src/pipeline/exec/cache_source_operator.cpp 
+++ b/be/src/pipeline/exec/cache_source_operator.cpp @@ -65,7 +65,7 @@ Status CacheSourceLocalState::init(RuntimeState* state, LocalStateInfo& info) { // 3. lookup the cache and find proper slot order hit_cache = QueryCache::instance()->lookup(_cache_key, _version, &_query_cache_handle); - _runtime_profile->add_info_string("HitCache", hit_cache ? "1" : "0"); + _runtime_profile->add_info_string("HitCache", std::to_string(hit_cache)); if (hit_cache && !cache_param.force_refresh_query_cache) { _hit_cache_results = _query_cache_handle.get_cache_result(); auto hit_cache_slot_orders = _query_cache_handle.get_cache_slot_orders(); @@ -125,13 +125,16 @@ Status CacheSourceOperatorX::get_block(RuntimeState* state, vectorized::Block* b if (local_state._hit_cache_results == nullptr) { Defer insert_cache([&] { - if (*eos && local_state._need_insert_cache) { - local_state._runtime_profile->add_info_string("InsertCache", "1"); - local_state._global_cache->insert(local_state._cache_key, local_state._version, - local_state._local_cache_blocks, - local_state._slot_orders, - local_state._current_query_cache_bytes); - local_state._local_cache_blocks.clear(); + if (*eos) { + local_state._runtime_profile->add_info_string( + "InsertCache", std::to_string(local_state._need_insert_cache)); + if (local_state._need_insert_cache) { + local_state._global_cache->insert(local_state._cache_key, local_state._version, + local_state._local_cache_blocks, + local_state._slot_orders, + local_state._current_query_cache_bytes); + local_state._local_cache_blocks.clear(); + } } }); @@ -162,7 +165,6 @@ Status CacheSourceOperatorX::get_block(RuntimeState* state, vectorized::Block* b // over the max bytes, pass through the data, no need to do cache local_state._local_cache_blocks.clear(); local_state._need_insert_cache = false; - local_state._runtime_profile->add_info_string("InsertCache", "0"); } else { local_state._local_cache_blocks.emplace_back(std::move(output_block)); } diff --git a/be/src/pipeline/exec/datagen_operator.cpp b/be/src/pipeline/exec/datagen_operator.cpp index faa6359e87490b..d400953799e5bb 100644 --- a/be/src/pipeline/exec/datagen_operator.cpp +++ b/be/src/pipeline/exec/datagen_operator.cpp @@ -36,7 +36,9 @@ DataGenSourceOperatorX::DataGenSourceOperatorX(ObjectPool* pool, const TPlanNode : OperatorX(pool, tnode, operator_id, descs), _tuple_id(tnode.data_gen_scan_node.tuple_id), _tuple_desc(nullptr), - _runtime_filter_descs(tnode.runtime_filters) {} + _runtime_filter_descs(tnode.runtime_filters) { + _is_serial_operator = tnode.__isset.is_serial_operator && tnode.is_serial_operator; +} Status DataGenSourceOperatorX::init(const TPlanNode& tnode, RuntimeState* state) { RETURN_IF_ERROR(OperatorX::init(tnode, state)); @@ -68,17 +70,25 @@ Status DataGenSourceOperatorX::get_block(RuntimeState* state, vectorized::Block* RETURN_IF_CANCELLED(state); auto& local_state = get_local_state(state); SCOPED_TIMER(local_state.exec_time_counter()); - Status res = local_state._table_func->get_next(state, block, eos); - RETURN_IF_ERROR(vectorized::VExprContext::filter_block(local_state._conjuncts, block, - block->columns())); + { + SCOPED_TIMER(local_state._table_function_execution_timer); + RETURN_IF_ERROR(local_state._table_func->get_next(state, block, eos)); + } + { + SCOPED_TIMER(local_state._filter_timer); + RETURN_IF_ERROR(vectorized::VExprContext::filter_block(local_state._conjuncts, block, + block->columns())); + } local_state.reached_limit(block, eos); - return res; + return Status::OK(); } Status 
DataGenLocalState::init(RuntimeState* state, LocalStateInfo& info) { SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); RETURN_IF_ERROR(PipelineXLocalState<>::init(state, info)); + _table_function_execution_timer = ADD_TIMER(profile(), "TableFunctionExecutionTime"); + _filter_timer = ADD_TIMER(profile(), "FilterTime"); auto& p = _parent->cast(); _table_func = std::make_shared(p._tuple_id, p._tuple_desc); _table_func->set_tuple_desc(p._tuple_desc); @@ -87,8 +97,8 @@ Status DataGenLocalState::init(RuntimeState* state, LocalStateInfo& info) { // TODO: use runtime filter to filte result block, maybe this node need derive from vscan_node. for (const auto& filter_desc : p._runtime_filter_descs) { std::shared_ptr runtime_filter; - RETURN_IF_ERROR(state->register_consumer_runtime_filter( - filter_desc, p.ignore_data_distribution(), p.node_id(), &runtime_filter)); + RETURN_IF_ERROR(state->register_consumer_runtime_filter(filter_desc, p.is_serial_operator(), + p.node_id(), &runtime_filter)); runtime_filter->init_profile(_runtime_profile.get()); } return Status::OK(); diff --git a/be/src/pipeline/exec/datagen_operator.h b/be/src/pipeline/exec/datagen_operator.h index c63ef97bb7a40f..bada5ec4080d08 100644 --- a/be/src/pipeline/exec/datagen_operator.h +++ b/be/src/pipeline/exec/datagen_operator.h @@ -44,6 +44,8 @@ class DataGenLocalState final : public PipelineXLocalState<> { private: friend class DataGenSourceOperatorX; std::shared_ptr _table_func; + RuntimeProfile::Counter* _table_function_execution_timer = nullptr; + RuntimeProfile::Counter* _filter_timer = nullptr; }; class DataGenSourceOperatorX final : public OperatorX { diff --git a/be/src/pipeline/exec/distinct_streaming_aggregation_operator.cpp b/be/src/pipeline/exec/distinct_streaming_aggregation_operator.cpp index a59af8ce7b474a..642b669263456d 100644 --- a/be/src/pipeline/exec/distinct_streaming_aggregation_operator.cpp +++ b/be/src/pipeline/exec/distinct_streaming_aggregation_operator.cpp @@ -71,7 +71,6 @@ Status DistinctStreamingAggLocalState::init(RuntimeState* state, LocalStateInfo& SCOPED_TIMER(Base::exec_time_counter()); SCOPED_TIMER(Base::_init_timer); _build_timer = ADD_TIMER(Base::profile(), "BuildTime"); - _exec_timer = ADD_TIMER(Base::profile(), "ExecTime"); _hash_table_compute_timer = ADD_TIMER(Base::profile(), "HashTableComputeTime"); _hash_table_emplace_timer = ADD_TIMER(Base::profile(), "HashTableEmplaceTime"); _hash_table_input_counter = ADD_COUNTER(Base::profile(), "HashTableInputCount", TUnit::UNIT); diff --git a/be/src/pipeline/exec/distinct_streaming_aggregation_operator.h b/be/src/pipeline/exec/distinct_streaming_aggregation_operator.h index 1f7a21190ad769..4c5fcd5efa74b9 100644 --- a/be/src/pipeline/exec/distinct_streaming_aggregation_operator.h +++ b/be/src/pipeline/exec/distinct_streaming_aggregation_operator.h @@ -116,9 +116,6 @@ class DistinctStreamingAggOperatorX final } bool require_data_distribution() const override { return _is_colocate; } - bool require_shuffled_data_distribution() const override { - return _needs_finalize || (!_probe_expr_ctxs.empty() && !_is_streaming_preagg); - } private: friend class DistinctStreamingAggLocalState; diff --git a/be/src/pipeline/exec/es_scan_operator.cpp b/be/src/pipeline/exec/es_scan_operator.cpp index 7b846e715f32d2..2cb3cd5e0b29ce 100644 --- a/be/src/pipeline/exec/es_scan_operator.cpp +++ b/be/src/pipeline/exec/es_scan_operator.cpp @@ -44,12 +44,10 @@ static std::string get_host_and_port(const std::vector& Status EsScanLocalState::_init_profile() { 
RETURN_IF_ERROR(Base::_init_profile()); - _es_profile.reset(new RuntimeProfile("EsIterator")); - Base::_scanner_profile->add_child(_es_profile.get(), true, nullptr); - _rows_read_counter = ADD_COUNTER(_es_profile, "RowsRead", TUnit::UNIT); - _read_timer = ADD_TIMER(_es_profile, "TotalRawReadTime(*)"); - _materialize_timer = ADD_TIMER(_es_profile, "MaterializeTupleTime(*)"); + _blocks_read_counter = ADD_COUNTER(_runtime_profile, "BlocksRead", TUnit::UNIT); + _read_timer = ADD_TIMER(_runtime_profile, "TotalRawReadTime(*)"); + _materialize_timer = ADD_TIMER(_runtime_profile, "MaterializeTupleTime(*)"); return Status::OK(); } diff --git a/be/src/pipeline/exec/es_scan_operator.h b/be/src/pipeline/exec/es_scan_operator.h index 4e80150d0ba8c6..2ae562e4fc7f32 100644 --- a/be/src/pipeline/exec/es_scan_operator.h +++ b/be/src/pipeline/exec/es_scan_operator.h @@ -52,13 +52,12 @@ class EsScanLocalState final : public ScanLocalState { Status _init_scanners(std::list* scanners) override; std::vector> _scan_ranges; - std::unique_ptr _es_profile; // FIXME: non-static data member '_rows_read_counter' of 'EsScanLocalState' shadows member inherited from type 'ScanLocalStateBase' #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wshadow-field" #endif - RuntimeProfile::Counter* _rows_read_counter = nullptr; + RuntimeProfile::Counter* _blocks_read_counter = nullptr; #ifdef __clang__ #pragma clang diagnostic pop #endif diff --git a/be/src/pipeline/exec/exchange_sink_buffer.cpp b/be/src/pipeline/exec/exchange_sink_buffer.cpp index 016802f8f73bd8..7163299d766f4e 100644 --- a/be/src/pipeline/exec/exchange_sink_buffer.cpp +++ b/be/src/pipeline/exec/exchange_sink_buffer.cpp @@ -235,7 +235,7 @@ Status ExchangeSinkBuffer::_send_rpc(InstanceLoId id) { auto send_callback = request.channel->get_send_callback(id, request.eos); send_callback->cntl_->set_timeout_ms(request.channel->_brpc_timeout_ms); - if (config::exchange_sink_ignore_eovercrowded) { + if (config::execution_ignore_eovercrowded) { send_callback->cntl_->ignore_eovercrowded(); } send_callback->addFailedHandler([&, weak_task_ctx = weak_task_exec_ctx()]( @@ -313,7 +313,7 @@ Status ExchangeSinkBuffer::_send_rpc(InstanceLoId id) { } auto send_callback = request.channel->get_send_callback(id, request.eos); send_callback->cntl_->set_timeout_ms(request.channel->_brpc_timeout_ms); - if (config::exchange_sink_ignore_eovercrowded) { + if (config::execution_ignore_eovercrowded) { send_callback->cntl_->ignore_eovercrowded(); } send_callback->addFailedHandler([&, weak_task_ctx = weak_task_exec_ctx()]( diff --git a/be/src/pipeline/exec/exchange_sink_buffer.h b/be/src/pipeline/exec/exchange_sink_buffer.h index 2ff7a20086470a..13692532a335a4 100644 --- a/be/src/pipeline/exec/exchange_sink_buffer.h +++ b/be/src/pipeline/exec/exchange_sink_buffer.h @@ -195,7 +195,6 @@ class ExchangeSinkBuffer final : public HasTaskExecutionCtx { private: friend class ExchangeSinkLocalState; - void _set_ready_to_finish(bool all_done); phmap::flat_hash_map> _instance_to_package_queue_mutex; diff --git a/be/src/pipeline/exec/exchange_sink_operator.cpp b/be/src/pipeline/exec/exchange_sink_operator.cpp index a3b6f8da7e9447..1f91af01aa1f6b 100644 --- a/be/src/pipeline/exec/exchange_sink_operator.cpp +++ b/be/src/pipeline/exec/exchange_sink_operator.cpp @@ -39,11 +39,6 @@ namespace doris::pipeline { #include "common/compile_check_begin.h" -Status ExchangeSinkLocalState::serialize_block(vectorized::Block* src, PBlock* dest, - int num_receivers) { - return 
_parent->cast().serialize_block(*this, src, dest, num_receivers); -} - bool ExchangeSinkLocalState::transfer_large_data_by_brpc() const { return _parent->cast()._transfer_large_data_by_brpc; } @@ -61,14 +56,10 @@ Status ExchangeSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo& inf _local_sent_rows = ADD_COUNTER(_profile, "LocalSentRows", TUnit::UNIT); _serialize_batch_timer = ADD_TIMER(_profile, "SerializeBatchTime"); _compress_timer = ADD_TIMER(_profile, "CompressTime"); - _brpc_send_timer = ADD_TIMER(_profile, "BrpcSendTime"); - _brpc_wait_timer = ADD_TIMER(_profile, "BrpcSendTime.Wait"); _local_send_timer = ADD_TIMER(_profile, "LocalSendTime"); _split_block_hash_compute_timer = ADD_TIMER(_profile, "SplitBlockHashComputeTime"); - _split_block_distribute_by_channel_timer = - ADD_TIMER(_profile, "SplitBlockDistributeByChannelTime"); + _distribute_rows_into_channels_timer = ADD_TIMER(_profile, "DistributeRowsIntoChannelsTime"); _blocks_sent_counter = ADD_COUNTER_WITH_LEVEL(_profile, "BlocksProduced", TUnit::UNIT, 1); - _rows_sent_counter = ADD_COUNTER_WITH_LEVEL(_profile, "RowsProduced", TUnit::UNIT, 1); _overall_throughput = _profile->add_derived_counter( "OverallThroughput", TUnit::BYTES_PER_SECOND, [this]() { @@ -141,7 +132,7 @@ Status ExchangeSinkLocalState::open(RuntimeState* state) { std::mt19937 g(rd()); shuffle(channels.begin(), channels.end(), g); } - int local_size = 0; + size_t local_size = 0; for (int i = 0; i < channels.size(); ++i) { RETURN_IF_ERROR(channels[i]->open(state)); if (channels[i]->is_local()) { @@ -151,6 +142,8 @@ Status ExchangeSinkLocalState::open(RuntimeState* state) { } only_local_exchange = local_size == channels.size(); + _rpc_channels_num = channels.size() - local_size; + PUniqueId id; id.set_hi(_state->query_id().hi); id.set_lo(_state->query_id().lo); @@ -288,7 +281,7 @@ Status ExchangeSinkLocalState::_send_new_partition_batch() { vectorized::Block tmp_block = _row_distribution._batching_block->to_block(); // Borrow out, for lval ref auto& p = _parent->cast(); - // these order is only. + // the order of the following steps is deliberate. // 1. clear the batching stats (the flag goes true) so that we won't start a new batching pass while dealing with the batched block. // 2. deal with the batched block // 3. now reuse the columns of the lval block, since write doesn't really adjust it; it generates a new block from it.
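// ------------------------------------------------------------------------------------------
// [Editorial sketch, not part of this diff] The three-step comment above describes a
// re-entrancy guard: _row_distribution._deal_batched is flipped *before* the batched block is
// sunk again, so the nested sink() call cannot start another batching pass. A minimal, generic
// C++ illustration of that ordering (hypothetical names, standard library only, no Doris API):
#include <utility>
#include <vector>

struct BatchingSink {
    bool dealing_batched = false;   // plays the role of the _deal_batched flag in this sketch
    std::vector<int> batched_rows;  // stands in for the batching block

    void sink(const std::vector<int>& rows) {
        if (!dealing_batched && rows.size() < 8) {
            // small input and not currently flushing: buffer it instead of sending
            batched_rows.insert(batched_rows.end(), rows.begin(), rows.end());
            return;
        }
        send(rows); // normal path
    }

    void flush() {
        dealing_batched = true;                              // 1. set the guard flag first ("flag goes true")
        std::vector<int> pending = std::move(batched_rows);  //    take ownership of the batched data
        batched_rows.clear();
        sink(pending);                                       // 2. re-enter sink; the guard prevents re-batching
        dealing_batched = false;                             // 3. restore normal batching behaviour
    }

    void send(const std::vector<int>&) { /* deliver downstream */ }
};
// In the real operator the same ordering is enforced around _send_new_partition_batch();
// the sketch only shows why step 1 must happen before step 2.
// ------------------------------------------------------------------------------------------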
@@ -389,7 +382,6 @@ void ExchangeSinkOperatorX::_handle_eof_channel(RuntimeState* state, ChannelPtrT Status ExchangeSinkOperatorX::sink(RuntimeState* state, vectorized::Block* block, bool eos) { auto& local_state = get_local_state(state); COUNTER_UPDATE(local_state.rows_input_counter(), (int64_t)block->rows()); - COUNTER_UPDATE(local_state.rows_sent_counter(), (int64_t)block->rows()); SCOPED_TIMER(local_state.exec_time_counter()); bool all_receiver_eof = true; for (auto& channel : local_state.channels) { @@ -431,14 +423,15 @@ Status ExchangeSinkOperatorX::sink(RuntimeState* state, vectorized::Block* block { bool serialized = false; RETURN_IF_ERROR(local_state._serializer.next_serialized_block( - block, block_holder->get_block(), local_state.channels.size(), &serialized, - eos)); + block, block_holder->get_block(), local_state._rpc_channels_num, + &serialized, eos)); if (serialized) { auto cur_block = local_state._serializer.get_block()->to_block(); if (!cur_block.empty()) { + DCHECK(eos || local_state._serializer.is_local()) << debug_string(state, 0); RETURN_IF_ERROR(local_state._serializer.serialize_block( &cur_block, block_holder->get_block(), - local_state.channels.size())); + local_state._rpc_channels_num)); } else { block_holder->reset_block(); } @@ -504,10 +497,12 @@ Status ExchangeSinkOperatorX::sink(RuntimeState* state, vectorized::Block* block old_channel_mem_usage += channel->mem_usage(); } if (_part_type == TPartitionType::HASH_PARTITIONED) { + SCOPED_TIMER(local_state._distribute_rows_into_channels_timer); RETURN_IF_ERROR(channel_add_rows( state, local_state.channels, local_state._partition_count, local_state._partitioner->get_channel_ids().get(), rows, block, eos)); } else { + SCOPED_TIMER(local_state._distribute_rows_into_channels_timer); RETURN_IF_ERROR(channel_add_rows( state, local_state.channels, local_state._partition_count, local_state._partitioner->get_channel_ids().get(), rows, block, eos)); @@ -556,10 +551,13 @@ Status ExchangeSinkOperatorX::sink(RuntimeState* state, vectorized::Block* block local_state._row_distribution._deal_batched = true; RETURN_IF_ERROR(local_state._send_new_partition_batch()); } - // the convert_block maybe different with block after execute exprs - // when send data we still use block - RETURN_IF_ERROR(channel_add_rows_with_idx(state, local_state.channels, num_channels, - channel2rows, block, eos)); + { + SCOPED_TIMER(local_state._distribute_rows_into_channels_timer); + // the convert_block maybe different with block after execute exprs + // when send data we still use block + RETURN_IF_ERROR(channel_add_rows_with_idx(state, local_state.channels, num_channels, + channel2rows, block, eos)); + } int64_t new_channel_mem_usage = 0; for (const auto& channel : local_state.channels) { new_channel_mem_usage += channel->mem_usage(); @@ -579,8 +577,12 @@ Status ExchangeSinkOperatorX::sink(RuntimeState* state, vectorized::Block* block } std::vector> assignments = local_state.scale_writer_partitioning_exchanger->accept(block); - RETURN_IF_ERROR(channel_add_rows_with_idx( - state, local_state.channels, local_state.channels.size(), assignments, block, eos)); + { + SCOPED_TIMER(local_state._distribute_rows_into_channels_timer); + RETURN_IF_ERROR(channel_add_rows_with_idx(state, local_state.channels, + local_state.channels.size(), assignments, + block, eos)); + } int64_t new_channel_mem_usage = 0; for (const auto& channel : local_state.channels) { @@ -635,24 +637,6 @@ Status ExchangeSinkOperatorX::sink(RuntimeState* state, vectorized::Block* block return 
final_st; } -Status ExchangeSinkOperatorX::serialize_block(ExchangeSinkLocalState& state, vectorized::Block* src, - PBlock* dest, int num_receivers) { - { - SCOPED_TIMER(state.serialize_batch_timer()); - dest->Clear(); - size_t uncompressed_bytes = 0; - size_t compressed_bytes = 0; - RETURN_IF_ERROR(src->serialize(_state->be_exec_version(), dest, &uncompressed_bytes, - &compressed_bytes, _compression_type, - _transfer_large_data_by_brpc)); - COUNTER_UPDATE(state.bytes_sent_counter(), compressed_bytes * num_receivers); - COUNTER_UPDATE(state.uncompressed_bytes_counter(), uncompressed_bytes * num_receivers); - COUNTER_UPDATE(state.compress_timer(), src->get_compress_time()); - } - - return Status::OK(); -} - void ExchangeSinkLocalState::register_channels(pipeline::ExchangeSinkBuffer* buffer) { for (auto& channel : channels) { channel->register_exchange_buffer(buffer); diff --git a/be/src/pipeline/exec/exchange_sink_operator.h b/be/src/pipeline/exec/exchange_sink_operator.h index 141693eb820f4a..63d50290005470 100644 --- a/be/src/pipeline/exec/exchange_sink_operator.h +++ b/be/src/pipeline/exec/exchange_sink_operator.h @@ -77,27 +77,13 @@ class ExchangeSinkLocalState final : public PipelineXSinkLocalState<> { Status open(RuntimeState* state) override; Status close(RuntimeState* state, Status exec_status) override; Dependency* finishdependency() override { return _finish_dependency.get(); } - Status serialize_block(vectorized::Block* src, PBlock* dest, int num_receivers = 1); void register_channels(pipeline::ExchangeSinkBuffer* buffer); - RuntimeProfile::Counter* brpc_wait_timer() { return _brpc_wait_timer; } RuntimeProfile::Counter* blocks_sent_counter() { return _blocks_sent_counter; } - RuntimeProfile::Counter* rows_sent_counter() { return _rows_sent_counter; } RuntimeProfile::Counter* local_send_timer() { return _local_send_timer; } RuntimeProfile::Counter* local_bytes_send_counter() { return _local_bytes_send_counter; } RuntimeProfile::Counter* local_sent_rows() { return _local_sent_rows; } - RuntimeProfile::Counter* brpc_send_timer() { return _brpc_send_timer; } - RuntimeProfile::Counter* serialize_batch_timer() { return _serialize_batch_timer; } - RuntimeProfile::Counter* split_block_distribute_by_channel_timer() { - return _split_block_distribute_by_channel_timer; - } - RuntimeProfile::Counter* bytes_sent_counter() { return _bytes_sent_counter; } - RuntimeProfile::Counter* split_block_hash_compute_timer() { - return _split_block_hash_compute_timer; - } RuntimeProfile::Counter* merge_block_timer() { return _merge_block_timer; } - RuntimeProfile::Counter* compress_timer() { return _compress_timer; } - RuntimeProfile::Counter* uncompressed_bytes_counter() { return _uncompressed_bytes_counter; } [[nodiscard]] bool transfer_large_data_by_brpc() const; bool is_finished() const override { return _reach_limit.load(); } void set_reach_limit() { _reach_limit = true; }; @@ -129,16 +115,13 @@ class ExchangeSinkLocalState final : public PipelineXSinkLocalState<> { std::unique_ptr _sink_buffer = nullptr; RuntimeProfile::Counter* _serialize_batch_timer = nullptr; RuntimeProfile::Counter* _compress_timer = nullptr; - RuntimeProfile::Counter* _brpc_send_timer = nullptr; - RuntimeProfile::Counter* _brpc_wait_timer = nullptr; RuntimeProfile::Counter* _bytes_sent_counter = nullptr; RuntimeProfile::Counter* _uncompressed_bytes_counter = nullptr; RuntimeProfile::Counter* _local_sent_rows = nullptr; RuntimeProfile::Counter* _local_send_timer = nullptr; RuntimeProfile::Counter* _split_block_hash_compute_timer 
= nullptr; - RuntimeProfile::Counter* _split_block_distribute_by_channel_timer = nullptr; + RuntimeProfile::Counter* _distribute_rows_into_channels_timer = nullptr; RuntimeProfile::Counter* _blocks_sent_counter = nullptr; - RuntimeProfile::Counter* _rows_sent_counter = nullptr; // Throughput per total time spent in sender RuntimeProfile::Counter* _overall_throughput = nullptr; // Used to counter send bytes under local data exchange @@ -153,6 +136,7 @@ class ExchangeSinkLocalState final : public PipelineXSinkLocalState<> { int _sender_id; std::shared_ptr _broadcast_pb_mem_limiter; + size_t _rpc_channels_num = 0; vectorized::BlockSerializer _serializer; std::shared_ptr _queue_dependency = nullptr; @@ -221,8 +205,6 @@ class ExchangeSinkOperatorX final : public DataSinkOperatorX(tnode.nullable_tuples.begin(), tnode.nullable_tuples.begin() + tnode.exchange_node.input_row_tuples.size())), - _offset(tnode.exchange_node.__isset.offset ? tnode.exchange_node.offset : 0) {} + _offset(tnode.exchange_node.__isset.offset ? tnode.exchange_node.offset : 0) { + _is_serial_operator = tnode.__isset.is_serial_operator && tnode.is_serial_operator; +} Status ExchangeSourceOperatorX::init(const TPlanNode& tnode, RuntimeState* state) { RETURN_IF_ERROR(OperatorX::init(tnode, state)); @@ -142,15 +148,22 @@ Status ExchangeSourceOperatorX::get_block(RuntimeState* state, vectorized::Block }); SCOPED_TIMER(local_state.exec_time_counter()); if (_is_merging && !local_state.is_ready) { + SCOPED_TIMER(local_state.create_merger_timer); RETURN_IF_ERROR(local_state.stream_recvr->create_merger( local_state.vsort_exec_exprs.lhs_ordering_expr_ctxs(), _is_asc_order, _nulls_first, state->batch_size(), _limit, _offset)); local_state.is_ready = true; return Status::OK(); } - auto status = local_state.stream_recvr->get_next(block, eos); - RETURN_IF_ERROR(doris::vectorized::VExprContext::filter_block(local_state.conjuncts(), block, - block->columns())); + { + SCOPED_TIMER(local_state.get_data_from_recvr_timer); + RETURN_IF_ERROR(local_state.stream_recvr->get_next(block, eos)); + } + { + SCOPED_TIMER(local_state.filter_timer); + RETURN_IF_ERROR(doris::vectorized::VExprContext::filter_block(local_state.conjuncts(), + block, block->columns())); + } // In vsortrunmerger, it will set eos=true, and block not empty // so that eos==true, could not make sure that block not have valid data if (!*eos || block->rows() > 0) { @@ -174,7 +187,7 @@ Status ExchangeSourceOperatorX::get_block(RuntimeState* state, vectorized::Block local_state.set_num_rows_returned(_limit); } } - return status; + return Status::OK(); } Status ExchangeLocalState::close(RuntimeState* state) { diff --git a/be/src/pipeline/exec/exchange_source_operator.h b/be/src/pipeline/exec/exchange_source_operator.h index 0fe3dcbb590b7d..f938f5007d1643 100644 --- a/be/src/pipeline/exec/exchange_source_operator.h +++ b/be/src/pipeline/exec/exchange_source_operator.h @@ -59,6 +59,9 @@ class ExchangeLocalState final : public PipelineXLocalState<> { std::vector> deps; std::vector metrics; + RuntimeProfile::Counter* get_data_from_recvr_timer = nullptr; + RuntimeProfile::Counter* filter_timer = nullptr; + RuntimeProfile::Counter* create_merger_timer = nullptr; }; class ExchangeSourceOperatorX final : public OperatorX { @@ -81,7 +84,7 @@ class ExchangeSourceOperatorX final : public OperatorX { [[nodiscard]] bool is_merging() const { return _is_merging; } DataDistribution required_data_distribution() const override { - if (OperatorX::ignore_data_distribution()) { + if 
(OperatorX::is_serial_operator()) { return {ExchangeType::NOOP}; } return _partition_type == TPartitionType::HASH_PARTITIONED diff --git a/be/src/pipeline/exec/group_commit_block_sink_operator.cpp b/be/src/pipeline/exec/group_commit_block_sink_operator.cpp index e0171b41ab1ee8..9f99d55d3ea989 100644 --- a/be/src/pipeline/exec/group_commit_block_sink_operator.cpp +++ b/be/src/pipeline/exec/group_commit_block_sink_operator.cpp @@ -64,6 +64,7 @@ Status GroupCommitBlockSinkLocalState::open(RuntimeState* state) { } Status GroupCommitBlockSinkLocalState::_initialize_load_queue() { + SCOPED_TIMER(_init_load_queue_timer); auto& p = _parent->cast(); if (_state->exec_env()->wal_mgr()->is_running()) { RETURN_IF_ERROR(_state->exec_env()->group_commit_mgr()->get_first_block_load_queue( @@ -238,6 +239,17 @@ Status GroupCommitBlockSinkLocalState::_add_blocks(RuntimeState* state, return Status::OK(); } +Status GroupCommitBlockSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo& info) { + RETURN_IF_ERROR(Base::init(state, info)); + SCOPED_TIMER(exec_time_counter()); + SCOPED_TIMER(_init_timer); + _init_load_queue_timer = ADD_TIMER(_profile, "InitLoadQueueTime"); + _valid_and_convert_block_timer = ADD_TIMER(_profile, "ValidAndConvertBlockTime"); + _find_partition_timer = ADD_TIMER(_profile, "FindPartitionTime"); + _append_blocks_timer = ADD_TIMER(_profile, "AppendBlocksTime"); + return Status::OK(); +} + Status GroupCommitBlockSinkOperatorX::init(const TDataSink& t_sink) { RETURN_IF_ERROR(Base::init(t_sink)); DCHECK(t_sink.__isset.olap_table_sink); @@ -318,10 +330,15 @@ Status GroupCommitBlockSinkOperatorX::sink(RuntimeState* state, vectorized::Bloc std::shared_ptr block; bool has_filtered_rows = false; - RETURN_IF_ERROR(local_state._block_convertor->validate_and_convert_block( - state, input_block, block, local_state._output_vexpr_ctxs, rows, has_filtered_rows)); + { + SCOPED_TIMER(local_state._valid_and_convert_block_timer); + RETURN_IF_ERROR(local_state._block_convertor->validate_and_convert_block( + state, input_block, block, local_state._output_vexpr_ctxs, rows, + has_filtered_rows)); + } local_state._has_filtered_rows = false; if (!local_state._vpartition->is_auto_partition()) { + SCOPED_TIMER(local_state._find_partition_timer); //reuse vars for find_partition local_state._partitions.assign(rows, nullptr); local_state._filter_bitmap.Reset(rows); @@ -351,23 +368,26 @@ Status GroupCommitBlockSinkOperatorX::sink(RuntimeState* state, vectorized::Bloc } } } - - if (local_state._block_convertor->num_filtered_rows() > 0 || local_state._has_filtered_rows) { - auto cloneBlock = block->clone_without_columns(); - auto res_block = vectorized::MutableBlock::build_mutable_block(&cloneBlock); - for (int i = 0; i < rows; ++i) { - if (local_state._block_convertor->filter_map()[i]) { - continue; - } - if (local_state._filter_bitmap.Get(i)) { - continue; + { + SCOPED_TIMER(local_state._append_blocks_timer); + if (local_state._block_convertor->num_filtered_rows() > 0 || + local_state._has_filtered_rows) { + auto cloneBlock = block->clone_without_columns(); + auto res_block = vectorized::MutableBlock::build_mutable_block(&cloneBlock); + for (int i = 0; i < rows; ++i) { + if (local_state._block_convertor->filter_map()[i]) { + continue; + } + if (local_state._filter_bitmap.Get(i)) { + continue; + } + res_block.add_row(block.get(), i); } - res_block.add_row(block.get(), i); + block->swap(res_block.to_block()); } - block->swap(res_block.to_block()); + // add block into block queue + 
RETURN_IF_ERROR(local_state._add_block(state, block)); } - // add block into block queue - RETURN_IF_ERROR(local_state._add_block(state, block)); return wind_up(); } diff --git a/be/src/pipeline/exec/group_commit_block_sink_operator.h b/be/src/pipeline/exec/group_commit_block_sink_operator.h index 32ca0613652ae4..e469aee8df595c 100644 --- a/be/src/pipeline/exec/group_commit_block_sink_operator.h +++ b/be/src/pipeline/exec/group_commit_block_sink_operator.h @@ -42,8 +42,8 @@ class GroupCommitBlockSinkLocalState final : public PipelineXSinkLocalState dependencies() const override { @@ -79,6 +79,11 @@ class GroupCommitBlockSinkLocalState final : public PipelineXSinkLocalState _finish_dependency; std::shared_ptr _create_plan_dependency = nullptr; std::shared_ptr _put_block_dependency = nullptr; + + RuntimeProfile::Counter* _init_load_queue_timer = nullptr; + RuntimeProfile::Counter* _valid_and_convert_block_timer = nullptr; + RuntimeProfile::Counter* _find_partition_timer = nullptr; + RuntimeProfile::Counter* _append_blocks_timer = nullptr; }; class GroupCommitBlockSinkOperatorX final diff --git a/be/src/pipeline/exec/group_commit_scan_operator.cpp b/be/src/pipeline/exec/group_commit_scan_operator.cpp index 9577639813a760..141a5e7bf770c5 100644 --- a/be/src/pipeline/exec/group_commit_scan_operator.cpp +++ b/be/src/pipeline/exec/group_commit_scan_operator.cpp @@ -31,6 +31,7 @@ GroupCommitOperatorX::GroupCommitOperatorX(ObjectPool* pool, const TPlanNode& tn Status GroupCommitOperatorX::get_block(RuntimeState* state, vectorized::Block* block, bool* eos) { auto& local_state = get_local_state(state); + SCOPED_TIMER(local_state.exec_time_counter()); bool find_node = false; while (!find_node && !*eos) { RETURN_IF_ERROR(local_state.load_block_queue->get_block(state, block, &find_node, eos, diff --git a/be/src/pipeline/exec/hashjoin_build_sink.cpp b/be/src/pipeline/exec/hashjoin_build_sink.cpp index 5ead4ba13a389c..37de9ac93d839f 100644 --- a/be/src/pipeline/exec/hashjoin_build_sink.cpp +++ b/be/src/pipeline/exec/hashjoin_build_sink.cpp @@ -43,7 +43,7 @@ Status HashJoinBuildSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo _shared_state->join_op_variants = p._join_op_variants; _shared_state->is_null_safe_eq_join = p._is_null_safe_eq_join; - _shared_state->store_null_in_hash_table = p._store_null_in_hash_table; + _shared_state->serialize_null_into_key = p._serialize_null_into_key; _build_expr_ctxs.resize(p._build_expr_ctxs.size()); for (size_t i = 0; i < _build_expr_ctxs.size(); i++) { RETURN_IF_ERROR(p._build_expr_ctxs[i]->clone(state, _build_expr_ctxs[i])); @@ -51,19 +51,19 @@ Status HashJoinBuildSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo _shared_state->build_exprs_size = _build_expr_ctxs.size(); _should_build_hash_table = true; + profile()->add_info_string("BroadcastJoin", std::to_string(p._is_broadcast_join)); if (p._is_broadcast_join) { - profile()->add_info_string("BroadcastJoin", "true"); if (state->enable_share_hash_table_for_broadcast_join()) { _should_build_hash_table = info.task_idx == 0; if (_should_build_hash_table) { - profile()->add_info_string("ShareHashTableEnabled", "true"); p._shared_hashtable_controller->set_builder_and_consumers( state->fragment_instance_id(), p.node_id()); } - } else { - profile()->add_info_string("ShareHashTableEnabled", "false"); } } + profile()->add_info_string("BuildShareHashTable", std::to_string(_should_build_hash_table)); + profile()->add_info_string("ShareHashTableEnabled", + 
std::to_string(state->enable_share_hash_table_for_broadcast_join())); if (!_should_build_hash_table) { _dependency->block(); _finish_dependency->block(); @@ -72,6 +72,7 @@ Status HashJoinBuildSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo _finish_dependency->shared_from_this()); } + _runtime_filter_init_timer = ADD_TIMER(profile(), "RuntimeFilterInitTime"); _build_blocks_memory_usage = ADD_COUNTER_WITH_LEVEL(profile(), "MemoryUsageBuildBlocks", TUnit::BYTES, 1); _hash_table_memory_usage = @@ -81,13 +82,10 @@ Status HashJoinBuildSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo // Build phase auto* record_profile = _should_build_hash_table ? profile() : faker_runtime_profile(); - _build_table_timer = ADD_TIMER(profile(), "BuildTableTime"); - _build_side_merge_block_timer = ADD_TIMER(profile(), "BuildSideMergeBlockTime"); + _build_table_timer = ADD_TIMER(profile(), "BuildHashTableTime"); + _build_side_merge_block_timer = ADD_TIMER(profile(), "MergeBuildBlockTime"); _build_table_insert_timer = ADD_TIMER(record_profile, "BuildTableInsertTime"); _build_expr_call_timer = ADD_TIMER(record_profile, "BuildExprCallTime"); - _build_side_compute_hash_timer = ADD_TIMER(record_profile, "BuildSideHashComputingTime"); - - _allocate_resource_timer = ADD_TIMER(profile(), "AllocateResourceTime"); // Hash Table Init RETURN_IF_ERROR(_hash_table_init(state)); @@ -227,33 +225,22 @@ Status HashJoinBuildSinkLocalState::_extract_join_column( vectorized::Block& block, vectorized::ColumnUInt8::MutablePtr& null_map, vectorized::ColumnRawPtrs& raw_ptrs, const std::vector& res_col_ids) { auto& shared_state = *_shared_state; - auto& p = _parent->cast(); for (size_t i = 0; i < shared_state.build_exprs_size; ++i) { - if (p._should_convert_to_nullable[i]) { + const auto* column = block.get_by_position(res_col_ids[i]).column.get(); + if (!column->is_nullable() && shared_state.serialize_null_into_key[i]) { _key_columns_holder.emplace_back( vectorized::make_nullable(block.get_by_position(res_col_ids[i]).column)); raw_ptrs[i] = _key_columns_holder.back().get(); - continue; - } - - if (shared_state.is_null_safe_eq_join[i]) { - raw_ptrs[i] = block.get_by_position(res_col_ids[i]).column.get(); + } else if (const auto* nullable = check_and_get_column(*column); + !shared_state.serialize_null_into_key[i] && nullable) { + // update nulllmap and split nested out of ColumnNullable when serialize_null_into_key is false and column is nullable + const auto& col_nested = nullable->get_nested_column(); + const auto& col_nullmap = nullable->get_null_map_data(); + DCHECK(null_map != nullptr); + vectorized::VectorizedUtils::update_null_map(null_map->get_data(), col_nullmap); + raw_ptrs[i] = &col_nested; } else { - const auto* column = block.get_by_position(res_col_ids[i]).column.get(); - if (const auto* nullable = check_and_get_column(*column)) { - const auto& col_nested = nullable->get_nested_column(); - const auto& col_nullmap = nullable->get_null_map_data(); - - if (shared_state.store_null_in_hash_table[i]) { - raw_ptrs[i] = nullable; - } else { - DCHECK(null_map != nullptr); - vectorized::VectorizedUtils::update_null_map(null_map->get_data(), col_nullmap); - raw_ptrs[i] = &col_nested; - } - } else { - raw_ptrs[i] = column; - } + raw_ptrs[i] = column; } } return Status::OK(); @@ -267,7 +254,6 @@ Status HashJoinBuildSinkLocalState::process_build_block(RuntimeState* state, if (UNLIKELY(rows == 0)) { return Status::OK(); } - COUNTER_UPDATE(_build_rows_counter, rows); block.replace_if_overflow(); 
vectorized::ColumnRawPtrs raw_ptrs(_build_expr_ctxs.size()); @@ -284,13 +270,9 @@ Status HashJoinBuildSinkLocalState::process_build_block(RuntimeState* state, .data()[0] = 1; } } - // TODO: Now we are not sure whether a column is nullable only by ExecNode's `row_desc` - // so we have to initialize this flag by the first build block. - if (!_has_set_need_null_map_for_build) { - _has_set_need_null_map_for_build = true; - _set_build_ignore_flag(block, _build_col_ids); - } - if (p._short_circuit_for_null_in_build_side || _build_side_ignore_null) { + + _set_build_side_has_external_nullmap(block, _build_col_ids); + if (_build_side_has_external_nullmap) { null_map_val = vectorized::ColumnUInt8::create(); null_map_val->get_data().assign(rows, (uint8_t)0); } @@ -300,27 +282,23 @@ Status HashJoinBuildSinkLocalState::process_build_block(RuntimeState* state, st = std::visit( vectorized::Overload { - [&](std::monostate& arg, auto join_op, auto has_null_value, + [&](std::monostate& arg, auto join_op, auto short_circuit_for_null_in_build_side, auto with_other_conjuncts) -> Status { LOG(FATAL) << "FATAL: uninited hash table"; __builtin_unreachable(); return Status::OK(); }, - [&](auto&& arg, auto&& join_op, auto has_null_value, - auto short_circuit_for_null_in_build_side, + [&](auto&& arg, auto&& join_op, auto short_circuit_for_null_in_build_side, auto with_other_conjuncts) -> Status { using HashTableCtxType = std::decay_t; using JoinOpType = std::decay_t; ProcessHashTableBuild hash_table_build_process( rows, raw_ptrs, this, state->batch_size(), state); auto st = hash_table_build_process.template run< - JoinOpType::value, has_null_value, - short_circuit_for_null_in_build_side, with_other_conjuncts>( - arg, - has_null_value || short_circuit_for_null_in_build_side - ? &null_map_val->get_data() - : nullptr, + JoinOpType::value, short_circuit_for_null_in_build_side, + with_other_conjuncts>( + arg, null_map_val ? 
&null_map_val->get_data() : nullptr, &_shared_state->_has_null_in_build_side); COUNTER_SET(_memory_used_counter, _build_blocks_memory_usage->value() + @@ -330,22 +308,24 @@ Status HashJoinBuildSinkLocalState::process_build_block(RuntimeState* state, return st; }}, _shared_state->hash_table_variants->method_variant, _shared_state->join_op_variants, - vectorized::make_bool_variant(_build_side_ignore_null), vectorized::make_bool_variant(p._short_circuit_for_null_in_build_side), vectorized::make_bool_variant((p._have_other_join_conjunct))); return st; } -void HashJoinBuildSinkLocalState::_set_build_ignore_flag(vectorized::Block& block, - const std::vector& res_col_ids) { +void HashJoinBuildSinkLocalState::_set_build_side_has_external_nullmap( + vectorized::Block& block, const std::vector& res_col_ids) { auto& p = _parent->cast(); + if (p._short_circuit_for_null_in_build_side) { + _build_side_has_external_nullmap = true; + return; + } for (size_t i = 0; i < _build_expr_ctxs.size(); ++i) { - if (!_shared_state->is_null_safe_eq_join[i] && !p._short_circuit_for_null_in_build_side) { - const auto* column = block.get_by_position(res_col_ids[i]).column.get(); - if (check_and_get_column(*column)) { - _build_side_ignore_null |= !_shared_state->store_null_in_hash_table[i]; - } + const auto* column = block.get_by_position(res_col_ids[i]).column.get(); + if (column->is_nullable() && !_shared_state->serialize_null_into_key[i]) { + _build_side_has_external_nullmap = true; + return; + } } } } @@ -359,7 +339,7 @@ Status HashJoinBuildSinkLocalState::_hash_table_init(RuntimeState* state) { /// For 'null safe equal' join, /// the build key column may be converted to nullable from non-nullable. - if (p._should_convert_to_nullable[i]) { + if (p._serialize_null_into_key[i]) { data_type = vectorized::make_nullable(data_type); } data_types.emplace_back(std::move(data_type)); @@ -393,10 +373,6 @@ Status HashJoinBuildSinkOperatorX::init(const TPlanNode& tnode, RuntimeState* st _hash_output_slot_ids = tnode.hash_join_node.hash_output_slot_ids; } - const bool build_stores_null = _join_op == TJoinOp::RIGHT_OUTER_JOIN || - _join_op == TJoinOp::FULL_OUTER_JOIN || - _join_op == TJoinOp::RIGHT_ANTI_JOIN; - const std::vector& eq_join_conjuncts = tnode.hash_join_node.eq_join_conjuncts; for (const auto& eq_join_conjunct : eq_join_conjuncts) { vectorized::VExprContextSPtr build_ctx; @@ -430,16 +406,18 @@ Status HashJoinBuildSinkOperatorX::init(const TPlanNode& tnode, RuntimeState* st (eq_join_conjunct.right.nodes[0].is_nullable || eq_join_conjunct.left.nodes[0].is_nullable); - const bool should_convert_to_nullable = is_null_safe_equal && - !eq_join_conjunct.right.nodes[0].is_nullable && - eq_join_conjunct.left.nodes[0].is_nullable; _is_null_safe_eq_join.push_back(is_null_safe_equal); - _should_convert_to_nullable.emplace_back(should_convert_to_nullable); - // if is null aware, build join column and probe join column both need dispose null value - _store_null_in_hash_table.emplace_back( - is_null_safe_equal || - (_build_expr_ctxs.back()->root()->is_nullable() && build_stores_null)); + if (eq_join_conjuncts.size() == 1) { + // a single-column key must represent null via the null map instead of serializing null into the key + _serialize_null_into_key.emplace_back(false); + } else if (is_null_safe_equal) { + // for multi-column null-safe-equal keys, serialize null into the key to represent null values + _serialize_null_into_key.emplace_back(true); + } else { + // in the normal case, since null != null, nulls can be represented directly with the null map.
+ _serialize_null_into_key.emplace_back(false); + } } return Status::OK(); diff --git a/be/src/pipeline/exec/hashjoin_build_sink.h b/be/src/pipeline/exec/hashjoin_build_sink.h index 69aa6843b84ecb..45aa1e8c8a262d 100644 --- a/be/src/pipeline/exec/hashjoin_build_sink.h +++ b/be/src/pipeline/exec/hashjoin_build_sink.h @@ -56,7 +56,8 @@ class HashJoinBuildSinkLocalState final protected: Status _hash_table_init(RuntimeState* state); - void _set_build_ignore_flag(vectorized::Block& block, const std::vector& res_col_ids); + void _set_build_side_has_external_nullmap(vectorized::Block& block, + const std::vector& res_col_ids); Status _do_evaluate(vectorized::Block& block, vectorized::VExprContextSPtrs& exprs, RuntimeProfile::Counter& expr_call_timer, std::vector& res_col_ids); std::vector _convert_block_to_null(vectorized::Block& block); @@ -79,7 +80,6 @@ class HashJoinBuildSinkLocalState final vectorized::MutableBlock _build_side_mutable_block; std::shared_ptr _runtime_filter_slots; - bool _has_set_need_null_map_for_build = false; /* * The comparison result of a null value with any other value is null, @@ -87,21 +87,19 @@ class HashJoinBuildSinkLocalState final * the result of an equality condition involving null should be false, * so null does not need to be added to the hash table. */ - bool _build_side_ignore_null = false; + bool _build_side_has_external_nullmap = false; std::vector _build_col_ids; std::shared_ptr _finish_dependency; RuntimeProfile::Counter* _build_table_timer = nullptr; RuntimeProfile::Counter* _build_expr_call_timer = nullptr; RuntimeProfile::Counter* _build_table_insert_timer = nullptr; - RuntimeProfile::Counter* _build_side_compute_hash_timer = nullptr; RuntimeProfile::Counter* _build_side_merge_block_timer = nullptr; - RuntimeProfile::Counter* _allocate_resource_timer = nullptr; - RuntimeProfile::Counter* _build_blocks_memory_usage = nullptr; RuntimeProfile::Counter* _hash_table_memory_usage = nullptr; RuntimeProfile::Counter* _build_arena_memory_usage = nullptr; + RuntimeProfile::Counter* _runtime_filter_init_timer = nullptr; }; class HashJoinBuildSinkOperatorX final @@ -130,8 +128,8 @@ class HashJoinBuildSinkOperatorX final if (_join_op == TJoinOp::NULL_AWARE_LEFT_ANTI_JOIN) { return {ExchangeType::NOOP}; } else if (_is_broadcast_join) { - return _child->ignore_data_distribution() ? DataDistribution(ExchangeType::PASS_TO_ONE) - : DataDistribution(ExchangeType::NOOP); + return _child->is_serial_operator() ? 
DataDistribution(ExchangeType::PASS_TO_ONE) + : DataDistribution(ExchangeType::NOOP); } return _join_distribution == TJoinDistributionType::BUCKET_SHUFFLE || _join_distribution == TJoinDistributionType::COLOCATE @@ -139,9 +137,6 @@ class HashJoinBuildSinkOperatorX final : DataDistribution(ExchangeType::HASH_SHUFFLE, _partition_exprs); } - bool require_shuffled_data_distribution() const override { - return _join_op != TJoinOp::NULL_AWARE_LEFT_ANTI_JOIN && !_is_broadcast_join; - } bool is_shuffled_operator() const override { return _join_distribution == TJoinDistributionType::PARTITIONED; } @@ -157,13 +152,11 @@ class HashJoinBuildSinkOperatorX final // build expr vectorized::VExprContextSPtrs _build_expr_ctxs; // mark the build hash table whether it needs to store null value - std::vector _store_null_in_hash_table; + std::vector _serialize_null_into_key; // mark the join column whether support null eq std::vector _is_null_safe_eq_join; - std::vector _should_convert_to_nullable; - bool _is_broadcast_join = false; std::shared_ptr _shared_hashtable_controller; @@ -187,12 +180,12 @@ struct ProcessHashTableBuild { _batch_size(batch_size), _state(state) {} - template + template Status run(HashTableContext& hash_table_ctx, vectorized::ConstNullMapPtr null_map, bool* has_null_key) { - if (short_circuit_for_null || ignore_null) { + if (null_map) { // first row is mocked and is null + // TODO: Need to test the for loop. break may better for (uint32_t i = 1; i < _rows; i++) { if ((*null_map)[i]) { *has_null_key = true; @@ -210,8 +203,21 @@ struct ProcessHashTableBuild { hash_table_ctx.init_serialized_keys(_build_raw_ptrs, _rows, null_map ? null_map->data() : nullptr, true, true, hash_table_ctx.hash_table->get_bucket_size()); - hash_table_ctx.hash_table->template build( - hash_table_ctx.keys, hash_table_ctx.bucket_nums.data(), _rows); + // only 2 cases need to access the null value in hash table + bool keep_null_key = false; + if ((JoinOpType == TJoinOp::NULL_AWARE_LEFT_ANTI_JOIN || + JoinOpType == TJoinOp::NULL_AWARE_LEFT_SEMI_JOIN) && + with_other_conjuncts) { + //null aware join with other conjuncts + keep_null_key = true; + } else if (_parent->_shared_state->is_null_safe_eq_join.size() == 1 && + _parent->_shared_state->is_null_safe_eq_join[0]) { + // single null safe eq + keep_null_key = true; + } + + hash_table_ctx.hash_table->build(hash_table_ctx.keys, hash_table_ctx.bucket_nums.data(), + _rows, keep_null_key); hash_table_ctx.bucket_nums.resize(_batch_size); hash_table_ctx.bucket_nums.shrink_to_fit(); @@ -231,4 +237,4 @@ struct ProcessHashTableBuild { }; } // namespace doris::pipeline -#include "common/compile_check_end.h" \ No newline at end of file +#include "common/compile_check_end.h" diff --git a/be/src/pipeline/exec/hashjoin_probe_operator.cpp b/be/src/pipeline/exec/hashjoin_probe_operator.cpp index bb869ee3257d9d..426bfcb219dc04 100644 --- a/be/src/pipeline/exec/hashjoin_probe_operator.cpp +++ b/be/src/pipeline/exec/hashjoin_probe_operator.cpp @@ -57,13 +57,11 @@ Status HashJoinProbeLocalState::init(RuntimeState* state, LocalStateInfo& info) _probe_arena_memory_usage = profile()->AddHighWaterMarkCounter("MemoryUsageProbeKeyArena", TUnit::BYTES, "", 1); // Probe phase - _probe_next_timer = ADD_TIMER(profile(), "ProbeFindNextTime"); _probe_expr_call_timer = ADD_TIMER(profile(), "ProbeExprCallTime"); _search_hashtable_timer = ADD_TIMER(profile(), "ProbeWhenSearchHashTableTime"); _build_side_output_timer = ADD_TIMER(profile(), "ProbeWhenBuildSideOutputTime"); _probe_side_output_timer = 
ADD_TIMER(profile(), "ProbeWhenProbeSideOutputTime"); - _probe_process_hashtable_timer = ADD_TIMER(profile(), "ProbeWhenProcessHashTableTime"); - _process_other_join_conjunct_timer = ADD_TIMER(profile(), "OtherJoinConjunctTime"); + _non_equal_join_conjuncts_timer = ADD_TIMER(profile(), "NonEqualJoinConjunctEvaluationTime"); _init_probe_side_timer = ADD_TIMER(profile(), "InitProbeSideTime"); return Status::OK(); } @@ -154,11 +152,9 @@ Status HashJoinProbeLocalState::close(RuntimeState* state) { bool HashJoinProbeLocalState::_need_probe_null_map(vectorized::Block& block, const std::vector& res_col_ids) { for (size_t i = 0; i < _probe_expr_ctxs.size(); ++i) { - if (!_shared_state->is_null_safe_eq_join[i]) { - auto column = block.get_by_position(res_col_ids[i]).column.get(); - if (check_and_get_column(*column)) { - return true; - } + const auto* column = block.get_by_position(res_col_ids[i]).column.get(); + if (column->is_nullable() && !_shared_state->serialize_null_into_key[i]) { + return true; } } return false; @@ -231,7 +227,6 @@ HashJoinProbeOperatorX::HashJoinProbeOperatorX(ObjectPool* pool, const TPlanNode Status HashJoinProbeOperatorX::pull(doris::RuntimeState* state, vectorized::Block* output_block, bool* eos) const { auto& local_state = get_local_state(state); - SCOPED_TIMER(local_state._probe_timer); if (local_state._shared_state->short_circuit_for_probe) { // If we use a short-circuit strategy, should return empty block directly. *eos = true; @@ -290,16 +285,14 @@ Status HashJoinProbeOperatorX::pull(doris::RuntimeState* state, vectorized::Bloc if (local_state._probe_index < local_state._probe_block.rows()) { DCHECK(local_state._has_set_need_null_map_for_probe); std::visit( - [&](auto&& arg, auto&& process_hashtable_ctx, auto need_null_map_for_probe, - auto ignore_null) { + [&](auto&& arg, auto&& process_hashtable_ctx, auto need_judge_null) { using HashTableProbeType = std::decay_t; if constexpr (!std::is_same_v) { using HashTableCtxType = std::decay_t; if constexpr (!std::is_same_v) { - st = process_hashtable_ctx.template process( + st = process_hashtable_ctx.template process( arg, - need_null_map_for_probe + local_state._null_map_column ? 
&local_state._null_map_column->get_data() : nullptr, mutable_join_block, &temp_block, @@ -314,8 +307,8 @@ Status HashJoinProbeOperatorX::pull(doris::RuntimeState* state, vectorized::Bloc }, local_state._shared_state->hash_table_variants->method_variant, *local_state._process_hashtable_ctx_variants, - vectorized::make_bool_variant(local_state._need_null_map_for_probe), - vectorized::make_bool_variant(local_state._shared_state->probe_ignore_null)); + vectorized::make_bool_variant(local_state._need_null_map_for_probe && + local_state._shared_state->probe_ignore_null)); } else if (local_state._probe_eos) { if (_is_right_semi_anti || (_is_outer_join && _join_op != TJoinOp::LEFT_OUTER_JOIN)) { std::visit( @@ -324,7 +317,7 @@ Status HashJoinProbeOperatorX::pull(doris::RuntimeState* state, vectorized::Bloc if constexpr (!std::is_same_v) { using HashTableCtxType = std::decay_t; if constexpr (!std::is_same_v) { - st = process_hashtable_ctx.process_data_in_hashtable( + st = process_hashtable_ctx.finish_probing( arg, mutable_join_block, &temp_block, eos, _is_mark_join); } else { st = Status::InternalError("uninited hash table"); @@ -383,34 +376,22 @@ Status HashJoinProbeLocalState::_extract_join_column(vectorized::Block& block, } auto& shared_state = *_shared_state; - auto& p = _parent->cast(); for (size_t i = 0; i < shared_state.build_exprs_size; ++i) { - if (p._should_convert_to_nullable[i]) { + const auto* column = block.get_by_position(res_col_ids[i]).column.get(); + if (!column->is_nullable() && shared_state.serialize_null_into_key[i]) { _key_columns_holder.emplace_back( vectorized::make_nullable(block.get_by_position(res_col_ids[i]).column)); _probe_columns[i] = _key_columns_holder.back().get(); - continue; - } - - if (shared_state.is_null_safe_eq_join[i]) { - _probe_columns[i] = block.get_by_position(res_col_ids[i]).column.get(); + } else if (const auto* nullable = check_and_get_column(*column); + nullable && !shared_state.serialize_null_into_key[i]) { + // update nulllmap and split nested out of ColumnNullable when serialize_null_into_key is false and column is nullable + const auto& col_nested = nullable->get_nested_column(); + const auto& col_nullmap = nullable->get_null_map_data(); + DCHECK(_null_map_column != nullptr); + vectorized::VectorizedUtils::update_null_map(_null_map_column->get_data(), col_nullmap); + _probe_columns[i] = &col_nested; } else { - const auto* column = block.get_by_position(res_col_ids[i]).column.get(); - if (const auto* nullable = check_and_get_column(*column)) { - const auto& col_nested = nullable->get_nested_column(); - const auto& col_nullmap = nullable->get_null_map_data(); - - DCHECK(_null_map_column != nullptr); - vectorized::VectorizedUtils::update_null_map(_null_map_column->get_data(), - col_nullmap); - if (shared_state.store_null_in_hash_table[i]) { - _probe_columns[i] = nullable; - } else { - _probe_columns[i] = &col_nested; - } - } else { - _probe_columns[i] = column; - } + _probe_columns[i] = column; } } return Status::OK(); @@ -531,20 +512,6 @@ Status HashJoinProbeOperatorX::init(const TPlanNode& tnode, RuntimeState* state) null_aware || (_probe_expr_ctxs.back()->root()->is_nullable() && probe_dispose_null); conjuncts_index++; - const bool is_null_safe_equal = eq_join_conjunct.__isset.opcode && - (eq_join_conjunct.opcode == TExprOpcode::EQ_FOR_NULL) && - (eq_join_conjunct.right.nodes[0].is_nullable || - eq_join_conjunct.left.nodes[0].is_nullable); - - /// If it's right anti join, - /// we should convert the probe to nullable if the build side is 
nullable. - /// And if it is 'null safe equal', - /// we must make sure the build side and the probe side are both nullable or non-nullable. - const bool should_convert_to_nullable = - (is_null_safe_equal || _join_op == TJoinOp::RIGHT_ANTI_JOIN) && - !eq_join_conjunct.left.nodes[0].is_nullable && - eq_join_conjunct.right.nodes[0].is_nullable; - _should_convert_to_nullable.emplace_back(should_convert_to_nullable); } for (size_t i = 0; i < _probe_expr_ctxs.size(); ++i) { _probe_ignore_null |= !probe_not_ignore_null[i]; diff --git a/be/src/pipeline/exec/hashjoin_probe_operator.h b/be/src/pipeline/exec/hashjoin_probe_operator.h index 917c2692b44d61..1bdb9d13347d09 100644 --- a/be/src/pipeline/exec/hashjoin_probe_operator.h +++ b/be/src/pipeline/exec/hashjoin_probe_operator.h @@ -117,14 +117,12 @@ class HashJoinProbeLocalState final std::make_unique(); RuntimeProfile::Counter* _probe_expr_call_timer = nullptr; - RuntimeProfile::Counter* _probe_next_timer = nullptr; RuntimeProfile::Counter* _probe_side_output_timer = nullptr; - RuntimeProfile::Counter* _probe_process_hashtable_timer = nullptr; RuntimeProfile::HighWaterMarkCounter* _probe_arena_memory_usage = nullptr; RuntimeProfile::Counter* _search_hashtable_timer = nullptr; RuntimeProfile::Counter* _init_probe_side_timer = nullptr; RuntimeProfile::Counter* _build_side_output_timer = nullptr; - RuntimeProfile::Counter* _process_other_join_conjunct_timer = nullptr; + RuntimeProfile::Counter* _non_equal_join_conjuncts_timer = nullptr; }; class HashJoinProbeOperatorX final : public JoinProbeOperatorX { @@ -152,9 +150,6 @@ class HashJoinProbeOperatorX final : public JoinProbeOperatorX _should_convert_to_nullable; - vectorized::DataTypes _right_table_data_types; vectorized::DataTypes _left_table_data_types; std::vector _hash_output_slot_ids; diff --git a/be/src/pipeline/exec/jdbc_table_sink_operator.cpp b/be/src/pipeline/exec/jdbc_table_sink_operator.cpp index 10fd0d8e40bf25..29c881d1c28100 100644 --- a/be/src/pipeline/exec/jdbc_table_sink_operator.cpp +++ b/be/src/pipeline/exec/jdbc_table_sink_operator.cpp @@ -47,6 +47,7 @@ Status JdbcTableSinkOperatorX::open(RuntimeState* state) { Status JdbcTableSinkOperatorX::sink(RuntimeState* state, vectorized::Block* block, bool eos) { auto& local_state = get_local_state(state); SCOPED_TIMER(local_state.exec_time_counter()); + COUNTER_UPDATE(local_state.rows_input_counter(), (int64_t)block->rows()); RETURN_IF_ERROR(local_state.sink(state, block, eos)); return Status::OK(); } diff --git a/be/src/pipeline/exec/join/process_hash_table_probe.h b/be/src/pipeline/exec/join/process_hash_table_probe.h index 692b91f6a0120a..14e0edd977f57b 100644 --- a/be/src/pipeline/exec/join/process_hash_table_probe.h +++ b/be/src/pipeline/exec/join/process_hash_table_probe.h @@ -55,7 +55,7 @@ struct ProcessHashTableProbe { int last_probe_index, bool all_match_one, bool have_other_join_conjunct); - template + template Status process(HashTableType& hash_table_ctx, ConstNullMapPtr null_map, vectorized::MutableBlock& mutable_block, vectorized::Block* output_block, uint32_t probe_rows, bool is_mark_join, bool have_other_join_conjunct); @@ -64,8 +64,8 @@ struct ProcessHashTableProbe { // the output block struct is same with mutable block. 
we can do more opt on it and simplify // the logic of probe // TODO: opt the visited here to reduce the size of hash table - template + template Status do_process(HashTableType& hash_table_ctx, ConstNullMapPtr null_map, vectorized::MutableBlock& mutable_block, vectorized::Block* output_block, uint32_t probe_rows); @@ -87,9 +87,8 @@ struct ProcessHashTableProbe { // Process full outer join/ right join / right semi/anti join to output the join result // in hash table template - Status process_data_in_hashtable(HashTableType& hash_table_ctx, - vectorized::MutableBlock& mutable_block, - vectorized::Block* output_block, bool* eos, bool is_mark_join); + Status finish_probing(HashTableType& hash_table_ctx, vectorized::MutableBlock& mutable_block, + vectorized::Block* output_block, bool* eos, bool is_mark_join); /// For null aware join with other conjuncts, if the probe key of one row on left side is null, /// we should make this row match with all rows in build side. @@ -136,7 +135,7 @@ struct ProcessHashTableProbe { RuntimeProfile::Counter* _init_probe_side_timer = nullptr; RuntimeProfile::Counter* _build_side_output_timer = nullptr; RuntimeProfile::Counter* _probe_side_output_timer = nullptr; - RuntimeProfile::Counter* _probe_process_hashtable_timer = nullptr; + RuntimeProfile::Counter* _finish_probe_phase_timer = nullptr; size_t _right_col_idx; size_t _right_col_len; diff --git a/be/src/pipeline/exec/join/process_hash_table_probe_impl.h b/be/src/pipeline/exec/join/process_hash_table_probe_impl.h index 7fc639b47a4d01..231c231c81326e 100644 --- a/be/src/pipeline/exec/join/process_hash_table_probe_impl.h +++ b/be/src/pipeline/exec/join/process_hash_table_probe_impl.h @@ -56,7 +56,7 @@ ProcessHashTableProbe::ProcessHashTableProbe(HashJoinProbeLocalState _init_probe_side_timer(parent->_init_probe_side_timer), _build_side_output_timer(parent->_build_side_output_timer), _probe_side_output_timer(parent->_probe_side_output_timer), - _probe_process_hashtable_timer(parent->_probe_process_hashtable_timer), + _finish_probe_phase_timer(parent->_finish_probe_phase_timer), _right_col_idx((_is_right_semi_anti && !_have_other_join_conjunct) ? 0 : _parent->left_table_data_types().size()), @@ -187,8 +187,8 @@ typename HashTableType::State ProcessHashTableProbe::_init_probe_sid } template -template +template Status ProcessHashTableProbe::do_process(HashTableType& hash_table_ctx, vectorized::ConstNullMapPtr null_map, vectorized::MutableBlock& mutable_block, @@ -206,8 +206,8 @@ Status ProcessHashTableProbe::do_process(HashTableType& hash_table_c SCOPED_TIMER(_init_probe_side_timer); _init_probe_side( hash_table_ctx, probe_rows, with_other_conjuncts, - need_null_map_for_probe ? null_map->data() : nullptr, - need_null_map_for_probe && ignore_null && + null_map ? 
null_map->data() : nullptr, + need_judge_null && (JoinOpType == doris::TJoinOp::LEFT_ANTI_JOIN || JoinOpType == doris::TJoinOp::LEFT_SEMI_JOIN || JoinOpType == doris::TJoinOp::NULL_AWARE_LEFT_ANTI_JOIN || @@ -255,14 +255,12 @@ Status ProcessHashTableProbe::do_process(HashTableType& hash_table_c } } else { SCOPED_TIMER(_search_hashtable_timer); - auto [new_probe_idx, new_build_idx, - new_current_offset] = hash_table_ctx.hash_table->template find_batch < JoinOpType, - with_other_conjuncts, is_mark_join, - need_null_map_for_probe && - ignore_null > (hash_table_ctx.keys, hash_table_ctx.bucket_nums.data(), - probe_index, build_index, cast_set(probe_rows), - _probe_indexs.data(), _probe_visited, _build_indexs.data(), - has_mark_join_conjunct); + auto [new_probe_idx, new_build_idx, new_current_offset] = + hash_table_ctx.hash_table->template find_batch( + hash_table_ctx.keys, hash_table_ctx.bucket_nums.data(), probe_index, + build_index, cast_set(probe_rows), _probe_indexs.data(), + _probe_visited, _build_indexs.data(), has_mark_join_conjunct); probe_index = new_probe_idx; build_index = new_build_idx; current_offset = new_current_offset; @@ -504,7 +502,7 @@ Status ProcessHashTableProbe::do_other_join_conjuncts(vectorized::Bl return Status::OK(); } - SCOPED_TIMER(_parent->_process_other_join_conjunct_timer); + SCOPED_TIMER(_parent->_non_equal_join_conjuncts_timer); size_t orig_columns = output_block->columns(); vectorized::IColumn::Filter other_conjunct_filter(row_count, 1); { @@ -619,10 +617,11 @@ Status ProcessHashTableProbe::do_other_join_conjuncts(vectorized::Bl template template -Status ProcessHashTableProbe::process_data_in_hashtable( - HashTableType& hash_table_ctx, vectorized::MutableBlock& mutable_block, - vectorized::Block* output_block, bool* eos, bool is_mark_join) { - SCOPED_TIMER(_probe_process_hashtable_timer); +Status ProcessHashTableProbe::finish_probing(HashTableType& hash_table_ctx, + vectorized::MutableBlock& mutable_block, + vectorized::Block* output_block, bool* eos, + bool is_mark_join) { + SCOPED_TIMER(_finish_probe_phase_timer); auto& mcol = mutable_block.mutable_columns(); if (is_mark_join) { std::unique_ptr mark_column = @@ -675,7 +674,7 @@ Status ProcessHashTableProbe::process_data_in_hashtable( } template -template +template Status ProcessHashTableProbe::process(HashTableType& hash_table_ctx, vectorized::ConstNullMapPtr null_map, vectorized::MutableBlock& mutable_block, @@ -685,9 +684,9 @@ Status ProcessHashTableProbe::process(HashTableType& hash_table_ctx, Status res; std::visit( [&](auto is_mark_join, auto have_other_join_conjunct) { - res = do_process( - hash_table_ctx, null_map, mutable_block, output_block, probe_rows); + res = do_process(hash_table_ctx, null_map, mutable_block, + output_block, probe_rows); }, vectorized::make_bool_variant(is_mark_join), vectorized::make_bool_variant(have_other_join_conjunct)); @@ -703,29 +702,15 @@ struct ExtractType { }; #define INSTANTIATION(JoinOpType, T) \ - template Status \ - ProcessHashTableProbe::process::Type>( \ + template Status ProcessHashTableProbe::process::Type>( \ ExtractType::Type & hash_table_ctx, vectorized::ConstNullMapPtr null_map, \ vectorized::MutableBlock & mutable_block, vectorized::Block * output_block, \ uint32_t probe_rows, bool is_mark_join, bool have_other_join_conjunct); \ - template Status \ - ProcessHashTableProbe::process::Type>( \ + template Status ProcessHashTableProbe::process::Type>( \ ExtractType::Type & hash_table_ctx, vectorized::ConstNullMapPtr null_map, \ vectorized::MutableBlock & 
mutable_block, vectorized::Block * output_block, \ uint32_t probe_rows, bool is_mark_join, bool have_other_join_conjunct); \ - template Status \ - ProcessHashTableProbe::process::Type>( \ - ExtractType::Type & hash_table_ctx, vectorized::ConstNullMapPtr null_map, \ - vectorized::MutableBlock & mutable_block, vectorized::Block * output_block, \ - uint32_t probe_rows, bool is_mark_join, bool have_other_join_conjunct); \ - template Status \ - ProcessHashTableProbe::process::Type>( \ - ExtractType::Type & hash_table_ctx, vectorized::ConstNullMapPtr null_map, \ - vectorized::MutableBlock & mutable_block, vectorized::Block * output_block, \ - uint32_t probe_rows, bool is_mark_join, bool have_other_join_conjunct); \ - \ - template Status \ - ProcessHashTableProbe::process_data_in_hashtable::Type>( \ + template Status ProcessHashTableProbe::finish_probing::Type>( \ ExtractType::Type & hash_table_ctx, vectorized::MutableBlock & mutable_block, \ vectorized::Block * output_block, bool* eos, bool is_mark_join); diff --git a/be/src/pipeline/exec/join_build_sink_operator.cpp b/be/src/pipeline/exec/join_build_sink_operator.cpp index fc0d3b8746077b..8b3f5cd98ff7c0 100644 --- a/be/src/pipeline/exec/join_build_sink_operator.cpp +++ b/be/src/pipeline/exec/join_build_sink_operator.cpp @@ -33,15 +33,11 @@ Status JoinBuildSinkLocalState::init(RuntimeState* stat PipelineXSinkLocalState::profile()->add_info_string("JoinType", to_string(p._join_op)); - _build_rows_counter = ADD_COUNTER(PipelineXSinkLocalState::profile(), - "BuildRows", TUnit::UNIT); _publish_runtime_filter_timer = ADD_TIMER(PipelineXSinkLocalState::profile(), "PublishRuntimeFilterTime"); - _runtime_filter_compute_timer = ADD_TIMER(PipelineXSinkLocalState::profile(), - "RuntimeFilterComputeTime"); - _runtime_filter_init_timer = - ADD_TIMER(PipelineXSinkLocalState::profile(), "RuntimeFilterInitTime"); + _runtime_filter_compute_timer = + ADD_TIMER(PipelineXSinkLocalState::profile(), "BuildRuntimeFilterTime"); return Status::OK(); } diff --git a/be/src/pipeline/exec/join_build_sink_operator.h b/be/src/pipeline/exec/join_build_sink_operator.h index 714e0c34190678..9d79a97397ff77 100644 --- a/be/src/pipeline/exec/join_build_sink_operator.h +++ b/be/src/pipeline/exec/join_build_sink_operator.h @@ -39,10 +39,8 @@ class JoinBuildSinkLocalState : public PipelineXSinkLocalState template friend class JoinBuildSinkOperatorX; - RuntimeProfile::Counter* _build_rows_counter = nullptr; RuntimeProfile::Counter* _publish_runtime_filter_timer = nullptr; RuntimeProfile::Counter* _runtime_filter_compute_timer = nullptr; - RuntimeProfile::Counter* _runtime_filter_init_timer = nullptr; std::vector> _runtime_filters; }; diff --git a/be/src/pipeline/exec/join_probe_operator.cpp b/be/src/pipeline/exec/join_probe_operator.cpp index 76dc75a90d8f3c..11b5b29c8b556b 100644 --- a/be/src/pipeline/exec/join_probe_operator.cpp +++ b/be/src/pipeline/exec/join_probe_operator.cpp @@ -29,11 +29,10 @@ Status JoinProbeLocalState::init(RuntimeState* state, LocalStateInfo& info) { RETURN_IF_ERROR(Base::init(state, info)); - _probe_timer = ADD_TIMER(Base::profile(), "ProbeTime"); _join_filter_timer = ADD_TIMER(Base::profile(), "JoinFilterTimer"); _build_output_block_timer = ADD_TIMER(Base::profile(), "BuildOutputBlock"); _probe_rows_counter = ADD_COUNTER_WITH_LEVEL(Base::profile(), "ProbeRows", TUnit::UNIT, 1); - + _finish_probe_phase_timer = ADD_TIMER(Base::profile(), "FinishProbePhaseTime"); return Status::OK(); } diff --git a/be/src/pipeline/exec/join_probe_operator.h 
b/be/src/pipeline/exec/join_probe_operator.h index 3f68c73d04b161..078806cea4fc5a 100644 --- a/be/src/pipeline/exec/join_probe_operator.h +++ b/be/src/pipeline/exec/join_probe_operator.h @@ -49,10 +49,10 @@ class JoinProbeLocalState : public PipelineXLocalState { size_t _mark_column_id = -1; - RuntimeProfile::Counter* _probe_timer = nullptr; RuntimeProfile::Counter* _probe_rows_counter = nullptr; RuntimeProfile::Counter* _join_filter_timer = nullptr; RuntimeProfile::Counter* _build_output_block_timer = nullptr; + RuntimeProfile::Counter* _finish_probe_phase_timer = nullptr; std::unique_ptr _child_block = nullptr; bool _child_eos = false; diff --git a/be/src/pipeline/exec/memory_scratch_sink_operator.cpp b/be/src/pipeline/exec/memory_scratch_sink_operator.cpp index 1d022f9304fd0d..2c69c0e2b2ba9f 100644 --- a/be/src/pipeline/exec/memory_scratch_sink_operator.cpp +++ b/be/src/pipeline/exec/memory_scratch_sink_operator.cpp @@ -33,6 +33,9 @@ Status MemoryScratchSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo RETURN_IF_ERROR(Base::init(state, info)); SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); + _get_arrow_schema_timer = ADD_TIMER(_profile, "GetArrowSchemaTime"); + _convert_block_to_arrow_batch_timer = ADD_TIMER(_profile, "ConvertBlockToArrowBatchTime"); + _evaluation_timer = ADD_TIMER(_profile, "EvaluationTime"); // create queue state->exec_env()->result_queue_mgr()->create_queue(state->fragment_instance_id(), &_queue); @@ -92,13 +95,22 @@ Status MemoryScratchSinkOperatorX::sink(RuntimeState* state, vectorized::Block* // Exec vectorized expr here to speed up, block.rows() == 0 means expr exec // failed, just return the error status vectorized::Block block; - RETURN_IF_ERROR(vectorized::VExprContext::get_output_block_after_execute_exprs( - local_state._output_vexpr_ctxs, *input_block, &block)); + { + SCOPED_TIMER(local_state._evaluation_timer); + RETURN_IF_ERROR(vectorized::VExprContext::get_output_block_after_execute_exprs( + local_state._output_vexpr_ctxs, *input_block, &block)); + } std::shared_ptr block_arrow_schema; - // After expr executed, use recaculated schema as final schema - RETURN_IF_ERROR(convert_block_arrow_schema(block, &block_arrow_schema, state->timezone())); - RETURN_IF_ERROR(convert_to_arrow_batch(block, block_arrow_schema, arrow::default_memory_pool(), - &result, _timezone_obj)); + { + SCOPED_TIMER(local_state._get_arrow_schema_timer); + // After expr executed, use recaculated schema as final schema + RETURN_IF_ERROR(get_arrow_schema(block, &block_arrow_schema, state->timezone())); + } + { + SCOPED_TIMER(local_state._convert_block_to_arrow_batch_timer); + RETURN_IF_ERROR(convert_to_arrow_batch( + block, block_arrow_schema, arrow::default_memory_pool(), &result, _timezone_obj)); + } local_state._queue->blocking_put(result); if (local_state._queue->size() > config::max_memory_sink_batch_count) { local_state._queue_dependency->block(); diff --git a/be/src/pipeline/exec/memory_scratch_sink_operator.h b/be/src/pipeline/exec/memory_scratch_sink_operator.h index 69c0fa14042ef2..c74659d15b96f2 100644 --- a/be/src/pipeline/exec/memory_scratch_sink_operator.h +++ b/be/src/pipeline/exec/memory_scratch_sink_operator.h @@ -45,6 +45,9 @@ class MemoryScratchSinkLocalState final : public PipelineXSinkLocalState _queue_dependency = nullptr; + RuntimeProfile::Counter* _get_arrow_schema_timer = nullptr; + RuntimeProfile::Counter* _convert_block_to_arrow_batch_timer = nullptr; + RuntimeProfile::Counter* _evaluation_timer = nullptr; }; class 
MemoryScratchSinkOperatorX final : public DataSinkOperatorX { diff --git a/be/src/pipeline/exec/multi_cast_data_stream_source.cpp b/be/src/pipeline/exec/multi_cast_data_stream_source.cpp index 71204f1285ce7b..e45e59d17e27b3 100644 --- a/be/src/pipeline/exec/multi_cast_data_stream_source.cpp +++ b/be/src/pipeline/exec/multi_cast_data_stream_source.cpp @@ -40,6 +40,9 @@ Status MultiCastDataStreamSourceLocalState::init(RuntimeState* state, LocalState auto& p = _parent->cast(); _shared_state->multi_cast_data_streamer->set_dep_by_sender_idx(p._consumer_id, _dependency); _wait_for_rf_timer = ADD_TIMER(_runtime_profile, "WaitForRuntimeFilter"); + _filter_timer = ADD_TIMER(_runtime_profile, "FilterTime"); + _get_data_timer = ADD_TIMER(_runtime_profile, "GetDataTime"); + _materialize_data_timer = ADD_TIMER(_runtime_profile, "MaterializeDataTime"); // init profile for runtime filter RuntimeFilterConsumer::_init_profile(profile()); init_runtime_filter_dependency(_filter_dependencies, p.operator_id(), p.node_id(), @@ -86,15 +89,19 @@ Status MultiCastDataStreamerSourceOperatorX::get_block(RuntimeState* state, if (!local_state._output_expr_contexts.empty()) { output_block = &tmp_block; } - RETURN_IF_ERROR(local_state._shared_state->multi_cast_data_streamer->pull(_consumer_id, - output_block, eos)); - + { + SCOPED_TIMER(local_state._get_data_timer); + RETURN_IF_ERROR(local_state._shared_state->multi_cast_data_streamer->pull( + _consumer_id, output_block, eos)); + } if (!local_state._conjuncts.empty()) { + SCOPED_TIMER(local_state._filter_timer); RETURN_IF_ERROR(vectorized::VExprContext::filter_block(local_state._conjuncts, output_block, output_block->columns())); } if (!local_state._output_expr_contexts.empty() && output_block->rows() > 0) { + SCOPED_TIMER(local_state._materialize_data_timer); RETURN_IF_ERROR(vectorized::VExprContext::get_output_block_after_execute_exprs( local_state._output_expr_contexts, *output_block, block, true)); vectorized::materialize_block_inplace(*block); diff --git a/be/src/pipeline/exec/multi_cast_data_stream_source.h b/be/src/pipeline/exec/multi_cast_data_stream_source.h index 2059f706cad3f5..57410bf8d9568a 100644 --- a/be/src/pipeline/exec/multi_cast_data_stream_source.h +++ b/be/src/pipeline/exec/multi_cast_data_stream_source.h @@ -68,6 +68,9 @@ class MultiCastDataStreamSourceLocalState final : public PipelineXLocalState> _filter_dependencies; RuntimeProfile::Counter* _wait_for_rf_timer = nullptr; + RuntimeProfile::Counter* _filter_timer = nullptr; + RuntimeProfile::Counter* _get_data_timer = nullptr; + RuntimeProfile::Counter* _materialize_data_timer = nullptr; }; class MultiCastDataStreamerSourceOperatorX final diff --git a/be/src/pipeline/exec/nested_loop_join_build_operator.cpp b/be/src/pipeline/exec/nested_loop_join_build_operator.cpp index 59020a5df437bd..83b378e792c3fa 100644 --- a/be/src/pipeline/exec/nested_loop_join_build_operator.cpp +++ b/be/src/pipeline/exec/nested_loop_join_build_operator.cpp @@ -139,7 +139,6 @@ Status NestedLoopJoinBuildSinkOperatorX::sink(doris::RuntimeState* state, vector } if (eos) { - COUNTER_UPDATE(local_state._build_rows_counter, local_state._build_rows); RuntimeFilterBuild rf_ctx(&local_state); RETURN_IF_ERROR(rf_ctx(state)); diff --git a/be/src/pipeline/exec/nested_loop_join_build_operator.h b/be/src/pipeline/exec/nested_loop_join_build_operator.h index f2ca259754b661..d6e72799f97d92 100644 --- a/be/src/pipeline/exec/nested_loop_join_build_operator.h +++ b/be/src/pipeline/exec/nested_loop_join_build_operator.h @@ -76,8 +76,8 @@ class 
NestedLoopJoinBuildSinkOperatorX final if (_join_op == TJoinOp::NULL_AWARE_LEFT_ANTI_JOIN) { return {ExchangeType::NOOP}; } - return _child->ignore_data_distribution() ? DataDistribution(ExchangeType::BROADCAST) - : DataDistribution(ExchangeType::NOOP); + return _child->is_serial_operator() ? DataDistribution(ExchangeType::BROADCAST) + : DataDistribution(ExchangeType::NOOP); } private: diff --git a/be/src/pipeline/exec/nested_loop_join_probe_operator.cpp b/be/src/pipeline/exec/nested_loop_join_probe_operator.cpp index d0fb4ee19a5824..afa1a2e59b798c 100644 --- a/be/src/pipeline/exec/nested_loop_join_probe_operator.cpp +++ b/be/src/pipeline/exec/nested_loop_join_probe_operator.cpp @@ -43,6 +43,10 @@ Status NestedLoopJoinProbeLocalState::init(RuntimeState* state, LocalStateInfo& SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); _loop_join_timer = ADD_TIMER(profile(), "LoopGenerateJoin"); + _output_temp_blocks_timer = ADD_TIMER(profile(), "OutputTempBlocksTime"); + _update_visited_flags_timer = ADD_TIMER(profile(), "UpdateVisitedFlagsTime"); + _join_conjuncts_evaluation_timer = ADD_TIMER(profile(), "JoinConjunctsEvaluationTime"); + _filtered_by_join_conjuncts_timer = ADD_TIMER(profile(), "FilteredByJoinConjunctsTime"); return Status::OK(); } @@ -168,23 +172,26 @@ Status NestedLoopJoinProbeLocalState::generate_join_block_data(RuntimeState* sta _process_left_child_block(_join_block, now_process_build_block); } - if constexpr (set_probe_side_flag) { - RETURN_IF_ERROR( - (_do_filtering_and_update_visited_flags( - &_join_block, !p._is_left_semi_anti))); - _update_additional_flags(&_join_block); - // If this join operation is left outer join or full outer join, when - // `_left_side_process_count`, means all rows from build - // side have been joined with _left_side_process_count, we should output current - // probe row with null from build side. - if (_left_side_process_count) { - _finalize_current_phase( - _join_block, state->batch_size()); + { + SCOPED_TIMER(_finish_probe_phase_timer); + if constexpr (set_probe_side_flag) { + RETURN_IF_ERROR( + (_do_filtering_and_update_visited_flags( + &_join_block, !p._is_left_semi_anti))); + _update_additional_flags(&_join_block); + // If this join operation is left outer join or full outer join, when + // `_left_side_process_count`, means all rows from build + // side have been joined with _left_side_process_count, we should output current + // probe row with null from build side. 
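A note on the instrumentation pattern used throughout the probe-side hunks in this patch: coarse counters such as the old ProbeTime are removed and individual phases (visited-flag updates, conjunct evaluation, the finish-probe phase wrapped here in SCOPED_TIMER(_finish_probe_phase_timer)) each get their own RuntimeProfile counter. The snippet below is a minimal sketch of the RAII scoped-timer idea this relies on; `Counter` and `ScopedTimer` are illustrative stand-ins, not Doris's RuntimeProfile::Counter or the real SCOPED_TIMER macro.

```cpp
// Minimal sketch of an RAII scoped timer: time spent inside a block is
// attributed to one named counter when the scope exits.
#include <atomic>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <thread>

struct Counter {
    std::atomic<int64_t> value_ns {0}; // accumulated wall time in nanoseconds
};

class ScopedTimer {
public:
    explicit ScopedTimer(Counter* counter)
            : _counter(counter), _start(std::chrono::steady_clock::now()) {}
    // On scope exit, add the elapsed time to the bound counter.
    ~ScopedTimer() {
        auto elapsed = std::chrono::steady_clock::now() - _start;
        _counter->value_ns +=
                std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count();
    }

private:
    Counter* _counter;
    std::chrono::steady_clock::time_point _start;
};

int main() {
    Counter finish_probe_phase_timer;
    {
        // Analogous in spirit to SCOPED_TIMER(_finish_probe_phase_timer):
        // everything inside this block is charged to that one counter.
        ScopedTimer timer(&finish_probe_phase_timer);
        std::this_thread::sleep_for(std::chrono::milliseconds(5));
    }
    std::cout << "FinishProbePhaseTime: " << finish_probe_phase_timer.value_ns << " ns\n";
    return 0;
}
```

Splitting one broad timer into several such scopes is what lets the query profile attribute probe time separately to filtering, flag updates, and conjunct evaluation.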
+ if (_left_side_process_count) { + _finalize_current_phase( + _join_block, state->batch_size()); + } + } else if (_left_side_process_count && p._is_mark_join && + _shared_state->build_blocks.empty()) { + _append_left_data_with_null(_join_block); } - } else if (_left_side_process_count && p._is_mark_join && - _shared_state->build_blocks.empty()) { - _append_left_data_with_null(_join_block); } } @@ -377,6 +384,7 @@ void NestedLoopJoinProbeLocalState::_append_left_data_with_null(vectorized::Bloc void NestedLoopJoinProbeLocalState::_process_left_child_block( vectorized::Block& block, const vectorized::Block& now_process_build_block) const { + SCOPED_TIMER(_output_temp_blocks_timer); auto& p = _parent->cast(); auto dst_columns = block.mutate_columns(); const size_t max_added_rows = now_process_build_block.rows(); @@ -485,6 +493,7 @@ Status NestedLoopJoinProbeOperatorX::push(doris::RuntimeState* state, vectorized set_build_side_flag, set_probe_side_flag>( state, join_op_variants); }; + SCOPED_TIMER(local_state._loop_join_timer); RETURN_IF_ERROR( std::visit(func, local_state._shared_state->join_op_variants, vectorized::make_bool_variant(_match_all_build || _is_right_semi_anti), diff --git a/be/src/pipeline/exec/nested_loop_join_probe_operator.h b/be/src/pipeline/exec/nested_loop_join_probe_operator.h index 5b0fec159e28bf..c744e6acdc507e 100644 --- a/be/src/pipeline/exec/nested_loop_join_probe_operator.h +++ b/be/src/pipeline/exec/nested_loop_join_probe_operator.h @@ -68,42 +68,48 @@ class NestedLoopJoinProbeLocalState final size_t build_block_idx, size_t processed_blocks_num, bool materialize, Filter& filter) { - if constexpr (SetBuildSideFlag) { - for (size_t i = 0; i < processed_blocks_num; i++) { - auto& build_side_flag = - assert_cast( - _shared_state->build_side_visited_flags[build_block_idx].get()) - ->get_data(); - auto* __restrict build_side_flag_data = build_side_flag.data(); - auto cur_sz = build_side_flag.size(); - const size_t offset = _build_offset_stack.top(); - _build_offset_stack.pop(); - for (size_t j = 0; j < cur_sz; j++) { - build_side_flag_data[j] |= filter[offset + j]; + { + SCOPED_TIMER(_update_visited_flags_timer); + if constexpr (SetBuildSideFlag) { + for (size_t i = 0; i < processed_blocks_num; i++) { + auto& build_side_flag = + assert_cast( + _shared_state->build_side_visited_flags[build_block_idx].get()) + ->get_data(); + auto* __restrict build_side_flag_data = build_side_flag.data(); + auto cur_sz = build_side_flag.size(); + const size_t offset = _build_offset_stack.top(); + _build_offset_stack.pop(); + for (size_t j = 0; j < cur_sz; j++) { + build_side_flag_data[j] |= filter[offset + j]; + } + build_block_idx = build_block_idx == 0 ? _shared_state->build_blocks.size() - 1 + : build_block_idx - 1; } - build_block_idx = build_block_idx == 0 ? _shared_state->build_blocks.size() - 1 - : build_block_idx - 1; } - } - if constexpr (SetProbeSideFlag) { - int64_t end = filter.size(); - for (int i = _left_block_pos == _child_block->rows() ? _left_block_pos - 1 - : _left_block_pos; - i >= _left_block_start_pos; i--) { - int64_t offset = 0; - if (!_probe_offset_stack.empty()) { - offset = _probe_offset_stack.top(); - _probe_offset_stack.pop(); - } - if (!_cur_probe_row_visited_flags[i]) { - _cur_probe_row_visited_flags[i] = - simd::contain_byte(filter.data() + offset, end - offset, 1) ? 1 - : 0; + if constexpr (SetProbeSideFlag) { + int64_t end = filter.size(); + for (int i = _left_block_pos == _child_block->rows() ? 
_left_block_pos - 1 + : _left_block_pos; + i >= _left_block_start_pos; i--) { + int64_t offset = 0; + if (!_probe_offset_stack.empty()) { + offset = _probe_offset_stack.top(); + _probe_offset_stack.pop(); + } + if (!_cur_probe_row_visited_flags[i]) { + _cur_probe_row_visited_flags[i] = + simd::contain_byte(filter.data() + offset, end - offset, 1) + ? 1 + : 0; + } + end = offset; } - end = offset; } } + if (materialize) { + SCOPED_TIMER(_filtered_by_join_conjuncts_timer); vectorized::Block::filter_block_internal(block, filter, column_to_keep); } else { CLEAR_BLOCK @@ -125,8 +131,11 @@ class NestedLoopJoinProbeLocalState final if (LIKELY(!_join_conjuncts.empty() && block->rows() > 0)) { vectorized::IColumn::Filter filter(block->rows(), 1); bool can_filter_all = false; - RETURN_IF_ERROR(vectorized::VExprContext::execute_conjuncts( - _join_conjuncts, nullptr, IgnoreNull, block, &filter, &can_filter_all)); + { + SCOPED_TIMER(_join_conjuncts_evaluation_timer); + RETURN_IF_ERROR(vectorized::VExprContext::execute_conjuncts( + _join_conjuncts, nullptr, IgnoreNull, block, &filter, &can_filter_all)); + } if (can_filter_all) { CLEAR_BLOCK @@ -185,6 +194,10 @@ class NestedLoopJoinProbeLocalState final vectorized::VExprContextSPtrs _join_conjuncts; RuntimeProfile::Counter* _loop_join_timer = nullptr; + RuntimeProfile::Counter* _output_temp_blocks_timer = nullptr; + RuntimeProfile::Counter* _update_visited_flags_timer = nullptr; + RuntimeProfile::Counter* _join_conjuncts_evaluation_timer = nullptr; + RuntimeProfile::Counter* _filtered_by_join_conjuncts_timer = nullptr; }; class NestedLoopJoinProbeOperatorX final diff --git a/be/src/pipeline/exec/olap_scan_operator.cpp b/be/src/pipeline/exec/olap_scan_operator.cpp index 0d1cb362ea00bd..124f2d1c70ec93 100644 --- a/be/src/pipeline/exec/olap_scan_operator.cpp +++ b/be/src/pipeline/exec/olap_scan_operator.cpp @@ -43,6 +43,9 @@ namespace doris::pipeline { Status OlapScanLocalState::_init_profile() { RETURN_IF_ERROR(ScanLocalState::_init_profile()); + // Rows read from storage. + // Include the rows read from doris page cache. + _scan_rows = ADD_COUNTER(_runtime_profile, "ScanRows", TUnit::UNIT); // 1. 
init segment profile _segment_profile.reset(new RuntimeProfile("SegmentIterator")); _scanner_profile->add_child(_segment_profile.get(), true, nullptr); @@ -58,22 +61,20 @@ Status OlapScanLocalState::_init_profile() { _block_load_counter = ADD_COUNTER(_segment_profile, "BlocksLoad", TUnit::UNIT); _block_fetch_timer = ADD_TIMER(_scanner_profile, "BlockFetchTime"); _delete_bitmap_get_agg_timer = ADD_TIMER(_scanner_profile, "DeleteBitmapGetAggTime"); - _sync_rowset_timer = ADD_TIMER(_scanner_profile, "SyncRowsetTime"); - _block_convert_timer = ADD_TIMER(_scanner_profile, "BlockConvertTime"); + if (config::is_cloud_mode()) { + _sync_rowset_timer = ADD_TIMER(_scanner_profile, "SyncRowsetTime"); + } _block_init_timer = ADD_TIMER(_segment_profile, "BlockInitTime"); _block_init_seek_timer = ADD_TIMER(_segment_profile, "BlockInitSeekTime"); _block_init_seek_counter = ADD_COUNTER(_segment_profile, "BlockInitSeekCount", TUnit::UNIT); - _block_conditions_filtered_timer = ADD_TIMER(_segment_profile, "BlockConditionsFilteredTime"); - _block_conditions_filtered_bf_timer = - ADD_TIMER(_segment_profile, "BlockConditionsFilteredBloomFilterTime"); + _segment_generate_row_range_timer = ADD_TIMER(_segment_profile, "GenerateRowRangeTime"); + _segment_generate_row_range_by_bf_timer = + ADD_TIMER(_segment_profile, "GenerateRowRangeByBloomFilterIndexTime"); _collect_iterator_merge_next_timer = ADD_TIMER(_segment_profile, "CollectIteratorMergeTime"); - _collect_iterator_normal_next_timer = ADD_TIMER(_segment_profile, "CollectIteratorNormalTime"); - _block_conditions_filtered_zonemap_timer = - ADD_TIMER(_segment_profile, "BlockConditionsFilteredZonemapTime"); - _block_conditions_filtered_zonemap_rp_timer = - ADD_TIMER(_segment_profile, "BlockConditionsFilteredZonemapRuntimePredicateTime"); - _block_conditions_filtered_dict_timer = - ADD_TIMER(_segment_profile, "BlockConditionsFilteredDictTime"); + _segment_generate_row_range_by_zonemap_timer = + ADD_TIMER(_segment_profile, "GenerateRowRangeByZoneMapIndexTime"); + _segment_generate_row_range_by_dict_timer = + ADD_TIMER(_segment_profile, "GenerateRowRangeByDictTime"); _rows_vec_cond_filtered_counter = ADD_COUNTER(_segment_profile, "RowsVectorPredFiltered", TUnit::UNIT); @@ -86,10 +87,11 @@ Status OlapScanLocalState::_init_profile() { _vec_cond_timer = ADD_TIMER(_segment_profile, "VectorPredEvalTime"); _short_cond_timer = ADD_TIMER(_segment_profile, "ShortPredEvalTime"); _expr_filter_timer = ADD_TIMER(_segment_profile, "ExprFilterEvalTime"); - _first_read_timer = ADD_TIMER(_segment_profile, "FirstReadTime"); - _second_read_timer = ADD_TIMER(_segment_profile, "SecondReadTime"); - _first_read_seek_timer = ADD_TIMER(_segment_profile, "FirstReadSeekTime"); - _first_read_seek_counter = ADD_COUNTER(_segment_profile, "FirstReadSeekCount", TUnit::UNIT); + _predicate_column_read_timer = ADD_TIMER(_segment_profile, "PredicateColumnReadTime"); + _non_predicate_column_read_timer = ADD_TIMER(_segment_profile, "NonPredicateColumnReadTime"); + _predicate_column_read_seek_timer = ADD_TIMER(_segment_profile, "PredicateColumnReadSeekTime"); + _predicate_column_read_seek_counter = + ADD_COUNTER(_segment_profile, "PredicateColumnReadSeekCount", TUnit::UNIT); _lazy_read_timer = ADD_TIMER(_segment_profile, "LazyReadTime"); _lazy_read_seek_timer = ADD_TIMER(_segment_profile, "LazyReadSeekTime"); @@ -99,7 +101,7 @@ Status OlapScanLocalState::_init_profile() { _stats_filtered_counter = ADD_COUNTER(_segment_profile, "RowsStatsFiltered", TUnit::UNIT); _stats_rp_filtered_counter = - 
ADD_COUNTER(_segment_profile, "RowsZonemapRuntimePredicateFiltered", TUnit::UNIT); + ADD_COUNTER(_segment_profile, "RowsZoneMapRuntimePredicateFiltered", TUnit::UNIT); _bf_filtered_counter = ADD_COUNTER(_segment_profile, "RowsBloomFilterFiltered", TUnit::UNIT); _dict_filtered_counter = ADD_COUNTER(_segment_profile, "RowsDictFiltered", TUnit::UNIT); _del_filtered_counter = ADD_COUNTER(_scanner_profile, "RowsDelFiltered", TUnit::UNIT); @@ -130,8 +132,6 @@ Status OlapScanLocalState::_init_profile() { ADD_TIMER(_segment_profile, "InvertedIndexQueryNullBitmapTime"); _inverted_index_query_bitmap_copy_timer = ADD_TIMER(_segment_profile, "InvertedIndexQueryBitmapCopyTime"); - _inverted_index_query_bitmap_op_timer = - ADD_TIMER(_segment_profile, "InvertedIndexQueryBitmapOpTime"); _inverted_index_searcher_open_timer = ADD_TIMER(_segment_profile, "InvertedIndexSearcherOpenTime"); _inverted_index_searcher_search_timer = @@ -143,7 +143,7 @@ Status OlapScanLocalState::_init_profile() { _inverted_index_downgrade_count_counter = ADD_COUNTER(_segment_profile, "InvertedIndexDowngradeCount", TUnit::UNIT); - _output_index_result_column_timer = ADD_TIMER(_segment_profile, "OutputIndexResultColumnTimer"); + _output_index_result_column_timer = ADD_TIMER(_segment_profile, "OutputIndexResultColumnTime"); _filtered_segment_counter = ADD_COUNTER(_segment_profile, "NumSegmentFiltered", TUnit::UNIT); _total_segment_counter = ADD_COUNTER(_segment_profile, "NumSegmentTotal", TUnit::UNIT); _tablet_counter = ADD_COUNTER(_runtime_profile, "TabletNum", TUnit::UNIT); @@ -278,8 +278,9 @@ Status OlapScanLocalState::_init_scanners(std::list* s scan_range->version.data() + scan_range->version.size(), version); tablets.emplace_back(std::move(tablet), version); } - int64_t duration_ns = 0; + if (config::is_cloud_mode()) { + int64_t duration_ns = 0; SCOPED_RAW_TIMER(&duration_ns); std::vector> tasks; tasks.reserve(_scan_ranges.size()); @@ -289,8 +290,8 @@ Status OlapScanLocalState::_init_scanners(std::list* s }); } RETURN_IF_ERROR(cloud::bthread_fork_join(tasks, 10)); + _sync_rowset_timer->update(duration_ns); } - _sync_rowset_timer->update(duration_ns); if (enable_parallel_scan && !p._should_run_serial && !has_cpu_limit && p._push_down_agg_type == TPushAggOp::NONE && @@ -331,25 +332,6 @@ Status OlapScanLocalState::_init_scanners(std::list* s int scanners_per_tablet = std::max(1, 64 / (int)_scan_ranges.size()); - auto build_new_scanner = [&](BaseTabletSPtr tablet, int64_t version, - const std::vector& key_ranges) { - COUNTER_UPDATE(_key_range_counter, key_ranges.size()); - auto scanner = vectorized::NewOlapScanner::create_shared( - this, vectorized::NewOlapScanner::Params { - state(), - _scanner_profile.get(), - key_ranges, - std::move(tablet), - version, - {}, - p._limit, - p._olap_scan_node.is_preaggregation, - }); - RETURN_IF_ERROR(scanner->prepare(state(), _conjuncts)); - scanners->push_back(std::move(scanner)); - return Status::OK(); - }; - for (auto& scan_range : _scan_ranges) { auto tablet = DORIS_TRY(ExecEnv::get_tablet(scan_range->tablet_id)); int64_t version = 0; @@ -375,7 +357,21 @@ Status OlapScanLocalState::_init_scanners(std::list* s ++j, ++i) { scanner_ranges.push_back((*ranges)[i].get()); } - RETURN_IF_ERROR(build_new_scanner(tablet, version, scanner_ranges)); + + COUNTER_UPDATE(_key_range_counter, scanner_ranges.size()); + auto scanner = vectorized::NewOlapScanner::create_shared( + this, vectorized::NewOlapScanner::Params { + state(), + _scanner_profile.get(), + scanner_ranges, + std::move(tablet), + version, + {}, 
+ p._limit, + p._olap_scan_node.is_preaggregation, + }); + RETURN_IF_ERROR(scanner->prepare(state(), _conjuncts)); + scanners->push_back(std::move(scanner)); } } diff --git a/be/src/pipeline/exec/olap_scan_operator.h b/be/src/pipeline/exec/olap_scan_operator.h index c972c7ce99a288..9e8624b3a0b255 100644 --- a/be/src/pipeline/exec/olap_scan_operator.h +++ b/be/src/pipeline/exec/olap_scan_operator.h @@ -97,11 +97,8 @@ class OlapScanLocalState final : public ScanLocalState { std::unique_ptr _segment_profile; - RuntimeProfile::Counter* _num_disks_accessed_counter = nullptr; - RuntimeProfile::Counter* _tablet_counter = nullptr; RuntimeProfile::Counter* _key_range_counter = nullptr; - RuntimeProfile::Counter* _rows_pushed_cond_filtered_counter = nullptr; RuntimeProfile::Counter* _reader_init_timer = nullptr; RuntimeProfile::Counter* _scanner_init_timer = nullptr; RuntimeProfile::Counter* _process_conjunct_timer = nullptr; @@ -139,23 +136,19 @@ class OlapScanLocalState final : public ScanLocalState { RuntimeProfile::Counter* _block_init_timer = nullptr; RuntimeProfile::Counter* _block_init_seek_timer = nullptr; RuntimeProfile::Counter* _block_init_seek_counter = nullptr; - RuntimeProfile::Counter* _block_conditions_filtered_timer = nullptr; - RuntimeProfile::Counter* _block_conditions_filtered_bf_timer = nullptr; + RuntimeProfile::Counter* _segment_generate_row_range_timer = nullptr; + RuntimeProfile::Counter* _segment_generate_row_range_by_bf_timer = nullptr; RuntimeProfile::Counter* _collect_iterator_merge_next_timer = nullptr; - RuntimeProfile::Counter* _collect_iterator_normal_next_timer = nullptr; - RuntimeProfile::Counter* _block_conditions_filtered_zonemap_timer = nullptr; - RuntimeProfile::Counter* _block_conditions_filtered_zonemap_rp_timer = nullptr; - RuntimeProfile::Counter* _block_conditions_filtered_dict_timer = nullptr; - RuntimeProfile::Counter* _first_read_timer = nullptr; - RuntimeProfile::Counter* _second_read_timer = nullptr; - RuntimeProfile::Counter* _first_read_seek_timer = nullptr; - RuntimeProfile::Counter* _first_read_seek_counter = nullptr; + RuntimeProfile::Counter* _segment_generate_row_range_by_zonemap_timer = nullptr; + RuntimeProfile::Counter* _segment_generate_row_range_by_dict_timer = nullptr; + RuntimeProfile::Counter* _predicate_column_read_timer = nullptr; + RuntimeProfile::Counter* _non_predicate_column_read_timer = nullptr; + RuntimeProfile::Counter* _predicate_column_read_seek_timer = nullptr; + RuntimeProfile::Counter* _predicate_column_read_seek_counter = nullptr; RuntimeProfile::Counter* _lazy_read_timer = nullptr; RuntimeProfile::Counter* _lazy_read_seek_timer = nullptr; RuntimeProfile::Counter* _lazy_read_seek_counter = nullptr; - RuntimeProfile::Counter* _block_convert_timer = nullptr; - // total pages read // used by segment v2 RuntimeProfile::Counter* _total_pages_num_counter = nullptr; @@ -175,7 +168,6 @@ class OlapScanLocalState final : public ScanLocalState { RuntimeProfile::Counter* _inverted_index_query_cache_miss_counter = nullptr; RuntimeProfile::Counter* _inverted_index_query_timer = nullptr; RuntimeProfile::Counter* _inverted_index_query_bitmap_copy_timer = nullptr; - RuntimeProfile::Counter* _inverted_index_query_bitmap_op_timer = nullptr; RuntimeProfile::Counter* _inverted_index_searcher_open_timer = nullptr; RuntimeProfile::Counter* _inverted_index_searcher_search_timer = nullptr; RuntimeProfile::Counter* _inverted_index_searcher_cache_hit_counter = nullptr; diff --git a/be/src/pipeline/exec/operator.cpp 
b/be/src/pipeline/exec/operator.cpp index 6e3099db7486bc..fb2dd828c3916c 100644 --- a/be/src/pipeline/exec/operator.cpp +++ b/be/src/pipeline/exec/operator.cpp @@ -74,6 +74,7 @@ #include "pipeline/exec/union_source_operator.h" #include "pipeline/local_exchange/local_exchange_sink_operator.h" #include "pipeline/local_exchange/local_exchange_source_operator.h" +#include "pipeline/pipeline.h" #include "util/debug_util.h" #include "util/runtime_profile.h" #include "util/string_util.h" @@ -116,11 +117,16 @@ std::string PipelineXSinkLocalState::name_suffix() { }() + ")"; } -DataDistribution DataSinkOperatorXBase::required_data_distribution() const { - return _child && _child->ignore_data_distribution() +DataDistribution OperatorBase::required_data_distribution() const { + return _child && _child->is_serial_operator() && !is_source() ? DataDistribution(ExchangeType::PASSTHROUGH) : DataDistribution(ExchangeType::NOOP); } + +bool OperatorBase::require_shuffled_data_distribution() const { + return Pipeline::is_hash_exchange(required_data_distribution().distribution_type); +} + const RowDescriptor& OperatorBase::row_desc() const { return _child->row_desc(); } diff --git a/be/src/pipeline/exec/operator.h b/be/src/pipeline/exec/operator.h index 5df0a19498f395..2a2b3fdd3b95b2 100644 --- a/be/src/pipeline/exec/operator.h +++ b/be/src/pipeline/exec/operator.h @@ -118,7 +118,8 @@ class OperatorBase { _followed_by_shuffled_operator = followed_by_shuffled_operator; } [[nodiscard]] virtual bool is_shuffled_operator() const { return false; } - [[nodiscard]] virtual bool require_shuffled_data_distribution() const { return false; } + [[nodiscard]] virtual DataDistribution required_data_distribution() const; + [[nodiscard]] virtual bool require_shuffled_data_distribution() const; protected: OperatorPtr _child = nullptr; @@ -483,7 +484,6 @@ class DataSinkOperatorXBase : public OperatorBase { } [[nodiscard]] virtual std::shared_ptr create_shared_state() const = 0; - [[nodiscard]] virtual DataDistribution required_data_distribution() const; Status close(RuntimeState* state) override { return Status::InternalError("Should not reach here!"); @@ -496,8 +496,6 @@ class DataSinkOperatorXBase : public OperatorBase { [[nodiscard]] bool is_sink() const override { return true; } - [[nodiscard]] bool is_source() const override { return false; } - static Status close(RuntimeState* state, Status exec_status) { auto result = state->get_sink_local_state_result(); if (!result) { @@ -652,19 +650,7 @@ class OperatorXBase : public OperatorBase { throw doris::Exception(ErrorCode::NOT_IMPLEMENTED_ERROR, _op_name); } [[nodiscard]] std::string get_name() const override { return _op_name; } - [[nodiscard]] virtual DataDistribution required_data_distribution() const { - return _child && _child->ignore_data_distribution() && !is_source() - ? DataDistribution(ExchangeType::PASSTHROUGH) - : DataDistribution(ExchangeType::NOOP); - } - [[nodiscard]] virtual bool ignore_data_distribution() const { - return _child ? _child->ignore_data_distribution() : _ignore_data_distribution; - } - [[nodiscard]] bool ignore_data_hash_distribution() const { - return _child ? 
_child->ignore_data_hash_distribution() : _ignore_data_distribution; - } [[nodiscard]] virtual bool need_more_input_data(RuntimeState* state) const { return true; } - void set_ignore_data_distribution() { _ignore_data_distribution = true; } Status open(RuntimeState* state) override; @@ -735,8 +721,6 @@ class OperatorXBase : public OperatorBase { bool has_output_row_desc() const { return _output_row_descriptor != nullptr; } - [[nodiscard]] bool is_source() const override { return false; } - [[nodiscard]] virtual Status get_block_after_projects(RuntimeState* state, vectorized::Block* block, bool* eos); @@ -779,7 +763,6 @@ class OperatorXBase : public OperatorBase { uint32_t _debug_point_count = 0; std::string _op_name; - bool _ignore_data_distribution = false; int _parallel_tasks = 0; //_keep_origin is used to avoid copying during projection, diff --git a/be/src/pipeline/exec/partitioned_aggregation_sink_operator.h b/be/src/pipeline/exec/partitioned_aggregation_sink_operator.h index 6b3a74c83df97c..15f6b22387a8e2 100644 --- a/be/src/pipeline/exec/partitioned_aggregation_sink_operator.h +++ b/be/src/pipeline/exec/partitioned_aggregation_sink_operator.h @@ -309,9 +309,6 @@ class PartitionedAggSinkOperatorX : public DataSinkOperatorXrequire_data_distribution(); } - bool require_shuffled_data_distribution() const override { - return _agg_sink_operator->require_shuffled_data_distribution(); - } Status set_child(OperatorPtr child) override { RETURN_IF_ERROR(DataSinkOperatorX::set_child(child)); diff --git a/be/src/pipeline/exec/partitioned_hash_join_probe_operator.h b/be/src/pipeline/exec/partitioned_hash_join_probe_operator.h index 3aab11f62d883e..f8fc0780b6fc3f 100644 --- a/be/src/pipeline/exec/partitioned_hash_join_probe_operator.h +++ b/be/src/pipeline/exec/partitioned_hash_join_probe_operator.h @@ -165,9 +165,6 @@ class PartitionedHashJoinProbeOperatorX final _distribution_partition_exprs)); } - bool require_shuffled_data_distribution() const override { - return _join_op != TJoinOp::NULL_AWARE_LEFT_ANTI_JOIN; - } bool is_shuffled_operator() const override { return _join_distribution == TJoinDistributionType::PARTITIONED; } diff --git a/be/src/pipeline/exec/partitioned_hash_join_sink_operator.h b/be/src/pipeline/exec/partitioned_hash_join_sink_operator.h index c768d7518b95c9..8e89763b50a9d5 100644 --- a/be/src/pipeline/exec/partitioned_hash_join_sink_operator.h +++ b/be/src/pipeline/exec/partitioned_hash_join_sink_operator.h @@ -115,9 +115,6 @@ class PartitionedHashJoinSinkOperatorX _distribution_partition_exprs); } - bool require_shuffled_data_distribution() const override { - return _join_op != TJoinOp::NULL_AWARE_LEFT_ANTI_JOIN; - } bool is_shuffled_operator() const override { return _join_distribution == TJoinDistributionType::PARTITIONED; } diff --git a/be/src/pipeline/exec/repeat_operator.cpp b/be/src/pipeline/exec/repeat_operator.cpp index dba4f27af7c385..5c94d43f0d1e05 100644 --- a/be/src/pipeline/exec/repeat_operator.cpp +++ b/be/src/pipeline/exec/repeat_operator.cpp @@ -46,6 +46,16 @@ Status RepeatLocalState::open(RuntimeState* state) { return Status::OK(); } +Status RepeatLocalState::init(RuntimeState* state, LocalStateInfo& info) { + RETURN_IF_ERROR(Base::init(state, info)); + SCOPED_TIMER(exec_time_counter()); + SCOPED_TIMER(_init_timer); + _evaluate_input_timer = ADD_TIMER(profile(), "EvaluateInputDataTime"); + _get_repeat_data_timer = ADD_TIMER(profile(), "GetRepeatDataTime"); + _filter_timer = ADD_TIMER(profile(), "FilterTime"); + return Status::OK(); +} + Status 
RepeatOperatorX::init(const TPlanNode& tnode, RuntimeState* state) { RETURN_IF_ERROR(OperatorXBase::init(tnode, state)); RETURN_IF_ERROR(vectorized::VExpr::create_expr_trees(tnode.repeat_node.exprs, _expr_ctxs)); @@ -166,23 +176,24 @@ Status RepeatLocalState::add_grouping_id_column(std::size_t rows, std::size_t& c Status RepeatOperatorX::push(RuntimeState* state, vectorized::Block* input_block, bool eos) const { auto& local_state = get_local_state(state); + SCOPED_TIMER(local_state._evaluate_input_timer); local_state._child_eos = eos; - auto& _intermediate_block = local_state._intermediate_block; - auto& _expr_ctxs = local_state._expr_ctxs; - DCHECK(!_intermediate_block || _intermediate_block->rows() == 0); + auto& intermediate_block = local_state._intermediate_block; + auto& expr_ctxs = local_state._expr_ctxs; + DCHECK(!intermediate_block || intermediate_block->rows() == 0); if (input_block->rows() > 0) { - _intermediate_block = vectorized::Block::create_unique(); + intermediate_block = vectorized::Block::create_unique(); - for (auto& expr : _expr_ctxs) { + for (auto& expr : expr_ctxs) { int result_column_id = -1; RETURN_IF_ERROR(expr->execute(input_block, &result_column_id)); DCHECK(result_column_id != -1); input_block->get_by_position(result_column_id).column = input_block->get_by_position(result_column_id) .column->convert_to_full_column_if_const(); - _intermediate_block->insert(input_block->get_by_position(result_column_id)); + intermediate_block->insert(input_block->get_by_position(result_column_id)); } - DCHECK_EQ(_expr_ctxs.size(), _intermediate_block->columns()); + DCHECK_EQ(expr_ctxs.size(), intermediate_block->columns()); } return Status::OK(); @@ -202,33 +213,39 @@ Status RepeatOperatorX::pull(doris::RuntimeState* state, vectorized::Block* outp } DCHECK(output_block->rows() == 0); - if (_intermediate_block && _intermediate_block->rows() > 0) { - RETURN_IF_ERROR(local_state.get_repeated_block(_intermediate_block.get(), _repeat_id_idx, - output_block)); + { + SCOPED_TIMER(local_state._get_repeat_data_timer); + if (_intermediate_block && _intermediate_block->rows() > 0) { + RETURN_IF_ERROR(local_state.get_repeated_block(_intermediate_block.get(), + _repeat_id_idx, output_block)); - _repeat_id_idx++; + _repeat_id_idx++; - int size = _repeat_id_list.size(); - if (_repeat_id_idx >= size) { - _intermediate_block->clear(); + int size = _repeat_id_list.size(); + if (_repeat_id_idx >= size) { + _intermediate_block->clear(); + _child_block.clear_column_data(_child->row_desc().num_materialized_slots()); + _repeat_id_idx = 0; + } + } else if (local_state._expr_ctxs.empty()) { + auto m_block = vectorized::VectorizedUtils::build_mutable_mem_reuse_block( + output_block, _output_slots); + auto rows = _child_block.rows(); + auto& columns = m_block.mutable_columns(); + + for (int repeat_id_idx = 0; repeat_id_idx < _repeat_id_list.size(); repeat_id_idx++) { + std::size_t cur_col = 0; + RETURN_IF_ERROR( + local_state.add_grouping_id_column(rows, cur_col, columns, repeat_id_idx)); + } _child_block.clear_column_data(_child->row_desc().num_materialized_slots()); - _repeat_id_idx = 0; } - } else if (local_state._expr_ctxs.empty()) { - auto m_block = vectorized::VectorizedUtils::build_mutable_mem_reuse_block(output_block, - _output_slots); - auto rows = _child_block.rows(); - auto& columns = m_block.mutable_columns(); - - for (int repeat_id_idx = 0; repeat_id_idx < _repeat_id_list.size(); repeat_id_idx++) { - std::size_t cur_col = 0; - RETURN_IF_ERROR( - local_state.add_grouping_id_column(rows, cur_col, 
columns, repeat_id_idx)); - } - _child_block.clear_column_data(_child->row_desc().num_materialized_slots()); } - RETURN_IF_ERROR(vectorized::VExprContext::filter_block(local_state._conjuncts, output_block, - output_block->columns())); + { + SCOPED_TIMER(local_state._filter_timer); + RETURN_IF_ERROR(vectorized::VExprContext::filter_block(local_state._conjuncts, output_block, + output_block->columns())); + } *eos = _child_eos && _child_block.rows() == 0; local_state.reached_limit(output_block, eos); return Status::OK(); diff --git a/be/src/pipeline/exec/repeat_operator.h b/be/src/pipeline/exec/repeat_operator.h index 22398df372ae65..31f88f37231aaa 100644 --- a/be/src/pipeline/exec/repeat_operator.h +++ b/be/src/pipeline/exec/repeat_operator.h @@ -36,6 +36,7 @@ class RepeatLocalState final : public PipelineXLocalState { using Base = PipelineXLocalState; RepeatLocalState(RuntimeState* state, OperatorXBase* parent); + Status init(RuntimeState* state, LocalStateInfo& info) override; Status open(RuntimeState* state) override; Status get_repeated_block(vectorized::Block* child_block, int repeat_id_idx, @@ -53,6 +54,10 @@ class RepeatLocalState final : public PipelineXLocalState { int _repeat_id_idx; std::unique_ptr _intermediate_block; vectorized::VExprContextSPtrs _expr_ctxs; + + RuntimeProfile::Counter* _evaluate_input_timer = nullptr; + RuntimeProfile::Counter* _get_repeat_data_timer = nullptr; + RuntimeProfile::Counter* _filter_timer = nullptr; }; class RepeatOperatorX final : public StatefulOperatorX { diff --git a/be/src/pipeline/exec/result_file_sink_operator.cpp b/be/src/pipeline/exec/result_file_sink_operator.cpp index 93026427b86d56..7c9c38ece5c4e9 100644 --- a/be/src/pipeline/exec/result_file_sink_operator.cpp +++ b/be/src/pipeline/exec/result_file_sink_operator.cpp @@ -85,12 +85,6 @@ Status ResultFileSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo& i SCOPED_TIMER(_init_timer); _sender_id = info.sender_id; - _brpc_wait_timer = ADD_TIMER(_profile, "BrpcSendTime.Wait"); - _local_send_timer = ADD_TIMER(_profile, "LocalSendTime"); - _brpc_send_timer = ADD_TIMER(_profile, "BrpcSendTime"); - _split_block_distribute_by_channel_timer = - ADD_TIMER(_profile, "SplitBlockDistributeByChannelTime"); - _brpc_send_timer = ADD_TIMER(_profile, "BrpcSendTime"); auto& p = _parent->cast(); CHECK(p._file_opts.get() != nullptr); // create sender diff --git a/be/src/pipeline/exec/result_file_sink_operator.h b/be/src/pipeline/exec/result_file_sink_operator.h index 7268efe4de4065..e9f2b8eeb9c670 100644 --- a/be/src/pipeline/exec/result_file_sink_operator.h +++ b/be/src/pipeline/exec/result_file_sink_operator.h @@ -40,26 +40,12 @@ class ResultFileSinkLocalState final [[nodiscard]] int sender_id() const { return _sender_id; } - RuntimeProfile::Counter* brpc_wait_timer() { return _brpc_wait_timer; } - RuntimeProfile::Counter* local_send_timer() { return _local_send_timer; } - RuntimeProfile::Counter* brpc_send_timer() { return _brpc_send_timer; } - RuntimeProfile::Counter* merge_block_timer() { return _merge_block_timer; } - RuntimeProfile::Counter* split_block_distribute_by_channel_timer() { - return _split_block_distribute_by_channel_timer; - } - private: friend class ResultFileSinkOperatorX; std::shared_ptr _sender; std::shared_ptr _block_holder; - RuntimeProfile::Counter* _brpc_wait_timer = nullptr; - RuntimeProfile::Counter* _local_send_timer = nullptr; - RuntimeProfile::Counter* _brpc_send_timer = nullptr; - RuntimeProfile::Counter* _merge_block_timer = nullptr; - RuntimeProfile::Counter* 
_split_block_distribute_by_channel_timer = nullptr; - int _sender_id; }; diff --git a/be/src/pipeline/exec/result_sink_operator.cpp b/be/src/pipeline/exec/result_sink_operator.cpp index f04ace2e292595..53a517f859c4e3 100644 --- a/be/src/pipeline/exec/result_sink_operator.cpp +++ b/be/src/pipeline/exec/result_sink_operator.cpp @@ -41,13 +41,12 @@ Status ResultSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo& info) RETURN_IF_ERROR(Base::init(state, info)); SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); + _fetch_row_id_timer = ADD_TIMER(profile(), "FetchRowIdTime"); + _write_data_timer = ADD_TIMER(profile(), "WriteDataTime"); static const std::string timer_name = "WaitForDependencyTime"; _wait_for_dependency_timer = ADD_TIMER_WITH_LEVEL(_profile, timer_name, 1); auto fragment_instance_id = state->fragment_instance_id(); - _blocks_sent_counter = ADD_COUNTER_WITH_LEVEL(_profile, "BlocksProduced", TUnit::UNIT, 1); - _rows_sent_counter = ADD_COUNTER_WITH_LEVEL(_profile, "RowsProduced", TUnit::UNIT, 1); - if (state->query_options().enable_parallel_result_sink) { _sender = _parent->cast()._sender; } else { @@ -146,12 +145,15 @@ Status ResultSinkOperatorX::open(RuntimeState* state) { Status ResultSinkOperatorX::sink(RuntimeState* state, vectorized::Block* block, bool eos) { auto& local_state = get_local_state(state); SCOPED_TIMER(local_state.exec_time_counter()); - COUNTER_UPDATE(local_state.rows_sent_counter(), (int64_t)block->rows()); - COUNTER_UPDATE(local_state.blocks_sent_counter(), 1); + COUNTER_UPDATE(local_state.rows_input_counter(), (int64_t)block->rows()); if (_fetch_option.use_two_phase_fetch && block->rows() > 0) { + SCOPED_TIMER(local_state._fetch_row_id_timer); RETURN_IF_ERROR(_second_phase_fetch_data(state, block)); } - RETURN_IF_ERROR(local_state._writer->write(state, *block)); + { + SCOPED_TIMER(local_state._write_data_timer); + RETURN_IF_ERROR(local_state._writer->write(state, *block)); + } if (_fetch_option.use_two_phase_fetch) { // Block structure may be changed by calling _second_phase_fetch_data(). 
// So we should clear block in case of unmatched columns diff --git a/be/src/pipeline/exec/result_sink_operator.h b/be/src/pipeline/exec/result_sink_operator.h index 3c503096ecb51e..339c167825643b 100644 --- a/be/src/pipeline/exec/result_sink_operator.h +++ b/be/src/pipeline/exec/result_sink_operator.h @@ -128,8 +128,6 @@ class ResultSinkLocalState final : public PipelineXSinkLocalState _sender = nullptr; std::shared_ptr _writer = nullptr; - RuntimeProfile::Counter* _blocks_sent_counter = nullptr; - RuntimeProfile::Counter* _rows_sent_counter = nullptr; + + RuntimeProfile::Counter* _fetch_row_id_timer = nullptr; + RuntimeProfile::Counter* _write_data_timer = nullptr; }; class ResultSinkOperatorX final : public DataSinkOperatorX { diff --git a/be/src/pipeline/exec/scan_operator.cpp b/be/src/pipeline/exec/scan_operator.cpp index 4f3c97bab717b6..6200f3b12ce5a0 100644 --- a/be/src/pipeline/exec/scan_operator.cpp +++ b/be/src/pipeline/exec/scan_operator.cpp @@ -73,7 +73,7 @@ Status ScanLocalState::init(RuntimeState* state, LocalStateInfo& info) SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); auto& p = _parent->cast(); - RETURN_IF_ERROR(RuntimeFilterConsumer::init(state, p.ignore_data_distribution())); + RETURN_IF_ERROR(RuntimeFilterConsumer::init(state, p.is_serial_operator())); // init profile for runtime filter RuntimeFilterConsumer::_init_profile(profile()); init_runtime_filter_dependency(_filter_dependencies, p.operator_id(), p.node_id(), @@ -990,7 +990,7 @@ Status ScanLocalState::_start_scanners( auto& p = _parent->cast(); _scanner_ctx = vectorized::ScannerContext::create_shared( state(), this, p._output_tuple_desc, p.output_row_descriptor(), scanners, p.limit(), - _scan_dependency, p.ignore_data_distribution()); + _scan_dependency, p.is_serial_operator()); return Status::OK(); } @@ -1048,13 +1048,10 @@ Status ScanLocalState::_init_profile() { ADD_COUNTER(_scanner_profile, "NewlyCreateFreeBlocksNum", TUnit::UNIT); _scale_up_scanners_counter = ADD_COUNTER(_scanner_profile, "NumScaleUpScanners", TUnit::UNIT); // time of transfer thread to wait for block from scan thread - _scanner_wait_batch_timer = ADD_TIMER(_scanner_profile, "ScannerBatchWaitTime"); _scanner_sched_counter = ADD_COUNTER(_scanner_profile, "ScannerSchedCount", TUnit::UNIT); - _scanner_ctx_sched_time = ADD_TIMER(_scanner_profile, "ScannerCtxSchedTime"); _scan_timer = ADD_TIMER(_scanner_profile, "ScannerGetBlockTime"); _scan_cpu_timer = ADD_TIMER(_scanner_profile, "ScannerCpuTime"); - _convert_block_timer = ADD_TIMER(_scanner_profile, "ScannerConvertBlockTime"); _filter_timer = ADD_TIMER(_scanner_profile, "ScannerFilterTime"); // time of scan thread to wait for worker thread of the thread pool @@ -1145,6 +1142,8 @@ ScanOperatorX::ScanOperatorX(ObjectPool* pool, const TPlanNode& : OperatorX(pool, tnode, operator_id, descs), _runtime_filter_descs(tnode.runtime_filters), _parallel_tasks(parallel_tasks) { + OperatorX::_is_serial_operator = + tnode.__isset.is_serial_operator && tnode.is_serial_operator; if (tnode.__isset.push_down_count) { _push_down_count = tnode.push_down_count; } @@ -1282,6 +1281,7 @@ Status ScanOperatorX::get_block(RuntimeState* state, vectorized: if (*eos) { // reach limit, stop the scanners. 
local_state._scanner_ctx->stop_scanners(state); + local_state._scanner_profile->add_info_string("EOS", "True"); } return Status::OK(); diff --git a/be/src/pipeline/exec/scan_operator.h b/be/src/pipeline/exec/scan_operator.h index bf650cb8495935..5d41c800383bd0 100644 --- a/be/src/pipeline/exec/scan_operator.h +++ b/be/src/pipeline/exec/scan_operator.h @@ -102,8 +102,6 @@ class ScanLocalStateBase : public PipelineXLocalState<>, public RuntimeFilterCon std::shared_ptr _scanner_profile; RuntimeProfile::Counter* _scanner_sched_counter = nullptr; - RuntimeProfile::Counter* _scanner_ctx_sched_time = nullptr; - RuntimeProfile::Counter* _scanner_wait_batch_timer = nullptr; RuntimeProfile::Counter* _scanner_wait_worker_timer = nullptr; // Num of newly created free blocks when running query RuntimeProfile::Counter* _newly_create_free_blocks_num = nullptr; @@ -114,8 +112,6 @@ class ScanLocalStateBase : public PipelineXLocalState<>, public RuntimeFilterCon // time of get block from scanner RuntimeProfile::Counter* _scan_timer = nullptr; RuntimeProfile::Counter* _scan_cpu_timer = nullptr; - // time of convert input block to output block from scanner - RuntimeProfile::Counter* _convert_block_timer = nullptr; // time of filter output block from scanner RuntimeProfile::Counter* _filter_timer = nullptr; RuntimeProfile::Counter* _memory_usage_counter = nullptr; @@ -383,8 +379,8 @@ class ScanOperatorX : public OperatorX { TPushAggOp::type get_push_down_agg_type() { return _push_down_agg_type; } DataDistribution required_data_distribution() const override { - if (OperatorX::ignore_data_distribution()) { - // `ignore_data_distribution()` returns true means we ignore the distribution. + if (OperatorX::is_serial_operator()) { + // `is_serial_operator()` returns true means we ignore the distribution. 
return {ExchangeType::NOOP}; } return {ExchangeType::BUCKET_HASH_SHUFFLE}; diff --git a/be/src/pipeline/exec/set_probe_sink_operator.cpp b/be/src/pipeline/exec/set_probe_sink_operator.cpp index 37db9afacfcacd..813dad3ad79de6 100644 --- a/be/src/pipeline/exec/set_probe_sink_operator.cpp +++ b/be/src/pipeline/exec/set_probe_sink_operator.cpp @@ -71,12 +71,16 @@ Status SetProbeSinkOperatorX::sink(RuntimeState* state, vectorized auto probe_rows = in_block->rows(); if (probe_rows > 0) { - RETURN_IF_ERROR(_extract_probe_column(local_state, *in_block, local_state._probe_columns, - _cur_child_id)); + { + SCOPED_TIMER(local_state._extract_probe_data_timer); + RETURN_IF_ERROR(_extract_probe_column(local_state, *in_block, + local_state._probe_columns, _cur_child_id)); + } RETURN_IF_ERROR(std::visit( [&](auto&& arg) -> Status { using HashTableCtxType = std::decay_t; if constexpr (!std::is_same_v) { + SCOPED_TIMER(local_state._probe_timer); vectorized::HashTableProbe process_hashtable_ctx(&local_state, probe_rows); return process_hashtable_ctx.mark_data_in_hashtable(arg); @@ -99,6 +103,9 @@ Status SetProbeSinkLocalState::init(RuntimeState* state, LocalSink RETURN_IF_ERROR(Base::init(state, info)); SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); + + _probe_timer = ADD_TIMER(Base::profile(), "ProbeTime"); + _extract_probe_data_timer = ADD_TIMER(Base::profile(), "ExtractProbeDataTime"); Parent& parent = _parent->cast(); _shared_state->probe_finished_children_dependency[parent._cur_child_id] = _dependency; _dependency->block(); diff --git a/be/src/pipeline/exec/set_probe_sink_operator.h b/be/src/pipeline/exec/set_probe_sink_operator.h index ab53f5358c2a91..368ea812cdfe01 100644 --- a/be/src/pipeline/exec/set_probe_sink_operator.h +++ b/be/src/pipeline/exec/set_probe_sink_operator.h @@ -60,6 +60,9 @@ class SetProbeSinkLocalState final : public PipelineXSinkLocalState @@ -96,8 +99,6 @@ class SetProbeSinkOperatorX final : public DataSinkOperatorX create_shared_state() const override { return nullptr; } private: diff --git a/be/src/pipeline/exec/set_sink_operator.cpp b/be/src/pipeline/exec/set_sink_operator.cpp index 9a81333efaed89..539134e53e7fe2 100644 --- a/be/src/pipeline/exec/set_sink_operator.cpp +++ b/be/src/pipeline/exec/set_sink_operator.cpp @@ -40,8 +40,10 @@ Status SetSinkOperatorX::sink(RuntimeState* state, vectorized::Blo auto& valid_element_in_hash_tbl = local_state._shared_state->valid_element_in_hash_tbl; if (in_block->rows() != 0) { - RETURN_IF_ERROR(local_state._mutable_block.merge(*in_block)); - + { + SCOPED_TIMER(local_state._merge_block_timer); + RETURN_IF_ERROR(local_state._mutable_block.merge(*in_block)); + } if (local_state._mutable_block.rows() > std::numeric_limits::max()) { return Status::NotSupported("set operator do not support build table rows over:" + std::to_string(std::numeric_limits::max())); @@ -49,6 +51,7 @@ Status SetSinkOperatorX::sink(RuntimeState* state, vectorized::Blo } if (eos || local_state._mutable_block.allocated_bytes() >= BUILD_BLOCK_MAX_SIZE) { + SCOPED_TIMER(local_state._build_timer); build_block = local_state._mutable_block.to_block(); RETURN_IF_ERROR(_process_build_block(local_state, build_block, state)); local_state._mutable_block.clear(); @@ -152,6 +155,7 @@ Status SetSinkLocalState::init(RuntimeState* state, LocalSinkState RETURN_IF_ERROR(PipelineXSinkLocalState::init(state, info)); SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); + _merge_block_timer = ADD_TIMER(_profile, "MergeBlocksTime"); _build_timer = ADD_TIMER(_profile, 
"BuildTime"); auto& parent = _parent->cast(); _shared_state->probe_finished_children_dependency[parent._cur_child_id] = _dependency; diff --git a/be/src/pipeline/exec/set_sink_operator.h b/be/src/pipeline/exec/set_sink_operator.h index 65c33795e5da2b..ba387d97b41360 100644 --- a/be/src/pipeline/exec/set_sink_operator.h +++ b/be/src/pipeline/exec/set_sink_operator.h @@ -49,14 +49,14 @@ class SetSinkLocalState final : public PipelineXSinkLocalState { private: friend class SetSinkOperatorX; - template - friend struct vectorized::HashTableBuild; - RuntimeProfile::Counter* _build_timer; // time to build hash table vectorized::MutableBlock _mutable_block; // every child has its result expr list vectorized::VExprContextSPtrs _child_exprs; vectorized::Arena _arena; + + RuntimeProfile::Counter* _merge_block_timer = nullptr; + RuntimeProfile::Counter* _build_timer = nullptr; }; template @@ -94,7 +94,6 @@ class SetSinkOperatorX final : public DataSinkOperatorX diff --git a/be/src/pipeline/exec/set_source_operator.cpp b/be/src/pipeline/exec/set_source_operator.cpp index 58958462c2f021..ebcd13ddf14ce4 100644 --- a/be/src/pipeline/exec/set_source_operator.cpp +++ b/be/src/pipeline/exec/set_source_operator.cpp @@ -29,6 +29,8 @@ Status SetSourceLocalState::init(RuntimeState* state, LocalStateIn RETURN_IF_ERROR(Base::init(state, info)); SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); + _get_data_timer = ADD_TIMER(_runtime_profile, "GetDataTime"); + _filter_timer = ADD_TIMER(_runtime_profile, "FilterTime"); _shared_state->probe_finished_children_dependency.resize( _parent->cast>()._child_quantity, nullptr); return Status::OK(); @@ -75,21 +77,26 @@ Status SetSourceOperatorX::get_block(RuntimeState* state, vectoriz auto& local_state = get_local_state(state); SCOPED_TIMER(local_state.exec_time_counter()); _create_mutable_cols(local_state, block); - auto st = std::visit( - [&](auto&& arg) -> Status { - using HashTableCtxType = std::decay_t; - if constexpr (!std::is_same_v) { - return _get_data_in_hashtable(local_state, arg, block, - state->batch_size(), eos); - } else { - LOG(FATAL) << "FATAL: uninited hash table"; - __builtin_unreachable(); - } - }, - local_state._shared_state->hash_table_variants->method_variant); - RETURN_IF_ERROR(st); - RETURN_IF_ERROR(vectorized::VExprContext::filter_block(local_state._conjuncts, block, - block->columns())); + { + SCOPED_TIMER(local_state._get_data_timer); + RETURN_IF_ERROR(std::visit( + [&](auto&& arg) -> Status { + using HashTableCtxType = std::decay_t; + if constexpr (!std::is_same_v) { + return _get_data_in_hashtable(local_state, arg, block, + state->batch_size(), eos); + } else { + LOG(FATAL) << "FATAL: uninited hash table"; + __builtin_unreachable(); + } + }, + local_state._shared_state->hash_table_variants->method_variant)); + } + { + SCOPED_TIMER(local_state._filter_timer); + RETURN_IF_ERROR(vectorized::VExprContext::filter_block(local_state._conjuncts, block, + block->columns())); + } local_state.reached_limit(block, eos); return Status::OK(); } diff --git a/be/src/pipeline/exec/set_source_operator.h b/be/src/pipeline/exec/set_source_operator.h index ce3d0c52edf1d5..976ffde3bf23ea 100644 --- a/be/src/pipeline/exec/set_source_operator.h +++ b/be/src/pipeline/exec/set_source_operator.h @@ -46,6 +46,9 @@ class SetSourceLocalState final : public PipelineXLocalState { std::vector _mutable_cols; //record build column type vectorized::DataTypes _left_table_data_types; + + RuntimeProfile::Counter* _get_data_timer = nullptr; + RuntimeProfile::Counter* 
_filter_timer = nullptr; }; template diff --git a/be/src/pipeline/exec/sort_sink_operator.cpp b/be/src/pipeline/exec/sort_sink_operator.cpp index 6d6684437b8124..faec4961af93b7 100644 --- a/be/src/pipeline/exec/sort_sink_operator.cpp +++ b/be/src/pipeline/exec/sort_sink_operator.cpp @@ -32,6 +32,8 @@ Status SortSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo& info) { SCOPED_TIMER(_init_timer); _sort_blocks_memory_usage = ADD_COUNTER_WITH_LEVEL(_profile, "MemoryUsageSortBlocks", TUnit::BYTES, 1); + _append_blocks_timer = ADD_TIMER(profile(), "AppendBlockTime"); + _update_runtime_predicate_timer = ADD_TIMER(profile(), "UpdateRuntimePredicateTime"); return Status::OK(); } @@ -119,7 +121,10 @@ Status SortSinkOperatorX::sink(doris::RuntimeState* state, vectorized::Block* in SCOPED_TIMER(local_state.exec_time_counter()); COUNTER_UPDATE(local_state.rows_input_counter(), (int64_t)in_block->rows()); if (in_block->rows() > 0) { - RETURN_IF_ERROR(local_state._shared_state->sorter->append_block(in_block)); + { + SCOPED_TIMER(local_state._append_blocks_timer); + RETURN_IF_ERROR(local_state._shared_state->sorter->append_block(in_block)); + } int64_t data_size = local_state._shared_state->sorter->data_size(); COUNTER_SET(local_state._sort_blocks_memory_usage, data_size); COUNTER_SET(local_state._memory_used_counter, data_size); @@ -128,6 +133,7 @@ Status SortSinkOperatorX::sink(doris::RuntimeState* state, vectorized::Block* in RETURN_IF_CANCELLED(state); if (state->get_query_ctx()->has_runtime_predicate(_node_id)) { + SCOPED_TIMER(local_state._update_runtime_predicate_timer); auto& predicate = state->get_query_ctx()->get_runtime_predicate(_node_id); if (predicate.enable()) { vectorized::Field new_top = local_state._shared_state->sorter->get_top_value(); diff --git a/be/src/pipeline/exec/sort_sink_operator.h b/be/src/pipeline/exec/sort_sink_operator.h index 0829c38b40f0b5..6bf87164e71026 100644 --- a/be/src/pipeline/exec/sort_sink_operator.h +++ b/be/src/pipeline/exec/sort_sink_operator.h @@ -46,6 +46,8 @@ class SortSinkLocalState : public PipelineXSinkLocalState { // topn top value vectorized::Field old_top {vectorized::Field::Types::Null}; + RuntimeProfile::Counter* _append_blocks_timer = nullptr; + RuntimeProfile::Counter* _update_runtime_predicate_timer = nullptr; }; class SortSinkOperatorX final : public DataSinkOperatorX { @@ -73,7 +75,6 @@ class SortSinkOperatorX final : public DataSinkOperatorX { return {ExchangeType::NOOP}; } } - bool require_shuffled_data_distribution() const override { return _is_analytic_sort; } bool require_data_distribution() const override { return _is_colocate; } size_t get_revocable_mem_size(RuntimeState* state) const; diff --git a/be/src/pipeline/exec/streaming_aggregation_operator.cpp b/be/src/pipeline/exec/streaming_aggregation_operator.cpp index 603a1a216103ee..96de1f32be5a01 100644 --- a/be/src/pipeline/exec/streaming_aggregation_operator.cpp +++ b/be/src/pipeline/exec/streaming_aggregation_operator.cpp @@ -93,25 +93,18 @@ Status StreamingAggLocalState::init(RuntimeState* state, LocalStateInfo& info) { "MemoryUsageSerializeKeyArena", TUnit::BYTES, "", 1); _build_timer = ADD_TIMER(Base::profile(), "BuildTime"); - _build_table_convert_timer = ADD_TIMER(Base::profile(), "BuildConvertToPartitionedTime"); - _serialize_key_timer = ADD_TIMER(Base::profile(), "SerializeKeyTime"); - _exec_timer = ADD_TIMER(Base::profile(), "ExecTime"); _merge_timer = ADD_TIMER(Base::profile(), "MergeTime"); _expr_timer = ADD_TIMER(Base::profile(), "ExprTime"); - 
_serialize_data_timer = ADD_TIMER(Base::profile(), "SerializeDataTime"); + _insert_values_to_column_timer = ADD_TIMER(Base::profile(), "InsertValuesToColumnTime"); _deserialize_data_timer = ADD_TIMER(Base::profile(), "DeserializeAndMergeTime"); _hash_table_compute_timer = ADD_TIMER(Base::profile(), "HashTableComputeTime"); _hash_table_emplace_timer = ADD_TIMER(Base::profile(), "HashTableEmplaceTime"); _hash_table_input_counter = ADD_COUNTER(Base::profile(), "HashTableInputCount", TUnit::UNIT); - _max_row_size_counter = ADD_COUNTER(Base::profile(), "MaxRowSizeInBytes", TUnit::UNIT); _hash_table_size_counter = ADD_COUNTER(profile(), "HashTableSize", TUnit::UNIT); - _queue_byte_size_counter = ADD_COUNTER(profile(), "MaxSizeInBlockQueue", TUnit::BYTES); - _queue_size_counter = ADD_COUNTER(profile(), "MaxSizeOfBlockQueue", TUnit::UNIT); _streaming_agg_timer = ADD_TIMER(profile(), "StreamingAggTime"); _build_timer = ADD_TIMER(profile(), "BuildTime"); _expr_timer = ADD_TIMER(Base::profile(), "ExprTime"); _get_results_timer = ADD_TIMER(profile(), "GetResultsTime"); - _serialize_result_timer = ADD_TIMER(profile(), "SerializeResultTime"); _hash_table_iterate_timer = ADD_TIMER(profile(), "HashTableIterateTime"); _insert_keys_to_column_timer = ADD_TIMER(profile(), "InsertKeysToColumnTime"); @@ -679,7 +672,7 @@ Status StreamingAggLocalState::_pre_agg_with_serialized_key(doris::vectorized::B } for (int i = 0; i != _aggregate_evaluators.size(); ++i) { - SCOPED_TIMER(_serialize_data_timer); + SCOPED_TIMER(_insert_values_to_column_timer); RETURN_IF_ERROR( _aggregate_evaluators[i]->streaming_agg_serialize_to_column( in_block, value_columns[i], rows, @@ -848,12 +841,12 @@ Status StreamingAggLocalState::_get_with_serialized_key_result(RuntimeState* sta return Status::OK(); } -Status StreamingAggLocalState::_serialize_without_key(RuntimeState* state, vectorized::Block* block, - bool* eos) { +Status StreamingAggLocalState::_get_results_without_key(RuntimeState* state, + vectorized::Block* block, bool* eos) { // 1. 
`child(0)->rows_returned() == 0` mean not data from child // in level two aggregation node should return NULL result // level one aggregation node set `eos = true` return directly - SCOPED_TIMER(_serialize_result_timer); + SCOPED_TIMER(_get_results_timer); if (UNLIKELY(_input_num_rows == 0)) { *eos = true; return Status::OK(); @@ -892,10 +885,10 @@ Status StreamingAggLocalState::_serialize_without_key(RuntimeState* state, vecto return Status::OK(); } -Status StreamingAggLocalState::_serialize_with_serialized_key_result(RuntimeState* state, - vectorized::Block* block, - bool* eos) { - SCOPED_TIMER(_serialize_result_timer); +Status StreamingAggLocalState::_get_results_with_serialized_key(RuntimeState* state, + vectorized::Block* block, + bool* eos) { + SCOPED_TIMER(_get_results_timer); auto& p = _parent->cast(); int key_size = _probe_expr_ctxs.size(); int agg_size = _aggregate_evaluators.size(); @@ -914,7 +907,6 @@ Status StreamingAggLocalState::_serialize_with_serialized_key_result(RuntimeStat } } - SCOPED_TIMER(_get_results_timer); std::visit( vectorized::Overload { [&](std::monostate& arg) -> void { @@ -970,7 +962,7 @@ Status StreamingAggLocalState::_serialize_with_serialized_key_result(RuntimeStat } { - SCOPED_TIMER(_serialize_data_timer); + SCOPED_TIMER(_insert_values_to_column_timer); for (size_t i = 0; i < _aggregate_evaluators.size(); ++i) { value_data_types[i] = _aggregate_evaluators[i]->function()->get_serialized_type(); diff --git a/be/src/pipeline/exec/streaming_aggregation_operator.h b/be/src/pipeline/exec/streaming_aggregation_operator.h index 9a84b694635a46..b695880ac2857b 100644 --- a/be/src/pipeline/exec/streaming_aggregation_operator.h +++ b/be/src/pipeline/exec/streaming_aggregation_operator.h @@ -65,11 +65,11 @@ class StreamingAggLocalState final : public PipelineXLocalState void _update_memusage_with_serialized_key(); Status _init_hash_method(const vectorized::VExprContextSPtrs& probe_exprs); Status _get_without_key_result(RuntimeState* state, vectorized::Block* block, bool* eos); - Status _serialize_without_key(RuntimeState* state, vectorized::Block* block, bool* eos); + Status _get_results_without_key(RuntimeState* state, vectorized::Block* block, bool* eos); Status _get_with_serialized_key_result(RuntimeState* state, vectorized::Block* block, bool* eos); - Status _serialize_with_serialized_key_result(RuntimeState* state, vectorized::Block* block, - bool* eos); + Status _get_results_with_serialized_key(RuntimeState* state, vectorized::Block* block, + bool* eos); template Status _merge_with_serialized_key_helper(vectorized::Block* block); @@ -83,25 +83,19 @@ class StreamingAggLocalState final : public PipelineXLocalState Status _create_agg_status(vectorized::AggregateDataPtr data); size_t _get_hash_table_size(); - RuntimeProfile::Counter* _queue_byte_size_counter = nullptr; - RuntimeProfile::Counter* _queue_size_counter = nullptr; RuntimeProfile::Counter* _streaming_agg_timer = nullptr; RuntimeProfile::Counter* _hash_table_compute_timer = nullptr; RuntimeProfile::Counter* _hash_table_emplace_timer = nullptr; RuntimeProfile::Counter* _hash_table_input_counter = nullptr; RuntimeProfile::Counter* _build_timer = nullptr; RuntimeProfile::Counter* _expr_timer = nullptr; - RuntimeProfile::Counter* _build_table_convert_timer = nullptr; - RuntimeProfile::Counter* _serialize_key_timer = nullptr; RuntimeProfile::Counter* _merge_timer = nullptr; - RuntimeProfile::Counter* _serialize_data_timer = nullptr; + RuntimeProfile::Counter* _insert_values_to_column_timer = nullptr; 
RuntimeProfile::Counter* _deserialize_data_timer = nullptr; - RuntimeProfile::Counter* _max_row_size_counter = nullptr; RuntimeProfile::Counter* _hash_table_memory_usage = nullptr; RuntimeProfile::HighWaterMarkCounter* _serialize_key_arena_memory_usage = nullptr; RuntimeProfile::Counter* _hash_table_size_counter = nullptr; RuntimeProfile::Counter* _get_results_timer = nullptr; - RuntimeProfile::Counter* _serialize_result_timer = nullptr; RuntimeProfile::Counter* _hash_table_iterate_timer = nullptr; RuntimeProfile::Counter* _insert_keys_to_column_timer = nullptr; @@ -136,13 +130,13 @@ class StreamingAggLocalState final : public PipelineXLocalState if constexpr (NeedFinalize) { return local_state->_get_without_key_result(state, block, eos); } else { - return local_state->_serialize_without_key(state, block, eos); + return local_state->_get_results_without_key(state, block, eos); } } else { if constexpr (NeedFinalize) { return local_state->_get_with_serialized_key_result(state, block, eos); } else { - return local_state->_serialize_with_serialized_key_result(state, block, eos); + return local_state->_get_results_with_serialized_key(state, block, eos); } } } diff --git a/be/src/pipeline/exec/table_function_operator.cpp b/be/src/pipeline/exec/table_function_operator.cpp index 38e69f7cb0e897..c1621470f435b4 100644 --- a/be/src/pipeline/exec/table_function_operator.cpp +++ b/be/src/pipeline/exec/table_function_operator.cpp @@ -32,6 +32,18 @@ namespace doris::pipeline { TableFunctionLocalState::TableFunctionLocalState(RuntimeState* state, OperatorXBase* parent) : PipelineXLocalState<>(state, parent), _child_block(vectorized::Block::create_unique()) {} +Status TableFunctionLocalState::init(RuntimeState* state, LocalStateInfo& info) { + RETURN_IF_ERROR(PipelineXLocalState<>::init(state, info)); + SCOPED_TIMER(exec_time_counter()); + SCOPED_TIMER(_init_timer); + _init_function_timer = ADD_TIMER(_runtime_profile, "InitTableFunctionTime"); + _process_rows_timer = ADD_TIMER(_runtime_profile, "ProcessRowsTime"); + _copy_data_timer = ADD_TIMER(_runtime_profile, "CopyDataTime"); + _filter_timer = ADD_TIMER(_runtime_profile, "FilterTime"); + _repeat_data_timer = ADD_TIMER(_runtime_profile, "RepeatDataTime"); + return Status::OK(); +} + Status TableFunctionLocalState::open(RuntimeState* state) { SCOPED_TIMER(PipelineXLocalState<>::exec_time_counter()); SCOPED_TIMER(PipelineXLocalState<>::_open_timer); @@ -59,6 +71,7 @@ void TableFunctionLocalState::_copy_output_slots( if (!_current_row_insert_times) { return; } + SCOPED_TIMER(_copy_data_timer); auto& p = _parent->cast(); for (auto index : p._output_slot_indexs) { auto src_column = _child_block->get_by_position(index).column; @@ -197,15 +210,18 @@ Status TableFunctionLocalState::get_expanded_block(RuntimeState* state, columns[index]->insert_many_defaults(row_size - columns[index]->size()); } - // 3. eval conjuncts - RETURN_IF_ERROR(vectorized::VExprContext::filter_block(_conjuncts, output_block, - output_block->columns())); + { + SCOPED_TIMER(_filter_timer); // 3. 
eval conjuncts + RETURN_IF_ERROR(vectorized::VExprContext::filter_block(_conjuncts, output_block, + output_block->columns())); + } *eos = _child_eos && _cur_child_offset == -1; return Status::OK(); } void TableFunctionLocalState::process_next_child_row() { + SCOPED_TIMER(_process_rows_timer); _cur_child_offset++; if (_cur_child_offset >= _child_block->rows()) { diff --git a/be/src/pipeline/exec/table_function_operator.h b/be/src/pipeline/exec/table_function_operator.h index 75b1608fad7112..81160acb7f7611 100644 --- a/be/src/pipeline/exec/table_function_operator.h +++ b/be/src/pipeline/exec/table_function_operator.h @@ -37,6 +37,7 @@ class TableFunctionLocalState final : public PipelineXLocalState<> { TableFunctionLocalState(RuntimeState* state, OperatorXBase* parent); ~TableFunctionLocalState() override = default; + Status init(RuntimeState* state, LocalStateInfo& infos) override; Status open(RuntimeState* state) override; Status close(RuntimeState* state) override { for (auto* fn : _fns) { @@ -67,6 +68,12 @@ class TableFunctionLocalState final : public PipelineXLocalState<> { std::unique_ptr _child_block; int _current_row_insert_times = 0; bool _child_eos = false; + + RuntimeProfile::Counter* _init_function_timer = nullptr; + RuntimeProfile::Counter* _process_rows_timer = nullptr; + RuntimeProfile::Counter* _copy_data_timer = nullptr; + RuntimeProfile::Counter* _filter_timer = nullptr; + RuntimeProfile::Counter* _repeat_data_timer = nullptr; }; class TableFunctionOperatorX final : public StatefulOperatorX { @@ -93,6 +100,7 @@ class TableFunctionOperatorX final : public StatefulOperatorXprocess_init(input_block, state)); } local_state.process_next_child_row(); diff --git a/be/src/pipeline/exec/union_sink_operator.cpp b/be/src/pipeline/exec/union_sink_operator.cpp index 288fc131037fab..8467eeb1d5467a 100644 --- a/be/src/pipeline/exec/union_sink_operator.cpp +++ b/be/src/pipeline/exec/union_sink_operator.cpp @@ -32,6 +32,7 @@ Status UnionSinkLocalState::init(RuntimeState* state, LocalSinkStateInfo& info) RETURN_IF_ERROR(Base::init(state, info)); SCOPED_TIMER(exec_time_counter()); SCOPED_TIMER(_init_timer); + _expr_timer = ADD_TIMER(_profile, "ExprTime"); auto& p = _parent->cast(); _shared_state->data_queue.set_sink_dependency(_dependency, p._cur_child_id); return Status::OK(); diff --git a/be/src/pipeline/exec/union_sink_operator.h b/be/src/pipeline/exec/union_sink_operator.h index f939950143ae92..aa94ed9a73038f 100644 --- a/be/src/pipeline/exec/union_sink_operator.h +++ b/be/src/pipeline/exec/union_sink_operator.h @@ -55,6 +55,7 @@ class UnionSinkLocalState final : public PipelineXSinkLocalState { @@ -136,6 +137,7 @@ class UnionSinkOperatorX final : public DataSinkOperatorX { Status materialize_block(RuntimeState* state, vectorized::Block* src_block, int child_idx, vectorized::Block* res_block) { auto& local_state = get_local_state(state); + SCOPED_TIMER(local_state._expr_timer); const auto& child_exprs = local_state._child_expr; vectorized::ColumnsWithTypeAndName colunms; for (size_t i = 0; i < child_exprs.size(); ++i) { diff --git a/be/src/pipeline/local_exchange/local_exchange_source_operator.h b/be/src/pipeline/local_exchange/local_exchange_source_operator.h index c0da5c8120c1e9..3c706d50182538 100644 --- a/be/src/pipeline/local_exchange/local_exchange_source_operator.h +++ b/be/src/pipeline/local_exchange/local_exchange_source_operator.h @@ -81,9 +81,6 @@ class LocalExchangeSourceOperatorX final : public OperatorXunref(local_state._shared_state, local_state._channel_id); } } - } else 
if (_num_senders != _num_sources || _ignore_source_data_distribution) { + } else if (_num_senders != _num_sources) { // In this branch, data just should be distributed equally into all instances. new_block_wrapper->ref(_num_partitions); for (size_t i = 0; i < _num_partitions; i++) { diff --git a/be/src/pipeline/local_exchange/local_exchanger.h b/be/src/pipeline/local_exchange/local_exchanger.h index b3731638cb3624..bf052ac3b924ca 100644 --- a/be/src/pipeline/local_exchange/local_exchanger.h +++ b/be/src/pipeline/local_exchange/local_exchanger.h @@ -218,24 +218,21 @@ class ShuffleExchanger : public Exchanger { protected: ShuffleExchanger(int running_sink_operators, int num_sources, int num_partitions, - bool ignore_source_data_distribution, int free_block_limit) + int free_block_limit) : Exchanger(running_sink_operators, num_sources, num_partitions, - free_block_limit), - _ignore_source_data_distribution(ignore_source_data_distribution) { + free_block_limit) { _data_queue.resize(num_partitions); } Status _split_rows(RuntimeState* state, const uint32_t* __restrict channel_ids, vectorized::Block* block, LocalExchangeSinkLocalState& local_state); - - const bool _ignore_source_data_distribution = false; }; class BucketShuffleExchanger final : public ShuffleExchanger { ENABLE_FACTORY_CREATOR(BucketShuffleExchanger); BucketShuffleExchanger(int running_sink_operators, int num_sources, int num_partitions, - bool ignore_source_data_distribution, int free_block_limit) + int free_block_limit) : ShuffleExchanger(running_sink_operators, num_sources, num_partitions, - ignore_source_data_distribution, free_block_limit) {} + free_block_limit) {} ~BucketShuffleExchanger() override = default; ExchangeType get_type() const override { return ExchangeType::BUCKET_HASH_SHUFFLE; } }; diff --git a/be/src/pipeline/pipeline.cpp b/be/src/pipeline/pipeline.cpp index 5b93fbdf1f8480..96da754daa5d98 100644 --- a/be/src/pipeline/pipeline.cpp +++ b/be/src/pipeline/pipeline.cpp @@ -39,6 +39,7 @@ bool Pipeline::need_to_local_exchange(const DataDistribution target_data_distrib [&](OperatorPtr op) -> bool { return op->is_serial_operator(); })) { return false; } + // If all operators are serial and sink is not serial, we should improve parallelism for sink. if (std::all_of(_operators.begin(), _operators.end(), [&](OperatorPtr op) -> bool { return op->is_serial_operator(); })) { if (!_sink->is_serial_operator()) { @@ -46,21 +47,22 @@ bool Pipeline::need_to_local_exchange(const DataDistribution target_data_distrib } } else if (std::any_of(_operators.begin(), _operators.end(), [&](OperatorPtr op) -> bool { return op->is_serial_operator(); })) { + // If non-serial operators exist, we should improve parallelism for those. return true; } if (target_data_distribution.distribution_type != ExchangeType::BUCKET_HASH_SHUFFLE && target_data_distribution.distribution_type != ExchangeType::HASH_SHUFFLE) { + // Always do local exchange if non-hash-partition exchanger is required. + // For example, `PASSTHROUGH` exchanger is always required to distribute data evenly. 
return true; - } else if (_operators.front()->ignore_data_hash_distribution()) { - if (_data_distribution.distribution_type == target_data_distribution.distribution_type && - (_data_distribution.partition_exprs.empty() || - target_data_distribution.partition_exprs.empty())) { - return true; - } - return _data_distribution.distribution_type != target_data_distribution.distribution_type && - !(is_hash_exchange(_data_distribution.distribution_type) && - is_hash_exchange(target_data_distribution.distribution_type)); + } else if (_operators.front()->is_serial_operator()) { + DCHECK(std::all_of(_operators.begin(), _operators.end(), + [&](OperatorPtr op) -> bool { return op->is_serial_operator(); }) && + _sink->is_serial_operator()) + << debug_string(); + // All operators and sink are serial in this path. + return false; } else { return _data_distribution.distribution_type != target_data_distribution.distribution_type && !(is_hash_exchange(_data_distribution.distribution_type) && @@ -71,7 +73,6 @@ bool Pipeline::need_to_local_exchange(const DataDistribution target_data_distrib Status Pipeline::add_operator(OperatorPtr& op, const int parallelism) { if (parallelism > 0 && op->is_serial_operator()) { set_num_tasks(parallelism); - op->set_ignore_data_distribution(); } op->set_parallel_tasks(num_tasks()); _operators.emplace_back(op); diff --git a/be/src/pipeline/pipeline.h b/be/src/pipeline/pipeline.h index 9554537ca164c6..98e52ec5271613 100644 --- a/be/src/pipeline/pipeline.h +++ b/be/src/pipeline/pipeline.h @@ -115,7 +115,7 @@ class Pipeline : public std::enable_shared_from_this { int num_tasks() const { return _num_tasks; } bool close_task() { return _num_tasks_running.fetch_sub(1) == 1; } - std::string debug_string() { + std::string debug_string() const { fmt::memory_buffer debug_string_buffer; fmt::format_to(debug_string_buffer, "Pipeline [id: {}, _num_tasks: {}, _num_tasks_created: {}]", _pipeline_id, diff --git a/be/src/pipeline/pipeline_fragment_context.cpp b/be/src/pipeline/pipeline_fragment_context.cpp index ef856da51357e7..bd45016adf51e6 100644 --- a/be/src/pipeline/pipeline_fragment_context.cpp +++ b/be/src/pipeline/pipeline_fragment_context.cpp @@ -236,8 +236,6 @@ Status PipelineFragmentContext::prepare(const doris::TPipelineFragmentParams& re if (request.__isset.query_options && request.query_options.__isset.execution_timeout) { _timeout = request.query_options.execution_timeout; } - _use_serial_source = - request.fragment.__isset.use_serial_source && request.fragment.use_serial_source; _fragment_level_profile = std::make_unique("PipelineContext"); _prepare_timer = ADD_TIMER(_fragment_level_profile, "PrepareTime"); @@ -704,6 +702,9 @@ Status PipelineFragmentContext::_create_tree_helper(ObjectPool* pool, (followed_by_shuffled_operator || op->is_shuffled_operator()) && require_shuffled_data_distribution; + if (num_children == 0) { + _use_serial_source = op->is_serial_operator(); + } // rely on that tnodes is preorder of the plan for (int i = 0; i < num_children; i++) { ++*node_idx; @@ -736,8 +737,7 @@ Status PipelineFragmentContext::_add_local_exchange_impl( int idx, ObjectPool* pool, PipelinePtr cur_pipe, PipelinePtr new_pip, DataDistribution data_distribution, bool* do_local_exchange, int num_buckets, const std::map& bucket_seq_to_instance_idx, - const std::map& shuffle_idx_to_instance_idx, - const bool ignore_data_hash_distribution) { + const std::map& shuffle_idx_to_instance_idx) { auto& operators = cur_pipe->operators(); const auto downstream_pipeline_id = cur_pipe->id(); auto 
local_exchange_id = next_operator_id(); @@ -785,7 +785,6 @@ Status PipelineFragmentContext::_add_local_exchange_impl( case ExchangeType::BUCKET_HASH_SHUFFLE: shared_state->exchanger = BucketShuffleExchanger::create_unique( std::max(cur_pipe->num_tasks(), _num_instances), _num_instances, num_buckets, - ignore_data_hash_distribution, _runtime_state->query_options().__isset.local_exchange_free_blocks_limit ? cast_set( _runtime_state->query_options().local_exchange_free_blocks_limit) @@ -922,8 +921,7 @@ Status PipelineFragmentContext::_add_local_exchange( int pip_idx, int idx, int node_id, ObjectPool* pool, PipelinePtr cur_pipe, DataDistribution data_distribution, bool* do_local_exchange, int num_buckets, const std::map& bucket_seq_to_instance_idx, - const std::map& shuffle_idx_to_instance_idx, - const bool ignore_data_distribution) { + const std::map& shuffle_idx_to_instance_idx) { if (_num_instances <= 1 || cur_pipe->num_tasks_of_parent() <= 1) { return Status::OK(); } @@ -938,7 +936,7 @@ Status PipelineFragmentContext::_add_local_exchange( auto new_pip = add_pipeline(cur_pipe, pip_idx + 1); RETURN_IF_ERROR(_add_local_exchange_impl( idx, pool, cur_pipe, new_pip, data_distribution, do_local_exchange, num_buckets, - bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx, ignore_data_distribution)); + bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx)); CHECK(total_op_num + 1 == cur_pipe->operators().size() + new_pip->operators().size()) << "total_op_num: " << total_op_num @@ -952,7 +950,7 @@ Status PipelineFragmentContext::_add_local_exchange( cast_set(new_pip->operators().size()), pool, new_pip, add_pipeline(new_pip, pip_idx + 2), DataDistribution(ExchangeType::PASSTHROUGH), do_local_exchange, num_buckets, bucket_seq_to_instance_idx, - shuffle_idx_to_instance_idx, ignore_data_distribution)); + shuffle_idx_to_instance_idx)); } return Status::OK(); } @@ -978,13 +976,8 @@ Status PipelineFragmentContext::_plan_local_exchange( // scan node. so here use `_num_instance` to replace the `num_buckets` to prevent dividing 0 // still keep colocate plan after local shuffle RETURN_IF_ERROR(_plan_local_exchange( - _pipelines[pip_idx]->operators().front()->ignore_data_hash_distribution() || - num_buckets == 0 - ? _num_instances - : num_buckets, - pip_idx, _pipelines[pip_idx], bucket_seq_to_instance_idx, - shuffle_idx_to_instance_idx, - _pipelines[pip_idx]->operators().front()->ignore_data_hash_distribution())); + _use_serial_source || num_buckets == 0 ? 
_num_instances : num_buckets, pip_idx, + _pipelines[pip_idx], bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx)); } return Status::OK(); } @@ -992,8 +985,7 @@ Status PipelineFragmentContext::_plan_local_exchange( Status PipelineFragmentContext::_plan_local_exchange( int num_buckets, int pip_idx, PipelinePtr pip, const std::map& bucket_seq_to_instance_idx, - const std::map& shuffle_idx_to_instance_idx, - const bool ignore_data_hash_distribution) { + const std::map& shuffle_idx_to_instance_idx) { int idx = 1; bool do_local_exchange = false; do { @@ -1005,8 +997,7 @@ Status PipelineFragmentContext::_plan_local_exchange( RETURN_IF_ERROR(_add_local_exchange( pip_idx, idx, ops[idx]->node_id(), _runtime_state->obj_pool(), pip, ops[idx]->required_data_distribution(), &do_local_exchange, num_buckets, - bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx, - ignore_data_hash_distribution)); + bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx)); } if (do_local_exchange) { // If local exchange is needed for current operator, we will split this pipeline to @@ -1023,8 +1014,7 @@ Status PipelineFragmentContext::_plan_local_exchange( RETURN_IF_ERROR(_add_local_exchange( pip_idx, idx, pip->sink()->node_id(), _runtime_state->obj_pool(), pip, pip->sink()->required_data_distribution(), &do_local_exchange, num_buckets, - bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx, - ignore_data_hash_distribution)); + bucket_seq_to_instance_idx, shuffle_idx_to_instance_idx)); } return Status::OK(); } @@ -1215,10 +1205,6 @@ Status PipelineFragmentContext::_create_operator(ObjectPool* pool, const TPlanNo enable_query_cache ? request.fragment.query_cache_param : TQueryCacheParam {})); RETURN_IF_ERROR(cur_pipe->add_operator( op, request.__isset.parallel_instances ? request.parallel_instances : 0)); - if (request.__isset.parallel_instances) { - cur_pipe->set_num_tasks(request.parallel_instances); - op->set_ignore_data_distribution(); - } break; } case TPlanNodeType::GROUP_COMMIT_SCAN_NODE: { @@ -1229,10 +1215,6 @@ Status PipelineFragmentContext::_create_operator(ObjectPool* pool, const TPlanNo op.reset(new GroupCommitOperatorX(pool, tnode, next_operator_id(), descs, _num_instances)); RETURN_IF_ERROR(cur_pipe->add_operator( op, request.__isset.parallel_instances ? request.parallel_instances : 0)); - if (request.__isset.parallel_instances) { - cur_pipe->set_num_tasks(request.parallel_instances); - op->set_ignore_data_distribution(); - } break; } case doris::TPlanNodeType::JDBC_SCAN_NODE: { @@ -1245,20 +1227,12 @@ Status PipelineFragmentContext::_create_operator(ObjectPool* pool, const TPlanNo "Jdbc scan node is disabled, you can change be config enable_java_support " "to true and restart be."); } - if (request.__isset.parallel_instances) { - cur_pipe->set_num_tasks(request.parallel_instances); - op->set_ignore_data_distribution(); - } break; } case doris::TPlanNodeType::FILE_SCAN_NODE: { op.reset(new FileScanOperatorX(pool, tnode, next_operator_id(), descs, _num_instances)); RETURN_IF_ERROR(cur_pipe->add_operator( op, request.__isset.parallel_instances ? 
request.parallel_instances : 0)); - if (request.__isset.parallel_instances) { - cur_pipe->set_num_tasks(request.parallel_instances); - op->set_ignore_data_distribution(); - } break; } case TPlanNodeType::ES_SCAN_NODE: @@ -1266,10 +1240,6 @@ Status PipelineFragmentContext::_create_operator(ObjectPool* pool, const TPlanNo op.reset(new EsScanOperatorX(pool, tnode, next_operator_id(), descs, _num_instances)); RETURN_IF_ERROR(cur_pipe->add_operator( op, request.__isset.parallel_instances ? request.parallel_instances : 0)); - if (request.__isset.parallel_instances) { - cur_pipe->set_num_tasks(request.parallel_instances); - op->set_ignore_data_distribution(); - } break; } case TPlanNodeType::EXCHANGE_NODE: { @@ -1278,10 +1248,6 @@ Status PipelineFragmentContext::_create_operator(ObjectPool* pool, const TPlanNo op.reset(new ExchangeSourceOperatorX(pool, tnode, next_operator_id(), descs, num_senders)); RETURN_IF_ERROR(cur_pipe->add_operator( op, request.__isset.parallel_instances ? request.parallel_instances : 0)); - if (request.__isset.parallel_instances) { - op->set_ignore_data_distribution(); - cur_pipe->set_num_tasks(request.parallel_instances); - } break; } case TPlanNodeType::AGGREGATION_NODE: { @@ -1643,10 +1609,6 @@ Status PipelineFragmentContext::_create_operator(ObjectPool* pool, const TPlanNo op.reset(new DataGenSourceOperatorX(pool, tnode, next_operator_id(), descs)); RETURN_IF_ERROR(cur_pipe->add_operator( op, request.__isset.parallel_instances ? request.parallel_instances : 0)); - if (request.__isset.parallel_instances) { - cur_pipe->set_num_tasks(request.parallel_instances); - op->set_ignore_data_distribution(); - } break; } case TPlanNodeType::SCHEMA_SCAN_NODE: { diff --git a/be/src/pipeline/pipeline_fragment_context.h b/be/src/pipeline/pipeline_fragment_context.h index 6caa0e5c106722..289f5c8236522f 100644 --- a/be/src/pipeline/pipeline_fragment_context.h +++ b/be/src/pipeline/pipeline_fragment_context.h @@ -153,22 +153,19 @@ class PipelineFragmentContext : public TaskExecutionContext { const std::map& shuffle_idx_to_instance_idx); Status _plan_local_exchange(int num_buckets, int pip_idx, PipelinePtr pip, const std::map& bucket_seq_to_instance_idx, - const std::map& shuffle_idx_to_instance_idx, - const bool ignore_data_distribution); + const std::map& shuffle_idx_to_instance_idx); void _inherit_pipeline_properties(const DataDistribution& data_distribution, PipelinePtr pipe_with_source, PipelinePtr pipe_with_sink); Status _add_local_exchange(int pip_idx, int idx, int node_id, ObjectPool* pool, PipelinePtr cur_pipe, DataDistribution data_distribution, bool* do_local_exchange, int num_buckets, const std::map& bucket_seq_to_instance_idx, - const std::map& shuffle_idx_to_instance_idx, - const bool ignore_data_distribution); + const std::map& shuffle_idx_to_instance_idx); Status _add_local_exchange_impl(int idx, ObjectPool* pool, PipelinePtr cur_pipe, PipelinePtr new_pip, DataDistribution data_distribution, bool* do_local_exchange, int num_buckets, const std::map& bucket_seq_to_instance_idx, - const std::map& shuffle_idx_to_instance_idx, - const bool ignore_data_hash_distribution); + const std::map& shuffle_idx_to_instance_idx); Status _build_pipeline_tasks(const doris::TPipelineFragmentParams& request, ThreadPool* thread_pool); diff --git a/be/src/pipeline/pipeline_task.cpp b/be/src/pipeline/pipeline_task.cpp index e06b8028c9c730..a8213b31ba8f47 100644 --- a/be/src/pipeline/pipeline_task.cpp +++ b/be/src/pipeline/pipeline_task.cpp @@ -247,6 +247,12 @@ bool PipelineTask::_wait_to_start() 
{ } bool PipelineTask::_is_blocked() { + Defer defer([this] { + if (_blocked_dep != nullptr) { + _task_profile->add_info_string("TaskState", "Blocked"); + _task_profile->add_info_string("BlockedByDependency", _blocked_dep->name()); + } + }); // `_dry_run = true` means we do not need data from source operator. if (!_dry_run) { for (int i = _read_dependencies.size() - 1; i >= 0; i--) { @@ -328,6 +334,8 @@ Status PipelineTask::execute(bool* eos) { RETURN_IF_ERROR(_open()); } + _task_profile->add_info_string("TaskState", "Runnable"); + _task_profile->add_info_string("BlockedByDependency", ""); while (!_fragment_context->is_canceled()) { if (_is_blocked()) { return Status::OK(); @@ -391,6 +399,7 @@ Status PipelineTask::execute(bool* eos) { *eos = status.is() ? true : *eos; if (*eos) { // just return, the scheduler will do finish work _eos = true; + _task_profile->add_info_string("TaskState", "Finished"); return Status::OK(); } } diff --git a/be/src/runtime/descriptors.cpp b/be/src/runtime/descriptors.cpp index cc6f9050ac3915..bea11feb916f10 100644 --- a/be/src/runtime/descriptors.cpp +++ b/be/src/runtime/descriptors.cpp @@ -286,8 +286,7 @@ JdbcTableDescriptor::JdbcTableDescriptor(const TTableDescriptor& tdesc) _connection_pool_max_size(tdesc.jdbcTable.connection_pool_max_size), _connection_pool_max_wait_time(tdesc.jdbcTable.connection_pool_max_wait_time), _connection_pool_max_life_time(tdesc.jdbcTable.connection_pool_max_life_time), - _connection_pool_keep_alive(tdesc.jdbcTable.connection_pool_keep_alive), - _enable_connection_pool(tdesc.jdbcTable.enable_connection_pool) {} + _connection_pool_keep_alive(tdesc.jdbcTable.connection_pool_keep_alive) {} std::string JdbcTableDescriptor::debug_string() const { fmt::memory_buffer buf; @@ -295,14 +294,13 @@ std::string JdbcTableDescriptor::debug_string() const { buf, "JDBCTable({} ,_jdbc_catalog_id = {}, _jdbc_resource_name={} ,_jdbc_driver_url={} " ",_jdbc_driver_class={} ,_jdbc_driver_checksum={} ,_jdbc_url={} " - ",_jdbc_table_name={} ,_jdbc_user={} ,_jdbc_passwd={} " - ",_enable_connection_pool={},_connection_pool_min_size={} " + ",_jdbc_table_name={} ,_jdbc_user={} ,_jdbc_passwd={} ,_connection_pool_min_size={} " ",_connection_pool_max_size={} ,_connection_pool_max_wait_time={} " ",_connection_pool_max_life_time={} ,_connection_pool_keep_alive={})", TableDescriptor::debug_string(), _jdbc_catalog_id, _jdbc_resource_name, _jdbc_driver_url, _jdbc_driver_class, _jdbc_driver_checksum, _jdbc_url, - _jdbc_table_name, _jdbc_user, _jdbc_passwd, _enable_connection_pool, - _connection_pool_min_size, _connection_pool_max_size, _connection_pool_max_wait_time, + _jdbc_table_name, _jdbc_user, _jdbc_passwd, _connection_pool_min_size, + _connection_pool_max_size, _connection_pool_max_wait_time, _connection_pool_max_life_time, _connection_pool_keep_alive); return fmt::to_string(buf); } diff --git a/be/src/runtime/descriptors.h b/be/src/runtime/descriptors.h index b5797b0d016d75..b807c567543038 100644 --- a/be/src/runtime/descriptors.h +++ b/be/src/runtime/descriptors.h @@ -319,7 +319,6 @@ class JdbcTableDescriptor : public TableDescriptor { int32_t connection_pool_max_wait_time() const { return _connection_pool_max_wait_time; } int32_t connection_pool_max_life_time() const { return _connection_pool_max_life_time; } bool connection_pool_keep_alive() const { return _connection_pool_keep_alive; } - bool enable_connection_pool() const { return _enable_connection_pool; } private: int64_t _jdbc_catalog_id; @@ -336,7 +335,6 @@ class JdbcTableDescriptor : public 
TableDescriptor { int32_t _connection_pool_max_wait_time; int32_t _connection_pool_max_life_time; bool _connection_pool_keep_alive; - bool _enable_connection_pool; }; class TupleDescriptor { diff --git a/be/src/runtime/exec_env.cpp b/be/src/runtime/exec_env.cpp index c714db2d5e40fa..e41cc982a7482c 100644 --- a/be/src/runtime/exec_env.cpp +++ b/be/src/runtime/exec_env.cpp @@ -54,7 +54,10 @@ void ExecEnv::set_write_cooldown_meta_executors() { #endif // BE_TEST Result ExecEnv::get_tablet(int64_t tablet_id) { - return GetInstance()->storage_engine().get_tablet(tablet_id); + auto storage_engine = GetInstance()->_storage_engine.get(); + return storage_engine != nullptr + ? storage_engine->get_tablet(tablet_id) + : ResultError(Status::InternalError("failed to get tablet {}", tablet_id)); } const std::string& ExecEnv::token() const { diff --git a/be/src/runtime/exec_env.h b/be/src/runtime/exec_env.h index 399c2a7ce052df..fdbd6507d13472 100644 --- a/be/src/runtime/exec_env.h +++ b/be/src/runtime/exec_env.h @@ -110,6 +110,7 @@ class RowCache; class DummyLRUCache; class CacheManager; class ProcessProfile; +class HeapProfiler; class WalManager; class DNSCache; @@ -306,6 +307,7 @@ class ExecEnv { RowCache* get_row_cache() { return _row_cache; } CacheManager* get_cache_manager() { return _cache_manager; } ProcessProfile* get_process_profile() { return _process_profile; } + HeapProfiler* get_heap_profiler() { return _heap_profiler; } segment_v2::InvertedIndexSearcherCache* get_inverted_index_searcher_cache() { return _inverted_index_searcher_cache; } @@ -445,6 +447,7 @@ class ExecEnv { RowCache* _row_cache = nullptr; CacheManager* _cache_manager = nullptr; ProcessProfile* _process_profile = nullptr; + HeapProfiler* _heap_profiler = nullptr; segment_v2::InvertedIndexSearcherCache* _inverted_index_searcher_cache = nullptr; segment_v2::InvertedIndexQueryCache* _inverted_index_query_cache = nullptr; QueryCache* _query_cache = nullptr; diff --git a/be/src/runtime/exec_env_init.cpp b/be/src/runtime/exec_env_init.cpp index d9eedc6d8c5dfe..3d8affade82d27 100644 --- a/be/src/runtime/exec_env_init.cpp +++ b/be/src/runtime/exec_env_init.cpp @@ -71,6 +71,7 @@ #include "runtime/load_path_mgr.h" #include "runtime/load_stream_mgr.h" #include "runtime/memory/cache_manager.h" +#include "runtime/memory/heap_profiler.h" #include "runtime/memory/mem_tracker.h" #include "runtime/memory/mem_tracker_limiter.h" #include "runtime/memory/thread_mem_tracker_mgr.h" @@ -441,8 +442,11 @@ void ExecEnv::init_file_cache_factory(std::vector& cache_paths } for (const auto& status : cache_status) { if (!status.ok()) { - LOG(FATAL) << "failed to init file cache, err: " << status; - exit(-1); + if (!doris::config::ignore_broken_disk) { + LOG(FATAL) << "failed to init file cache, err: " << status; + exit(-1); + } + LOG(WARNING) << "failed to init file cache, err: " << status; } } } @@ -452,6 +456,7 @@ Status ExecEnv::_init_mem_env() { std::stringstream ss; // 1. init mem tracker _process_profile = ProcessProfile::create_global_instance(); + _heap_profiler = HeapProfiler::create_global_instance(); init_mem_tracker(); thread_context()->thread_mem_tracker_mgr->init(); #if defined(USE_MEM_TRACKER) && !defined(__SANITIZE_ADDRESS__) && !defined(ADDRESS_SANITIZER) && \ @@ -674,7 +679,7 @@ void ExecEnv::destroy() { SAFE_STOP(_write_cooldown_meta_executors); // StorageEngine must be destoried before _page_no_cache_mem_tracker.reset and _cache_manager destory - // shouldn't use SAFE_STOP. otherwise will lead to twice stop. 
+ SAFE_STOP(_storage_engine); _storage_engine.reset(); SAFE_STOP(_spill_stream_mgr); @@ -775,6 +780,7 @@ void ExecEnv::destroy() { SAFE_DELETE(_dns_cache); SAFE_DELETE(_process_profile); + SAFE_DELETE(_heap_profiler); _s_tracking_memory = false; diff --git a/be/src/runtime/fragment_mgr.cpp b/be/src/runtime/fragment_mgr.cpp index a12d24c76084b0..7cbb5e0f4adf6e 100644 --- a/be/src/runtime/fragment_mgr.cpp +++ b/be/src/runtime/fragment_mgr.cpp @@ -606,8 +606,6 @@ void FragmentMgr::remove_pipeline_context( .count(); g_fragment_executing_count << -1; g_fragment_last_active_time.set_value(now); - // this log will show when a query is really finished in BEs - LOG_INFO("Removing query {} fragment {}", print_id(query_id), f_context->get_fragment_id()); _pipeline_map.erase({query_id, f_context->get_fragment_id()}); } } @@ -899,11 +897,20 @@ void FragmentMgr::cancel_worker() { running_queries_on_all_fes.clear(); } + std::vector> ctx; { std::lock_guard lock(_lock); + ctx.reserve(_pipeline_map.size()); for (auto& pipeline_itr : _pipeline_map) { - pipeline_itr.second->clear_finished_tasks(); + ctx.push_back(pipeline_itr.second); } + } + for (auto& c : ctx) { + c->clear_finished_tasks(); + } + + { + std::lock_guard lock(_lock); for (auto it = _query_ctx_map.begin(); it != _query_ctx_map.end();) { if (auto q_ctx = it->second.lock()) { if (q_ctx->is_timeout(now)) { diff --git a/be/src/runtime/load_stream.cpp b/be/src/runtime/load_stream.cpp index 80cd167260c04d..88c64eb517c368 100644 --- a/be/src/runtime/load_stream.cpp +++ b/be/src/runtime/load_stream.cpp @@ -31,11 +31,14 @@ #include #include "bvar/bvar.h" +#include "cloud/config.h" #include "common/signal_handler.h" #include "exec/tablet_info.h" #include "gutil/ref_counted.h" +#include "olap/tablet.h" #include "olap/tablet_fwd.h" #include "olap/tablet_schema.h" +#include "runtime/exec_env.h" #include "runtime/fragment_mgr.h" #include "runtime/load_channel.h" #include "runtime/load_stream_mgr.h" @@ -149,6 +152,14 @@ Status TabletStream::append_data(const PStreamHeader& header, butil::IOBuf* data signal::set_signal_task_id(_load_id); g_load_stream_flush_running_threads << -1; auto st = _load_stream_writer->append_data(new_segid, header.offset(), buf, file_type); + if (!st.ok() && !config::is_cloud_mode()) { + auto res = ExecEnv::get_tablet(_id); + TabletSharedPtr tablet = + res.has_value() ? 
std::dynamic_pointer_cast(res.value()) : nullptr; + if (tablet) { + tablet->report_error(st); + } + } if (eos && st.ok()) { DBUG_EXECUTE_IF("TabletStream.append_data.unknown_file_type", { file_type = static_cast(-1); }); diff --git a/be/src/runtime/load_stream_writer.cpp b/be/src/runtime/load_stream_writer.cpp index 37243fab14bdb3..2e987edc7bd3ba 100644 --- a/be/src/runtime/load_stream_writer.cpp +++ b/be/src/runtime/load_stream_writer.cpp @@ -201,7 +201,7 @@ Status LoadStreamWriter::add_segment(uint32_t segid, const SegmentStatistics& st } DBUG_EXECUTE_IF("LoadStreamWriter.add_segment.size_not_match", { segment_file_size++; }); - if (segment_file_size + inverted_file_size != stat.data_size) { + if (segment_file_size != stat.data_size) { return Status::Corruption( "add_segment failed, segment stat {} does not match, file size={}, inverted file " "size={}, stat.data_size={}, tablet id={}", diff --git a/be/src/runtime/memory/heap_profiler.cpp b/be/src/runtime/memory/heap_profiler.cpp new file mode 100644 index 00000000000000..01ed82f76ef6d1 --- /dev/null +++ b/be/src/runtime/memory/heap_profiler.cpp @@ -0,0 +1,130 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "runtime/memory/heap_profiler.h" + +#ifdef USE_JEMALLOC +#include "jemalloc/jemalloc.h" +#endif +#include "agent/utils.h" +#include "common/config.h" +#include "io/fs/local_file_system.h" + +namespace doris { + +void HeapProfiler::set_prof_active(bool prof) { +#ifdef USE_JEMALLOC + std::lock_guard guard(_mutex); + try { + int err = jemallctl("prof.active", nullptr, nullptr, &prof, 1); + err |= jemallctl("prof.thread_active_init", nullptr, nullptr, &prof, 1); + if (err) { + LOG(WARNING) << "jemalloc heap profiling start failed, " << err; + } else { + LOG(WARNING) << "jemalloc heap profiling started"; + } + } catch (...) { + LOG(WARNING) << "jemalloc heap profiling start failed"; + } +#endif +} + +bool HeapProfiler::get_prof_dump(const std::string& profile_file_name) { +#ifdef USE_JEMALLOC + std::lock_guard guard(_mutex); + const char* file_name_ptr = profile_file_name.c_str(); + try { + int err = jemallctl("prof.dump", nullptr, nullptr, &file_name_ptr, sizeof(const char*)); + if (err) { + LOG(WARNING) << "dump heap profile failed, " << err; + return false; + } else { + LOG(INFO) << "dump heap profile to " << profile_file_name; + return true; + } + } catch (...) 
{ + LOG(WARNING) << "dump heap profile failed"; + return false; + } +#else + return false; +#endif +} + +static std::string jeprof_profile_to_dot(const std::string& profile_file_name) { + AgentUtils util; + const static std::string jeprof_path = fmt::format("{}/bin/jeprof", std::getenv("DORIS_HOME")); + const static std::string binary_path = + fmt::format("{}/lib/doris_be", std::getenv("DORIS_HOME")); + // https://doris.apache.org/community/developer-guide/debug-tool/#3-jeprof-parses-heap-profile + std::string jeprof_cmd = + fmt::format("{} --dot {} {}", jeprof_path, binary_path, profile_file_name); + std::string msg; + bool rc = util.exec_cmd(jeprof_cmd, &msg); + if (!rc) { + LOG(WARNING) << "jeprof profile to dot failed: " << msg; + } + return msg; +} + +void HeapProfiler::heap_profiler_start() { + set_prof_active(true); +} + +void HeapProfiler::heap_profiler_stop() { + set_prof_active(false); +} + +bool HeapProfiler::check_heap_profiler() { +#ifdef USE_JEMALLOC + size_t value = 0; + size_t sz = sizeof(value); + jemallctl("prof.active", &value, &sz, nullptr, 0); + return value; +#else + return false; +#endif +} + +std::string HeapProfiler::dump_heap_profile() { + if (!config::jeprofile_dir.empty()) { + auto st = io::global_local_filesystem()->create_directory(config::jeprofile_dir); + if (!st.ok()) { + LOG(WARNING) << "create jeprofile dir failed."; + return ""; + } + } + std::string profile_file_name = + fmt::format("{}/jeheap_dump.{}.{}.{}.heap", config::jeprofile_dir, std::time(nullptr), + getpid(), rand()); + if (get_prof_dump(profile_file_name)) { + return profile_file_name; + } else { + return ""; + } +} + +std::string HeapProfiler::dump_heap_profile_to_dot() { + std::string profile_file_name = dump_heap_profile(); + if (!profile_file_name.empty()) { + return jeprof_profile_to_dot(profile_file_name); + } else { + return ""; + } +} + +} // namespace doris diff --git a/be/src/runtime/memory/heap_profiler.h b/be/src/runtime/memory/heap_profiler.h new file mode 100644 index 00000000000000..7f156351200b3a --- /dev/null +++ b/be/src/runtime/memory/heap_profiler.h @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
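The new HeapProfiler is a thin wrapper over three jemalloc control knobs: prof.active / prof.thread_active_init toggle allocation sampling, and prof.dump writes a .heap file that jeprof can post-process into a dot graph. A minimal standalone sketch of that flow, assuming a jemalloc build with profiling enabled and using the stock mallctl entry point (Doris routes the same calls through the je_-prefixed jemallctl); the dump path is hypothetical:

#include <jemalloc/jemalloc.h>
#include <cstdio>

int main() {
    bool active = true;
    // Enable sampling for the process and for threads created from now on.
    if (mallctl("prof.active", nullptr, nullptr, &active, sizeof(active)) != 0 ||
        mallctl("prof.thread_active_init", nullptr, nullptr, &active, sizeof(active)) != 0) {
        std::fprintf(stderr, "heap profiling unavailable (jemalloc built without profiling?)\n");
        return 1;
    }
    // ... run the workload to be profiled ...
    const char* path = "/tmp/jeheap_dump.heap"; // hypothetical output file
    if (mallctl("prof.dump", nullptr, nullptr, &path, sizeof(path)) == 0) {
        std::printf("heap profile written to %s\n", path);
        // Post-process offline, e.g.: jeprof --dot <be_binary> /tmp/jeheap_dump.heap
    }
    return 0;
}

The handlers registered further down in http_service.cpp (/jeheap/active/{prof_value}, /jeheap/dump, /jeheap/dump_only) expose the same start/dump sequence over the BE web server.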
+ +#pragma once + +#include "runtime/exec_env.h" + +namespace doris { + +class HeapProfiler { +public: + static HeapProfiler* create_global_instance() { return new HeapProfiler(); } + static HeapProfiler* instance() { return ExecEnv::GetInstance()->get_heap_profiler(); } + HeapProfiler() = default; + + void heap_profiler_start(); + void heap_profiler_stop(); + bool check_heap_profiler(); + std::string dump_heap_profile(); + std::string dump_heap_profile_to_dot(); + +private: + void set_prof_active(bool prof); + bool get_prof_dump(const std::string& profile_file_name); + + std::mutex _mutex; +}; + +} // namespace doris diff --git a/be/src/runtime/runtime_filter_mgr.cpp b/be/src/runtime/runtime_filter_mgr.cpp index 08a229c0ecf72b..77d2097d20c010 100644 --- a/be/src/runtime/runtime_filter_mgr.cpp +++ b/be/src/runtime/runtime_filter_mgr.cpp @@ -29,6 +29,7 @@ #include #include +#include "common/config.h" #include "common/logging.h" #include "common/status.h" #include "exprs/bloom_filter_func.h" @@ -343,8 +344,10 @@ Status RuntimeFilterMergeControllerEntity::send_filter_size(const PSendFilterSiz auto* pquery_id = closure->request_->mutable_query_id(); pquery_id->set_hi(_state->query_id.hi()); pquery_id->set_lo(_state->query_id.lo()); - closure->cntl_->set_timeout_ms(std::min(3600, _state->execution_timeout) * 1000); - closure->cntl_->ignore_eovercrowded(); + closure->cntl_->set_timeout_ms(get_execution_rpc_timeout_ms(_state->execution_timeout)); + if (config::execution_ignore_eovercrowded) { + closure->cntl_->ignore_eovercrowded(); + } closure->request_->set_filter_id(filter_id); closure->request_->set_filter_size(cnt_val->global_size); @@ -456,8 +459,12 @@ Status RuntimeFilterMergeControllerEntity::merge(const PMergeFilterRequest* requ if (has_attachment) { closure->cntl_->request_attachment().append(request_attachment); } - closure->cntl_->set_timeout_ms(std::min(3600, _state->execution_timeout) * 1000); - closure->cntl_->ignore_eovercrowded(); + + closure->cntl_->set_timeout_ms(get_execution_rpc_timeout_ms(_state->execution_timeout)); + if (config::execution_ignore_eovercrowded) { + closure->cntl_->ignore_eovercrowded(); + } + // set fragment-id if (target.__isset.target_fragment_ids) { for (auto& target_fragment_id : target.target_fragment_ids) { diff --git a/be/src/runtime/runtime_state.h b/be/src/runtime/runtime_state.h index 34ce79ec7a749a..abc823bc25b291 100644 --- a/be/src/runtime/runtime_state.h +++ b/be/src/runtime/runtime_state.h @@ -38,6 +38,7 @@ #include "agent/be_exec_version_manager.h" #include "cctz/time_zone.h" #include "common/compiler_util.h" // IWYU pragma: keep +#include "common/config.h" #include "common/factory_creator.h" #include "common/status.h" #include "gutil/integral_types.h" @@ -51,6 +52,10 @@ namespace doris { class IRuntimeFilter; +inline int32_t get_execution_rpc_timeout_ms(int32_t execution_timeout_sec) { + return std::min(config::execution_max_rpc_timeout_sec, execution_timeout_sec) * 1000; +} + namespace pipeline { class PipelineXLocalStateBase; class PipelineXSinkLocalStateBase; diff --git a/be/src/runtime/stream_load/stream_load_context.h b/be/src/runtime/stream_load/stream_load_context.h index 9d1601372f877d..93f76fad4e613c 100644 --- a/be/src/runtime/stream_load/stream_load_context.h +++ b/be/src/runtime/stream_load/stream_load_context.h @@ -164,9 +164,10 @@ class StreamLoadContext { // the following members control the max progress of a consuming // process. if any of them reach, the consuming will finish. 
- int64_t max_interval_s = 5; - int64_t max_batch_rows = 100000; - int64_t max_batch_size = 100 * 1024 * 1024; // 100MB + // same as values set in fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java + int64_t max_interval_s = 60; + int64_t max_batch_rows = 20000000; + int64_t max_batch_size = 1024 * 1024 * 1024; // 1GB // for parse json-data std::string data_format = ""; diff --git a/be/src/service/http_service.cpp b/be/src/service/http_service.cpp index 9330867ded65a1..7704d07b6f9477 100644 --- a/be/src/service/http_service.cpp +++ b/be/src/service/http_service.cpp @@ -203,7 +203,20 @@ Status HttpService::start() { static_cast(PprofActions::setup(_env, _ev_http_server.get(), _pool)); // register jeprof actions - static_cast(JeprofileActions::setup(_env, _ev_http_server.get(), _pool)); + SetJeHeapProfileActiveActions* set_jeheap_profile_active_action = + _pool.add(new SetJeHeapProfileActiveActions(_env)); + _ev_http_server->register_handler(HttpMethod::GET, "/jeheap/active/{prof_value}", + set_jeheap_profile_active_action); + + DumpJeHeapProfileToDotActions* dump_jeheap_profile_to_dot_action = + _pool.add(new DumpJeHeapProfileToDotActions(_env)); + _ev_http_server->register_handler(HttpMethod::GET, "/jeheap/dump", + dump_jeheap_profile_to_dot_action); + + DumpJeHeapProfileActions* dump_jeheap_profile_action = + _pool.add(new DumpJeHeapProfileActions(_env)); + _ev_http_server->register_handler(HttpMethod::GET, "/jeheap/dump_only", + dump_jeheap_profile_action); // register metrics { diff --git a/be/src/util/arrow/row_batch.cpp b/be/src/util/arrow/row_batch.cpp index 2c6ed52ddde65f..0cbb6bcd0c8916 100644 --- a/be/src/util/arrow/row_batch.cpp +++ b/be/src/util/arrow/row_batch.cpp @@ -157,17 +157,8 @@ Status convert_to_arrow_type(const TypeDescriptor& type, std::shared_ptr* field, - const std::string& timezone) { - std::shared_ptr type; - RETURN_IF_ERROR(convert_to_arrow_type(desc->type(), &type, timezone)); - *field = arrow::field(desc->col_name(), type, desc->is_nullable()); - return Status::OK(); -} - -Status convert_block_arrow_schema(const vectorized::Block& block, - std::shared_ptr* result, - const std::string& timezone) { +Status get_arrow_schema(const vectorized::Block& block, std::shared_ptr* result, + const std::string& timezone) { std::vector> fields; for (const auto& type_and_name : block) { std::shared_ptr arrow_type; @@ -180,21 +171,6 @@ Status convert_block_arrow_schema(const vectorized::Block& block, return Status::OK(); } -Status convert_to_arrow_schema(const RowDescriptor& row_desc, - std::shared_ptr* result, - const std::string& timezone) { - std::vector> fields; - for (auto tuple_desc : row_desc.tuple_descriptors()) { - for (auto desc : tuple_desc->slots()) { - std::shared_ptr field; - RETURN_IF_ERROR(convert_to_arrow_field(desc, &field, timezone)); - fields.push_back(field); - } - } - *result = arrow::schema(std::move(fields)); - return Status::OK(); -} - Status convert_expr_ctxs_arrow_schema(const vectorized::VExprContextSPtrs& output_vexpr_ctxs, std::shared_ptr* result, const std::string& timezone) { diff --git a/be/src/util/arrow/row_batch.h b/be/src/util/arrow/row_batch.h index 9a33719a1cfbcc..3993003baf6e95 100644 --- a/be/src/util/arrow/row_batch.h +++ b/be/src/util/arrow/row_batch.h @@ -45,12 +45,8 @@ Status convert_to_arrow_type(const TypeDescriptor& type, std::shared_ptr* result, const std::string& timezone); - -Status convert_block_arrow_schema(const vectorized::Block& block, - std::shared_ptr* result, - const std::string& timezone); 
+Status get_arrow_schema(const vectorized::Block& block, std::shared_ptr* result, + const std::string& timezone); Status convert_expr_ctxs_arrow_schema(const vectorized::VExprContextSPtrs& output_vexpr_ctxs, std::shared_ptr* result, diff --git a/be/src/util/hash_util.hpp b/be/src/util/hash_util.hpp index dc70b1c9f9c40b..e9ac72c5ccdcb4 100644 --- a/be/src/util/hash_util.hpp +++ b/be/src/util/hash_util.hpp @@ -134,7 +134,7 @@ class HashUtil { static const uint32_t MURMUR3_32_SEED = 104729; // modify from https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp - static uint32_t murmur_hash3_32(const void* key, int32_t len, uint32_t seed) { + static uint32_t murmur_hash3_32(const void* key, int64_t len, uint32_t seed) { uint32_t out = 0; murmur_hash3_x86_32(key, len, seed, &out); return out; @@ -227,7 +227,7 @@ class HashUtil { // Our hash function is MurmurHash2, 64 bit version. // It was modified in order to provide the same result in // big and little endian archs (endian neutral). - static uint64_t murmur_hash64A(const void* key, int32_t len, unsigned int seed) { + static uint64_t murmur_hash64A(const void* key, int64_t len, unsigned int seed) { const uint64_t m = MURMUR_PRIME; const int r = 47; uint64_t h = seed ^ (len * m); diff --git a/be/src/util/jni-util.cpp b/be/src/util/jni-util.cpp index 02d20ed9a4fe80..6ad0790ef0859e 100644 --- a/be/src/util/jni-util.cpp +++ b/be/src/util/jni-util.cpp @@ -317,6 +317,7 @@ Status JniUtil::GetJniExceptionMsg(JNIEnv* env, bool log_stack, const string& pr } jobject JniUtil::convert_to_java_map(JNIEnv* env, const std::map& map) { + //TODO: ADD EXCEPTION CHECK. jclass hashmap_class = env->FindClass("java/util/HashMap"); jmethodID hashmap_constructor = env->GetMethodID(hashmap_class, "", "(I)V"); jobject hashmap_object = env->NewObject(hashmap_class, hashmap_constructor, map.size()); @@ -399,16 +400,26 @@ std::map JniUtil::convert_to_cpp_map(JNIEnv* env, jobj Status JniUtil::GetGlobalClassRef(JNIEnv* env, const char* class_str, jclass* class_ref) { *class_ref = NULL; - jclass local_cl = env->FindClass(class_str); - RETURN_ERROR_IF_EXC(env); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF(jclass, local_cl, env, FindClass(class_str)); RETURN_IF_ERROR(LocalToGlobalRef(env, local_cl, reinterpret_cast(class_ref))); - env->DeleteLocalRef(local_cl); - RETURN_ERROR_IF_EXC(env); return Status::OK(); } Status JniUtil::LocalToGlobalRef(JNIEnv* env, jobject local_ref, jobject* global_ref) { *global_ref = env->NewGlobalRef(local_ref); + // NewGlobalRef: + // Returns a global reference to the given obj. + // + //May return NULL if: + // obj refers to null + // the system has run out of memory + // obj was a weak global reference and has already been garbage collected + if (*global_ref == NULL) { + return Status::InternalError( + "LocalToGlobalRef fail,global ref is NULL,maybe the system has run out of memory."); + } + + //NewGlobalRef not throw exception,maybe we just need check NULL. 
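GetGlobalClassRef and LocalToGlobalRef above now lean on two ideas: test ExceptionCheck() immediately after each JNI call, and treat a NULL result from NewGlobalRef as an out-of-memory error rather than expecting a pending exception. A rough hand-expansion of that pattern against the plain jni.h API (the helper name and error-string return are illustrative, not the Doris signature):

#include <jni.h>
#include <string>

// Returns an empty string on success, otherwise a description of the failure.
std::string make_global_class_ref(JNIEnv* env, const char* name, jclass* out) {
    jclass local = env->FindClass(name);               // may raise a pending Java exception
    if (env->ExceptionCheck()) {                       // always test before using the result
        env->ExceptionClear();
        return std::string("exception while loading class ") + name;
    }
    *out = static_cast<jclass>(env->NewGlobalRef(local));
    env->DeleteLocalRef(local);                        // the local ref is no longer needed
    if (*out == nullptr) {                             // NewGlobalRef signals OOM with NULL, not an exception
        return "NewGlobalRef returned NULL (out of memory?)";
    }
    return {};
}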
RETURN_ERROR_IF_EXC(env); return Status::OK(); } diff --git a/be/src/util/jni-util.h b/be/src/util/jni-util.h index 666a5e526dfbda..df332951afebb8 100644 --- a/be/src/util/jni-util.h +++ b/be/src/util/jni-util.h @@ -28,6 +28,7 @@ #include "common/status.h" #include "jni_md.h" +#include "util/defer_op.h" #include "util/thrift_util.h" #ifdef USE_HADOOP_HDFS @@ -38,12 +39,25 @@ extern "C" JNIEnv* getJNIEnv(void); namespace doris { class JniUtil; -#define RETURN_ERROR_IF_EXC(env) \ - do { \ - jthrowable exc = (env)->ExceptionOccurred(); \ - if (exc != nullptr) return JniUtil::GetJniExceptionMsg(env); \ +#define RETURN_ERROR_IF_EXC(env) \ + do { \ + if (env->ExceptionCheck()) [[unlikely]] \ + return JniUtil::GetJniExceptionMsg(env); \ } while (false) +#define JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF(type, result, env, func) \ + type result = env->func; \ + DEFER(env->DeleteLocalRef(result)); \ + RETURN_ERROR_IF_EXC(env) + +#define JNI_CALL_METHOD_CHECK_EXCEPTION(type, result, env, func) \ + type result = env->func; \ + RETURN_ERROR_IF_EXC(env) + +//In order to reduce the potential risks caused by not handling exceptions, +// you need to refer to https://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/functions.html +// to confirm whether the jni method will throw an exception. + class JniUtil { public: static Status Init() WARN_UNUSED_RESULT; @@ -65,6 +79,10 @@ class JniUtil { return Status::OK(); } + //jclass is generally a local reference. + //Method ID and field ID values are forever. + //If you want to use the jclass across multiple threads or multiple calls into the JNI code you need + // to create a global reference to it with GetGlobalClassRef(). static Status GetGlobalClassRef(JNIEnv* env, const char* class_str, jclass* class_ref) WARN_UNUSED_RESULT; diff --git a/be/src/util/jvm_metrics.cpp b/be/src/util/jvm_metrics.cpp index fc30d1073acdc6..4cb71f5e827878 100644 --- a/be/src/util/jvm_metrics.cpp +++ b/be/src/util/jvm_metrics.cpp @@ -22,7 +22,9 @@ #include #include "common/config.h" +#include "util/defer_op.h" #include "util/metrics.h" + namespace doris { #define DEFINE_JVM_SIZE_BYTES_METRIC(name, type) \ @@ -90,9 +92,13 @@ JvmMetrics::JvmMetrics(MetricRegistry* registry, JNIEnv* env) { break; } try { - _jvm_stats.init(env); + Status st = _jvm_stats.init(env); + if (!st) { + LOG(WARNING) << "jvm Stats Init Fail. " << st.to_string(); + break; + } } catch (...) { - LOG(WARNING) << "JVM STATS INIT FAIL"; + LOG(WARNING) << "jvm Stats Throw Exception Init Fail."; break; } if (!_jvm_stats.init_complete()) { @@ -133,21 +139,22 @@ JvmMetrics::JvmMetrics(MetricRegistry* registry, JNIEnv* env) { void JvmMetrics::update() { static long fail_count = 0; - bool have_exception = false; try { - _jvm_stats.refresh(this); + Status st = _jvm_stats.refresh(this); + if (!st) { + fail_count++; + LOG(WARNING) << "Jvm Stats update Fail! " << st.to_string(); + } else { + fail_count = 0; + } } catch (...) { - have_exception = true; - LOG(WARNING) << "JVM MONITOR UPDATE FAIL!"; + LOG(WARNING) << "Jvm Stats update throw Exception!"; fail_count++; } //When 30 consecutive exceptions occur, turn off jvm information collection. 
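The hunk that follows implements that cutoff: any successful refresh resets the counter, and 30 consecutive failures deregister the metrics hook for good. Reduced to its core (the type and log message here are illustrative stand-ins, not the actual Doris hook API):

#include <iostream>

struct ConsecutiveFailureGuard {
    int fail_count = 0;
    bool enabled = true;

    void on_refresh_result(bool ok) {
        if (!enabled) {
            return;                          // collection already switched off
        }
        if (ok) {
            fail_count = 0;                  // any success resets the streak
        } else if (++fail_count >= 30) {     // 30 consecutive failures: give up
            enabled = false;
            std::cerr << "jvm metrics collection disabled after repeated failures\n";
        }
    }
};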
- if (!have_exception) { - fail_count = 0; - } if (fail_count >= 30) { - LOG(WARNING) << "JVM MONITOR CLOSE!"; + LOG(WARNING) << "Jvm Stats CLOSE!"; _jvm_stats.set_complete(false); _server_entity->deregister_hook(_s_hook_name); @@ -182,193 +189,257 @@ void JvmMetrics::update() { } } -void JvmStats::init(JNIEnv* ENV) { - env = ENV; - _managementFactoryClass = env->FindClass("java/lang/management/ManagementFactory"); - if (_managementFactoryClass == nullptr) { - LOG(WARNING) - << "Class java/lang/management/ManagementFactory Not Find.JVM monitoring fails."; - return; - } +Status JvmStats::init(JNIEnv* env) { + RETURN_IF_ERROR(JniUtil::GetGlobalClassRef(env, "java/lang/management/ManagementFactory", + &_managementFactoryClass)); - _getMemoryMXBeanMethod = env->GetStaticMethodID(_managementFactoryClass, "getMemoryMXBean", - "()Ljava/lang/management/MemoryMXBean;"); + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getMemoryMXBeanMethod, env, + GetStaticMethodID(_managementFactoryClass, "getMemoryMXBean", + "()Ljava/lang/management/MemoryMXBean;")); - _memoryUsageClass = env->FindClass("java/lang/management/MemoryUsage"); - if (_memoryUsageClass == nullptr) { - LOG(WARNING) << "Class java/lang/management/MemoryUsage Not Find.JVM monitoring fails."; - return; - } - _getMemoryUsageUsedMethod = env->GetMethodID(_memoryUsageClass, "getUsed", "()J"); - _getMemoryUsageCommittedMethod = env->GetMethodID(_memoryUsageClass, "getCommitted", "()J"); - _getMemoryUsageMaxMethod = env->GetMethodID(_memoryUsageClass, "getMax", "()J"); + RETURN_IF_ERROR(JniUtil::GetGlobalClassRef(env, "java/lang/management/MemoryUsage", + &_memoryUsageClass)); - _memoryMXBeanClass = env->FindClass("java/lang/management/MemoryMXBean"); - if (_memoryMXBeanClass == nullptr) { - LOG(WARNING) << "Class java/lang/management/MemoryMXBean Not Find.JVM monitoring fails."; - return; - } - _getHeapMemoryUsageMethod = env->GetMethodID(_memoryMXBeanClass, "getHeapMemoryUsage", - "()Ljava/lang/management/MemoryUsage;"); - _getNonHeapMemoryUsageMethod = env->GetMethodID(_memoryMXBeanClass, "getNonHeapMemoryUsage", - "()Ljava/lang/management/MemoryUsage;"); + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getMemoryUsageUsedMethod, env, + GetMethodID(_memoryUsageClass, "getUsed", "()J")); - _getMemoryPoolMXBeansMethod = env->GetStaticMethodID( - _managementFactoryClass, "getMemoryPoolMXBeans", "()Ljava/util/List;"); + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getMemoryUsageCommittedMethod, env, + GetMethodID(_memoryUsageClass, "getCommitted", "()J")); - _listClass = env->FindClass("java/util/List"); - if (_listClass == nullptr) { - LOG(WARNING) << "Class java/util/List Not Find.JVM monitoring fails."; - return; - } - _getListSizeMethod = env->GetMethodID(_listClass, "size", "()I"); - _getListUseIndexMethod = env->GetMethodID(_listClass, "get", "(I)Ljava/lang/Object;"); + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getMemoryUsageMaxMethod, env, + GetMethodID(_memoryUsageClass, "getMax", "()J")); - _memoryPoolMXBeanClass = env->FindClass("java/lang/management/MemoryPoolMXBean"); - if (_memoryPoolMXBeanClass == nullptr) { - LOG(WARNING) - << "Class java/lang/management/MemoryPoolMXBean Not Find.JVM monitoring fails."; - return; - } - _getMemoryPoolMXBeanUsageMethod = env->GetMethodID(_memoryPoolMXBeanClass, "getUsage", - "()Ljava/lang/management/MemoryUsage;"); - _getMemoryPollMXBeanPeakMethod = env->GetMethodID(_memoryPoolMXBeanClass, "getPeakUsage", - "()Ljava/lang/management/MemoryUsage;"); - _getMemoryPollMXBeanNameMethod = - env->GetMethodID(_memoryPoolMXBeanClass, "getName", 
"()Ljava/lang/String;"); - - _getThreadMXBeanMethod = env->GetStaticMethodID(_managementFactoryClass, "getThreadMXBean", - "()Ljava/lang/management/ThreadMXBean;"); - - _getGarbageCollectorMXBeansMethod = env->GetStaticMethodID( - _managementFactoryClass, "getGarbageCollectorMXBeans", "()Ljava/util/List;"); - - _garbageCollectorMXBeanClass = env->FindClass("java/lang/management/GarbageCollectorMXBean"); - if (_garbageCollectorMXBeanClass == nullptr) { - LOG(WARNING) << "Class java/lang/management/GarbageCollectorMXBean Not Find.JVM monitoring " - "fails."; - return; - } - _getGCNameMethod = - env->GetMethodID(_garbageCollectorMXBeanClass, "getName", "()Ljava/lang/String;"); - _getGCCollectionCountMethod = - env->GetMethodID(_garbageCollectorMXBeanClass, "getCollectionCount", "()J"); - _getGCCollectionTimeMethod = - env->GetMethodID(_garbageCollectorMXBeanClass, "getCollectionTime", "()J"); - - _threadMXBeanClass = env->FindClass("java/lang/management/ThreadMXBean"); - if (_threadMXBeanClass == nullptr) { - LOG(WARNING) << "Class java/lang/management/ThreadMXBean Not Find.JVM monitoring fails."; - return; - } - _getAllThreadIdsMethod = env->GetMethodID(_threadMXBeanClass, "getAllThreadIds", "()[J"); - _getThreadInfoMethod = env->GetMethodID(_threadMXBeanClass, "getThreadInfo", - "([JI)[Ljava/lang/management/ThreadInfo;"); - _getPeakThreadCountMethod = env->GetMethodID(_threadMXBeanClass, "getPeakThreadCount", "()I"); - - _threadInfoClass = env->FindClass("java/lang/management/ThreadInfo"); - if (_threadInfoClass == nullptr) { - LOG(WARNING) << "Class java/lang/management/ThreadInfo Not Find.JVM monitoring fails."; - return; - } + RETURN_IF_ERROR(JniUtil::GetGlobalClassRef(env, "java/lang/management/MemoryMXBean", + &_memoryMXBeanClass)); - _getThreadStateMethod = - env->GetMethodID(_threadInfoClass, "getThreadState", "()Ljava/lang/Thread$State;"); + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getHeapMemoryUsageMethod, env, + GetMethodID(_memoryMXBeanClass, "getHeapMemoryUsage", + "()Ljava/lang/management/MemoryUsage;")); + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getNonHeapMemoryUsageMethod, env, + GetMethodID(_memoryMXBeanClass, "getNonHeapMemoryUsage", + "()Ljava/lang/management/MemoryUsage;")); - _threadStateClass = env->FindClass("java/lang/Thread$State"); - if (_threadStateClass == nullptr) { - LOG(WARNING) << "Class java/lang/Thread$State Not Find.JVM monitoring fails."; - return; - } + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _getMemoryPoolMXBeansMethod, env, + GetStaticMethodID(_managementFactoryClass, "getMemoryPoolMXBeans", + "()Ljava/util/List;")); - jfieldID newThreadFieldID = - env->GetStaticFieldID(_threadStateClass, "NEW", "Ljava/lang/Thread$State;"); - jfieldID runnableThreadFieldID = - env->GetStaticFieldID(_threadStateClass, "RUNNABLE", "Ljava/lang/Thread$State;"); - jfieldID blockedThreadFieldID = - env->GetStaticFieldID(_threadStateClass, "BLOCKED", "Ljava/lang/Thread$State;"); - jfieldID waitingThreadFieldID = - env->GetStaticFieldID(_threadStateClass, "WAITING", "Ljava/lang/Thread$State;"); - jfieldID timedWaitingThreadFieldID = - env->GetStaticFieldID(_threadStateClass, "TIMED_WAITING", "Ljava/lang/Thread$State;"); - jfieldID terminatedThreadFieldID = - env->GetStaticFieldID(_threadStateClass, "TERMINATED", "Ljava/lang/Thread$State;"); - - _newThreadStateObj = env->GetStaticObjectField(_threadStateClass, newThreadFieldID); - _runnableThreadStateObj = env->GetStaticObjectField(_threadStateClass, runnableThreadFieldID); - _blockedThreadStateObj = 
env->GetStaticObjectField(_threadStateClass, blockedThreadFieldID); - _waitingThreadStateObj = env->GetStaticObjectField(_threadStateClass, waitingThreadFieldID); - _timedWaitingThreadStateObj = - env->GetStaticObjectField(_threadStateClass, timedWaitingThreadFieldID); - _terminatedThreadStateObj = - env->GetStaticObjectField(_threadStateClass, terminatedThreadFieldID); + RETURN_IF_ERROR(JniUtil::GetGlobalClassRef(env, "java/util/List", &_listClass)); - LOG(INFO) << "Start JVM monitoring."; + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getListSizeMethod, env, + GetMethodID(_listClass, "size", "()I")); + + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getListUseIndexMethod, env, + GetMethodID(_listClass, "get", "(I)Ljava/lang/Object;")); + + RETURN_IF_ERROR(JniUtil::GetGlobalClassRef(env, "java/lang/management/MemoryPoolMXBean", + &_memoryPoolMXBeanClass)); + + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getMemoryPoolMXBeanUsageMethod, env, + GetMethodID(_memoryPoolMXBeanClass, "getUsage", + "()Ljava/lang/management/MemoryUsage;")); + + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getMemoryPollMXBeanPeakMethod, env, + GetMethodID(_memoryPoolMXBeanClass, "getPeakUsage", + "()Ljava/lang/management/MemoryUsage;")); + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _getMemoryPollMXBeanNameMethod, env, + GetMethodID(_memoryPoolMXBeanClass, "getName", "()Ljava/lang/String;")); + + JNI_CALL_METHOD_CHECK_EXCEPTION(, _getThreadMXBeanMethod, env, + GetStaticMethodID(_managementFactoryClass, "getThreadMXBean", + "()Ljava/lang/management/ThreadMXBean;")); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _getGarbageCollectorMXBeansMethod, env, + GetStaticMethodID(_managementFactoryClass, "getGarbageCollectorMXBeans", + "()Ljava/util/List;")); + + RETURN_IF_ERROR(JniUtil::GetGlobalClassRef(env, "java/lang/management/GarbageCollectorMXBean", + &_garbageCollectorMXBeanClass)); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _getGCNameMethod, env, + GetMethodID(_garbageCollectorMXBeanClass, "getName", "()Ljava/lang/String;")); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _getGCCollectionCountMethod, env, + GetMethodID(_garbageCollectorMXBeanClass, "getCollectionCount", "()J")); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _getGCCollectionTimeMethod, env, + GetMethodID(_garbageCollectorMXBeanClass, "getCollectionTime", "()J")); + + RETURN_IF_ERROR(JniUtil::GetGlobalClassRef(env, "java/lang/management/ThreadMXBean", + &_threadMXBeanClass)); + + JNI_CALL_METHOD_CHECK_EXCEPTION(, + + _getAllThreadIdsMethod, env, + GetMethodID(_threadMXBeanClass, "getAllThreadIds", "()[J")); + + JNI_CALL_METHOD_CHECK_EXCEPTION(, + + _getThreadInfoMethod, env, + GetMethodID(_threadMXBeanClass, "getThreadInfo", + "([JI)[Ljava/lang/management/ThreadInfo;")); + + JNI_CALL_METHOD_CHECK_EXCEPTION(, + + _getPeakThreadCountMethod, env, + GetMethodID(_threadMXBeanClass, "getPeakThreadCount", "()I")); + + RETURN_IF_ERROR( + JniUtil::GetGlobalClassRef(env, "java/lang/management/ThreadInfo", &_threadInfoClass)); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + , + + _getThreadStateMethod, env, + GetMethodID(_threadInfoClass, "getThreadState", "()Ljava/lang/Thread$State;")); + + RETURN_IF_ERROR(JniUtil::GetGlobalClassRef(env, "java/lang/Thread$State", &_threadStateClass)); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + jfieldID, newThreadFieldID, env, + GetStaticFieldID(_threadStateClass, "NEW", "Ljava/lang/Thread$State;")); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + jfieldID, runnableThreadFieldID, env, + GetStaticFieldID(_threadStateClass, "RUNNABLE", "Ljava/lang/Thread$State;")); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + jfieldID, 
blockedThreadFieldID, env, + GetStaticFieldID(_threadStateClass, "BLOCKED", "Ljava/lang/Thread$State;")); + JNI_CALL_METHOD_CHECK_EXCEPTION( + jfieldID, waitingThreadFieldID, env, + GetStaticFieldID(_threadStateClass, "WAITING", "Ljava/lang/Thread$State;")); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + jfieldID, timedWaitingThreadFieldID, env, + GetStaticFieldID(_threadStateClass, "TIMED_WAITING", "Ljava/lang/Thread$State;")); + JNI_CALL_METHOD_CHECK_EXCEPTION( + jfieldID, terminatedThreadFieldID, env, + GetStaticFieldID(_threadStateClass, "TERMINATED", "Ljava/lang/Thread$State;")); + + JNI_CALL_METHOD_CHECK_EXCEPTION(jobject, newThreadStateObj, env, + GetStaticObjectField(_threadStateClass, newThreadFieldID)); + RETURN_IF_ERROR(JniUtil::LocalToGlobalRef(env, newThreadStateObj, &_newThreadStateObj)); + + JNI_CALL_METHOD_CHECK_EXCEPTION(jobject, runnableThreadStateObj, env, + GetStaticObjectField(_threadStateClass, runnableThreadFieldID)); + RETURN_IF_ERROR( + JniUtil::LocalToGlobalRef(env, runnableThreadStateObj, &_runnableThreadStateObj)); + + JNI_CALL_METHOD_CHECK_EXCEPTION(jobject, blockedThreadStateObj, env, + GetStaticObjectField(_threadStateClass, blockedThreadFieldID)); + RETURN_IF_ERROR(JniUtil::LocalToGlobalRef(env, blockedThreadStateObj, &_blockedThreadStateObj)); + + JNI_CALL_METHOD_CHECK_EXCEPTION(jobject, waitingThreadStateObj, env, + GetStaticObjectField(_threadStateClass, waitingThreadFieldID)); + RETURN_IF_ERROR(JniUtil::LocalToGlobalRef(env, waitingThreadStateObj, &_waitingThreadStateObj)); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + jobject, timedWaitingThreadStateObj, env, + GetStaticObjectField(_threadStateClass, timedWaitingThreadFieldID)); + RETURN_IF_ERROR(JniUtil::LocalToGlobalRef(env, timedWaitingThreadStateObj, + &_timedWaitingThreadStateObj)); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + jobject, terminatedThreadStateObj, env, + GetStaticObjectField(_threadStateClass, terminatedThreadFieldID)); + RETURN_IF_ERROR( + JniUtil::LocalToGlobalRef(env, terminatedThreadStateObj, &_terminatedThreadStateObj)); _init_complete = true; - return; + + LOG(INFO) << "Start JVM monitoring."; + return Status::OK(); } -void JvmStats::refresh(JvmMetrics* jvm_metrics) { +Status JvmStats::refresh(JvmMetrics* jvm_metrics) const { if (!_init_complete) { - return; + return Status::InternalError("Jvm Stats not init complete."); } - Status st = JniUtil::GetJNIEnv(&env); - if (!st.ok()) { - LOG(WARNING) << "JVM STATS GET JNI ENV FAIL"; - return; - } + JNIEnv* env = nullptr; + RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env)); + + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, memoryMXBeanObj, env, + CallStaticObjectMethod(_managementFactoryClass, _getMemoryMXBeanMethod)); + + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, heapMemoryUsageObj, env, + CallObjectMethod(memoryMXBeanObj, _getHeapMemoryUsageMethod)); - jobject memoryMXBeanObj = - env->CallStaticObjectMethod(_managementFactoryClass, _getMemoryMXBeanMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION(jlong, heapMemoryUsed, env, + CallLongMethod(heapMemoryUsageObj, _getMemoryUsageUsedMethod)); - jobject heapMemoryUsageObj = env->CallObjectMethod(memoryMXBeanObj, _getHeapMemoryUsageMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION( + jlong, heapMemoryCommitted, env, + CallLongMethod(heapMemoryUsageObj, _getMemoryUsageCommittedMethod)); - jlong heapMemoryUsed = env->CallLongMethod(heapMemoryUsageObj, _getMemoryUsageUsedMethod); - jlong heapMemoryCommitted = - env->CallLongMethod(heapMemoryUsageObj, _getMemoryUsageCommittedMethod); - jlong heapMemoryMax = 
env->CallLongMethod(heapMemoryUsageObj, _getMemoryUsageMaxMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION(jlong, heapMemoryMax, env, + CallLongMethod(heapMemoryUsageObj, _getMemoryUsageMaxMethod)); jvm_metrics->jvm_heap_size_bytes_used->set_value(heapMemoryUsed < 0 ? 0 : heapMemoryUsed); jvm_metrics->jvm_heap_size_bytes_committed->set_value( heapMemoryCommitted < 0 ? 0 : heapMemoryCommitted); jvm_metrics->jvm_heap_size_bytes_max->set_value(heapMemoryMax < 0 ? 0 : heapMemoryMax); - jobject nonHeapMemoryUsageObj = - env->CallObjectMethod(memoryMXBeanObj, _getNonHeapMemoryUsageMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, nonHeapMemoryUsageObj, env, + CallObjectMethod(memoryMXBeanObj, _getNonHeapMemoryUsageMethod)); - jlong nonHeapMemoryCommitted = - env->CallLongMethod(nonHeapMemoryUsageObj, _getMemoryUsageCommittedMethod); - jlong nonHeapMemoryUsed = env->CallLongMethod(nonHeapMemoryUsageObj, _getMemoryUsageUsedMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION( + jlong, nonHeapMemoryCommitted, env, + CallLongMethod(nonHeapMemoryUsageObj, _getMemoryUsageCommittedMethod)); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + jlong, nonHeapMemoryUsed, env, + CallLongMethod(nonHeapMemoryUsageObj, _getMemoryUsageUsedMethod)); jvm_metrics->jvm_non_heap_size_bytes_committed->set_value( nonHeapMemoryCommitted < 0 ? 0 : nonHeapMemoryCommitted); jvm_metrics->jvm_non_heap_size_bytes_used->set_value(nonHeapMemoryUsed < 0 ? 0 : nonHeapMemoryUsed); - jobject memoryPoolMXBeansList = - env->CallStaticObjectMethod(_managementFactoryClass, _getMemoryPoolMXBeansMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, memoryPoolMXBeansList, env, + CallStaticObjectMethod(_managementFactoryClass, _getMemoryPoolMXBeansMethod)); - jint size = env->CallIntMethod(memoryPoolMXBeansList, _getListSizeMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION(jint, size, env, + CallIntMethod(memoryPoolMXBeansList, _getListSizeMethod)); for (int i = 0; i < size; ++i) { - jobject memoryPoolMXBean = - env->CallObjectMethod(memoryPoolMXBeansList, _getListUseIndexMethod, i); - jobject usageObject = - env->CallObjectMethod(memoryPoolMXBean, _getMemoryPoolMXBeanUsageMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, memoryPoolMXBean, env, + CallObjectMethod(memoryPoolMXBeansList, _getListUseIndexMethod, i)); + + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, usageObject, env, + CallObjectMethod(memoryPoolMXBean, _getMemoryPoolMXBeanUsageMethod)); + + JNI_CALL_METHOD_CHECK_EXCEPTION(jlong, used, env, + CallLongMethod(usageObject, _getMemoryUsageUsedMethod)); + + JNI_CALL_METHOD_CHECK_EXCEPTION(jlong, max, env, + CallLongMethod(usageObject, _getMemoryUsageMaxMethod)); - jlong used = env->CallLongMethod(usageObject, _getMemoryUsageUsedMethod); - jlong max = env->CallLongMethod(usageObject, _getMemoryUsageMaxMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, peakUsageObject, env, + CallObjectMethod(memoryPoolMXBean, _getMemoryPollMXBeanPeakMethod)); - jobject peakUsageObject = - env->CallObjectMethod(memoryPoolMXBean, _getMemoryPollMXBeanPeakMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION(jlong, peakUsed, env, + CallLongMethod(peakUsageObject, _getMemoryUsageUsedMethod)); - jlong peakUsed = env->CallLongMethod(peakUsageObject, _getMemoryUsageUsedMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, name, env, + CallObjectMethod(memoryPoolMXBean, _getMemoryPollMXBeanNameMethod)); - jstring name = - (jstring)env->CallObjectMethod(memoryPoolMXBean, _getMemoryPollMXBeanNameMethod); - const char* nameStr 
= env->GetStringUTFChars(name, nullptr); + const char* nameStr = env->GetStringUTFChars( + (jstring)name, nullptr); // GetStringUTFChars not throw exception if (nameStr != nullptr) { auto it = _memoryPoolName.find(nameStr); if (it == _memoryPoolName.end()) { @@ -385,36 +456,46 @@ void JvmStats::refresh(JvmMetrics* jvm_metrics) { jvm_metrics->jvm_old_size_bytes_max->set_value(max < 0 ? 0 : max); } - env->ReleaseStringUTFChars(name, nameStr); + env->ReleaseStringUTFChars((jstring)name, + nameStr); // ReleaseStringUTFChars not throw exception } - env->DeleteLocalRef(memoryPoolMXBean); - env->DeleteLocalRef(usageObject); - env->DeleteLocalRef(peakUsageObject); } + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, threadMXBean, env, + CallStaticObjectMethod(_managementFactoryClass, _getThreadMXBeanMethod)); - jobject threadMXBean = - env->CallStaticObjectMethod(_managementFactoryClass, _getThreadMXBeanMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, threadIdsObject, env, CallObjectMethod(threadMXBean, _getAllThreadIdsMethod)); - jlongArray threadIds = (jlongArray)env->CallObjectMethod(threadMXBean, _getAllThreadIdsMethod); - jint threadCount = env->GetArrayLength(threadIds); + auto threadIds = (jlongArray)threadIdsObject; - jobjectArray threadInfos = - (jobjectArray)env->CallObjectMethod(threadMXBean, _getThreadInfoMethod, threadIds, 0); + JNI_CALL_METHOD_CHECK_EXCEPTION(jint, threadCount, env, GetArrayLength(threadIds)); + + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, threadInfos, env, + CallObjectMethod(threadMXBean, _getThreadInfoMethod, (jlongArray)threadIds, 0)); int threadsNew = 0, threadsRunnable = 0, threadsBlocked = 0, threadsWaiting = 0, threadsTimedWaiting = 0, threadsTerminated = 0; - jint peakThreadCount = env->CallIntMethod(threadMXBean, _getPeakThreadCountMethod); + + JNI_CALL_METHOD_CHECK_EXCEPTION(jint, peakThreadCount, env, + CallIntMethod(threadMXBean, _getPeakThreadCountMethod)); jvm_metrics->jvm_thread_peak_count->set_value(peakThreadCount < 0 ? 0 : peakThreadCount); jvm_metrics->jvm_thread_count->set_value(threadCount < 0 ? 0 : threadCount); for (int i = 0; i < threadCount; i++) { - jobject threadInfo = env->GetObjectArrayElement(threadInfos, i); + JNI_CALL_METHOD_CHECK_EXCEPTION(jobject, threadInfo, env, + GetObjectArrayElement((jobjectArray)threadInfos, i)); + if (threadInfo == nullptr) { continue; } - jobject threadState = env->CallObjectMethod(threadInfo, _getThreadStateMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, threadState, env, CallObjectMethod(threadInfo, _getThreadStateMethod)); + + //IsSameObject not throw exception if (env->IsSameObject(threadState, _newThreadStateObj)) { threadsNew++; } else if (env->IsSameObject(threadState, _runnableThreadStateObj)) { @@ -428,8 +509,6 @@ void JvmStats::refresh(JvmMetrics* jvm_metrics) { } else if (env->IsSameObject(threadState, _terminatedThreadStateObj)) { threadsTerminated++; } - env->DeleteLocalRef(threadInfo); - env->DeleteLocalRef(threadState); } jvm_metrics->jvm_thread_new_count->set_value(threadsNew < 0 ? 0 : threadsNew); @@ -441,18 +520,27 @@ void JvmStats::refresh(JvmMetrics* jvm_metrics) { jvm_metrics->jvm_thread_terminated_count->set_value(threadsTerminated < 0 ? 
0 : threadsTerminated); - jobject gcMXBeansList = - env->CallStaticObjectMethod(_managementFactoryClass, _getGarbageCollectorMXBeansMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, gcMXBeansList, env, + CallStaticObjectMethod(_managementFactoryClass, _getGarbageCollectorMXBeansMethod)); - jint numCollectors = env->CallIntMethod(gcMXBeansList, _getListSizeMethod); + JNI_CALL_METHOD_CHECK_EXCEPTION(jint, numCollectors, env, + CallIntMethod(gcMXBeansList, _getListSizeMethod)); for (int i = 0; i < numCollectors; i++) { - jobject gcMXBean = env->CallObjectMethod(gcMXBeansList, _getListUseIndexMethod, i); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, gcMXBean, env, CallObjectMethod(gcMXBeansList, _getListUseIndexMethod, i)); + + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF(jobject, gcName, env, + CallObjectMethod(gcMXBean, _getGCNameMethod)); - jstring gcName = (jstring)env->CallObjectMethod(gcMXBean, _getGCNameMethod); - jlong gcCollectionCount = env->CallLongMethod(gcMXBean, _getGCCollectionCountMethod); - jlong gcCollectionTime = env->CallLongMethod(gcMXBean, _getGCCollectionTimeMethod); - const char* gcNameStr = env->GetStringUTFChars(gcName, NULL); + JNI_CALL_METHOD_CHECK_EXCEPTION(jlong, gcCollectionCount, env, + CallLongMethod(gcMXBean, _getGCCollectionCountMethod)); + + JNI_CALL_METHOD_CHECK_EXCEPTION(jlong, gcCollectionTime, env, + CallLongMethod(gcMXBean, _getGCCollectionTimeMethod)); + + const char* gcNameStr = env->GetStringUTFChars((jstring)gcName, NULL); if (gcNameStr != nullptr) { if (strcmp(gcNameStr, "G1 Young Generation") == 0) { jvm_metrics->jvm_gc_g1_young_generation_count->set_value(gcCollectionCount); @@ -463,31 +551,40 @@ void JvmStats::refresh(JvmMetrics* jvm_metrics) { jvm_metrics->jvm_gc_g1_old_generation_time_ms->set_value(gcCollectionTime); } - env->ReleaseStringUTFChars(gcName, gcNameStr); + env->ReleaseStringUTFChars((jstring)gcName, gcNameStr); } - env->DeleteLocalRef(gcMXBean); } - env->DeleteLocalRef(memoryMXBeanObj); - env->DeleteLocalRef(heapMemoryUsageObj); - env->DeleteLocalRef(nonHeapMemoryUsageObj); - env->DeleteLocalRef(memoryPoolMXBeansList); - env->DeleteLocalRef(threadMXBean); - env->DeleteLocalRef(gcMXBeansList); + + return Status::OK(); } JvmStats::~JvmStats() { if (!_init_complete) { return; } try { - env->DeleteLocalRef(_newThreadStateObj); - env->DeleteLocalRef(_runnableThreadStateObj); - env->DeleteLocalRef(_blockedThreadStateObj); - env->DeleteLocalRef(_waitingThreadStateObj); - env->DeleteLocalRef(_timedWaitingThreadStateObj); - env->DeleteLocalRef(_terminatedThreadStateObj); + JNIEnv* env = nullptr; + Status st = JniUtil::GetJNIEnv(&env); + if (!st.ok()) { + return; + } + env->DeleteGlobalRef(_managementFactoryClass); + env->DeleteGlobalRef(_memoryUsageClass); + env->DeleteGlobalRef(_memoryMXBeanClass); + env->DeleteGlobalRef(_listClass); + env->DeleteGlobalRef(_memoryPoolMXBeanClass); + env->DeleteGlobalRef(_threadMXBeanClass); + env->DeleteGlobalRef(_threadInfoClass); + env->DeleteGlobalRef(_threadStateClass); + env->DeleteGlobalRef(_garbageCollectorMXBeanClass); + + env->DeleteGlobalRef(_newThreadStateObj); + env->DeleteGlobalRef(_runnableThreadStateObj); + env->DeleteGlobalRef(_blockedThreadStateObj); + env->DeleteGlobalRef(_waitingThreadStateObj); + env->DeleteGlobalRef(_timedWaitingThreadStateObj); + env->DeleteGlobalRef(_terminatedThreadStateObj); } catch (...) { - // When be is killed, DeleteLocalRef may fail. // In order to exit more gracefully, we catch the exception here. 
} } diff --git a/be/src/util/jvm_metrics.h b/be/src/util/jvm_metrics.h index 459a3cbf938f79..78346c022b0aba 100644 --- a/be/src/util/jvm_metrics.h +++ b/be/src/util/jvm_metrics.h @@ -27,7 +27,6 @@ class JvmMetrics; class JvmStats { private: - JNIEnv* env = nullptr; jclass _managementFactoryClass = nullptr; jmethodID _getMemoryMXBeanMethod = nullptr; jclass _memoryUsageClass = nullptr; @@ -96,11 +95,10 @@ class JvmStats { bool _init_complete = false; public: - // JvmStats(JNIEnv* ENV); - void init(JNIEnv* ENV); + Status init(JNIEnv* env); bool init_complete() const { return _init_complete; } void set_complete(bool val) { _init_complete = val; } - void refresh(JvmMetrics* jvm_metrics); + Status refresh(JvmMetrics* jvm_metrics) const; ~JvmStats(); }; diff --git a/be/src/util/murmur_hash3.cpp b/be/src/util/murmur_hash3.cpp index 96568d6978e225..edd1c44f338473 100644 --- a/be/src/util/murmur_hash3.cpp +++ b/be/src/util/murmur_hash3.cpp @@ -85,7 +85,7 @@ FORCE_INLINE uint64_t fmix64(uint64_t k) { //----------------------------------------------------------------------------- -void murmur_hash3_x86_32(const void* key, int len, uint32_t seed, void* out) { +void murmur_hash3_x86_32(const void* key, int64_t len, uint32_t seed, void* out) { const uint8_t* data = (const uint8_t*)key; const int nblocks = len / 4; @@ -435,7 +435,7 @@ void murmur_hash3_x64_128(const void* key, const int len, const uint32_t seed, v ((uint64_t*)out)[1] = h2; } -void murmur_hash3_x64_64(const void* key, const int len, const uint64_t seed, void* out) { +void murmur_hash3_x64_64(const void* key, const int64_t len, const uint64_t seed, void* out) { const uint8_t* data = (const uint8_t*)key; const int nblocks = len / 8; uint64_t h1 = seed; diff --git a/be/src/util/murmur_hash3.h b/be/src/util/murmur_hash3.h index c8e8964bf6a20e..249966460221a3 100644 --- a/be/src/util/murmur_hash3.h +++ b/be/src/util/murmur_hash3.h @@ -25,12 +25,12 @@ typedef unsigned __int64 uint64_t; //----------------------------------------------------------------------------- -void murmur_hash3_x86_32(const void* key, int len, uint32_t seed, void* out); +void murmur_hash3_x86_32(const void* key, int64_t len, uint32_t seed, void* out); void murmur_hash3_x86_128(const void* key, int len, uint32_t seed, void* out); void murmur_hash3_x64_128(const void* key, int len, uint32_t seed, void* out); -void murmur_hash3_x64_64(const void* key, int len, uint64_t seed, void* out); +void murmur_hash3_x64_64(const void* key, int64_t len, uint64_t seed, void* out); //----------------------------------------------------------------------------- diff --git a/be/src/util/string_parser.hpp b/be/src/util/string_parser.hpp index 67ab41cc1c7bee..5771434c4c6321 100644 --- a/be/src/util/string_parser.hpp +++ b/be/src/util/string_parser.hpp @@ -128,7 +128,7 @@ class StringParser { // Convert a string s representing a number in given base into a decimal number. template - static inline T string_to_int(const char* __restrict s, int len, int base, + static inline T string_to_int(const char* __restrict s, int64_t len, int base, ParseResult* result) { T ans = string_to_int_internal(s, len, base, result); if (LIKELY(*result == PARSE_SUCCESS)) { @@ -207,7 +207,7 @@ class StringParser { // Convert a string s representing a number in given base into a decimal number. // Return PARSE_FAILURE on leading whitespace. Trailing whitespace is allowed. 
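The murmur_hash3 and StringParser signatures above widen their length parameters from int to int64_t so that lengths over 2 GiB are not silently truncated on the way in. A tiny illustration of the hazard the wider type avoids:

#include <cstdint>
#include <iostream>

int main() {
    int64_t len = 3LL * 1024 * 1024 * 1024;        // a 3 GiB buffer length
    auto narrowed = static_cast<int32_t>(len);     // what an int parameter would receive
    std::cout << "int64_t length  = " << len << "\n";      // 3221225472
    std::cout << "narrowed to int = " << narrowed << "\n"; // -1073741824 on typical platforms
    return 0;
}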
template - static inline T string_to_int_internal(const char* __restrict s, int len, int base, + static inline T string_to_int_internal(const char* __restrict s, int64_t len, int base, ParseResult* result); // Converts an ascii string to an integer of type T assuming it cannot overflow @@ -385,7 +385,7 @@ T StringParser::string_to_unsigned_int_internal(const char* __restrict s, int le } template -T StringParser::string_to_int_internal(const char* __restrict s, int len, int base, +T StringParser::string_to_int_internal(const char* __restrict s, int64_t len, int base, ParseResult* result) { typedef typename std::make_unsigned::type UnsignedT; UnsignedT val = 0; diff --git a/be/src/vec/aggregate_functions/aggregate_function.h b/be/src/vec/aggregate_functions/aggregate_function.h index cd1f8922e1b459..f67fe14fa42600 100644 --- a/be/src/vec/aggregate_functions/aggregate_function.h +++ b/be/src/vec/aggregate_functions/aggregate_function.h @@ -115,21 +115,21 @@ class IAggregateFunction { * Additional parameter arena should be used instead of standard memory allocator if the addition requires memory allocation. */ virtual void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, - Arena* arena) const = 0; + Arena*) const = 0; virtual void add_many(AggregateDataPtr __restrict place, const IColumn** columns, - std::vector& rows, Arena* arena) const {} + std::vector& rows, Arena*) const {} /// Merges state (on which place points to) with other state of current aggregation function. virtual void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, - Arena* arena) const = 0; + Arena*) const = 0; virtual void merge_vec(const AggregateDataPtr* places, size_t offset, ConstAggregateDataPtr rhs, - Arena* arena, const size_t num_rows) const = 0; + Arena*, const size_t num_rows) const = 0; // same as merge_vec, but only call "merge" function when place is not nullptr virtual void merge_vec_selected(const AggregateDataPtr* places, size_t offset, - ConstAggregateDataPtr rhs, Arena* arena, + ConstAggregateDataPtr rhs, Arena*, const size_t num_rows) const = 0; /// Serializes state (to transmit it over the network, for example). @@ -146,21 +146,21 @@ class IAggregateFunction { /// Deserializes state. This function is called only for empty (just created) states. virtual void deserialize(AggregateDataPtr __restrict place, BufferReadable& buf, - Arena* arena) const = 0; + Arena*) const = 0; - virtual void deserialize_vec(AggregateDataPtr places, const ColumnString* column, Arena* arena, + virtual void deserialize_vec(AggregateDataPtr places, const ColumnString* column, Arena*, size_t num_rows) const = 0; virtual void deserialize_and_merge_vec(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const = 0; + AggregateDataPtr rhs, const IColumn* column, Arena*, + const size_t num_rows) const = 0; virtual void deserialize_and_merge_vec_selected(const AggregateDataPtr* places, size_t offset, AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const = 0; + Arena*, const size_t num_rows) const = 0; - virtual void deserialize_from_column(AggregateDataPtr places, const IColumn& column, - Arena* arena, size_t num_rows) const = 0; + virtual void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena*, + size_t num_rows) const = 0; /// Deserializes state and merge it with current aggregation function. 
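The IAggregateFunction changes above, and the implementations that follow, drop the names of Arena* parameters that a given override never touches: the signature, and therefore every call site, stays the same, while -Wunused-parameter has nothing left to flag. The idiom in isolation, with hypothetical types:

struct Arena;  // opaque placeholder; only ever passed by pointer here

struct RowCounter {
    long rows = 0;
    // The Arena* stays in the signature so callers are unchanged, but it is
    // left unnamed because this implementation never allocates from it.
    void add(const void* /*row*/, Arena*) { ++rows; }
};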
virtual void deserialize_and_merge(AggregateDataPtr __restrict place, @@ -169,10 +169,10 @@ class IAggregateFunction { virtual void deserialize_and_merge_from_column_range(AggregateDataPtr __restrict place, const IColumn& column, size_t begin, - size_t end, Arena* arena) const = 0; + size_t end, Arena*) const = 0; virtual void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, - const IColumn& column, Arena* arena) const = 0; + const IColumn& column, Arena*) const = 0; /// Inserts results into a column. virtual void insert_result_into(ConstAggregateDataPtr __restrict place, IColumn& to) const = 0; @@ -185,33 +185,32 @@ class IAggregateFunction { * and do a single call to "add_batch" for devirtualization and inlining. */ virtual void add_batch(size_t batch_size, AggregateDataPtr* places, size_t place_offset, - const IColumn** columns, Arena* arena, bool agg_many = false) const = 0; + const IColumn** columns, Arena*, bool agg_many = false) const = 0; // same as add_batch, but only call "add" function when place is not nullptr virtual void add_batch_selected(size_t batch_size, AggregateDataPtr* places, - size_t place_offset, const IColumn** columns, - Arena* arena) const = 0; + size_t place_offset, const IColumn** columns, Arena*) const = 0; /** The same for single place. */ virtual void add_batch_single_place(size_t batch_size, AggregateDataPtr place, - const IColumn** columns, Arena* arena) const = 0; + const IColumn** columns, Arena*) const = 0; // only used at agg reader virtual void add_batch_range(size_t batch_begin, size_t batch_end, AggregateDataPtr place, - const IColumn** columns, Arena* arena, bool has_null = false) = 0; + const IColumn** columns, Arena*, bool has_null = false) = 0; // only used at window function virtual void add_range_single_place(int64_t partition_start, int64_t partition_end, int64_t frame_start, int64_t frame_end, AggregateDataPtr place, const IColumn** columns, - Arena* arena) const = 0; + Arena*) const = 0; virtual void streaming_agg_serialize(const IColumn** columns, BufferWritable& buf, - const size_t num_rows, Arena* arena) const = 0; + const size_t num_rows, Arena*) const = 0; virtual void streaming_agg_serialize_to_column(const IColumn** columns, MutableColumnPtr& dst, - const size_t num_rows, Arena* arena) const = 0; + const size_t num_rows, Arena*) const = 0; const DataTypes& get_argument_types() const { return argument_types; } diff --git a/be/src/vec/aggregate_functions/aggregate_function_avg.h b/be/src/vec/aggregate_functions/aggregate_function_avg.h index 8a18a88839b4db..62fbb8078ea949 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_avg.h +++ b/be/src/vec/aggregate_functions/aggregate_function_avg.h @@ -184,7 +184,7 @@ class AggregateFunctionAvg final column.get_data().push_back(this->data(place).template result()); } - void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena* arena, + void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena*, size_t num_rows) const override { auto& col = assert_cast(column); DCHECK(col.size() >= num_rows) << "source column's size should greater than num_rows"; @@ -205,7 +205,7 @@ class AggregateFunctionAvg final } void streaming_agg_serialize_to_column(const IColumn** columns, MutableColumnPtr& dst, - const size_t num_rows, Arena* arena) const override { + const size_t num_rows, Arena*) const override { auto* src_data = assert_cast(*columns[0]).get_data().data(); auto& dst_col = assert_cast(*dst); 
dst_col.set_item_size(sizeof(Data)); @@ -219,7 +219,7 @@ class AggregateFunctionAvg final } void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, const IColumn& column, - Arena* arena) const override { + Arena*) const override { auto& col = assert_cast(column); const size_t num_rows = column.size(); DCHECK(col.size() >= num_rows) << "source column's size should greater than num_rows"; @@ -233,7 +233,7 @@ class AggregateFunctionAvg final void deserialize_and_merge_from_column_range(AggregateDataPtr __restrict place, const IColumn& column, size_t begin, size_t end, - Arena* arena) const override { + Arena*) const override { DCHECK(end <= column.size() && begin <= end) << ", begin:" << begin << ", end:" << end << ", column.size():" << column.size(); auto& col = assert_cast(column); @@ -245,19 +245,19 @@ class AggregateFunctionAvg final } void deserialize_and_merge_vec(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, Arena* arena, + AggregateDataPtr rhs, const IColumn* column, Arena*, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec(places, offset, rhs, arena, num_rows); + this->merge_vec(places, offset, rhs, nullptr, num_rows); } void deserialize_and_merge_vec_selected(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + AggregateDataPtr rhs, const IColumn* column, Arena*, + const size_t num_rows) const override { + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec_selected(places, offset, rhs, arena, num_rows); + this->merge_vec_selected(places, offset, rhs, nullptr, num_rows); } void serialize_without_key_to_column(ConstAggregateDataPtr __restrict place, diff --git a/be/src/vec/aggregate_functions/aggregate_function_bitmap.h b/be/src/vec/aggregate_functions/aggregate_function_bitmap.h index 6c504b91bf4abd..b0619a63e1ffe8 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_bitmap.h +++ b/be/src/vec/aggregate_functions/aggregate_function_bitmap.h @@ -159,7 +159,7 @@ class AggregateFunctionBitmapSerializationHelper : IAggregateFunctionDataHelper(argument_types_) {} void streaming_agg_serialize_to_column(const IColumn** columns, MutableColumnPtr& dst, - const size_t num_rows, Arena* arena) const override { + const size_t num_rows, Arena*) const override { if (version >= BITMAP_SERDE) { auto& col = assert_cast(*dst); char place[sizeof(Data)]; @@ -171,11 +171,11 @@ class AggregateFunctionBitmapSerializationHelper assert_cast(this)->destroy(place); }); assert_cast(this)->add(place, columns, - i, arena); + i, nullptr); data[i] = std::move(this->data(place).value); } } else { - BaseHelper::streaming_agg_serialize_to_column(columns, dst, num_rows, arena); + BaseHelper::streaming_agg_serialize_to_column(columns, dst, num_rows, nullptr); } } @@ -194,7 +194,7 @@ class AggregateFunctionBitmapSerializationHelper } void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, const IColumn& column, - Arena* arena) const override { + Arena*) const override { if (version >= BITMAP_SERDE) { auto& col = assert_cast(column); const size_t num_rows = column.size(); @@ -204,13 +204,13 @@ class 
AggregateFunctionBitmapSerializationHelper this->data(place).merge(data[i]); } } else { - BaseHelper::deserialize_and_merge_from_column(place, column, arena); + BaseHelper::deserialize_and_merge_from_column(place, column, nullptr); } } void deserialize_and_merge_from_column_range(AggregateDataPtr __restrict place, const IColumn& column, size_t begin, size_t end, - Arena* arena) const override { + Arena*) const override { DCHECK(end <= column.size() && begin <= end) << ", begin:" << begin << ", end:" << end << ", column.size():" << column.size(); if (version >= BITMAP_SERDE) { @@ -220,12 +220,12 @@ class AggregateFunctionBitmapSerializationHelper this->data(place).merge(data[i]); } } else { - BaseHelper::deserialize_and_merge_from_column_range(place, column, begin, end, arena); + BaseHelper::deserialize_and_merge_from_column_range(place, column, begin, end, nullptr); } } void deserialize_and_merge_vec(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, Arena* arena, + AggregateDataPtr rhs, const IColumn* column, Arena*, const size_t num_rows) const override { if (version >= BITMAP_SERDE) { const auto& col = assert_cast(*column); @@ -234,13 +234,13 @@ class AggregateFunctionBitmapSerializationHelper this->data(places[i] + offset).merge(data[i]); } } else { - BaseHelper::deserialize_and_merge_vec(places, offset, rhs, column, arena, num_rows); + BaseHelper::deserialize_and_merge_vec(places, offset, rhs, column, nullptr, num_rows); } } void deserialize_and_merge_vec_selected(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const override { + AggregateDataPtr rhs, const IColumn* column, Arena*, + const size_t num_rows) const override { if (version >= BITMAP_SERDE) { const auto& col = assert_cast(*column); const auto* data = col.get_data().data(); @@ -250,7 +250,7 @@ class AggregateFunctionBitmapSerializationHelper } } } else { - BaseHelper::deserialize_and_merge_vec_selected(places, offset, rhs, column, arena, + BaseHelper::deserialize_and_merge_vec_selected(places, offset, rhs, column, nullptr, num_rows); } } diff --git a/be/src/vec/aggregate_functions/aggregate_function_bitmap_agg.h b/be/src/vec/aggregate_functions/aggregate_function_bitmap_agg.h index 19352e022fa7a2..5747faf1b8e8c1 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_bitmap_agg.h +++ b/be/src/vec/aggregate_functions/aggregate_function_bitmap_agg.h @@ -72,7 +72,7 @@ class AggregateFunctionBitmapAgg final DataTypePtr get_return_type() const override { return std::make_shared(); } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { + Arena*) const override { DCHECK_LT(row_num, columns[0]->size()); if constexpr (arg_nullable) { auto& nullable_col = @@ -90,7 +90,7 @@ class AggregateFunctionBitmapAgg final } void add_batch_single_place(size_t batch_size, AggregateDataPtr place, const IColumn** columns, - Arena* arena) const override { + Arena*) const override { if constexpr (arg_nullable) { auto& nullable_column = assert_cast(*columns[0]); const auto& column = @@ -111,7 +111,7 @@ class AggregateFunctionBitmapAgg final void reset(AggregateDataPtr place) const override { this->data(place).reset(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, - Arena* arena) const override { + Arena*) const override { this->data(place).merge(this->data(rhs)); } @@ -130,7 +130,7 @@ class AggregateFunctionBitmapAgg final } 
void streaming_agg_serialize_to_column(const IColumn** columns, MutableColumnPtr& dst, - const size_t num_rows, Arena* arena) const override { + const size_t num_rows, Arena*) const override { auto& col = assert_cast(*dst); char place[sizeof(Data)]; col.resize(num_rows); @@ -138,12 +138,12 @@ class AggregateFunctionBitmapAgg final for (size_t i = 0; i != num_rows; ++i) { this->create(place); DEFER({ this->destroy(place); }); - this->add(place, columns, i, arena); + this->add(place, columns, i, nullptr); data[i] = std::move(this->data(place).value); } } - void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena* arena, + void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena*, size_t num_rows) const override { auto& col = assert_cast(column); DCHECK(col.size() >= num_rows) << "source column's size should greater than num_rows"; @@ -165,7 +165,7 @@ class AggregateFunctionBitmapAgg final } void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, const IColumn& column, - Arena* arena) const override { + Arena*) const override { auto& col = assert_cast(column); const size_t num_rows = column.size(); auto* data = col.get_data().data(); @@ -177,7 +177,7 @@ class AggregateFunctionBitmapAgg final void deserialize_and_merge_from_column_range(AggregateDataPtr __restrict place, const IColumn& column, size_t begin, size_t end, - Arena* arena) const override { + Arena*) const override { DCHECK(end <= column.size() && begin <= end) << ", begin:" << begin << ", end:" << end << ", column.size():" << column.size(); auto& col = assert_cast(column); @@ -188,7 +188,7 @@ class AggregateFunctionBitmapAgg final } void deserialize_and_merge_vec(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, Arena* arena, + AggregateDataPtr rhs, const IColumn* column, Arena*, const size_t num_rows) const override { const auto& col = assert_cast(*column); const auto* data = col.get_data().data(); @@ -198,8 +198,8 @@ class AggregateFunctionBitmapAgg final } void deserialize_and_merge_vec_selected(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const override { + AggregateDataPtr rhs, const IColumn* column, Arena*, + const size_t num_rows) const override { const auto& col = assert_cast(*column); const auto* data = col.get_data().data(); for (size_t i = 0; i != num_rows; ++i) { diff --git a/be/src/vec/aggregate_functions/aggregate_function_count.h b/be/src/vec/aggregate_functions/aggregate_function_count.h index 62aa869771c0a5..7b54d074683b04 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_count.h +++ b/be/src/vec/aggregate_functions/aggregate_function_count.h @@ -91,7 +91,7 @@ class AggregateFunctionCount final assert_cast(to).get_data().push_back(data(place).count); } - void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena* arena, + void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena*, size_t num_rows) const override { auto data = assert_cast(column).get_data().data(); memcpy(places, data, sizeof(Data) * num_rows); @@ -111,7 +111,7 @@ class AggregateFunctionCount final } void streaming_agg_serialize_to_column(const IColumn** columns, MutableColumnPtr& dst, - const size_t num_rows, Arena* arena) const override { + const size_t num_rows, Arena*) const override { auto& dst_col = assert_cast(*dst); DCHECK(dst_col.item_size() == sizeof(Data)) << "size is not equal: 
" << dst_col.item_size() << " " << sizeof(Data); @@ -124,7 +124,7 @@ class AggregateFunctionCount final } void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, const IColumn& column, - Arena* arena) const override { + Arena*) const override { auto& col = assert_cast(column); const size_t num_rows = column.size(); auto* data = reinterpret_cast(col.get_data().data()); @@ -135,7 +135,7 @@ class AggregateFunctionCount final void deserialize_and_merge_from_column_range(AggregateDataPtr __restrict place, const IColumn& column, size_t begin, size_t end, - Arena* arena) const override { + Arena*) const override { DCHECK(end <= column.size() && begin <= end) << ", begin:" << begin << ", end:" << end << ", column.size():" << column.size(); auto& col = assert_cast(column); @@ -146,19 +146,19 @@ class AggregateFunctionCount final } void deserialize_and_merge_vec(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, Arena* arena, + AggregateDataPtr rhs, const IColumn* column, Arena*, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec(places, offset, rhs, arena, num_rows); + this->merge_vec(places, offset, rhs, nullptr, num_rows); } void deserialize_and_merge_vec_selected(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + AggregateDataPtr rhs, const IColumn* column, Arena*, + const size_t num_rows) const override { + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec_selected(places, offset, rhs, arena, num_rows); + this->merge_vec_selected(places, offset, rhs, nullptr, num_rows); } void serialize_without_key_to_column(ConstAggregateDataPtr __restrict place, @@ -229,7 +229,7 @@ class AggregateFunctionCountNotNullUnary final } } - void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena* arena, + void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena*, size_t num_rows) const override { auto data = assert_cast(column).get_data().data(); memcpy(places, data, sizeof(Data) * num_rows); @@ -249,7 +249,7 @@ class AggregateFunctionCountNotNullUnary final } void streaming_agg_serialize_to_column(const IColumn** columns, MutableColumnPtr& dst, - const size_t num_rows, Arena* arena) const override { + const size_t num_rows, Arena*) const override { auto& col = assert_cast(*dst); DCHECK(col.item_size() == sizeof(Data)) << "size is not equal: " << col.item_size() << " " << sizeof(Data); @@ -263,7 +263,7 @@ class AggregateFunctionCountNotNullUnary final } void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, const IColumn& column, - Arena* arena) const override { + Arena*) const override { auto& col = assert_cast(column); const size_t num_rows = column.size(); auto* data = reinterpret_cast(col.get_data().data()); @@ -274,7 +274,7 @@ class AggregateFunctionCountNotNullUnary final void deserialize_and_merge_from_column_range(AggregateDataPtr __restrict place, const IColumn& column, size_t begin, size_t end, - Arena* arena) const override { + Arena*) const override { DCHECK(end <= column.size() && begin <= end) << ", begin:" << begin << ", end:" << end << ", 
column.size():" << column.size(); auto& col = assert_cast(column); @@ -286,19 +286,19 @@ class AggregateFunctionCountNotNullUnary final } void deserialize_and_merge_vec(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, Arena* arena, + AggregateDataPtr rhs, const IColumn* column, Arena*, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec(places, offset, rhs, arena, num_rows); + this->merge_vec(places, offset, rhs, nullptr, num_rows); } void deserialize_and_merge_vec_selected(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + AggregateDataPtr rhs, const IColumn* column, Arena*, + const size_t num_rows) const override { + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec_selected(places, offset, rhs, arena, num_rows); + this->merge_vec_selected(places, offset, rhs, nullptr, num_rows); } void serialize_without_key_to_column(ConstAggregateDataPtr __restrict place, diff --git a/be/src/vec/aggregate_functions/aggregate_function_count_by_enum.h b/be/src/vec/aggregate_functions/aggregate_function_count_by_enum.h index 5d4a3dde3550a1..1f5093de68263e 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_count_by_enum.h +++ b/be/src/vec/aggregate_functions/aggregate_function_count_by_enum.h @@ -197,7 +197,7 @@ class AggregateFunctionCountByEnum final DataTypePtr get_return_type() const override { return std::make_shared(); } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { + Arena*) const override { for (int i = 0; i < arg_count; i++) { const auto* nullable_column = check_and_get_column(columns[i]); if (nullable_column == nullptr) { @@ -217,7 +217,7 @@ class AggregateFunctionCountByEnum final void reset(AggregateDataPtr place) const override { this->data(place).reset(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, - Arena* arena) const override { + Arena*) const override { this->data(place).merge(this->data(rhs)); } diff --git a/be/src/vec/aggregate_functions/aggregate_function_group_concat.h b/be/src/vec/aggregate_functions/aggregate_function_group_concat.h index a62ffb8da619f9..a0cac9ab78016d 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_group_concat.h +++ b/be/src/vec/aggregate_functions/aggregate_function_group_concat.h @@ -43,20 +43,27 @@ class IColumn; namespace doris::vectorized { struct AggregateFunctionGroupConcatData { - std::string data; + ColumnString::Chars data; std::string separator; bool inited = false; void add(StringRef ref, StringRef sep) { + auto delta_size = ref.size; if (!inited) { - inited = true; separator.assign(sep.data, sep.data + sep.size); } else { - data += separator; + delta_size += separator.size(); } + auto offset = data.size(); + data.resize(data.size() + delta_size); - data.resize(data.length() + ref.size); - memcpy(data.data() + data.length() - ref.size, ref.data, ref.size); + if (!inited) { + inited = true; + } else { + memcpy(data.data() + offset, separator.data(), separator.size()); + offset += separator.size(); + } + memcpy(data.data() + offset, ref.data, ref.size); } void 
merge(const AggregateFunctionGroupConcatData& rhs) { @@ -67,17 +74,23 @@ struct AggregateFunctionGroupConcatData { if (!inited) { inited = true; separator = rhs.separator; - data = rhs.data; + data.assign(rhs.data); } else { - data += separator; - data += rhs.data; + auto offset = data.size(); + + auto delta_size = separator.size() + rhs.data.size(); + data.resize(data.size() + delta_size); + + memcpy(data.data() + offset, separator.data(), separator.size()); + offset += separator.size(); + memcpy(data.data() + offset, rhs.data.data(), rhs.data.size()); } } - const std::string& get() const { return data; } + StringRef get() const { return StringRef {data.data(), data.size()}; } void write(BufferWritable& buf) const { - write_binary(data, buf); + write_binary(StringRef {data.data(), data.size()}, buf); write_binary(separator, buf); write_binary(inited, buf); } @@ -89,7 +102,7 @@ struct AggregateFunctionGroupConcatData { } void reset() { - data = ""; + data.clear(); separator = ""; inited = false; } @@ -150,8 +163,8 @@ class AggregateFunctionGroupConcat final } void insert_result_into(ConstAggregateDataPtr __restrict place, IColumn& to) const override { - const std::string& result = this->data(place).get(); - assert_cast(to).insert_data(result.c_str(), result.length()); + const auto result = this->data(place).get(); + assert_cast(to).insert_data(result.data, result.size); } }; diff --git a/be/src/vec/aggregate_functions/aggregate_function_histogram.h b/be/src/vec/aggregate_functions/aggregate_function_histogram.h index 25fc6957321586..1d2c5725ed370f 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_histogram.h +++ b/be/src/vec/aggregate_functions/aggregate_function_histogram.h @@ -192,7 +192,7 @@ class AggregateFunctionHistogram final DataTypePtr get_return_type() const override { return std::make_shared(); } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { + Arena*) const override { if constexpr (has_input_param) { Int32 input_max_num_buckets = assert_cast(columns[1])->get_element(row_num); @@ -220,7 +220,7 @@ class AggregateFunctionHistogram final void reset(AggregateDataPtr place) const override { this->data(place).reset(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, - Arena* arena) const override { + Arena*) const override { this->data(place).merge(this->data(rhs)); } diff --git a/be/src/vec/aggregate_functions/aggregate_function_hll_union_agg.h b/be/src/vec/aggregate_functions/aggregate_function_hll_union_agg.h index 1cf6dc7f2a29a9..44835194eb4b88 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_hll_union_agg.h +++ b/be/src/vec/aggregate_functions/aggregate_function_hll_union_agg.h @@ -122,7 +122,7 @@ class AggregateFunctionHLLUnion } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { + Arena*) const override { this->data(place).add(columns[0], row_num); } diff --git a/be/src/vec/aggregate_functions/aggregate_function_java_udaf.h b/be/src/vec/aggregate_functions/aggregate_function_java_udaf.h index d314cba7a656a9..d16da1a34e66e3 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_java_udaf.h +++ b/be/src/vec/aggregate_functions/aggregate_function_java_udaf.h @@ -148,6 +148,7 @@ struct AggregateJavaUdafData { jbyteArray arr = env->NewByteArray(len); env->SetByteArrayRegion(arr, 0, len, reinterpret_cast(serialize_data.data())); env->CallNonvirtualVoidMethod(executor_obj, executor_cl, 
executor_merge_id, place, arr); + RETURN_IF_ERROR(JniUtil::GetJniExceptionMsg(env)); jbyte* pBytes = env->GetByteArrayElements(arr, nullptr); env->ReleaseByteArrayElements(arr, pBytes, JNI_ABORT); env->DeleteLocalRef(arr); @@ -332,7 +333,7 @@ class AggregateJavaUdaf final } void add_batch(size_t batch_size, AggregateDataPtr* places, size_t place_offset, - const IColumn** columns, Arena* /*arena*/, bool /*agg_many*/) const override { + const IColumn** columns, Arena*, bool /*agg_many*/) const override { int64_t places_address = reinterpret_cast(places); Status st = this->data(_exec_place) .add(places_address, false, columns, 0, batch_size, argument_types, @@ -343,7 +344,7 @@ class AggregateJavaUdaf final } void add_batch_single_place(size_t batch_size, AggregateDataPtr place, const IColumn** columns, - Arena* /*arena*/) const override { + Arena*) const override { int64_t places_address = reinterpret_cast(place); Status st = this->data(_exec_place) .add(places_address, true, columns, 0, batch_size, argument_types, 0); @@ -354,7 +355,7 @@ class AggregateJavaUdaf final void add_range_single_place(int64_t partition_start, int64_t partition_end, int64_t frame_start, int64_t frame_end, AggregateDataPtr place, const IColumn** columns, - Arena* arena) const override { + Arena*) const override { frame_start = std::max(frame_start, partition_start); frame_end = std::min(frame_end, partition_end); int64_t places_address = reinterpret_cast(place); diff --git a/be/src/vec/aggregate_functions/aggregate_function_linear_histogram.h b/be/src/vec/aggregate_functions/aggregate_function_linear_histogram.h index 80572e4c2235db..173324b9463750 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_linear_histogram.h +++ b/be/src/vec/aggregate_functions/aggregate_function_linear_histogram.h @@ -199,7 +199,7 @@ class AggregateFunctionLinearHistogram final DataTypePtr get_return_type() const override { return std::make_shared(); } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { + Arena*) const override { double interval = assert_cast(*columns[1]) .get_data()[row_num]; @@ -233,7 +233,7 @@ class AggregateFunctionLinearHistogram final void reset(AggregateDataPtr place) const override { this->data(place).reset(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, - Arena* arena) const override { + Arena*) const override { this->data(place).merge(this->data(rhs)); } diff --git a/be/src/vec/aggregate_functions/aggregate_function_map.h b/be/src/vec/aggregate_functions/aggregate_function_map.h index d56cbf21f31136..3ec25cdc706152 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_map.h +++ b/be/src/vec/aggregate_functions/aggregate_function_map.h @@ -203,7 +203,7 @@ class AggregateFunctionMapAgg final } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { + Arena*) const override { if (columns[0]->is_nullable()) { const auto& nullable_col = assert_cast(*columns[0]); @@ -234,7 +234,7 @@ class AggregateFunctionMapAgg final void reset(AggregateDataPtr place) const override { this->data(place).reset(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, - Arena* arena) const override { + Arena*) const override { this->data(place).merge(this->data(rhs)); } @@ -248,7 +248,7 @@ class AggregateFunctionMapAgg final } void streaming_agg_serialize_to_column(const IColumn** columns, MutableColumnPtr& dst, - const size_t num_rows, 
Arena* arena) const override { + const size_t num_rows, Arena*) const override { auto& col = assert_cast(*dst); for (size_t i = 0; i != num_rows; ++i) { Field key, value; @@ -263,7 +263,7 @@ class AggregateFunctionMapAgg final } } - void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena* arena, + void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena*, size_t num_rows) const override { const auto& col = assert_cast(column); auto* data = &(this->data(places)); @@ -282,7 +282,7 @@ class AggregateFunctionMapAgg final } void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, const IColumn& column, - Arena* arena) const override { + Arena*) const override { auto& col = assert_cast(column); const size_t num_rows = column.size(); for (size_t i = 0; i != num_rows; ++i) { @@ -293,7 +293,7 @@ class AggregateFunctionMapAgg final void deserialize_and_merge_from_column_range(AggregateDataPtr __restrict place, const IColumn& column, size_t begin, size_t end, - Arena* arena) const override { + Arena*) const override { DCHECK(end <= column.size() && begin <= end) << ", begin:" << begin << ", end:" << end << ", column.size():" << column.size(); const auto& col = assert_cast(column); @@ -304,7 +304,7 @@ class AggregateFunctionMapAgg final } void deserialize_and_merge_vec(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, Arena* arena, + AggregateDataPtr rhs, const IColumn* column, Arena*, const size_t num_rows) const override { const auto& col = assert_cast(*column); for (size_t i = 0; i != num_rows; ++i) { @@ -314,8 +314,8 @@ class AggregateFunctionMapAgg final } void deserialize_and_merge_vec_selected(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const override { + AggregateDataPtr rhs, const IColumn* column, Arena*, + const size_t num_rows) const override { const auto& col = assert_cast(*column); for (size_t i = 0; i != num_rows; ++i) { if (places[i]) { diff --git a/be/src/vec/aggregate_functions/aggregate_function_min_max.h b/be/src/vec/aggregate_functions/aggregate_function_min_max.h index a5423cd72f511a..efc2854ff149c8 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_min_max.h +++ b/be/src/vec/aggregate_functions/aggregate_function_min_max.h @@ -104,7 +104,7 @@ struct SingleValueDataFixed { } } - void read(BufferReadable& buf, Arena* arena) { + void read(BufferReadable& buf, Arena*) { read_binary(has_value, buf); if (has()) { read_binary(value, buf); @@ -123,53 +123,53 @@ struct SingleValueDataFixed { value = to.value; } - bool change_if_less(const IColumn& column, size_t row_num, Arena* arena) { + bool change_if_less(const IColumn& column, size_t row_num, Arena*) { if (!has() || assert_cast&, TypeCheckOnRelease::DISABLE>(column) .get_data()[row_num] < value) { - change(column, row_num, arena); + change(column, row_num, nullptr); return true; } else { return false; } } - bool change_if_less(const Self& to, Arena* arena) { + bool change_if_less(const Self& to, Arena*) { if (to.has() && (!has() || to.value < value)) { - change(to, arena); + change(to, nullptr); return true; } else { return false; } } - bool change_if_greater(const IColumn& column, size_t row_num, Arena* arena) { + bool change_if_greater(const IColumn& column, size_t row_num, Arena*) { if (!has() || assert_cast&, TypeCheckOnRelease::DISABLE>(column) .get_data()[row_num] > value) { - change(column, row_num, arena); + 
change(column, row_num, nullptr); return true; } else { return false; } } - bool change_if_greater(const Self& to, Arena* arena) { + bool change_if_greater(const Self& to, Arena*) { if (to.has() && (!has() || to.value > value)) { - change(to, arena); + change(to, nullptr); return true; } else { return false; } } - void change_first_time(const IColumn& column, size_t row_num, Arena* arena) { + void change_first_time(const IColumn& column, size_t row_num, Arena*) { if (UNLIKELY(!has())) { - change(column, row_num, arena); + change(column, row_num, nullptr); } } - void change_first_time(const Self& to, Arena* arena) { + void change_first_time(const Self& to, Arena*) { if (UNLIKELY(!has() && to.has())) { - change(to, arena); + change(to, nullptr); } } }; @@ -226,7 +226,7 @@ struct SingleValueDataDecimal { } } - void read(BufferReadable& buf, Arena* arena) { + void read(BufferReadable& buf, Arena*) { read_binary(has_value, buf); if (has()) { read_binary(value, buf); @@ -245,53 +245,53 @@ struct SingleValueDataDecimal { value = to.value; } - bool change_if_less(const IColumn& column, size_t row_num, Arena* arena) { + bool change_if_less(const IColumn& column, size_t row_num, Arena*) { if (!has() || assert_cast&, TypeCheckOnRelease::DISABLE>(column) .get_data()[row_num] < value) { - change(column, row_num, arena); + change(column, row_num, nullptr); return true; } else { return false; } } - bool change_if_less(const Self& to, Arena* arena) { + bool change_if_less(const Self& to, Arena*) { if (to.has() && (!has() || to.value < value)) { - change(to, arena); + change(to, nullptr); return true; } else { return false; } } - bool change_if_greater(const IColumn& column, size_t row_num, Arena* arena) { + bool change_if_greater(const IColumn& column, size_t row_num, Arena*) { if (!has() || assert_cast&, TypeCheckOnRelease::DISABLE>(column) .get_data()[row_num] > value) { - change(column, row_num, arena); + change(column, row_num, nullptr); return true; } else { return false; } } - bool change_if_greater(const Self& to, Arena* arena) { + bool change_if_greater(const Self& to, Arena*) { if (to.has() && (!has() || to.value > value)) { - change(to, arena); + change(to, nullptr); return true; } else { return false; } } - void change_first_time(const IColumn& column, size_t row_num, Arena* arena) { + void change_first_time(const IColumn& column, size_t row_num, Arena*) { if (UNLIKELY(!has())) { - change(column, row_num, arena); + change(column, row_num, nullptr); } } - void change_first_time(const Self& to, Arena* arena) { + void change_first_time(const Self& to, Arena*) { if (UNLIKELY(!has() && to.has())) { - change(to, arena); + change(to, nullptr); } } }; @@ -349,7 +349,7 @@ struct SingleValueDataString { } } - void read(BufferReadable& buf, Arena* arena) { + void read(BufferReadable& buf, Arena*) { Int32 rhs_size; read_binary(rhs_size, buf); @@ -380,7 +380,7 @@ struct SingleValueDataString { StringRef get_string_ref() const { return StringRef(get_data(), size); } /// Assuming to.has() - void change_impl(StringRef value, Arena* arena) { + void change_impl(StringRef value, Arena*) { Int32 value_size = value.size; if (value_size <= MAX_SMALL_STRING_SIZE) { @@ -402,64 +402,64 @@ struct SingleValueDataString { } } - void change(const IColumn& column, size_t row_num, Arena* arena) { + void change(const IColumn& column, size_t row_num, Arena*) { change_impl( assert_cast(column).get_data_at( row_num), - arena); + nullptr); } - void change(const Self& to, Arena* arena) { change_impl(to.get_string_ref(), arena); } 
+ void change(const Self& to, Arena*) { change_impl(to.get_string_ref(), nullptr); } - bool change_if_less(const IColumn& column, size_t row_num, Arena* arena) { + bool change_if_less(const IColumn& column, size_t row_num, Arena*) { if (!has() || assert_cast(column).get_data_at( row_num) < get_string_ref()) { - change(column, row_num, arena); + change(column, row_num, nullptr); return true; } else { return false; } } - bool change_if_greater(const IColumn& column, size_t row_num, Arena* arena) { + bool change_if_greater(const IColumn& column, size_t row_num, Arena*) { if (!has() || assert_cast(column).get_data_at( row_num) > get_string_ref()) { - change(column, row_num, arena); + change(column, row_num, nullptr); return true; } else { return false; } } - bool change_if_less(const Self& to, Arena* arena) { + bool change_if_less(const Self& to, Arena*) { if (to.has() && (!has() || to.get_string_ref() < get_string_ref())) { - change(to, arena); + change(to, nullptr); return true; } else { return false; } } - bool change_if_greater(const Self& to, Arena* arena) { + bool change_if_greater(const Self& to, Arena*) { if (to.has() && (!has() || to.get_string_ref() > get_string_ref())) { - change(to, arena); + change(to, nullptr); return true; } else { return false; } } - void change_first_time(const IColumn& column, size_t row_num, Arena* arena) { + void change_first_time(const IColumn& column, size_t row_num, Arena*) { if (UNLIKELY(!has())) { - change(column, row_num, arena); + change(column, row_num, nullptr); } } - void change_first_time(const Self& to, Arena* arena) { + void change_first_time(const Self& to, Arena*) { if (UNLIKELY(!has() && to.has())) { - change(to, arena); + change(to, nullptr); } } }; @@ -472,15 +472,15 @@ struct AggregateFunctionMaxData : public Data { AggregateFunctionMaxData() { reset(); } - void change_if_better(const IColumn& column, size_t row_num, Arena* arena) { + void change_if_better(const IColumn& column, size_t row_num, Arena*) { if constexpr (Data::IsFixedLength) { this->change_if(column, row_num, false); } else { - this->change_if_greater(column, row_num, arena); + this->change_if_greater(column, row_num, nullptr); } } - void change_if_better(const Self& to, Arena* arena) { this->change_if_greater(to, arena); } + void change_if_better(const Self& to, Arena*) { this->change_if_greater(to, nullptr); } void reset() { if constexpr (Data::IsFixedLength) { @@ -500,14 +500,14 @@ struct AggregateFunctionMinData : Data { AggregateFunctionMinData() { reset(); } - void change_if_better(const IColumn& column, size_t row_num, Arena* arena) { + void change_if_better(const IColumn& column, size_t row_num, Arena*) { if constexpr (Data::IsFixedLength) { this->change_if(column, row_num, true); } else { - this->change_if_less(column, row_num, arena); + this->change_if_less(column, row_num, nullptr); } } - void change_if_better(const Self& to, Arena* arena) { this->change_if_less(to, arena); } + void change_if_better(const Self& to, Arena*) { this->change_if_less(to, nullptr); } void reset() { if constexpr (Data::IsFixedLength) { @@ -525,10 +525,10 @@ struct AggregateFunctionAnyData : Data { using Data::IsFixedLength; constexpr static bool IS_ANY = true; - void change_if_better(const IColumn& column, size_t row_num, Arena* arena) { - this->change_first_time(column, row_num, arena); + void change_if_better(const IColumn& column, size_t row_num, Arena*) { + this->change_first_time(column, row_num, nullptr); } - void change_if_better(const Self& to, Arena* arena) { 
this->change_first_time(to, arena); } + void change_if_better(const Self& to, Arena*) { this->change_first_time(to, nullptr); } static const char* name() { return "any"; } }; @@ -560,25 +560,25 @@ class AggregateFunctionsSingleValue final DataTypePtr get_return_type() const override { return type; } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { - this->data(place).change_if_better(*columns[0], row_num, arena); + Arena*) const override { + this->data(place).change_if_better(*columns[0], row_num, nullptr); } void add_batch_single_place(size_t batch_size, AggregateDataPtr place, const IColumn** columns, - Arena* arena) const override { + Arena*) const override { if constexpr (Data::IS_ANY) { DCHECK_GT(batch_size, 0); - this->data(place).change_if_better(*columns[0], 0, arena); + this->data(place).change_if_better(*columns[0], 0, nullptr); } else { - Base::add_batch_single_place(batch_size, place, columns, arena); + Base::add_batch_single_place(batch_size, place, columns, nullptr); } } void reset(AggregateDataPtr place) const override { this->data(place).reset(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, - Arena* arena) const override { - this->data(place).change_if_better(this->data(rhs), arena); + Arena*) const override { + this->data(place).change_if_better(this->data(rhs), nullptr); } void serialize(ConstAggregateDataPtr __restrict place, BufferWritable& buf) const override { @@ -586,15 +586,15 @@ class AggregateFunctionsSingleValue final } void deserialize(AggregateDataPtr __restrict place, BufferReadable& buf, - Arena* arena) const override { - this->data(place).read(buf, arena); + Arena*) const override { + this->data(place).read(buf, nullptr); } void insert_result_into(ConstAggregateDataPtr __restrict place, IColumn& to) const override { this->data(place).insert_result_into(to); } - void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena* arena, + void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena*, size_t num_rows) const override { if constexpr (Data::IsFixedLength) { const auto& col = assert_cast(column); @@ -604,7 +604,7 @@ class AggregateFunctionsSingleValue final data[i] = column_data[i]; } } else { - Base::deserialize_from_column(places, column, arena, num_rows); + Base::deserialize_from_column(places, column, nullptr, num_rows); } } @@ -623,63 +623,63 @@ class AggregateFunctionsSingleValue final } void streaming_agg_serialize_to_column(const IColumn** columns, MutableColumnPtr& dst, - const size_t num_rows, Arena* arena) const override { + const size_t num_rows, Arena*) const override { if constexpr (Data::IsFixedLength) { auto& dst_column = assert_cast(*dst); dst_column.resize(num_rows); auto* dst_data = reinterpret_cast(dst_column.get_data().data()); for (size_t i = 0; i != num_rows; ++i) { - dst_data[i].change(*columns[0], i, arena); + dst_data[i].change(*columns[0], i, nullptr); } } else { - Base::streaming_agg_serialize_to_column(columns, dst, num_rows, arena); + Base::streaming_agg_serialize_to_column(columns, dst, num_rows, nullptr); } } void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, const IColumn& column, - Arena* arena) const override { + Arena*) const override { if constexpr (Data::IsFixedLength) { const auto& col = assert_cast(column); auto* column_data = reinterpret_cast(col.get_data().data()); const size_t num_rows = column.size(); for (size_t i = 0; i != num_rows; ++i) { - 
this->data(place).change_if_better(column_data[i], arena); + this->data(place).change_if_better(column_data[i], nullptr); } } else { - Base::deserialize_and_merge_from_column(place, column, arena); + Base::deserialize_and_merge_from_column(place, column, nullptr); } } void deserialize_and_merge_from_column_range(AggregateDataPtr __restrict place, const IColumn& column, size_t begin, size_t end, - Arena* arena) const override { + Arena*) const override { if constexpr (Data::IsFixedLength) { DCHECK(end <= column.size() && begin <= end) << ", begin:" << begin << ", end:" << end << ", column.size():" << column.size(); auto& col = assert_cast(column); auto* data = reinterpret_cast(col.get_data().data()); for (size_t i = begin; i <= end; ++i) { - this->data(place).change_if_better(data[i], arena); + this->data(place).change_if_better(data[i], nullptr); } } else { - Base::deserialize_and_merge_from_column_range(place, column, begin, end, arena); + Base::deserialize_and_merge_from_column_range(place, column, begin, end, nullptr); } } void deserialize_and_merge_vec(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, Arena* arena, + AggregateDataPtr rhs, const IColumn* column, Arena*, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec(places, offset, rhs, arena, num_rows); + this->merge_vec(places, offset, rhs, nullptr, num_rows); } void deserialize_and_merge_vec_selected(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + AggregateDataPtr rhs, const IColumn* column, Arena*, + const size_t num_rows) const override { + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec_selected(places, offset, rhs, arena, num_rows); + this->merge_vec_selected(places, offset, rhs, nullptr, num_rows); } void serialize_without_key_to_column(ConstAggregateDataPtr __restrict place, diff --git a/be/src/vec/aggregate_functions/aggregate_function_min_max_by.h b/be/src/vec/aggregate_functions/aggregate_function_min_max_by.h index 4caded0011a81b..5c73ac9aa67cbe 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_min_max_by.h +++ b/be/src/vec/aggregate_functions/aggregate_function_min_max_by.h @@ -64,7 +64,7 @@ struct BitmapValueData { } } - void read(BufferReadable& buf, Arena* arena) { + void read(BufferReadable& buf, Arena*) { read_binary(has_value, buf); if (has()) { DataTypeBitMap::deserialize_as_stream(value, buf); @@ -101,9 +101,9 @@ struct AggregateFunctionMinMaxByBaseData { key.write(buf); } - void read(BufferReadable& buf, Arena* arena) { - value.read(buf, arena); - key.read(buf, arena); + void read(BufferReadable& buf, Arena*) { + value.read(buf, nullptr); + key.read(buf, nullptr); } }; @@ -111,15 +111,15 @@ template struct AggregateFunctionMaxByData : public AggregateFunctionMinMaxByBaseData { using Self = AggregateFunctionMaxByData; void change_if_better(const IColumn& value_column, const IColumn& key_column, size_t row_num, - Arena* arena) { - if (this->key.change_if_greater(key_column, row_num, arena)) { - this->value.change(value_column, row_num, arena); + Arena*) { + if (this->key.change_if_greater(key_column, row_num, nullptr)) { + 
this->value.change(value_column, row_num, nullptr); } } - void change_if_better(const Self& to, Arena* arena) { - if (this->key.change_if_greater(to.key, arena)) { - this->value.change(to.value, arena); + void change_if_better(const Self& to, Arena*) { + if (this->key.change_if_greater(to.key, nullptr)) { + this->value.change(to.value, nullptr); } } @@ -130,15 +130,15 @@ template struct AggregateFunctionMinByData : public AggregateFunctionMinMaxByBaseData { using Self = AggregateFunctionMinByData; void change_if_better(const IColumn& value_column, const IColumn& key_column, size_t row_num, - Arena* arena) { - if (this->key.change_if_less(key_column, row_num, arena)) { - this->value.change(value_column, row_num, arena); + Arena*) { + if (this->key.change_if_less(key_column, row_num, nullptr)) { + this->value.change(value_column, row_num, nullptr); } } - void change_if_better(const Self& to, Arena* arena) { - if (this->key.change_if_less(to.key, arena)) { - this->value.change(to.value, arena); + void change_if_better(const Self& to, Arena*) { + if (this->key.change_if_less(to.key, nullptr)) { + this->value.change(to.value, nullptr); } } @@ -169,15 +169,15 @@ class AggregateFunctionsMinMaxBy final DataTypePtr get_return_type() const override { return value_type; } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { - this->data(place).change_if_better(*columns[0], *columns[1], row_num, arena); + Arena*) const override { + this->data(place).change_if_better(*columns[0], *columns[1], row_num, nullptr); } void reset(AggregateDataPtr place) const override { this->data(place).reset(); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, - Arena* arena) const override { - this->data(place).change_if_better(this->data(rhs), arena); + Arena*) const override { + this->data(place).change_if_better(this->data(rhs), nullptr); } void serialize(ConstAggregateDataPtr __restrict place, BufferWritable& buf) const override { @@ -185,8 +185,8 @@ class AggregateFunctionsMinMaxBy final } void deserialize(AggregateDataPtr __restrict place, BufferReadable& buf, - Arena* arena) const override { - this->data(place).read(buf, arena); + Arena*) const override { + this->data(place).read(buf, nullptr); } void insert_result_into(ConstAggregateDataPtr __restrict place, IColumn& to) const override { diff --git a/be/src/vec/aggregate_functions/aggregate_function_percentile.h b/be/src/vec/aggregate_functions/aggregate_function_percentile.h index a1e739d8758fa7..0766c59f3de1c3 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_percentile.h +++ b/be/src/vec/aggregate_functions/aggregate_function_percentile.h @@ -433,7 +433,7 @@ class AggregateFunctionPercentile final } void add_batch_single_place(size_t batch_size, AggregateDataPtr place, const IColumn** columns, - Arena* arena) const override { + Arena*) const override { const auto& sources = assert_cast(*columns[0]); const auto& quantile = diff --git a/be/src/vec/aggregate_functions/aggregate_function_reader_first_last.h b/be/src/vec/aggregate_functions/aggregate_function_reader_first_last.h index 60ab42b5298e8e..1a6ac288583b3e 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_reader_first_last.h +++ b/be/src/vec/aggregate_functions/aggregate_function_reader_first_last.h @@ -223,7 +223,7 @@ class ReaderFunctionData final } void add(AggregateDataPtr place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { + Arena*) const override { 
this->data(place).add(row_num, columns); } @@ -231,7 +231,7 @@ class ReaderFunctionData final void add_range_single_place(int64_t partition_start, int64_t partition_end, int64_t frame_start, int64_t frame_end, AggregateDataPtr place, const IColumn** columns, - Arena* arena) const override { + Arena*) const override { throw doris::Exception(ErrorCode::INTERNAL_ERROR, "ReaderFunctionData do not support add_range_single_place"); __builtin_unreachable(); diff --git a/be/src/vec/aggregate_functions/aggregate_function_rpc.h b/be/src/vec/aggregate_functions/aggregate_function_rpc.h index c92e96aaf9d935..f055d2c8c103a0 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_rpc.h +++ b/be/src/vec/aggregate_functions/aggregate_function_rpc.h @@ -364,7 +364,7 @@ class AggregateRpcUdaf final } void add_batch_single_place(size_t batch_size, AggregateDataPtr place, const IColumn** columns, - Arena* arena) const override { + Arena*) const override { static_cast(this->data(place).add(columns, 0, batch_size, argument_types)); } diff --git a/be/src/vec/aggregate_functions/aggregate_function_sort.h b/be/src/vec/aggregate_functions/aggregate_function_sort.h index 145a07d5446b5c..981580f8e6ac38 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_sort.h +++ b/be/src/vec/aggregate_functions/aggregate_function_sort.h @@ -142,12 +142,12 @@ class AggregateFunctionSort } void add(AggregateDataPtr __restrict place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { + Arena*) const override { this->data(place).add(columns, _arguments.size(), row_num); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, - Arena* arena) const override { + Arena*) const override { this->data(place).merge(this->data(rhs)); } @@ -156,7 +156,7 @@ class AggregateFunctionSort } void deserialize(AggregateDataPtr __restrict place, BufferReadable& buf, - Arena* arena) const override { + Arena*) const override { this->data(place).deserialize(buf); } diff --git a/be/src/vec/aggregate_functions/aggregate_function_sum.h b/be/src/vec/aggregate_functions/aggregate_function_sum.h index 846104915b1e69..13fb3864bd1aaf 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_sum.h +++ b/be/src/vec/aggregate_functions/aggregate_function_sum.h @@ -126,7 +126,7 @@ class AggregateFunctionSum final column.get_data().push_back(this->data(place).get()); } - void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena* arena, + void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena*, size_t num_rows) const override { auto& col = assert_cast(column); auto* data = col.get_data().data(); @@ -147,7 +147,7 @@ class AggregateFunctionSum final } void streaming_agg_serialize_to_column(const IColumn** columns, MutableColumnPtr& dst, - const size_t num_rows, Arena* arena) const override { + const size_t num_rows, Arena*) const override { auto& col = assert_cast(*dst); auto& src = assert_cast(*columns[0]); DCHECK(col.item_size() == sizeof(Data)) @@ -162,7 +162,7 @@ class AggregateFunctionSum final } void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, const IColumn& column, - Arena* arena) const override { + Arena*) const override { auto& col = assert_cast(column); const size_t num_rows = column.size(); auto* data = reinterpret_cast(col.get_data().data()); @@ -173,7 +173,7 @@ class AggregateFunctionSum final void deserialize_and_merge_from_column_range(AggregateDataPtr __restrict place, const IColumn& column, size_t begin, size_t 
end, - Arena* arena) const override { + Arena*) const override { DCHECK(end <= column.size() && begin <= end) << ", begin:" << begin << ", end:" << end << ", column.size():" << column.size(); auto& col = assert_cast(column); @@ -184,19 +184,19 @@ class AggregateFunctionSum final } void deserialize_and_merge_vec(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, Arena* arena, + AggregateDataPtr rhs, const IColumn* column, Arena*, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec(places, offset, rhs, arena, num_rows); + this->merge_vec(places, offset, rhs, nullptr, num_rows); } void deserialize_and_merge_vec_selected(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + AggregateDataPtr rhs, const IColumn* column, Arena*, + const size_t num_rows) const override { + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec_selected(places, offset, rhs, arena, num_rows); + this->merge_vec_selected(places, offset, rhs, nullptr, num_rows); } void serialize_without_key_to_column(ConstAggregateDataPtr __restrict place, diff --git a/be/src/vec/aggregate_functions/aggregate_function_uniq.h b/be/src/vec/aggregate_functions/aggregate_function_uniq.h index e97923a08e6a2d..a3bdad635057fd 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_uniq.h +++ b/be/src/vec/aggregate_functions/aggregate_function_uniq.h @@ -141,7 +141,7 @@ class AggregateFunctionUniq final } void add_batch(size_t batch_size, AggregateDataPtr* places, size_t place_offset, - const IColumn** columns, Arena* arena, bool /*agg_many*/) const override { + const IColumn** columns, Arena*, bool /*agg_many*/) const override { std::vector keys_container; const KeyType* keys = get_keys(keys_container, *columns[0], batch_size); @@ -175,7 +175,7 @@ class AggregateFunctionUniq final } void add_batch_single_place(size_t batch_size, AggregateDataPtr place, const IColumn** columns, - Arena* arena) const override { + Arena*) const override { std::vector keys_container; const KeyType* keys = get_keys(keys_container, *columns[0], batch_size); auto& set = this->data(place).set; @@ -197,7 +197,7 @@ class AggregateFunctionUniq final } void deserialize_and_merge(AggregateDataPtr __restrict place, AggregateDataPtr __restrict rhs, - BufferReadable& buf, Arena* arena) const override { + BufferReadable& buf, Arena*) const override { auto& set = this->data(place).set; UInt64 size; read_var_uint(size, buf); @@ -212,7 +212,7 @@ class AggregateFunctionUniq final } void deserialize(AggregateDataPtr __restrict place, BufferReadable& buf, - Arena* arena) const override { + Arena*) const override { auto& set = this->data(place).set; UInt64 size; read_var_uint(size, buf); diff --git a/be/src/vec/aggregate_functions/aggregate_function_uniq_distribute_key.h b/be/src/vec/aggregate_functions/aggregate_function_uniq_distribute_key.h index 4c3fa67e1626ae..90d137c62384f6 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_uniq_distribute_key.h +++ b/be/src/vec/aggregate_functions/aggregate_function_uniq_distribute_key.h @@ -112,7 +112,7 @@ class AggregateFunctionUniqDistributeKey final } void 
add_batch(size_t batch_size, AggregateDataPtr* places, size_t place_offset, - const IColumn** columns, Arena* arena, bool /*agg_many*/) const override { + const IColumn** columns, Arena*, bool /*agg_many*/) const override { std::vector keys_container; const KeyType* keys = get_keys(keys_container, *columns[0], batch_size); @@ -133,7 +133,7 @@ class AggregateFunctionUniqDistributeKey final } void add_batch_single_place(size_t batch_size, AggregateDataPtr place, const IColumn** columns, - Arena* arena) const override { + Arena*) const override { std::vector keys_container; const KeyType* keys = get_keys(keys_container, *columns[0], batch_size); auto& set = this->data(place).set; @@ -164,7 +164,7 @@ class AggregateFunctionUniqDistributeKey final assert_cast(to).get_data().push_back(this->data(place).count); } - void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena* arena, + void deserialize_from_column(AggregateDataPtr places, const IColumn& column, Arena*, size_t num_rows) const override { auto data = reinterpret_cast( assert_cast(column).get_data().data()); @@ -188,7 +188,7 @@ class AggregateFunctionUniqDistributeKey final } void streaming_agg_serialize_to_column(const IColumn** columns, MutableColumnPtr& dst, - const size_t num_rows, Arena* arena) const override { + const size_t num_rows, Arena*) const override { auto& dst_col = assert_cast(*dst); CHECK(dst_col.item_size() == sizeof(UInt64)) << "size is not equal: " << dst_col.item_size() << " " << sizeof(UInt64); @@ -200,7 +200,7 @@ class AggregateFunctionUniqDistributeKey final } void deserialize_and_merge_from_column(AggregateDataPtr __restrict place, const IColumn& column, - Arena* arena) const override { + Arena*) const override { auto& col = assert_cast(column); const size_t num_rows = column.size(); auto* data = reinterpret_cast(col.get_data().data()); @@ -211,7 +211,7 @@ class AggregateFunctionUniqDistributeKey final void deserialize_and_merge_from_column_range(AggregateDataPtr __restrict place, const IColumn& column, size_t begin, size_t end, - Arena* arena) const override { + Arena*) const override { CHECK(end <= column.size() && begin <= end) << ", begin:" << begin << ", end:" << end << ", column.size():" << column.size(); auto& col = assert_cast(column); @@ -222,19 +222,19 @@ class AggregateFunctionUniqDistributeKey final } void deserialize_and_merge_vec(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, Arena* arena, + AggregateDataPtr rhs, const IColumn* column, Arena*, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec(places, offset, rhs, arena, num_rows); + this->merge_vec(places, offset, rhs, nullptr, num_rows); } void deserialize_and_merge_vec_selected(const AggregateDataPtr* places, size_t offset, - AggregateDataPtr rhs, const IColumn* column, - Arena* arena, const size_t num_rows) const override { - this->deserialize_from_column(rhs, *column, arena, num_rows); + AggregateDataPtr rhs, const IColumn* column, Arena*, + const size_t num_rows) const override { + this->deserialize_from_column(rhs, *column, nullptr, num_rows); DEFER({ this->destroy_vec(rhs, num_rows); }); - this->merge_vec_selected(places, offset, rhs, arena, num_rows); + this->merge_vec_selected(places, offset, rhs, nullptr, num_rows); } void serialize_without_key_to_column(ConstAggregateDataPtr __restrict 
place, diff --git a/be/src/vec/aggregate_functions/aggregate_function_window.h b/be/src/vec/aggregate_functions/aggregate_function_window.h index 517871e2fb642d..cb038fe31168b9 100644 --- a/be/src/vec/aggregate_functions/aggregate_function_window.h +++ b/be/src/vec/aggregate_functions/aggregate_function_window.h @@ -66,7 +66,7 @@ class WindowFunctionRowNumber final void add_range_single_place(int64_t partition_start, int64_t partition_end, int64_t frame_start, int64_t frame_end, AggregateDataPtr place, const IColumn** columns, - Arena* arena) const override { + Arena*) const override { ++data(place).count; } @@ -104,7 +104,7 @@ class WindowFunctionRank final : public IAggregateFunctionDataHelperdata(place).add_range_single_place(partition_start, partition_end, frame_start, frame_end, columns); } @@ -554,7 +554,7 @@ class WindowFunctionData final } void add(AggregateDataPtr place, const IColumn** columns, ssize_t row_num, - Arena* arena) const override { + Arena*) const override { throw doris::Exception(ErrorCode::INTERNAL_ERROR, "WindowFunctionLeadLagData do not support add"); __builtin_unreachable(); diff --git a/be/src/vec/common/hash_table/hash_key_type.h b/be/src/vec/common/hash_table/hash_key_type.h index 38802fe716711f..2c14e4ab687f87 100644 --- a/be/src/vec/common/hash_table/hash_key_type.h +++ b/be/src/vec/common/hash_table/hash_key_type.h @@ -97,16 +97,16 @@ inline HashKeyType get_hash_key_type(const std::vector& return HashKeyType::without_key; } - if (!data_types[0]->have_maximum_size_of_value()) { - if (is_string(data_types[0])) { + auto t = remove_nullable(data_types[0]); + // serialized cannot be used in the case of single column, because the join operator will have some processing of column nullable, resulting in incorrect serialized results. 
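The surrounding get_hash_key_type hunk first strips nullability from the single key column, then either maps variable-length values to a string key or switches on the fixed value width, throwing instead of silently falling back to a serialized key. Below is a simplified sketch of that selection logic; it is illustrative only, and TypeDesc / pick_key_type are hypothetical stand-ins rather than Doris types.

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical stand-ins for this sketch only; not the Doris DataType interface.
struct TypeDesc {
    bool nullable = false;
    bool is_string = false;
    bool fixed_size = true; // false for variable-length values such as strings
    size_t byte_size = 0;   // meaningful only when fixed_size is true
    std::string name;
};

enum class KeyType { int8_key, int16_key, int32_key, int64_key, int128_key, int256_key, string_key };

KeyType pick_key_type(TypeDesc t) {
    t.nullable = false; // analogous to remove_nullable(data_types[0])
    if (!t.fixed_size) {
        if (t.is_string) {
            return KeyType::string_key;
        }
        // No silent fallback to a serialized key for a single column.
        throw std::runtime_error("meet invalid type, type=" + t.name);
    }
    switch (t.byte_size) {
    case 1:  return KeyType::int8_key;
    case 2:  return KeyType::int16_key;
    case 4:  return KeyType::int32_key;
    case 8:  return KeyType::int64_key;
    case 16: return KeyType::int128_key;
    case 32: return KeyType::int256_key;
    default: throw std::runtime_error("meet invalid type size, type=" + t.name);
    }
}

int main() {
    TypeDesc nullable_int32 {true, false, true, 4, "Nullable(Int32)"};
    TypeDesc str {false, true, false, 0, "String"};
    std::cout << static_cast<int>(pick_key_type(nullable_int32)) << ' '
              << static_cast<int>(pick_key_type(str)) << '\n'; // 2 6
}

With remove_nullable applied up front, the fixed size no longer needs to subtract the nullability byte before being matched against the integer key widths.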
+ if (!t->have_maximum_size_of_value()) { + if (is_string(t)) { return HashKeyType::string_key; - } else { - return HashKeyType::serialized; } + throw Exception(ErrorCode::INTERNAL_ERROR, "meet invalid type, type={}", t->get_name()); } - size_t size = - data_types[0]->get_maximum_size_of_value_in_memory() - data_types[0]->is_nullable(); + size_t size = t->get_maximum_size_of_value_in_memory(); if (size == sizeof(vectorized::UInt8)) { return HashKeyType::int8_key; } else if (size == sizeof(vectorized::UInt16)) { @@ -121,7 +121,7 @@ inline HashKeyType get_hash_key_type(const std::vector& return HashKeyType::int256_key; } else { throw Exception(ErrorCode::INTERNAL_ERROR, "meet invalid type size, size={}, type={}", size, - data_types[0]->get_name()); + t->get_name()); } } diff --git a/be/src/vec/common/hash_table/hash_map_util.h b/be/src/vec/common/hash_table/hash_map_util.h index 200e6372ea8ac4..292e6307851c58 100644 --- a/be/src/vec/common/hash_table/hash_map_util.h +++ b/be/src/vec/common/hash_table/hash_map_util.h @@ -33,8 +33,9 @@ inline std::vector get_data_types( template Status init_hash_method(DataVariants* data, const std::vector& data_types, bool is_first_phase) { - auto type = get_hash_key_type_with_phase(get_hash_key_type(data_types), !is_first_phase); + auto type = HashKeyType::EMPTY; try { + type = get_hash_key_type_with_phase(get_hash_key_type(data_types), !is_first_phase); if (has_nullable_key(data_types)) { data->template init(data_types, type); } else { @@ -48,7 +49,7 @@ Status init_hash_method(DataVariants* data, const std::vectormethod_variant.valueless_by_exception()); - if (type != HashKeyType::without_key && + if (type != HashKeyType::without_key && type != HashKeyType::EMPTY && data->method_variant.index() == 0) { // index is 0 means variant is monostate return Status::InternalError("method_variant init failed"); } diff --git a/be/src/vec/common/hash_table/join_hash_table.h b/be/src/vec/common/hash_table/join_hash_table.h index 485c5f7b3b22c9..25ca8844cd280f 100644 --- a/be/src/vec/common/hash_table/join_hash_table.h +++ b/be/src/vec/common/hash_table/join_hash_table.h @@ -71,20 +71,16 @@ class JoinHashTable { std::vector& get_visited() { return visited; } - template - void build(const Key* __restrict keys, const uint32_t* __restrict bucket_nums, - size_t num_elem) { + void build(const Key* __restrict keys, const uint32_t* __restrict bucket_nums, size_t num_elem, + bool keep_null_key) { build_keys = keys; for (size_t i = 1; i < num_elem; i++) { uint32_t bucket_num = bucket_nums[i]; next[i] = first[bucket_num]; first[bucket_num] = i; } - if constexpr ((JoinOpType != TJoinOp::NULL_AWARE_LEFT_ANTI_JOIN && - JoinOpType != TJoinOp::NULL_AWARE_LEFT_SEMI_JOIN) || - !with_other_conjuncts) { - /// Only null aware join with other conjuncts need to access the null value in hash table - first[bucket_size] = 0; // index = bucket_num means null + if (!keep_null_key) { + first[bucket_size] = 0; // index = bucket_size means null } } diff --git a/be/src/vec/core/block.cpp b/be/src/vec/core/block.cpp index 2eb06e3c6a553e..11075335fb17af 100644 --- a/be/src/vec/core/block.cpp +++ b/be/src/vec/core/block.cpp @@ -1083,7 +1083,7 @@ Status MutableBlock::add_rows(const Block* block, size_t row_begin, size_t lengt return Status::OK(); } -Status MutableBlock::add_rows(const Block* block, std::vector rows) { +Status MutableBlock::add_rows(const Block* block, const std::vector& rows) { RETURN_IF_CATCH_EXCEPTION({ DCHECK_LE(columns(), block->columns()); const auto& block_data = 
block->get_columns_with_type_and_name(); @@ -1093,7 +1093,7 @@ Status MutableBlock::add_rows(const Block* block, std::vector rows) { auto& dst = _columns[i]; const auto& src = *block_data[i].column.get(); dst->reserve(dst->size() + length); - for (size_t row : rows) { + for (auto row : rows) { // we can introduce a new function like `insert_assume_reserved` for IColumn. dst->insert_from(src, row); } diff --git a/be/src/vec/core/block.h b/be/src/vec/core/block.h index bbcdd9472ae178..d1af45e1297d4f 100644 --- a/be/src/vec/core/block.h +++ b/be/src/vec/core/block.h @@ -624,7 +624,7 @@ class MutableBlock { Status add_rows(const Block* block, const uint32_t* row_begin, const uint32_t* row_end, const std::vector* column_offset = nullptr); Status add_rows(const Block* block, size_t row_begin, size_t length); - Status add_rows(const Block* block, std::vector rows); + Status add_rows(const Block* block, const std::vector& rows); /// remove the column with the specified name void erase(const String& name); diff --git a/be/src/vec/exec/jni_connector.cpp b/be/src/vec/exec/jni_connector.cpp index f06524944ffc51..a7b0d5144ee623 100644 --- a/be/src/vec/exec/jni_connector.cpp +++ b/be/src/vec/exec/jni_connector.cpp @@ -80,16 +80,13 @@ Status JniConnector::open(RuntimeState* state, RuntimeProfile* profile) { batch_size = _state->batch_size(); } RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env)); - if (env == nullptr) { - return Status::InternalError("Failed to get/create JVM"); - } SCOPED_TIMER(_open_scanner_time); _scanner_params.emplace("time_zone", _state->timezone()); RETURN_IF_ERROR(_init_jni_scanner(env, batch_size)); // Call org.apache.doris.common.jni.JniScanner#open env->CallVoidMethod(_jni_scanner_obj, _jni_scanner_open); - _scanner_opened = true; RETURN_ERROR_IF_EXC(env); + _scanner_opened = true; return Status::OK(); } diff --git a/be/src/vec/exec/scan/new_es_scanner.cpp b/be/src/vec/exec/scan/new_es_scanner.cpp index d59aebd98c7341..fae83854be0910 100644 --- a/be/src/vec/exec/scan/new_es_scanner.cpp +++ b/be/src/vec/exec/scan/new_es_scanner.cpp @@ -169,8 +169,7 @@ Status NewEsScanner::_get_block_impl(RuntimeState* state, Block* block, bool* eo } Status NewEsScanner::_get_next(std::vector& columns) { - auto read_timer = _local_state->cast()._read_timer; - SCOPED_TIMER(read_timer); + SCOPED_TIMER(_local_state->cast()._read_timer); if (_line_eof && _batch_eof) { _es_eof = true; return Status::OK(); @@ -185,12 +184,8 @@ Status NewEsScanner::_get_next(std::vector& column } } - auto rows_read_counter = - _local_state->cast()._rows_read_counter; - auto materialize_timer = - _local_state->cast()._materialize_timer; - COUNTER_UPDATE(rows_read_counter, 1); - SCOPED_TIMER(materialize_timer); + COUNTER_UPDATE(_local_state->cast()._blocks_read_counter, 1); + SCOPED_TIMER(_local_state->cast()._materialize_timer); RETURN_IF_ERROR(_es_scroll_parser->fill_columns(_tuple_desc, columns, &_line_eof, _docvalue_context, _state->timezone_obj())); if (!_line_eof) { diff --git a/be/src/vec/exec/scan/new_jdbc_scanner.cpp b/be/src/vec/exec/scan/new_jdbc_scanner.cpp index a23e83e2426c07..7eaa9ab3eab788 100644 --- a/be/src/vec/exec/scan/new_jdbc_scanner.cpp +++ b/be/src/vec/exec/scan/new_jdbc_scanner.cpp @@ -89,7 +89,6 @@ Status NewJdbcScanner::prepare(RuntimeState* state, const VExprContextSPtrs& con _jdbc_param.connection_pool_max_life_time = jdbc_table->connection_pool_max_life_time(); _jdbc_param.connection_pool_max_wait_time = jdbc_table->connection_pool_max_wait_time(); _jdbc_param.connection_pool_keep_alive = 
jdbc_table->connection_pool_keep_alive(); - _jdbc_param.enable_connection_pool = jdbc_table->enable_connection_pool(); _local_state->scanner_profile()->add_info_string("JdbcDriverClass", _jdbc_param.driver_class); _local_state->scanner_profile()->add_info_string("JdbcDriverUrl", _jdbc_param.driver_path); diff --git a/be/src/vec/exec/scan/new_olap_scanner.cpp b/be/src/vec/exec/scan/new_olap_scanner.cpp index 60240618655322..f40f30f5b16e67 100644 --- a/be/src/vec/exec/scan/new_olap_scanner.cpp +++ b/be/src/vec/exec/scan/new_olap_scanner.cpp @@ -226,8 +226,7 @@ Status NewOlapScanner::init() { Status NewOlapScanner::open(RuntimeState* state) { RETURN_IF_ERROR(VScanner::open(state)); - auto* timer = ((pipeline::OlapScanLocalState*)_local_state)->_reader_init_timer; - SCOPED_TIMER(timer); + SCOPED_TIMER(_local_state->cast()._reader_init_timer); auto res = _tablet_reader->init(_tablet_reader_params); if (!res.ok()) { @@ -543,11 +542,9 @@ void NewOlapScanner::_update_realtime_counters() { const OlapReaderStatistics& stats = _tablet_reader->stats(); COUNTER_UPDATE(local_state->_read_compressed_counter, stats.compressed_bytes_read); COUNTER_UPDATE(local_state->_scan_bytes, stats.compressed_bytes_read); - _scan_bytes += stats.compressed_bytes_read; _tablet_reader->mutable_stats()->compressed_bytes_read = 0; COUNTER_UPDATE(local_state->_scan_rows, stats.raw_rows_read); - _scan_rows += stats.raw_rows_read; // if raw_rows_read is reset, scanNode will scan all table rows which may cause BE crash _tablet_reader->mutable_stats()->raw_rows_read = 0; } @@ -562,97 +559,92 @@ void NewOlapScanner::_collect_profile_before_close() { VScanner::_collect_profile_before_close(); #ifndef INCR_COUNTER -#define INCR_COUNTER(Parent) \ - COUNTER_UPDATE(Parent->_io_timer, stats.io_ns); \ - COUNTER_UPDATE(Parent->_read_compressed_counter, stats.compressed_bytes_read); \ - COUNTER_UPDATE(Parent->_scan_bytes, stats.compressed_bytes_read); \ - _scan_bytes += stats.compressed_bytes_read; \ - COUNTER_UPDATE(Parent->_decompressor_timer, stats.decompress_ns); \ - COUNTER_UPDATE(Parent->_read_uncompressed_counter, stats.uncompressed_bytes_read); \ - COUNTER_UPDATE(Parent->_block_load_timer, stats.block_load_ns); \ - COUNTER_UPDATE(Parent->_block_load_counter, stats.blocks_load); \ - COUNTER_UPDATE(Parent->_block_fetch_timer, stats.block_fetch_ns); \ - COUNTER_UPDATE(Parent->_delete_bitmap_get_agg_timer, stats.delete_bitmap_get_agg_ns); \ - COUNTER_UPDATE(Parent->_block_convert_timer, stats.block_convert_ns); \ - COUNTER_UPDATE(Parent->_scan_rows, stats.raw_rows_read); \ - _scan_rows += _tablet_reader->mutable_stats()->raw_rows_read; \ - COUNTER_UPDATE(Parent->_vec_cond_timer, stats.vec_cond_ns); \ - COUNTER_UPDATE(Parent->_short_cond_timer, stats.short_cond_ns); \ - COUNTER_UPDATE(Parent->_expr_filter_timer, stats.expr_filter_ns); \ - COUNTER_UPDATE(Parent->_block_init_timer, stats.block_init_ns); \ - COUNTER_UPDATE(Parent->_block_init_seek_timer, stats.block_init_seek_ns); \ - COUNTER_UPDATE(Parent->_block_init_seek_counter, stats.block_init_seek_num); \ - COUNTER_UPDATE(Parent->_block_conditions_filtered_timer, stats.block_conditions_filtered_ns); \ - COUNTER_UPDATE(Parent->_block_conditions_filtered_bf_timer, \ - stats.block_conditions_filtered_bf_ns); \ - COUNTER_UPDATE(Parent->_collect_iterator_merge_next_timer, \ - stats.collect_iterator_merge_next_timer); \ - COUNTER_UPDATE(Parent->_block_conditions_filtered_zonemap_timer, \ - stats.block_conditions_filtered_zonemap_ns); \ - 
COUNTER_UPDATE(Parent->_block_conditions_filtered_zonemap_rp_timer, \ - stats.block_conditions_filtered_zonemap_rp_ns); \ - COUNTER_UPDATE(Parent->_block_conditions_filtered_dict_timer, \ - stats.block_conditions_filtered_dict_ns); \ - COUNTER_UPDATE(Parent->_first_read_timer, stats.first_read_ns); \ - COUNTER_UPDATE(Parent->_second_read_timer, stats.second_read_ns); \ - COUNTER_UPDATE(Parent->_first_read_seek_timer, stats.block_first_read_seek_ns); \ - COUNTER_UPDATE(Parent->_first_read_seek_counter, stats.block_first_read_seek_num); \ - COUNTER_UPDATE(Parent->_lazy_read_timer, stats.lazy_read_ns); \ - COUNTER_UPDATE(Parent->_lazy_read_seek_timer, stats.block_lazy_read_seek_ns); \ - COUNTER_UPDATE(Parent->_lazy_read_seek_counter, stats.block_lazy_read_seek_num); \ - COUNTER_UPDATE(Parent->_output_col_timer, stats.output_col_ns); \ - COUNTER_UPDATE(Parent->_rows_vec_cond_filtered_counter, stats.rows_vec_cond_filtered); \ - COUNTER_UPDATE(Parent->_rows_short_circuit_cond_filtered_counter, \ - stats.rows_short_circuit_cond_filtered); \ - COUNTER_UPDATE(Parent->_rows_vec_cond_input_counter, stats.vec_cond_input_rows); \ - COUNTER_UPDATE(Parent->_rows_short_circuit_cond_input_counter, \ - stats.short_circuit_cond_input_rows); \ - for (auto& [id, info] : stats.filter_info) { \ - Parent->add_filter_info(id, info); \ - } \ - COUNTER_UPDATE(Parent->_stats_filtered_counter, stats.rows_stats_filtered); \ - COUNTER_UPDATE(Parent->_stats_rp_filtered_counter, stats.rows_stats_rp_filtered); \ - COUNTER_UPDATE(Parent->_dict_filtered_counter, stats.rows_dict_filtered); \ - COUNTER_UPDATE(Parent->_bf_filtered_counter, stats.rows_bf_filtered); \ - COUNTER_UPDATE(Parent->_del_filtered_counter, stats.rows_del_filtered); \ - COUNTER_UPDATE(Parent->_del_filtered_counter, stats.rows_del_by_bitmap); \ - COUNTER_UPDATE(Parent->_del_filtered_counter, stats.rows_vec_del_cond_filtered); \ - COUNTER_UPDATE(Parent->_conditions_filtered_counter, stats.rows_conditions_filtered); \ - COUNTER_UPDATE(Parent->_key_range_filtered_counter, stats.rows_key_range_filtered); \ - COUNTER_UPDATE(Parent->_total_pages_num_counter, stats.total_pages_num); \ - COUNTER_UPDATE(Parent->_cached_pages_num_counter, stats.cached_pages_num); \ - COUNTER_UPDATE(Parent->_bitmap_index_filter_counter, stats.rows_bitmap_index_filtered); \ - COUNTER_UPDATE(Parent->_bitmap_index_filter_timer, stats.bitmap_index_filter_timer); \ - COUNTER_UPDATE(Parent->_inverted_index_filter_counter, stats.rows_inverted_index_filtered); \ - COUNTER_UPDATE(Parent->_inverted_index_filter_timer, stats.inverted_index_filter_timer); \ - COUNTER_UPDATE(Parent->_inverted_index_query_cache_hit_counter, \ - stats.inverted_index_query_cache_hit); \ - COUNTER_UPDATE(Parent->_inverted_index_query_cache_miss_counter, \ - stats.inverted_index_query_cache_miss); \ - COUNTER_UPDATE(Parent->_inverted_index_query_timer, stats.inverted_index_query_timer); \ - COUNTER_UPDATE(Parent->_inverted_index_query_null_bitmap_timer, \ - stats.inverted_index_query_null_bitmap_timer); \ - COUNTER_UPDATE(Parent->_inverted_index_query_bitmap_copy_timer, \ - stats.inverted_index_query_bitmap_copy_timer); \ - COUNTER_UPDATE(Parent->_inverted_index_query_bitmap_op_timer, \ - stats.inverted_index_query_bitmap_op_timer); \ - COUNTER_UPDATE(Parent->_inverted_index_searcher_open_timer, \ - stats.inverted_index_searcher_open_timer); \ - COUNTER_UPDATE(Parent->_inverted_index_searcher_search_timer, \ - stats.inverted_index_searcher_search_timer); \ - 
COUNTER_UPDATE(Parent->_inverted_index_searcher_cache_hit_counter, \ - stats.inverted_index_searcher_cache_hit); \ - COUNTER_UPDATE(Parent->_inverted_index_searcher_cache_miss_counter, \ - stats.inverted_index_searcher_cache_miss); \ - COUNTER_UPDATE(Parent->_inverted_index_downgrade_count_counter, \ - stats.inverted_index_downgrade_count); \ - if (config::enable_file_cache) { \ - io::FileCacheProfileReporter cache_profile(Parent->_segment_profile.get()); \ - cache_profile.update(&stats.file_cache_stats); \ - } \ - COUNTER_UPDATE(Parent->_output_index_result_column_timer, \ - stats.output_index_result_column_timer); \ - COUNTER_UPDATE(Parent->_filtered_segment_counter, stats.filtered_segment_number); \ +#define INCR_COUNTER(Parent) \ + COUNTER_UPDATE(Parent->_io_timer, stats.io_ns); \ + COUNTER_UPDATE(Parent->_read_compressed_counter, stats.compressed_bytes_read); \ + COUNTER_UPDATE(Parent->_scan_bytes, stats.compressed_bytes_read); \ + COUNTER_UPDATE(Parent->_decompressor_timer, stats.decompress_ns); \ + COUNTER_UPDATE(Parent->_read_uncompressed_counter, stats.uncompressed_bytes_read); \ + COUNTER_UPDATE(Parent->_block_load_timer, stats.block_load_ns); \ + COUNTER_UPDATE(Parent->_block_load_counter, stats.blocks_load); \ + COUNTER_UPDATE(Parent->_block_fetch_timer, stats.block_fetch_ns); \ + COUNTER_UPDATE(Parent->_delete_bitmap_get_agg_timer, stats.delete_bitmap_get_agg_ns); \ + COUNTER_UPDATE(Parent->_scan_rows, stats.raw_rows_read); \ + COUNTER_UPDATE(Parent->_vec_cond_timer, stats.vec_cond_ns); \ + COUNTER_UPDATE(Parent->_short_cond_timer, stats.short_cond_ns); \ + COUNTER_UPDATE(Parent->_expr_filter_timer, stats.expr_filter_ns); \ + COUNTER_UPDATE(Parent->_block_init_timer, stats.block_init_ns); \ + COUNTER_UPDATE(Parent->_block_init_seek_timer, stats.block_init_seek_ns); \ + COUNTER_UPDATE(Parent->_block_init_seek_counter, stats.block_init_seek_num); \ + COUNTER_UPDATE(Parent->_segment_generate_row_range_timer, stats.generate_row_ranges_ns); \ + COUNTER_UPDATE(Parent->_segment_generate_row_range_by_bf_timer, \ + stats.generate_row_ranges_by_bf_ns); \ + COUNTER_UPDATE(Parent->_collect_iterator_merge_next_timer, \ + stats.collect_iterator_merge_next_timer); \ + COUNTER_UPDATE(Parent->_segment_generate_row_range_by_zonemap_timer, \ + stats.generate_row_ranges_by_zonemap_ns); \ + COUNTER_UPDATE(Parent->_segment_generate_row_range_by_dict_timer, \ + stats.generate_row_ranges_by_dict_ns); \ + COUNTER_UPDATE(Parent->_predicate_column_read_timer, stats.predicate_column_read_ns); \ + COUNTER_UPDATE(Parent->_non_predicate_column_read_timer, stats.non_predicate_read_ns); \ + COUNTER_UPDATE(Parent->_predicate_column_read_seek_timer, \ + stats.predicate_column_read_seek_ns); \ + COUNTER_UPDATE(Parent->_predicate_column_read_seek_counter, \ + stats.predicate_column_read_seek_num); \ + COUNTER_UPDATE(Parent->_lazy_read_timer, stats.lazy_read_ns); \ + COUNTER_UPDATE(Parent->_lazy_read_seek_timer, stats.block_lazy_read_seek_ns); \ + COUNTER_UPDATE(Parent->_lazy_read_seek_counter, stats.block_lazy_read_seek_num); \ + COUNTER_UPDATE(Parent->_output_col_timer, stats.output_col_ns); \ + COUNTER_UPDATE(Parent->_rows_vec_cond_filtered_counter, stats.rows_vec_cond_filtered); \ + COUNTER_UPDATE(Parent->_rows_short_circuit_cond_filtered_counter, \ + stats.rows_short_circuit_cond_filtered); \ + COUNTER_UPDATE(Parent->_rows_vec_cond_input_counter, stats.vec_cond_input_rows); \ + COUNTER_UPDATE(Parent->_rows_short_circuit_cond_input_counter, \ + stats.short_circuit_cond_input_rows); \ + for (auto& [id, info] : 
stats.filter_info) { \ + Parent->add_filter_info(id, info); \ + } \ + COUNTER_UPDATE(Parent->_stats_filtered_counter, stats.rows_stats_filtered); \ + COUNTER_UPDATE(Parent->_stats_rp_filtered_counter, stats.rows_stats_rp_filtered); \ + COUNTER_UPDATE(Parent->_dict_filtered_counter, stats.rows_dict_filtered); \ + COUNTER_UPDATE(Parent->_bf_filtered_counter, stats.rows_bf_filtered); \ + COUNTER_UPDATE(Parent->_del_filtered_counter, stats.rows_del_filtered); \ + COUNTER_UPDATE(Parent->_del_filtered_counter, stats.rows_del_by_bitmap); \ + COUNTER_UPDATE(Parent->_del_filtered_counter, stats.rows_vec_del_cond_filtered); \ + COUNTER_UPDATE(Parent->_conditions_filtered_counter, stats.rows_conditions_filtered); \ + COUNTER_UPDATE(Parent->_key_range_filtered_counter, stats.rows_key_range_filtered); \ + COUNTER_UPDATE(Parent->_total_pages_num_counter, stats.total_pages_num); \ + COUNTER_UPDATE(Parent->_cached_pages_num_counter, stats.cached_pages_num); \ + COUNTER_UPDATE(Parent->_bitmap_index_filter_counter, stats.rows_bitmap_index_filtered); \ + COUNTER_UPDATE(Parent->_bitmap_index_filter_timer, stats.bitmap_index_filter_timer); \ + COUNTER_UPDATE(Parent->_inverted_index_filter_counter, stats.rows_inverted_index_filtered); \ + COUNTER_UPDATE(Parent->_inverted_index_filter_timer, stats.inverted_index_filter_timer); \ + COUNTER_UPDATE(Parent->_inverted_index_query_cache_hit_counter, \ + stats.inverted_index_query_cache_hit); \ + COUNTER_UPDATE(Parent->_inverted_index_query_cache_miss_counter, \ + stats.inverted_index_query_cache_miss); \ + COUNTER_UPDATE(Parent->_inverted_index_query_timer, stats.inverted_index_query_timer); \ + COUNTER_UPDATE(Parent->_inverted_index_query_null_bitmap_timer, \ + stats.inverted_index_query_null_bitmap_timer); \ + COUNTER_UPDATE(Parent->_inverted_index_query_bitmap_copy_timer, \ + stats.inverted_index_query_bitmap_copy_timer); \ + COUNTER_UPDATE(Parent->_inverted_index_searcher_open_timer, \ + stats.inverted_index_searcher_open_timer); \ + COUNTER_UPDATE(Parent->_inverted_index_searcher_search_timer, \ + stats.inverted_index_searcher_search_timer); \ + COUNTER_UPDATE(Parent->_inverted_index_searcher_cache_hit_counter, \ + stats.inverted_index_searcher_cache_hit); \ + COUNTER_UPDATE(Parent->_inverted_index_searcher_cache_miss_counter, \ + stats.inverted_index_searcher_cache_miss); \ + COUNTER_UPDATE(Parent->_inverted_index_downgrade_count_counter, \ + stats.inverted_index_downgrade_count); \ + if (config::enable_file_cache) { \ + io::FileCacheProfileReporter cache_profile(Parent->_segment_profile.get()); \ + cache_profile.update(&stats.file_cache_stats); \ + } \ + COUNTER_UPDATE(Parent->_output_index_result_column_timer, \ + stats.output_index_result_column_timer); \ + COUNTER_UPDATE(Parent->_filtered_segment_counter, stats.filtered_segment_number); \ COUNTER_UPDATE(Parent->_total_segment_counter, stats.total_segment_number); // Update counters for NewOlapScanner @@ -665,11 +657,12 @@ void NewOlapScanner::_collect_profile_before_close() { #undef INCR_COUNTER #endif // Update metrics - DorisMetrics::instance()->query_scan_bytes->increment(_scan_bytes); - DorisMetrics::instance()->query_scan_rows->increment(_scan_rows); + DorisMetrics::instance()->query_scan_bytes->increment( + local_state->_read_compressed_counter->value()); + DorisMetrics::instance()->query_scan_rows->increment(local_state->_scan_rows->value()); auto& tablet = _tablet_reader_params.tablet; - tablet->query_scan_bytes->increment(_scan_bytes); - tablet->query_scan_rows->increment(_scan_rows); + 
tablet->query_scan_bytes->increment(local_state->_read_compressed_counter->value()); + tablet->query_scan_rows->increment(local_state->_scan_rows->value()); tablet->query_scan_count->increment(1); if (_query_statistics) { _query_statistics->add_scan_bytes_from_local_storage( diff --git a/be/src/vec/exec/scan/new_olap_scanner.h b/be/src/vec/exec/scan/new_olap_scanner.h index 44c300f446e6ea..fd1246b120ba77 100644 --- a/be/src/vec/exec/scan/new_olap_scanner.h +++ b/be/src/vec/exec/scan/new_olap_scanner.h @@ -101,8 +101,6 @@ class NewOlapScanner : public VScanner { std::unordered_set _tablet_columns_convert_to_null_set; // ========= profiles ========== - int64_t _scan_bytes = 0; - int64_t _scan_rows = 0; bool _profile_updated = false; }; } // namespace vectorized diff --git a/be/src/vec/exec/scan/scanner_context.cpp b/be/src/vec/exec/scan/scanner_context.cpp index ee1d60d2902424..bea222bd0f35b0 100644 --- a/be/src/vec/exec/scan/scanner_context.cpp +++ b/be/src/vec/exec/scan/scanner_context.cpp @@ -80,8 +80,6 @@ Status ScannerContext::init() { _scanner_profile = _local_state->_scanner_profile; _scanner_sched_counter = _local_state->_scanner_sched_counter; _newly_create_free_blocks_num = _local_state->_newly_create_free_blocks_num; - _scanner_wait_batch_timer = _local_state->_scanner_wait_batch_timer; - _scanner_ctx_sched_time = _local_state->_scanner_ctx_sched_time; _scale_up_scanners_counter = _local_state->_scale_up_scanners_counter; _scanner_memory_used_counter = _local_state->_memory_used_counter; @@ -224,10 +222,6 @@ Status ScannerContext::init() { return Status::OK(); } -std::string ScannerContext::parent_name() { - return _local_state->get_name(); -} - vectorized::BlockUPtr ScannerContext::get_free_block(bool force) { vectorized::BlockUPtr block = nullptr; if (_free_blocks.try_dequeue(block)) { @@ -257,18 +251,13 @@ void ScannerContext::return_free_block(vectorized::BlockUPtr block) { } } -bool ScannerContext::empty_in_queue(int id) { - std::lock_guard l(_transfer_lock); - return _blocks_queue.empty(); -} - Status ScannerContext::submit_scan_task(std::shared_ptr scan_task) { _scanner_sched_counter->update(1); _num_scheduled_scanners++; return _scanner_scheduler_global->submit(shared_from_this(), scan_task); } -void ScannerContext::append_block_to_queue(std::shared_ptr scan_task) { +void ScannerContext::push_back_scan_task(std::shared_ptr scan_task) { if (scan_task->status_ok()) { for (const auto& [block, _] : scan_task->cached_blocks) { if (block->rows() > 0) { @@ -287,12 +276,12 @@ void ScannerContext::append_block_to_queue(std::shared_ptr scan_task) if (_last_scale_up_time == 0) { _last_scale_up_time = UnixMillis(); } - if (_blocks_queue.empty() && _last_fetch_time != 0) { + if (_tasks_queue.empty() && _last_fetch_time != 0) { // there's no block in queue before current block, so the consumer is waiting _total_wait_block_time += UnixMillis() - _last_fetch_time; } _num_scheduled_scanners--; - _blocks_queue.emplace_back(scan_task); + _tasks_queue.emplace_back(scan_task); _dependency->set_ready(); } @@ -308,9 +297,9 @@ Status ScannerContext::get_block_from_queue(RuntimeState* state, vectorized::Blo _set_scanner_done(); return _process_status; } - if (!_blocks_queue.empty() && !done()) { + if (!_tasks_queue.empty() && !done()) { _last_fetch_time = UnixMillis(); - auto scan_task = _blocks_queue.front(); + auto scan_task = _tasks_queue.front(); DCHECK(scan_task); // The abnormal status of scanner may come from the execution of the scanner itself, @@ -335,7 +324,7 @@ Status 
ScannerContext::get_block_from_queue(RuntimeState* state, vectorized::Blo return_free_block(std::move(current_block)); } else { // This scan task do not have any cached blocks. - _blocks_queue.pop_front(); + _tasks_queue.pop_front(); // current scanner is finished, and no more data to read if (scan_task->is_eos()) { _num_finished_scanners++; @@ -374,13 +363,13 @@ Status ScannerContext::get_block_from_queue(RuntimeState* state, vectorized::Blo RETURN_IF_ERROR(_try_to_scale_up()); } - if (_num_finished_scanners == _all_scanners.size() && _blocks_queue.empty()) { + if (_num_finished_scanners == _all_scanners.size() && _tasks_queue.empty()) { _set_scanner_done(); _is_finished = true; } *eos = done(); - if (_blocks_queue.empty()) { + if (_tasks_queue.empty()) { _dependency->block(); } return Status::OK(); @@ -466,11 +455,6 @@ Status ScannerContext::validate_block_schema(Block* block) { return Status::OK(); } -void ScannerContext::set_status_on_error(const Status& status) { - std::lock_guard l(_transfer_lock); - _process_status = status; -} - void ScannerContext::stop_scanners(RuntimeState* state) { std::lock_guard l(_transfer_lock); if (_should_stop) { @@ -483,7 +467,7 @@ void ScannerContext::stop_scanners(RuntimeState* state) { sc->_scanner->try_stop(); } } - _blocks_queue.clear(); + _tasks_queue.clear(); // TODO yiguolei, call mark close to scanners if (state->enable_profile()) { std::stringstream scanner_statistics; @@ -533,11 +517,11 @@ void ScannerContext::stop_scanners(RuntimeState* state) { std::string ScannerContext::debug_string() { return fmt::format( - "id: {}, total scanners: {}, blocks in queue: {}," + "id: {}, total scanners: {}, pending tasks: {}," " _should_stop: {}, _is_finished: {}, free blocks: {}," " limit: {}, _num_running_scanners: {}, _max_thread_num: {}," " _max_bytes_in_queue: {}, query_id: {}", - ctx_id, _all_scanners.size(), _blocks_queue.size(), _should_stop, _is_finished, + ctx_id, _all_scanners.size(), _tasks_queue.size(), _should_stop, _is_finished, _free_blocks.size_approx(), limit, _num_scheduled_scanners, _max_thread_num, _max_bytes_in_queue, print_id(_query_id)); } diff --git a/be/src/vec/exec/scan/scanner_context.h b/be/src/vec/exec/scan/scanner_context.h index 85669765df89ef..c70313c98bca65 100644 --- a/be/src/vec/exec/scan/scanner_context.h +++ b/be/src/vec/exec/scan/scanner_context.h @@ -75,7 +75,6 @@ class ScanTask { public: std::weak_ptr scanner; std::list> cached_blocks; - uint64_t last_submit_time; // nanoseconds void set_status(Status _status) { if (_status.is()) { @@ -112,7 +111,7 @@ class ScannerContext : public std::enable_shared_from_this, ~ScannerContext() override { SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(_query_thread_context.query_mem_tracker); - _blocks_queue.clear(); + _tasks_queue.clear(); vectorized::BlockUPtr block; while (_free_blocks.try_dequeue(block)) { // do nothing @@ -143,37 +142,25 @@ class ScannerContext : public std::enable_shared_from_this, // set the `eos` to `ScanTask::eos` if there is no more data in current scanner Status submit_scan_task(std::shared_ptr scan_task); - // append the running scanner and its cached block to `_blocks_queue` - void append_block_to_queue(std::shared_ptr scan_task); - - void set_status_on_error(const Status& status); + // Push back a scan task. 
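Aside on this ScannerContext hunk: `_blocks_queue` becomes `_tasks_queue` and `append_block_to_queue` becomes `push_back_scan_task` because the queue now holds whole scan tasks, each carrying its own list of cached blocks, instead of loose blocks. Below is a minimal, hedged sketch of that producer/consumer shape; `Block`, `ScanTask`, and `TaskQueue` here are simplified illustrative stand-ins, not the Doris classes.

#include <deque>
#include <iostream>
#include <list>
#include <memory>
#include <mutex>

// Illustrative stand-ins only; the real ScannerContext/ScanTask carry far more state.
struct Block {
    size_t rows = 0;
};

struct ScanTask {
    std::list<std::unique_ptr<Block>> cached_blocks; // blocks produced by one scanner run
    bool eos = false;                                // true when the scanner has no more data
};

class TaskQueue {
public:
    // Producer side: a scanner run is queued as a whole task (cf. push_back_scan_task).
    void push_back_scan_task(std::shared_ptr<ScanTask> task) {
        std::lock_guard<std::mutex> l(_lock);
        _tasks_queue.push_back(std::move(task));
    }

    // Consumer side: drain blocks from the front task, retiring the task once it is
    // empty (cf. get_block_from_queue in the patch).
    std::unique_ptr<Block> get_block(bool* eos) {
        std::lock_guard<std::mutex> l(_lock);
        *eos = false;
        while (!_tasks_queue.empty()) {
            auto& task = _tasks_queue.front();
            if (!task->cached_blocks.empty()) {
                auto block = std::move(task->cached_blocks.front());
                task->cached_blocks.pop_front();
                return block;
            }
            bool task_eos = task->eos;
            _tasks_queue.pop_front(); // no cached blocks left: retire this task
            if (task_eos && _tasks_queue.empty()) {
                *eos = true;
                return nullptr;
            }
        }
        return nullptr;
    }

private:
    std::mutex _lock;
    std::deque<std::shared_ptr<ScanTask>> _tasks_queue;
};

int main() {
    TaskQueue queue;
    auto task = std::make_shared<ScanTask>();
    task->cached_blocks.push_back(std::make_unique<Block>());
    task->cached_blocks.back()->rows = 128;
    task->eos = true;
    queue.push_back_scan_task(task);

    bool eos = false;
    while (auto block = queue.get_block(&eos)) {
        std::cout << "got block with " << block->rows << " rows\n";
    }
    std::cout << "eos=" << eos << "\n";
    return 0;
}

The real code additionally tracks scheduling counters and scales scanners up and down; the sketch only shows why the container is now a queue of tasks rather than a queue of blocks.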
+ void push_back_scan_task(std::shared_ptr scan_task); // Return true if this ScannerContext need no more process bool done() const { return _is_finished || _should_stop; } - bool is_finished() { return _is_finished.load(); } - bool should_stop() { return _should_stop.load(); } std::string debug_string(); RuntimeState* state() { return _state; } - void incr_ctx_scheduling_time(int64_t num) { _scanner_ctx_sched_time->update(num); } - std::string parent_name(); - - bool empty_in_queue(int id); SimplifiedScanScheduler* get_scan_scheduler() { return _scanner_scheduler; } void stop_scanners(RuntimeState* state); - int32_t get_max_thread_num() const { return _max_thread_num; } - void set_max_thread_num(int32_t num) { _max_thread_num = num; } - int batch_size() const { return _batch_size; } // the unique id of this context std::string ctx_id; TUniqueId _query_id; - int32_t queue_idx = -1; ThreadPoolToken* thread_token = nullptr; bool _should_reset_thread_name = true; @@ -195,7 +182,7 @@ class ScannerContext : public std::enable_shared_from_this, const RowDescriptor* _output_row_descriptor = nullptr; std::mutex _transfer_lock; - std::list> _blocks_queue; + std::list> _tasks_queue; Status _process_status = Status::OK(); std::atomic_bool _should_stop = false; @@ -223,8 +210,6 @@ class ScannerContext : public std::enable_shared_from_this, // This counter refers to scan operator's local state RuntimeProfile::Counter* _scanner_memory_used_counter = nullptr; RuntimeProfile::Counter* _newly_create_free_blocks_num = nullptr; - RuntimeProfile::Counter* _scanner_wait_batch_timer = nullptr; - RuntimeProfile::Counter* _scanner_ctx_sched_time = nullptr; RuntimeProfile::Counter* _scale_up_scanners_counter = nullptr; QueryThreadContext _query_thread_context; std::shared_ptr _dependency = nullptr; diff --git a/be/src/vec/exec/scan/scanner_scheduler.cpp b/be/src/vec/exec/scan/scanner_scheduler.cpp index 3ad4e758e79980..385b581d2a5725 100644 --- a/be/src/vec/exec/scan/scanner_scheduler.cpp +++ b/be/src/vec/exec/scan/scanner_scheduler.cpp @@ -123,7 +123,6 @@ Status ScannerScheduler::init(ExecEnv* env) { Status ScannerScheduler::submit(std::shared_ptr ctx, std::shared_ptr scan_task) { - scan_task->last_submit_time = GetCurrentTimeNanos(); if (ctx->done()) { return Status::OK(); } @@ -154,7 +153,7 @@ Status ScannerScheduler::submit(std::shared_ptr ctx, if (!status.ok()) { scanner_ref->set_status(status); - ctx->append_block_to_queue(scanner_ref); + ctx->push_back_scan_task(scanner_ref); } }); if (!s.ok()) { @@ -184,7 +183,7 @@ Status ScannerScheduler::submit(std::shared_ptr ctx, if (!status.ok()) { scanner_ref->set_status(status); - ctx->append_block_to_queue(scanner_ref); + ctx->push_back_scan_task(scanner_ref); } }; SimplifiedScanTask simple_scan_task = {work_func, ctx}; @@ -212,8 +211,6 @@ std::unique_ptr ScannerScheduler::new_limited_scan_pool_token( void ScannerScheduler::_scanner_scan(std::shared_ptr ctx, std::shared_ptr scan_task) { - // record the time from scanner submission to actual execution in nanoseconds - ctx->incr_ctx_scheduling_time(GetCurrentTimeNanos() - scan_task->last_submit_time); auto task_lock = ctx->task_exec_ctx(); if (task_lock == nullptr) { return; @@ -343,7 +340,7 @@ void ScannerScheduler::_scanner_scan(std::shared_ptr ctx, scanner->mark_to_need_to_close(); } scan_task->set_eos(eos); - ctx->append_block_to_queue(scan_task); + ctx->push_back_scan_task(scan_task); } void ScannerScheduler::_register_metrics() { diff --git a/be/src/vec/exec/scan/vfile_scanner.cpp 
b/be/src/vec/exec/scan/vfile_scanner.cpp index c3f4d12f9dc12b..2ecf72687f758b 100644 --- a/be/src/vec/exec/scan/vfile_scanner.cpp +++ b/be/src/vec/exec/scan/vfile_scanner.cpp @@ -126,8 +126,6 @@ Status VFileScanner::prepare(RuntimeState* state, const VExprContextSPtrs& conju _open_reader_timer = ADD_TIMER(_local_state->scanner_profile(), "FileScannerOpenReaderTime"); _cast_to_input_block_timer = ADD_TIMER(_local_state->scanner_profile(), "FileScannerCastInputBlockTime"); - _fill_path_columns_timer = - ADD_TIMER(_local_state->scanner_profile(), "FileScannerFillPathColumnTime"); _fill_missing_columns_timer = ADD_TIMER(_local_state->scanner_profile(), "FileScannerFillMissingColumnTime"); _pre_filter_timer = ADD_TIMER(_local_state->scanner_profile(), "FileScannerPreFilterTimer"); @@ -137,8 +135,6 @@ Status VFileScanner::prepare(RuntimeState* state, const VExprContextSPtrs& conju _not_found_file_counter = ADD_COUNTER(_local_state->scanner_profile(), "NotFoundFileNum", TUnit::UNIT); _file_counter = ADD_COUNTER(_local_state->scanner_profile(), "FileNumber", TUnit::UNIT); - _has_fully_rf_file_counter = - ADD_COUNTER(_local_state->scanner_profile(), "HasFullyRfFileNumber", TUnit::UNIT); _file_cache_statistics.reset(new io::FileCacheStatistics()); _io_ctx.reset(new io::IOContext()); @@ -219,7 +215,7 @@ Status VFileScanner::_process_late_arrival_conjuncts() { _discard_conjuncts(); } if (_applied_rf_num == _total_rf_num) { - COUNTER_UPDATE(_has_fully_rf_file_counter, 1); + _local_state->scanner_profile()->add_info_string("ApplyAllRuntimeFilters", "True"); } return Status::OK(); } diff --git a/be/src/vec/exec/scan/vfile_scanner.h b/be/src/vec/exec/scan/vfile_scanner.h index 750a1371d7ec29..86171d634ac693 100644 --- a/be/src/vec/exec/scan/vfile_scanner.h +++ b/be/src/vec/exec/scan/vfile_scanner.h @@ -180,14 +180,12 @@ class VFileScanner : public VScanner { RuntimeProfile::Counter* _get_block_timer = nullptr; RuntimeProfile::Counter* _open_reader_timer = nullptr; RuntimeProfile::Counter* _cast_to_input_block_timer = nullptr; - RuntimeProfile::Counter* _fill_path_columns_timer = nullptr; RuntimeProfile::Counter* _fill_missing_columns_timer = nullptr; RuntimeProfile::Counter* _pre_filter_timer = nullptr; RuntimeProfile::Counter* _convert_to_output_block_timer = nullptr; RuntimeProfile::Counter* _empty_file_counter = nullptr; RuntimeProfile::Counter* _not_found_file_counter = nullptr; RuntimeProfile::Counter* _file_counter = nullptr; - RuntimeProfile::Counter* _has_fully_rf_file_counter = nullptr; const std::unordered_map* _col_name_to_slot_id = nullptr; // single slot filter conjuncts @@ -216,7 +214,6 @@ class VFileScanner : public VScanner { Status _truncate_char_or_varchar_columns(Block* block); void _truncate_char_or_varchar_column(Block* block, int idx, int len); Status _generate_fill_columns(); - Status _handle_dynamic_block(Block* block); Status _process_conjuncts_for_dict_filter(); Status _process_late_arrival_conjuncts(); void _get_slot_ids(VExpr* expr, std::vector* slot_ids); diff --git a/be/src/vec/exec/scan/vscanner.cpp b/be/src/vec/exec/scan/vscanner.cpp index ae255f85a7f604..97bf563db1fa58 100644 --- a/be/src/vec/exec/scan/vscanner.cpp +++ b/be/src/vec/exec/scan/vscanner.cpp @@ -113,8 +113,7 @@ Status VScanner::get_block(RuntimeState* state, Block* block, bool* eof) { // 1. 
Get input block from scanner { // get block time - auto* timer = _local_state->_scan_timer; - SCOPED_TIMER(timer); + SCOPED_TIMER(_local_state->_scan_timer); RETURN_IF_ERROR(_get_block_impl(state, block, eof)); if (*eof) { DCHECK(block->rows() == 0); @@ -128,8 +127,7 @@ Status VScanner::get_block(RuntimeState* state, Block* block, bool* eof) { // 2. Filter the output block finally. { - auto* timer = _local_state->_filter_timer; - SCOPED_TIMER(timer); + SCOPED_TIMER(_local_state->_filter_timer); RETURN_IF_ERROR(_filter_output_block(block)); } // record rows return (after filter) for _limit check diff --git a/be/src/vec/exec/vjdbc_connector.cpp b/be/src/vec/exec/vjdbc_connector.cpp index 98acb43bcd47ee..0fa33bfaad917d 100644 --- a/be/src/vec/exec/vjdbc_connector.cpp +++ b/be/src/vec/exec/vjdbc_connector.cpp @@ -95,26 +95,23 @@ Status JdbcConnector::open(RuntimeState* state, bool read) { RETURN_IF_ERROR(JniUtil::get_jni_scanner_class(env, JDBC_EXECUTOR_FACTORY_CLASS, &_executor_factory_clazz)); - _executor_factory_ctor_id = - env->GetStaticMethodID(_executor_factory_clazz, "getExecutorClass", - "(Lorg/apache/doris/thrift/TOdbcTableType;)Ljava/lang/String;"); - if (_executor_factory_ctor_id == nullptr) { - return Status::InternalError("Failed to find method ID for getExecutorClass"); - } + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _executor_factory_ctor_id, env, + GetStaticMethodID(_executor_factory_clazz, "getExecutorClass", + "(Lorg/apache/doris/thrift/TOdbcTableType;)Ljava/lang/String;")); jobject jtable_type = _get_java_table_type(env, _conn_param.table_type); - jstring executor_name = (jstring)env->CallStaticObjectMethod( - _executor_factory_clazz, _executor_factory_ctor_id, jtable_type); - if (executor_name == nullptr) { - return Status::InternalError("getExecutorClass returned null"); - } - const char* executor_name_str = env->GetStringUTFChars(executor_name, nullptr); + JNI_CALL_METHOD_CHECK_EXCEPTION_DELETE_REF( + jobject, executor_name, env, + CallStaticObjectMethod(_executor_factory_clazz, _executor_factory_ctor_id, + jtable_type)); + + const char* executor_name_str = env->GetStringUTFChars((jstring)executor_name, nullptr); RETURN_IF_ERROR(JniUtil::get_jni_scanner_class(env, executor_name_str, &_executor_clazz)); env->DeleteLocalRef(jtable_type); - env->ReleaseStringUTFChars(executor_name, executor_name_str); - env->DeleteLocalRef(executor_name); + env->ReleaseStringUTFChars((jstring)executor_name, executor_name_str); #undef GET_BASIC_JAVA_CLAZZ RETURN_IF_ERROR(_register_func_id(env)); @@ -155,7 +152,6 @@ Status JdbcConnector::open(RuntimeState* state, bool read) { } ctor_params.__set_op(read ? 
TJdbcOperation::READ : TJdbcOperation::WRITE); ctor_params.__set_table_type(_conn_param.table_type); - ctor_params.__set_enable_connection_pool(_conn_param.enable_connection_pool); ctor_params.__set_connection_pool_min_size(_conn_param.connection_pool_min_size); ctor_params.__set_connection_pool_max_size(_conn_param.connection_pool_max_size); ctor_params.__set_connection_pool_max_wait_time(_conn_param.connection_pool_max_wait_time); @@ -191,14 +187,19 @@ Status JdbcConnector::test_connection() { RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env)); env->CallNonvirtualVoidMethod(_executor_obj, _executor_clazz, _executor_test_connection_id); - return JniUtil::GetJniExceptionMsg(env); + RETURN_ERROR_IF_EXC(env); + return Status::OK(); } Status JdbcConnector::clean_datasource() { + if (!_is_open) { + return Status::OK(); + } JNIEnv* env = nullptr; RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env)); env->CallNonvirtualVoidMethod(_executor_obj, _executor_clazz, _executor_clean_datasource_id); - return JniUtil::GetJniExceptionMsg(env); + RETURN_ERROR_IF_EXC(env); + return Status::OK(); } Status JdbcConnector::query() { @@ -306,7 +307,7 @@ Status JdbcConnector::exec_stmt_write(Block* block, const VExprContextSPtrs& out env->CallNonvirtualIntMethod(_executor_obj, _executor_clazz, _executor_stmt_write_id, hashmap_object); env->DeleteLocalRef(hashmap_object); - RETURN_IF_ERROR(JniUtil::GetJniExceptionMsg(env)); + RETURN_ERROR_IF_EXC(env); *num_rows_sent = block->rows(); return Status::OK(); } @@ -316,7 +317,7 @@ Status JdbcConnector::begin_trans() { JNIEnv* env = nullptr; RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env)); env->CallNonvirtualVoidMethod(_executor_obj, _executor_clazz, _executor_begin_trans_id); - RETURN_IF_ERROR(JniUtil::GetJniExceptionMsg(env)); + RETURN_ERROR_IF_EXC(env); _is_in_transaction = true; } return Status::OK(); @@ -329,7 +330,8 @@ Status JdbcConnector::abort_trans() { JNIEnv* env = nullptr; RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env)); env->CallNonvirtualVoidMethod(_executor_obj, _executor_clazz, _executor_abort_trans_id); - return JniUtil::GetJniExceptionMsg(env); + RETURN_ERROR_IF_EXC(env); + return Status::OK(); } Status JdbcConnector::finish_trans() { @@ -337,7 +339,7 @@ Status JdbcConnector::finish_trans() { JNIEnv* env = nullptr; RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env)); env->CallNonvirtualVoidMethod(_executor_obj, _executor_clazz, _executor_finish_trans_id); - RETURN_IF_ERROR(JniUtil::GetJniExceptionMsg(env)); + RETURN_ERROR_IF_EXC(env); _is_in_transaction = false; } return Status::OK(); diff --git a/be/src/vec/exec/vjdbc_connector.h b/be/src/vec/exec/vjdbc_connector.h index 066a95de554444..954b0abfa78f0c 100644 --- a/be/src/vec/exec/vjdbc_connector.h +++ b/be/src/vec/exec/vjdbc_connector.h @@ -61,7 +61,6 @@ struct JdbcConnectorParam { int32_t connection_pool_max_wait_time = -1; int32_t connection_pool_max_life_time = -1; bool connection_pool_keep_alive = false; - bool enable_connection_pool; const TupleDescriptor* tuple_desc = nullptr; }; diff --git a/be/src/vec/exprs/table_function/udf_table_function.cpp b/be/src/vec/exprs/table_function/udf_table_function.cpp index 82e727b3f5dee9..35357f7c9357e1 100644 --- a/be/src/vec/exprs/table_function/udf_table_function.cpp +++ b/be/src/vec/exprs/table_function/udf_table_function.cpp @@ -48,9 +48,6 @@ UDFTableFunction::UDFTableFunction(const TFunction& t_fn) : TableFunction(), _t_ Status UDFTableFunction::open() { JNIEnv* env = nullptr; RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env)); - if (env == nullptr) { - return Status::InternalError("Failed to get/create 
JVM"); - } _jni_ctx = std::make_shared(); // Add a scoped cleanup jni reference object. This cleans up local refs made below. JniLocalFrame jni_frame; @@ -70,14 +67,22 @@ Status UDFTableFunction::open() { RETURN_IF_ERROR(jni_frame.push(env)); RETURN_IF_ERROR(SerializeThriftMsg(env, &ctor_params, &ctor_params_bytes)); RETURN_IF_ERROR(JniUtil::GetGlobalClassRef(env, EXECUTOR_CLASS, &_jni_ctx->executor_cl)); - _jni_ctx->executor_ctor_id = - env->GetMethodID(_jni_ctx->executor_cl, "", EXECUTOR_CTOR_SIGNATURE); - _jni_ctx->executor_evaluate_id = - env->GetMethodID(_jni_ctx->executor_cl, "evaluate", EXECUTOR_EVALUATE_SIGNATURE); - _jni_ctx->executor_close_id = - env->GetMethodID(_jni_ctx->executor_cl, "close", EXECUTOR_CLOSE_SIGNATURE); - _jni_ctx->executor = env->NewObject(_jni_ctx->executor_cl, _jni_ctx->executor_ctor_id, - ctor_params_bytes); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _jni_ctx->executor_ctor_id, env, + GetMethodID(_jni_ctx->executor_cl, "", EXECUTOR_CTOR_SIGNATURE)); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _jni_ctx->executor_evaluate_id, env, + GetMethodID(_jni_ctx->executor_cl, "evaluate", EXECUTOR_EVALUATE_SIGNATURE)); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _jni_ctx->executor_close_id, env, + GetMethodID(_jni_ctx->executor_cl, "close", EXECUTOR_CLOSE_SIGNATURE)); + + JNI_CALL_METHOD_CHECK_EXCEPTION( + , _jni_ctx->executor, env, + NewObject(_jni_ctx->executor_cl, _jni_ctx->executor_ctor_id, ctor_params_bytes)); jbyte* pBytes = env->GetByteArrayElements(ctor_params_bytes, nullptr); env->ReleaseByteArrayElements(ctor_params_bytes, pBytes, JNI_ABORT); env->DeleteLocalRef(ctor_params_bytes); @@ -123,9 +128,10 @@ Status UDFTableFunction::process_init(Block* block, RuntimeState* state) { jobject output_map = JniUtil::convert_to_java_map(env, output_params); DCHECK(_jni_ctx != nullptr); DCHECK(_jni_ctx->executor != nullptr); - long output_address = env->CallLongMethod(_jni_ctx->executor, _jni_ctx->executor_evaluate_id, - input_map, output_map); - RETURN_IF_ERROR(JniUtil::GetJniExceptionMsg(env)); + JNI_CALL_METHOD_CHECK_EXCEPTION( + long, output_address, env, + CallLongMethod(_jni_ctx->executor, _jni_ctx->executor_evaluate_id, input_map, + output_map)); env->DeleteLocalRef(input_map); env->DeleteLocalRef(output_map); RETURN_IF_ERROR(JniConnector::fill_block(block, {_result_column_idx}, output_address)); diff --git a/be/src/vec/exprs/vcast_expr.cpp b/be/src/vec/exprs/vcast_expr.cpp index 38f861add87224..0e0241c76ff04f 100644 --- a/be/src/vec/exprs/vcast_expr.cpp +++ b/be/src/vec/exprs/vcast_expr.cpp @@ -57,14 +57,11 @@ doris::Status VCastExpr::prepare(doris::RuntimeState* state, const doris::RowDes // Using typeindex to indicate the datatype, not using type name because // type name is not stable, but type index is stable and immutable _cast_param_data_type = _target_data_type; - // Has to cast to int16_t or there will be compile error because there is no - // TypeIndexField - _cast_param = _cast_param_data_type->create_column_const_with_default_value(1); ColumnsWithTypeAndName argument_template; argument_template.reserve(2); argument_template.emplace_back(nullptr, child->data_type(), child_name); - argument_template.emplace_back(_cast_param, _cast_param_data_type, _target_data_type_name); + argument_template.emplace_back(nullptr, _cast_param_data_type, _target_data_type_name); _function = SimpleFunctionFactory::instance().get_function( function_name, argument_template, _data_type, {.enable_decimal256 = state->enable_decimal256()}); @@ -133,7 +130,7 @@ const std::string& 
VCastExpr::expr_name() const { std::string VCastExpr::debug_string() const { std::stringstream out; - out << "CastExpr(CAST " << _cast_param_data_type->get_name() << " to " + out << "CastExpr(CAST " << get_child(0)->data_type()->get_name() << " to " << _target_data_type->get_name() << "){"; bool first = true; for (auto& input_expr : children()) { diff --git a/be/src/vec/exprs/vcast_expr.h b/be/src/vec/exprs/vcast_expr.h index 3c03cb42ffb02c..f553d7682a3b16 100644 --- a/be/src/vec/exprs/vcast_expr.h +++ b/be/src/vec/exprs/vcast_expr.h @@ -61,7 +61,6 @@ class VCastExpr final : public VExpr { std::string _target_data_type_name; DataTypePtr _cast_param_data_type; - ColumnPtr _cast_param; static const constexpr char* function_name = "CAST"; }; diff --git a/be/src/vec/functions/function_coalesce.cpp b/be/src/vec/functions/function_coalesce.cpp index d3450e97e98857..c461b260ab8a07 100644 --- a/be/src/vec/functions/function_coalesce.cpp +++ b/be/src/vec/functions/function_coalesce.cpp @@ -56,7 +56,6 @@ class FunctionCoalesce : public IFunction { public: static constexpr auto name = "coalesce"; - mutable DataTypePtr result_type; mutable FunctionBasePtr func_is_not_null; static FunctionPtr create() { return std::make_shared(); } @@ -70,26 +69,25 @@ class FunctionCoalesce : public IFunction { size_t get_number_of_arguments() const override { return 0; } DataTypePtr get_return_type_impl(const DataTypes& arguments) const override { + DataTypePtr res; for (const auto& arg : arguments) { if (!arg->is_nullable()) { - result_type = arg; + res = arg; break; } } - result_type = result_type ? result_type : arguments[0]; - return result_type; + res = res ? res : arguments[0]; + + const ColumnsWithTypeAndName is_not_null_col {{nullptr, make_nullable(res), ""}}; + func_is_not_null = SimpleFunctionFactory::instance().get_function( + "is_not_null_pred", is_not_null_col, std::make_shared()); + + return res; } Status execute_impl(FunctionContext* context, Block& block, const ColumnNumbers& arguments, size_t result, size_t input_rows_count) const override { - if (!func_is_not_null) [[unlikely]] { - const ColumnsWithTypeAndName is_not_null_col { - {nullptr, make_nullable(result_type), ""}}; - func_is_not_null = SimpleFunctionFactory::instance().get_function( - "is_not_null_pred", is_not_null_col, std::make_shared(), - {.enable_decimal256 = context->state()->enable_decimal256()}); - } DCHECK_GE(arguments.size(), 1); DataTypePtr result_type = block.get_by_position(result).type; ColumnNumbers filtered_args; diff --git a/be/src/vec/functions/function_collection_in.h b/be/src/vec/functions/function_collection_in.h index 33a4a2570800a9..755f2911245467 100644 --- a/be/src/vec/functions/function_collection_in.h +++ b/be/src/vec/functions/function_collection_in.h @@ -41,6 +41,7 @@ #include "vec/functions/function.h" namespace doris::vectorized { +#include "common/compile_check_begin.h" struct ColumnRowRef { ENABLE_FACTORY_CREATOR(ColumnRowRef); ColumnPtr column; @@ -128,7 +129,7 @@ class FunctionCollectionIn : public IFunction { } ColumnPtr column_ptr = std::move(args_column_ptr); // make collection ref into set - int col_size = column_ptr->size(); + auto col_size = column_ptr->size(); for (size_t i = 0; i < col_size; i++) { state->args_set.insert({column_ptr, i}); } @@ -191,3 +192,5 @@ class FunctionCollectionIn : public IFunction { }; } // namespace doris::vectorized + +#include "common/compile_check_end.h" diff --git a/be/src/vec/functions/function_conv.cpp b/be/src/vec/functions/function_conv.cpp index 
3dbfd81e8a2a53..baac2af61ed2a3 100644 --- a/be/src/vec/functions/function_conv.cpp +++ b/be/src/vec/functions/function_conv.cpp @@ -49,6 +49,7 @@ #include "vec/functions/simple_function_factory.h" namespace doris { +#include "common/compile_check_begin.h" class FunctionContext; } // namespace doris diff --git a/be/src/vec/functions/function_convert_tz.h b/be/src/vec/functions/function_convert_tz.h index d0a600a9e41a86..7765568b5f134a 100644 --- a/be/src/vec/functions/function_convert_tz.h +++ b/be/src/vec/functions/function_convert_tz.h @@ -52,6 +52,7 @@ #include "vec/functions/function.h" #include "vec/runtime/vdatetime_value.h" namespace doris::vectorized { +#include "common/compile_check_begin.h" struct ConvertTzState { bool use_state = false; @@ -215,7 +216,7 @@ class FunctionConvertTZ : public IFunction { NullMap& result_null_map, size_t input_rows_count) { cctz::time_zone& from_tz = convert_tz_state->from_tz; cctz::time_zone& to_tz = convert_tz_state->to_tz; - auto push_null = [&](int row) { + auto push_null = [&](size_t row) { result_null_map[row] = true; result_column->insert_default(); }; @@ -310,3 +311,5 @@ class FunctionConvertTZ : public IFunction { }; } // namespace doris::vectorized + +#include "common/compile_check_end.h" diff --git a/be/src/vec/functions/function_date_or_datetime_computation.h b/be/src/vec/functions/function_date_or_datetime_computation.h index ac18965749eb8e..ba75a86ef7ecc4 100644 --- a/be/src/vec/functions/function_date_or_datetime_computation.h +++ b/be/src/vec/functions/function_date_or_datetime_computation.h @@ -27,6 +27,7 @@ #include #include +#include "common/cast_set.h" #include "common/compiler_util.h" #include "common/exception.h" #include "common/logging.h" @@ -645,7 +646,7 @@ struct DateTimeAddIntervalImpl { col_to->get_data(), null_map->get_data(), delta_vec_column->get_data()); } else { - Op::constant_vector(sources_const->template get_value(), + Op::constant_vector(sources_const->template get_value(), col_to->get_data(), null_map->get_data(), *not_nullable_column_ptr_arg1); } @@ -675,7 +676,7 @@ struct DateTimeAddIntervalImpl { Op::constant_vector(sources_const->template get_value(), col_to->get_data(), delta_vec_column->get_data()); } else { - Op::constant_vector(sources_const->template get_value(), + Op::constant_vector(sources_const->template get_value(), col_to->get_data(), *block.get_by_position(arguments[1]).column); } @@ -876,7 +877,7 @@ struct CurrentDateTimeImpl { if constexpr (WithPrecision) { if (const auto* const_column = check_and_get_column( block.get_by_position(arguments[0]).column)) { - int scale = const_column->get_int(0); + int64_t scale = const_column->get_int(0); dtv.from_unixtime(context->state()->timestamp_ms() / 1000, context->state()->nano_seconds(), context->state()->timezone_obj(), scale); @@ -1002,6 +1003,7 @@ struct CurrentTimeImpl { }; struct TimeToSecImpl { + // rethink the func should return int32 using ReturnType = DataTypeInt32; static constexpr auto name = "time_to_sec"; static Status execute(FunctionContext* context, Block& block, const ColumnNumbers& arguments, @@ -1012,7 +1014,8 @@ struct TimeToSecImpl { auto& res_data = res_col->get_data(); for (int i = 0; i < input_rows_count; ++i) { - res_data[i] = static_cast(column_data.get_element(i)) / (1000 * 1000); + res_data[i] = + cast_set(static_cast(column_data.get_element(i)) / (1000 * 1000)); } block.replace_by_position(result, std::move(res_col)); diff --git a/be/src/vec/functions/function_datetime_string_to_string.h 
b/be/src/vec/functions/function_datetime_string_to_string.h index 80fe6cf1f4174b..37eaefbbee08f5 100644 --- a/be/src/vec/functions/function_datetime_string_to_string.h +++ b/be/src/vec/functions/function_datetime_string_to_string.h @@ -23,6 +23,7 @@ #include #include +#include "common/cast_set.h" #include "common/status.h" #include "vec/aggregate_functions/aggregate_function.h" #include "vec/columns/column.h" @@ -46,6 +47,7 @@ #include "vec/runtime/vdatetime_value.h" namespace doris { +#include "common/compile_check_begin.h" class FunctionContext; } // namespace doris @@ -189,7 +191,7 @@ class FunctionDateTimeStringToString : public IFunction { for (int i = 0; i < len; ++i) { null_map[i] = Transform::template execute( ts[i], format, res_data, offset, context->state()->timezone_obj()); - res_offsets[i] = offset; + res_offsets[i] = cast_set(offset); } res_data.resize(offset); }, @@ -199,3 +201,5 @@ class FunctionDateTimeStringToString : public IFunction { }; } // namespace doris::vectorized + +#include "common/compile_check_end.h" diff --git a/be/src/vec/functions/function_encryption.cpp b/be/src/vec/functions/function_encryption.cpp index 9aaefc26a652cc..9c8028b1d87cb0 100644 --- a/be/src/vec/functions/function_encryption.cpp +++ b/be/src/vec/functions/function_encryption.cpp @@ -15,32 +15,25 @@ // specific language governing permissions and limitations // under the License. -#include -#include - #include +#include +#include #include #include #include #include #include +#include "common/cast_set.h" #include "common/status.h" #include "util/encryption_util.h" -#include "util/string_util.h" -#include "vec/aggregate_functions/aggregate_function.h" #include "vec/columns/column.h" #include "vec/columns/column_nullable.h" #include "vec/columns/column_string.h" #include "vec/columns/column_vector.h" -#include "vec/columns/columns_number.h" #include "vec/common/assert_cast.h" -#include "vec/common/pod_array.h" #include "vec/common/string_ref.h" #include "vec/core/block.h" -#include "vec/core/column_numbers.h" -#include "vec/core/column_with_type_and_name.h" -#include "vec/core/types.h" #include "vec/data_types/data_type.h" #include "vec/data_types/data_type_nullable.h" #include "vec/data_types/data_type_string.h" @@ -50,6 +43,7 @@ #include "vec/utils/util.hpp" namespace doris { +#include "common/compile_check_begin.h" class FunctionContext; } // namespace doris @@ -136,9 +130,9 @@ void execute_result_vector(std::vector& offsets_li template void execute_result_const(const ColumnString::Offsets* offsets_column, const ColumnString::Chars* chars_column, StringRef key_arg, size_t i, - EncryptionMode& encryption_mode, const char* iv_raw, int iv_length, + EncryptionMode& encryption_mode, const char* iv_raw, size_t iv_length, ColumnString::Chars& result_data, ColumnString::Offsets& result_offset, - NullMap& null_map, const char* aad, int aad_length) { + NullMap& null_map, const char* aad, size_t aad_length) { int src_size = (*offsets_column)[i] - (*offsets_column)[i - 1]; const auto* src_raw = reinterpret_cast(&(*chars_column)[(*offsets_column)[i - 1]]); execute_result(src_raw, src_size, key_arg.data, key_arg.size, i, @@ -147,15 +141,15 @@ void execute_result_const(const ColumnString::Offsets* offsets_column, } template -void execute_result(const char* src_raw, int src_size, const char* key_raw, int key_size, size_t i, - EncryptionMode& encryption_mode, const char* iv_raw, int iv_length, +void execute_result(const char* src_raw, size_t src_size, const char* key_raw, size_t key_size, + size_t i, 
EncryptionMode& encryption_mode, const char* iv_raw, size_t iv_length, ColumnString::Chars& result_data, ColumnString::Offsets& result_offset, - NullMap& null_map, const char* aad, int aad_length) { + NullMap& null_map, const char* aad, size_t aad_length) { if (src_size == 0) { StringOP::push_null_string(i, result_data, result_offset, null_map); return; } - int cipher_len = src_size; + auto cipher_len = src_size; if constexpr (is_encrypt) { cipher_len += 16; // for output AEAD tag @@ -438,22 +432,25 @@ struct EncryptionAndDecryptMultiImpl { }; struct EncryptImpl { - static int execute_impl(EncryptionMode mode, const unsigned char* source, - uint32_t source_length, const unsigned char* key, uint32_t key_length, - const char* iv, int iv_length, bool padding, unsigned char* encrypt, - const unsigned char* aad, int aad_length) { - return EncryptionUtil::encrypt(mode, source, source_length, key, key_length, iv, iv_length, - true, encrypt, aad, aad_length); + static int execute_impl(EncryptionMode mode, const unsigned char* source, size_t source_length, + const unsigned char* key, size_t key_length, const char* iv, + size_t iv_length, bool padding, unsigned char* encrypt, + const unsigned char* aad, size_t aad_length) { + // now the openssl only support int, so here we need to cast size_t to uint32_t + return EncryptionUtil::encrypt(mode, source, cast_set(source_length), key, + cast_set(key_length), iv, cast_set(iv_length), + true, encrypt, aad, cast_set(aad_length)); } }; struct DecryptImpl { - static int execute_impl(EncryptionMode mode, const unsigned char* source, - uint32_t source_length, const unsigned char* key, uint32_t key_length, - const char* iv, int iv_length, bool padding, unsigned char* encrypt, - const unsigned char* aad, int aad_length) { - return EncryptionUtil::decrypt(mode, source, source_length, key, key_length, iv, iv_length, - true, encrypt, aad, aad_length); + static int execute_impl(EncryptionMode mode, const unsigned char* source, size_t source_length, + const unsigned char* key, size_t key_length, const char* iv, + size_t iv_length, bool padding, unsigned char* encrypt, + const unsigned char* aad, size_t aad_length) { + return EncryptionUtil::decrypt(mode, source, cast_set(source_length), key, + cast_set(key_length), iv, cast_set(iv_length), + true, encrypt, aad, cast_set(aad_length)); } }; diff --git a/be/src/vec/functions/function_hash.cpp b/be/src/vec/functions/function_hash.cpp index 972d2eb0b9d8a1..a4648e54dfd512 100644 --- a/be/src/vec/functions/function_hash.cpp +++ b/be/src/vec/functions/function_hash.cpp @@ -38,6 +38,7 @@ #include "vec/utils/template_helpers.hpp" namespace doris::vectorized { +#include "common/compile_check_begin.h" constexpr uint64_t emtpy_value = 0xe28dbde7fe22e41c; template diff --git a/be/src/vec/functions/function_hex.cpp b/be/src/vec/functions/function_hex.cpp index f66849b9336335..5122fcb7ba1d64 100644 --- a/be/src/vec/functions/function_hex.cpp +++ b/be/src/vec/functions/function_hex.cpp @@ -25,6 +25,7 @@ #include #include +#include "common/cast_set.h" #include "common/status.h" #include "olap/hll.h" #include "util/simd/vstring_function.h" //place this header file at last to compile @@ -46,6 +47,7 @@ #include "vec/functions/simple_function_factory.h" namespace doris { +#include "common/compile_check_begin.h" class FunctionContext; } // namespace doris @@ -111,7 +113,7 @@ struct HexStringImpl { auto source = reinterpret_cast(&data[offsets[i - 1]]); size_t srclen = offsets[i] - offsets[i - 1]; hex_encode(source, srclen, dst_data_ptr, 
offset); - dst_offsets[i] = offset; + dst_offsets[i] = cast_set(offset); } return Status::OK(); } @@ -184,7 +186,7 @@ struct HexHLLImpl { dst_data_ptr = res_data.data() + offset; hex_encode(reinterpret_cast(hll_str.data()), hll_str.length(), dst_data_ptr, offset); - res_offsets[i] = offset; + res_offsets[i] = cast_set(offset); hll_str.clear(); } return Status::OK(); diff --git a/be/src/vec/functions/function_hll.cpp b/be/src/vec/functions/function_hll.cpp index a6b91e27c2dd1f..f47c1a8af45fb0 100644 --- a/be/src/vec/functions/function_hll.cpp +++ b/be/src/vec/functions/function_hll.cpp @@ -22,6 +22,7 @@ #include #include +#include "common/cast_set.h" #include "common/status.h" #include "olap/hll.h" #include "util/hash_util.hpp" @@ -47,6 +48,7 @@ #include "vec/functions/simple_function_factory.h" namespace doris::vectorized { +#include "common/compile_check_begin.h" struct HLLCardinality { static constexpr auto name = "hll_cardinality"; @@ -167,8 +169,8 @@ class FunctionHllFromBase64 : public IFunction { res.reserve(input_rows_count); std::string decode_buff; - int last_decode_buff_len = 0; - int curr_decode_buff_len = 0; + int64_t last_decode_buff_len = 0; + int64_t curr_decode_buff_len = 0; for (size_t i = 0; i < input_rows_count; ++i) { const char* src_str = reinterpret_cast(&data[offsets[i - 1]]); int64_t src_size = offsets[i] - offsets[i - 1]; @@ -302,7 +304,7 @@ struct HllToBase64 { DCHECK(outlen > 0); encoded_offset += outlen; - offsets[i] = encoded_offset; + offsets[i] = cast_set(encoded_offset); } return Status::OK(); } diff --git a/be/src/vec/functions/function_ip.h b/be/src/vec/functions/function_ip.h index ddb99d80a1b10b..51675ae7c7d893 100644 --- a/be/src/vec/functions/function_ip.h +++ b/be/src/vec/functions/function_ip.h @@ -24,6 +24,7 @@ #include #include +#include "common/cast_set.h" #include "vec/columns/column.h" #include "vec/columns/column_const.h" #include "vec/columns/column_nullable.h" @@ -49,6 +50,7 @@ #include "vec/runtime/ip_address_cidr.h" namespace doris::vectorized { +#include "common/compile_check_begin.h" class FunctionIPv4NumToString : public IFunction { private: @@ -75,12 +77,11 @@ class FunctionIPv4NumToString : public IFunction { for (size_t i = 0; i < vec_in.size(); ++i) { auto value = vec_in[i]; if (value < IPV4_MIN_NUM_VALUE || value > IPV4_MAX_NUM_VALUE) { - offsets_res[i] = pos - begin; null_map->get_data()[i] = 1; } else { format_ipv4(reinterpret_cast(&vec_in[i]), src_size, pos); - offsets_res[i] = pos - begin; } + offsets_res[i] = cast_set(pos - begin); } vec_res.resize(pos - begin); @@ -283,7 +284,6 @@ void process_ipv6_column(const ColumnPtr& column, size_t input_rows_count, } if (is_empty) { - offsets_res[i] = pos - begin; null_map->get_data()[i] = 1; } else { if constexpr (std::is_same_v) { @@ -296,8 +296,8 @@ void process_ipv6_column(const ColumnPtr& column, size_t input_rows_count, std::reverse(ipv6_address_data, ipv6_address_data + IPV6_BINARY_LENGTH); format_ipv6(ipv6_address_data, pos); } - offsets_res[i] = pos - begin; } + offsets_res[i] = cast_set(pos - begin); } } @@ -829,7 +829,7 @@ class FunctionIPv4CIDRToRange : public IFunction { throw Exception(ErrorCode::INVALID_ARGUMENT, "Illegal cidr value '{}'", std::to_string(cidr)); } - auto range = apply_cidr_mask(ip, cidr); + auto range = apply_cidr_mask(ip, cast_set(cidr)); vec_lower_range_output[i] = range.first; vec_upper_range_output[i] = range.second; } @@ -841,7 +841,7 @@ class FunctionIPv4CIDRToRange : public IFunction { } for (size_t i = 0; i < input_rows_count; ++i) { auto ip = 
vec_ip_input[i]; - auto range = apply_cidr_mask(ip, cidr); + auto range = apply_cidr_mask(ip, cast_set(cidr)); vec_lower_range_output[i] = range.first; vec_upper_range_output[i] = range.second; } @@ -853,7 +853,7 @@ class FunctionIPv4CIDRToRange : public IFunction { throw Exception(ErrorCode::INVALID_ARGUMENT, "Illegal cidr value '{}'", std::to_string(cidr)); } - auto range = apply_cidr_mask(ip, cidr); + auto range = apply_cidr_mask(ip, cast_set(cidr)); vec_lower_range_output[i] = range.first; vec_upper_range_output[i] = range.second; } @@ -937,11 +937,13 @@ class FunctionIPv6CIDRToRange : public IFunction { auto* src_data = const_cast(from_column.get_data_at(0).data); std::reverse(src_data, src_data + IPV6_BINARY_LENGTH); apply_cidr_mask(src_data, reinterpret_cast(&vec_res_lower_range[i]), - reinterpret_cast(&vec_res_upper_range[i]), cidr); + reinterpret_cast(&vec_res_upper_range[i]), + cast_set(cidr)); } else { apply_cidr_mask(from_column.get_data_at(0).data, reinterpret_cast(&vec_res_lower_range[i]), - reinterpret_cast(&vec_res_upper_range[i]), cidr); + reinterpret_cast(&vec_res_upper_range[i]), + cast_set(cidr)); } } } else if (is_cidr_const) { @@ -957,11 +959,13 @@ class FunctionIPv6CIDRToRange : public IFunction { auto* src_data = const_cast(from_column.get_data_at(i).data); std::reverse(src_data, src_data + IPV6_BINARY_LENGTH); apply_cidr_mask(src_data, reinterpret_cast(&vec_res_lower_range[i]), - reinterpret_cast(&vec_res_upper_range[i]), cidr); + reinterpret_cast(&vec_res_upper_range[i]), + cast_set(cidr)); } else { apply_cidr_mask(from_column.get_data_at(i).data, reinterpret_cast(&vec_res_lower_range[i]), - reinterpret_cast(&vec_res_upper_range[i]), cidr); + reinterpret_cast(&vec_res_upper_range[i]), + cast_set(cidr)); } } } else { @@ -977,11 +981,13 @@ class FunctionIPv6CIDRToRange : public IFunction { auto* src_data = const_cast(from_column.get_data_at(i).data); std::reverse(src_data, src_data + IPV6_BINARY_LENGTH); apply_cidr_mask(src_data, reinterpret_cast(&vec_res_lower_range[i]), - reinterpret_cast(&vec_res_upper_range[i]), cidr); + reinterpret_cast(&vec_res_upper_range[i]), + cast_set(cidr)); } else { apply_cidr_mask(from_column.get_data_at(i).data, reinterpret_cast(&vec_res_lower_range[i]), - reinterpret_cast(&vec_res_upper_range[i]), cidr); + reinterpret_cast(&vec_res_upper_range[i]), + cast_set(cidr)); } } } @@ -1315,7 +1321,7 @@ class FunctionCutIPv6 : public IFunction { UInt8 bytes_to_cut_count = is_ipv4_mapped(address) ? 
bytes_to_cut_for_ipv4_count : bytes_to_cut_for_ipv6_count; cut_address(address, pos, bytes_to_cut_count); - offsets_res[i] = pos - begin; + offsets_res[i] = cast_set(pos - begin); } block.replace_by_position(result, std::move(col_res)); @@ -1335,3 +1341,5 @@ class FunctionCutIPv6 : public IFunction { }; } // namespace doris::vectorized + +#include "common/compile_check_end.h" diff --git a/be/src/vec/functions/function_java_udf.cpp b/be/src/vec/functions/function_java_udf.cpp index 86daf5ebf3bb0a..e2c441b660201d 100644 --- a/be/src/vec/functions/function_java_udf.cpp +++ b/be/src/vec/functions/function_java_udf.cpp @@ -42,9 +42,6 @@ JavaFunctionCall::JavaFunctionCall(const TFunction& fn, const DataTypes& argumen Status JavaFunctionCall::open(FunctionContext* context, FunctionContext::FunctionStateScope scope) { JNIEnv* env = nullptr; RETURN_IF_ERROR(JniUtil::GetJNIEnv(&env)); - if (env == nullptr) { - return Status::InternalError("Failed to get/create JVM"); - } if (scope == FunctionContext::FunctionStateScope::THREAD_LOCAL) { SCOPED_TIMER(context->get_udf_execute_timer()); diff --git a/be/src/vec/functions/url/domain.h b/be/src/vec/functions/url/domain.h index 54361134eff706..b2ec5e0c9d9a4b 100644 --- a/be/src/vec/functions/url/domain.h +++ b/be/src/vec/functions/url/domain.h @@ -20,11 +20,12 @@ #pragma once -// #include #include #include "vec/common/string_utils/string_utils.h" +#include "vec/functions/url/find_symbols.h" #include "vec/functions/url/protocol.h" +#include "vec/functions/url/tldLookup.h" namespace doris::vectorized { @@ -144,4 +145,128 @@ struct ExtractDomain { } }; +struct ExtractTopLevelDomain { + static size_t get_reserve_length_for_element() { return 5; } + + static void execute(const char* data, size_t size, const char*& res_data, size_t& res_size) { + res_data = data; + res_size = 0; + StringRef host = get_url_host(data, size); + + if (host.size == 0) { + return; + } else { + auto host_view = host.to_string_view(); + if (host_view[host_view.size() - 1] == '.') { + host_view.remove_suffix(1); + } + + const auto* host_end = host_view.data() + host_view.size(); + const char* last_dot = find_last_symbols_or_null<'.'>(host_view.data(), host_end); + if (!last_dot) { + return; + } + + /// For IPv4 addresses select nothing. + /// + /// NOTE: it is safe to access last_dot[1] + /// since getURLHost() will not return a host if there is symbol after dot. 
+ if (is_numeric_ascii(last_dot[1])) { + return; + } + + res_data = last_dot + 1; + res_size = host_end - res_data; + } + } +}; + +struct ExtractFirstSignificantSubdomain { + static size_t get_reserve_length_for_element() { return 10; } + + static void execute(const Pos data, const size_t size, Pos& res_data, size_t& res_size, + Pos* out_domain_end = nullptr) { + res_data = data; + res_size = 0; + + Pos tmp; + size_t domain_length = 0; + ExtractDomain::execute(data, size, tmp, domain_length); + + if (domain_length == 0) { + return; + } + if (out_domain_end) { + *out_domain_end = tmp + domain_length; + } + + /// cut useless dot + if (tmp[domain_length - 1] == '.') { + --domain_length; + } + + res_data = tmp; + res_size = domain_length; + + const auto* begin = tmp; + const auto* end = begin + domain_length; + std::array last_periods {}; + + const auto* pos = find_first_symbols<'.'>(begin, end); + while (pos < end) { + last_periods[2] = last_periods[1]; + last_periods[1] = last_periods[0]; + last_periods[0] = pos; + pos = find_first_symbols<'.'>(pos + 1, end); + } + + if (!last_periods[0]) { + return; + } + + if (!last_periods[1]) { + res_size = last_periods[0] - begin; + return; + } + + if (!last_periods[2]) { + last_periods[2] = begin - 1; + } + + const auto* end_of_level_domain = find_first_symbols<'/'>(last_periods[0], end); + if (!end_of_level_domain) { + end_of_level_domain = end; + } + + auto host_len = static_cast(end_of_level_domain - last_periods[1] - 1); + StringRef host {last_periods[1] + 1, host_len}; + if (tldLookup::is_valid(host.data, host.size)) { + res_data += last_periods[2] + 1 - begin; + res_size = last_periods[1] - last_periods[2] - 1; + } else { + res_data += last_periods[1] + 1 - begin; + res_size = last_periods[0] - last_periods[1] - 1; + } + } +}; + +struct CutToFirstSignificantSubdomain { + static size_t get_reserve_length_for_element() { return 15; } + + static void execute(const Pos data, const size_t size, Pos& res_data, size_t& res_size) { + res_data = data; + res_size = 0; + + Pos tmp_data = data; + size_t tmp_length; + Pos domain_end = data; + ExtractFirstSignificantSubdomain::execute(data, size, tmp_data, tmp_length, &domain_end); + + if (tmp_length == 0) { + return; + } + res_data = tmp_data; + res_size = domain_end - tmp_data; + } +}; } // namespace doris::vectorized diff --git a/be/src/vec/functions/url/find_symbols.h b/be/src/vec/functions/url/find_symbols.h new file mode 100644 index 00000000000000..4eafea893f878f --- /dev/null +++ b/be/src/vec/functions/url/find_symbols.h @@ -0,0 +1,484 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// This file is copied from +// https://github.com/ClickHouse/ClickHouse/blob/master/base/base/find_symbols.h +// and modified by Doris + +#pragma once + +#include +#include +#include + +#if defined(__SSE4_2__) +#include +#endif + +/** find_first_symbols(begin, end): + * + * Allow to search for next character from the set of 'symbols...' in a string. + * It is similar to 'strpbrk', 'strcspn' (and 'strchr', 'memchr' in the case of one symbol and '\0'), + * but with the following differences: + * - works with any memory ranges, including containing zero bytes; + * - doesn't require terminating zero byte: end of memory range is passed explicitly; + * - if not found, returns pointer to end instead of nullptr; + * - maximum number of symbols to search is 16. + * + * Uses SSE 2 in case of small number of symbols for search and SSE 4.2 in the case of large number of symbols, + * that have more than 2x performance advantage over trivial loop + * in the case of parsing tab-separated dump with (probably escaped) string fields. + * In the case of parsing tab separated dump with short strings, there is no performance degradation over trivial loop. + * + * Note: the optimal threshold to choose between SSE 2 and SSE 4.2 may depend on CPU model. + * + * find_last_symbols_or_null(begin, end): + * + * Allow to search for the last matching character in a string. + * If no such characters, returns nullptr. + */ + +struct SearchSymbols { + static constexpr auto BUFFER_SIZE = 16; + + SearchSymbols() = default; + + explicit SearchSymbols(std::string in) : str(std::move(in)) { +#if defined(__SSE4_2__) + if (str.size() > BUFFER_SIZE) { + throw std::runtime_error("SearchSymbols can contain at most " + + std::to_string(BUFFER_SIZE) + " symbols and " + + std::to_string(str.size()) + " was provided\n"); + } + + char tmp_safety_buffer[BUFFER_SIZE] = {0}; + + memcpy(tmp_safety_buffer, str.data(), str.size()); + + simd_vector = _mm_loadu_si128(reinterpret_cast(tmp_safety_buffer)); +#endif + } + +#if defined(__SSE4_2__) + __m128i simd_vector; +#endif + std::string str; +}; + +namespace detail { +template +constexpr bool is_in(char x) { + return ((x == chars) || ...); +} // NOLINT(misc-redundant-expression) + +static bool is_in(char c, const char* symbols, size_t num_chars) { + for (size_t i = 0U; i < num_chars; ++i) { + if (c == symbols[i]) { + return true; + } + } + + return false; +} + +#if defined(__SSE2__) +template +inline __m128i mm_is_in(__m128i bytes) { + __m128i eq0 = _mm_cmpeq_epi8(bytes, _mm_set1_epi8(s0)); + return eq0; +} + +template +inline __m128i mm_is_in(__m128i bytes) { + __m128i eq0 = _mm_cmpeq_epi8(bytes, _mm_set1_epi8(s0)); + __m128i eq = mm_is_in(bytes); + return _mm_or_si128(eq0, eq); +} + +inline __m128i mm_is_in(__m128i bytes, const char* symbols, size_t num_chars) { + __m128i accumulator = _mm_setzero_si128(); + for (size_t i = 0; i < num_chars; ++i) { + __m128i eq = _mm_cmpeq_epi8(bytes, _mm_set1_epi8(symbols[i])); + accumulator = _mm_or_si128(accumulator, eq); + } + + return accumulator; +} + +using AlignedArray = std::array, 16>; +inline AlignedArray mm_is_in_prepare(const char* symbols, size_t num_chars) { + AlignedArray result {}; + + for (size_t i = 0; i < num_chars; ++i) { + reinterpret_cast<__m128i&>(result[i]) = _mm_set1_epi8(symbols[i]); + } + + return result; +} + +inline __m128i mm_is_in_execute(__m128i bytes, const AlignedArray& needles) { + __m128i accumulator = _mm_setzero_si128(); + + for (const auto& needle : needles) { + __m128i eq = _mm_cmpeq_epi8(bytes, 
reinterpret_cast(needle)); + accumulator = _mm_or_si128(accumulator, eq); + } + + return accumulator; +} +#endif + +template +constexpr bool maybe_negate(bool x) { + return x == positive; +} + +template +constexpr uint16_t maybe_negate(uint16_t x) { + if constexpr (positive) + return x; + else + return ~x; +} + +enum class ReturnMode : uint8_t { + End, + Nullptr, +}; + +template +inline const char* find_first_symbols_sse2(const char* const begin, const char* const end) { + const char* pos = begin; + +#if defined(__SSE2__) + for (; pos + 15 < end; pos += 16) { + __m128i bytes = _mm_loadu_si128(reinterpret_cast(pos)); + + __m128i eq = mm_is_in(bytes); + + uint16_t bit_mask = maybe_negate(uint16_t(_mm_movemask_epi8(eq))); + if (bit_mask) return pos + __builtin_ctz(bit_mask); + } +#endif + + for (; pos < end; ++pos) + if (maybe_negate(is_in(*pos))) return pos; + + return return_mode == ReturnMode::End ? end : nullptr; +} + +template +inline const char* find_first_symbols_sse2(const char* const begin, const char* const end, + const char* symbols, size_t num_chars) { + const char* pos = begin; + +#if defined(__SSE2__) + const auto needles = mm_is_in_prepare(symbols, num_chars); + for (; pos + 15 < end; pos += 16) { + __m128i bytes = _mm_loadu_si128(reinterpret_cast(pos)); + + __m128i eq = mm_is_in_execute(bytes, needles); + + uint16_t bit_mask = maybe_negate(uint16_t(_mm_movemask_epi8(eq))); + if (bit_mask) return pos + __builtin_ctz(bit_mask); + } +#endif + + for (; pos < end; ++pos) + if (maybe_negate(is_in(*pos, symbols, num_chars))) return pos; + + return return_mode == ReturnMode::End ? end : nullptr; +} + +template +inline const char* find_last_symbols_sse2(const char* const begin, const char* const end) { + const char* pos = end; + +#if defined(__SSE2__) + for (; pos - 16 >= begin; + pos -= + 16) /// Assuming the pointer cannot overflow. Assuming we can compare these pointers. + { + __m128i bytes = _mm_loadu_si128(reinterpret_cast(pos - 16)); + + __m128i eq = mm_is_in(bytes); + + uint16_t bit_mask = maybe_negate(uint16_t(_mm_movemask_epi8(eq))); + if (bit_mask) + return pos - 1 - + (__builtin_clz(bit_mask) - + 16); /// because __builtin_clz works with mask as uint32. + } +#endif + + --pos; + for (; pos >= begin; --pos) + if (maybe_negate(is_in(*pos))) return pos; + + return return_mode == ReturnMode::End ? 
end : nullptr; +} + +template +inline const char* find_first_symbols_sse42(const char* const begin, const char* const end) { + const char* pos = begin; + +#if defined(__SSE4_2__) + constexpr int mode = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT; + + __m128i set = _mm_setr_epi8(c01, c02, c03, c04, c05, c06, c07, c08, c09, c10, c11, c12, c13, + c14, c15, c16); + + for (; pos + 15 < end; pos += 16) { + __m128i bytes = _mm_loadu_si128(reinterpret_cast(pos)); + + if constexpr (positive) { + if (_mm_cmpestrc(set, num_chars, bytes, 16, mode)) + return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode); + } else { + if (_mm_cmpestrc(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY)) + return pos + + _mm_cmpestri(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY); + } + } +#endif + + for (; pos < end; ++pos) + if ((num_chars == 1 && maybe_negate(is_in(*pos))) || + (num_chars == 2 && maybe_negate(is_in(*pos))) || + (num_chars == 3 && maybe_negate(is_in(*pos))) || + (num_chars == 4 && maybe_negate(is_in(*pos))) || + (num_chars == 5 && maybe_negate(is_in(*pos))) || + (num_chars == 6 && maybe_negate(is_in(*pos))) || + (num_chars == 7 && + maybe_negate(is_in(*pos))) || + (num_chars == 8 && + maybe_negate(is_in(*pos))) || + (num_chars == 9 && + maybe_negate(is_in(*pos))) || + (num_chars == 10 && + maybe_negate( + is_in(*pos))) || + (num_chars == 11 && + maybe_negate( + is_in(*pos))) || + (num_chars == 12 && + maybe_negate( + is_in(*pos))) || + (num_chars == 13 && + maybe_negate( + is_in( + *pos))) || + (num_chars == 14 && + maybe_negate( + is_in( + *pos))) || + (num_chars == 15 && + maybe_negate(is_in(*pos))) || + (num_chars == 16 && + maybe_negate(is_in(*pos)))) + return pos; + return return_mode == ReturnMode::End ? end : nullptr; +} + +template +inline const char* find_first_symbols_sse42(const char* const begin, const char* const end, + const SearchSymbols& symbols) { + const char* pos = begin; + + const auto num_chars = symbols.str.size(); + +#if defined(__SSE4_2__) + constexpr int mode = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT; + + const __m128i set = symbols.simd_vector; + + for (; pos + 15 < end; pos += 16) { + __m128i bytes = _mm_loadu_si128(reinterpret_cast(pos)); + + if constexpr (positive) { + if (_mm_cmpestrc(set, num_chars, bytes, 16, mode)) + return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode); + } else { + if (_mm_cmpestrc(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY)) + return pos + + _mm_cmpestri(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY); + } + } +#endif + + for (; pos < end; ++pos) + if (maybe_negate(is_in(*pos, symbols.str.data(), num_chars))) return pos; + + return return_mode == ReturnMode::End ? end : nullptr; +} + +/// NOTE No SSE 4.2 implementation for find_last_symbols_or_null. Not worth to do. 
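The doc comment near the top of this new header spells out the contract of these helpers: at most 16 symbols can be searched, the find_first_symbols* family returns `end` when nothing matches, and the *_or_null variants return nullptr instead. A minimal usage sketch under those assumptions (illustrative only, not part of this patch; it assumes the header is included by its be/src-relative path, as domain.h does above):

#include <cassert>
#include <string_view>

#include "vec/functions/url/find_symbols.h"

void find_symbols_example() {
    std::string_view url = "www.example.com/path";
    const char* begin = url.data();
    const char* end = begin + url.size();

    // First occurrence of '.' or '/': a pointer into [begin, end), or `end` if absent.
    const char* first = find_first_symbols<'.', '/'>(begin, end);
    assert(first != end && *first == '.' && first - begin == 3);

    // Last '.' in the range: nullptr when the character does not occur at all.
    const char* last_dot = find_last_symbols_or_null<'.'>(begin, end);
    assert(last_dot != nullptr && last_dot - begin == 11);

    // No '?' anywhere, so the "or_null" first-match variant reports that explicitly.
    assert(find_first_symbols_or_null<'?'>(begin, end) == nullptr);
}

This mirrors how ExtractTopLevelDomain above locates the final label of a host with find_last_symbols_or_null<'.'> and treats a null result as "no TLD".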
+ +template +inline const char* find_first_symbols_dispatch(const char* begin, const char* end) + requires(0 <= sizeof...(symbols) && sizeof...(symbols) <= 16) +{ +#if defined(__SSE4_2__) + if (sizeof...(symbols) >= 5) + return find_first_symbols_sse42( + begin, end); + else +#endif + return find_first_symbols_sse2(begin, end); +} + +template +inline const char* find_first_symbols_dispatch(const std::string_view haystack, + const SearchSymbols& symbols) { +#if defined(__SSE4_2__) + if (symbols.str.size() >= 5) + return find_first_symbols_sse42(haystack.begin(), haystack.end(), + symbols); + else +#endif + return find_first_symbols_sse2( + haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size()); +} + +} // namespace detail + +template +inline const char* find_first_symbols(const char* begin, const char* end) { + return ::detail::find_first_symbols_dispatch(begin, + end); +} + +/// Returning non const result for non const arguments. +/// It is convenient when you are using this function to iterate through non-const buffer. +template +inline char* find_first_symbols(char* begin, char* end) { + return const_cast( + ::detail::find_first_symbols_dispatch( + begin, end)); +} + +inline const char* find_first_symbols(std::string_view haystack, const SearchSymbols& symbols) { + return ::detail::find_first_symbols_dispatch(haystack, + symbols); +} + +template +inline const char* find_first_not_symbols(const char* begin, const char* end) { + return ::detail::find_first_symbols_dispatch( + begin, end); +} + +template +inline char* find_first_not_symbols(char* begin, char* end) { + return const_cast( + ::detail::find_first_symbols_dispatch( + begin, end)); +} + +inline const char* find_first_not_symbols(std::string_view haystack, const SearchSymbols& symbols) { + return ::detail::find_first_symbols_dispatch(haystack, + symbols); +} + +template +inline const char* find_first_symbols_or_null(const char* begin, const char* end) { + return ::detail::find_first_symbols_dispatch( + begin, end); +} + +template +inline char* find_first_symbols_or_null(char* begin, char* end) { + return const_cast( + ::detail::find_first_symbols_dispatch( + begin, end)); +} + +inline const char* find_first_symbols_or_null(std::string_view haystack, + const SearchSymbols& symbols) { + return ::detail::find_first_symbols_dispatch(haystack, + symbols); +} + +template +inline const char* find_first_not_symbols_or_null(const char* begin, const char* end) { + return ::detail::find_first_symbols_dispatch( + begin, end); +} + +template +inline char* find_first_not_symbols_or_null(char* begin, char* end) { + return const_cast( + ::detail::find_first_symbols_dispatch( + begin, end)); +} + +inline const char* find_first_not_symbols_or_null(std::string_view haystack, + const SearchSymbols& symbols) { + return ::detail::find_first_symbols_dispatch(haystack, + symbols); +} + +template +inline const char* find_last_symbols_or_null(const char* begin, const char* end) { + return ::detail::find_last_symbols_sse2(begin, + end); +} + +template +inline char* find_last_symbols_or_null(char* begin, char* end) { + return const_cast( + ::detail::find_last_symbols_sse2(begin, + end)); +} + +template +inline const char* find_last_not_symbols_or_null(const char* begin, const char* end) { + return ::detail::find_last_symbols_sse2(begin, + end); +} + +template +inline char* find_last_not_symbols_or_null(char* begin, char* end) { + return const_cast( + ::detail::find_last_symbols_sse2( + begin, end)); +} + +/// Slightly resembles 
boost::split. The drawback of boost::split is that it fires a false positive in clang static analyzer. +/// See https://github.com/boostorg/algorithm/issues/63 +/// And https://bugs.llvm.org/show_bug.cgi?id=41141 +template +inline To& splitInto(To& to, std::string_view what, bool token_compress = false) { + const char* pos = what.data(); + const char* end = pos + what.size(); + while (pos < end) { + const char* delimiter_or_end = find_first_symbols(pos, end); + + if (!token_compress || pos < delimiter_or_end) to.emplace_back(pos, delimiter_or_end - pos); + + if (delimiter_or_end < end) + pos = delimiter_or_end + 1; + else + pos = delimiter_or_end; + } + + return to; +} diff --git a/be/src/vec/functions/url/function_url.cpp b/be/src/vec/functions/url/function_url.cpp index e25af6f7f27f6a..47afe076b74611 100644 --- a/be/src/vec/functions/url/function_url.cpp +++ b/be/src/vec/functions/url/function_url.cpp @@ -46,10 +46,33 @@ struct NameProtocol { using FunctionProtocol = FunctionStringToString, NameProtocol>; +struct NameTopLevelDomain { + static constexpr auto name = "top_level_domain"; +}; +using FunctionTopLevelDomain = + FunctionStringToString, NameTopLevelDomain>; + +struct NameFirstSignificantSubdomain { + static constexpr auto name = "first_significant_subdomain"; +}; +using FunctionFirstSignificantSubdomain = + FunctionStringToString, + NameFirstSignificantSubdomain>; + +struct NameCutToFirstSignificantSubdomain { + static constexpr auto name = "cut_to_first_significant_subdomain"; +}; +using FunctionCutToFirstSignificantSubdomain = + FunctionStringToString, + NameCutToFirstSignificantSubdomain>; + void register_function_url(SimpleFunctionFactory& factory) { factory.register_function(); factory.register_function(); factory.register_function(); + factory.register_function(); + factory.register_function(); + factory.register_function(); } } // namespace doris::vectorized diff --git a/be/src/vec/functions/url/functions_url.h b/be/src/vec/functions/url/functions_url.h index f9f02a17a668c2..b6736496d24345 100644 --- a/be/src/vec/functions/url/functions_url.h +++ b/be/src/vec/functions/url/functions_url.h @@ -89,7 +89,6 @@ struct ExtractSubstringImpl { for (size_t i = 0; i < size; ++i) { Extractor::execute(reinterpret_cast(&data[prev_offset]), offsets[i] - prev_offset, start, length); - res_data.resize(res_data.size() + length); memcpy_small_allow_read_write_overflow15(&res_data[res_offset], start, length); res_offset += length; @@ -105,11 +104,6 @@ struct ExtractSubstringImpl { Extractor::execute(data.data(), data.size(), start, length); res_data.assign(start, length); } - - // static void vector_fixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) - // { - // throw Exception("Column of type FixedString is not supported by URL functions", ErrorCodes::ILLEGAL_COLUMN); - // } }; /** Delete part of string using the Extractor. 
@@ -155,11 +149,6 @@ struct CutSubstringImpl { res_data.append(data.data(), start); res_data.append(start + length, data.data() + data.size()); } - - // static void vector_fixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) - // { - // throw Exception("Column of type FixedString is not supported by URL functions", ErrorCodes::ILLEGAL_COLUMN); - // } }; } // namespace doris::vectorized diff --git a/be/src/vec/functions/url/tldLookup.generated.cpp b/be/src/vec/functions/url/tldLookup.generated.cpp new file mode 100644 index 00000000000000..9b9471c094dc59 --- /dev/null +++ b/be/src/vec/functions/url/tldLookup.generated.cpp @@ -0,0 +1,140 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// This file is copied from +// https://github.com/ClickHouse/ClickHouse/blob/master/src/Functions/URL/tldLookup.generated.cpp +// and modified by Doris + +// clang-format off +/* C++ code produced by gperf version 3.1 */ +/* Command-line: /usr/bin/gperf --output-file=tldLookup.generated.cpp tldLookup.gperf */ +/* Computed positions: -k'1-11,13-14,17,$' */ + +#if !( \ + (' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) && ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126)) +/* The character set is not based on ISO-646. */ +#error "gperf generated tables don't work with this execution character set. Please report a bug to ." 
+#endif + +#line 7 "tldLookup.gperf" + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wimplicit-fallthrough" +#pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant" +#pragma GCC diagnostic ignored "-Wunused-macros" +#include + +#define TOTAL_KEYWORDS 5045 +#define MIN_WORD_LENGTH 4 +#define MAX_WORD_LENGTH 34 +#define MIN_HASH_VALUE 75 +#define MAX_HASH_VALUE 110600 +/* maximum key range = 110526, duplicates = 0 */ + +class TopLevelDomainLookupHash { +private: + static inline unsigned int hash(const char* str, size_t len); + +public: + static const char* is_valid(const char* str, size_t len); +}; + +inline unsigned int TopLevelDomainLookupHash::hash(const char* str, size_t len) { + static const unsigned int asso_values[] = {110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, + 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, + 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 3905, 0, 5, + 11617, 15312, 10, 5, 25, 0, 25, 0, 5, 0, 0, 110601, 110601, 110601, 5, 110601, + 110601, 110601, 110601, 110601, 30, 20, 5, 15, 10, 65, 45, 80, 70, 55, 110601, 110601, + 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, + 110601, 2570, 9477, 1350, 15, 130, 5915, 1830, 4360, 2210, 5405, 63, 3190, 20, 1165, 5, + 6120, 5863, 470, 2315, 175, 0, 815, 40, 13577, 115, 5680, 1030, 11798, 23179, 345, 1097, + 28079, 13839, 245, 25674, 31874, 75, 31774, 7351, 27474, 190, 16044, 8040, 50, 25, 35, 55, + 0, 0, 30, 0, 10, 0, 0, 0, 35, 0, 55, 10, 5, 65, 0, 60, + 0, 25, 5, 30, 0, 5, 10, 0, 20, 5, 5, 35, 5, 0, 0, 0, + 0, 0, 15, 0, 5, 5, 0, 5, 5, 5, 0, 0, 0, 0, 0, 15, + 5, 110601, 110601, 5, 10, 45, 5, 110601, 0, 110601, 110601, 110601, 110601, 110601, 110601, 110601, + 0, 0, 0, 0, 110601, 110601, 110601, 45, 0, 0, 0, 0, 110601, 110601, 110601, 110601, + 0, 0, 110601, 0, 0, 0, 0, 5, 0, 5, 30, 0, 0, 110601, 110601, 110601, + 110601, 110601, 110601, 110601, 0, 110601, 110601, 110601, 0, 0, 5, 0, 20, 40, 110601, 110601, + 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, 110601, + 110601, 110601, 110601, 110601}; + unsigned int hval = len; + + switch (hval) { + default: + hval += asso_values[static_cast(str[16])]; + /*FALLTHROUGH*/ + case 16: + case 15: + case 14: + hval += asso_values[static_cast(str[13] + 1)]; + /*FALLTHROUGH*/ + case 13: + hval += asso_values[static_cast(str[12])]; + /*FALLTHROUGH*/ + case 12: + case 11: + hval += asso_values[static_cast(str[10])]; + /*FALLTHROUGH*/ + case 10: + hval += asso_values[static_cast(str[9])]; + /*FALLTHROUGH*/ + case 9: + hval += asso_values[static_cast(str[8] + 1)]; + /*FALLTHROUGH*/ + case 8: + hval += asso_values[static_cast(str[7])]; + /*FALLTHROUGH*/ + case 7: + hval += asso_values[static_cast(str[6] + 3)]; + /*FALLTHROUGH*/ + case 6: + hval += asso_values[static_cast(str[5])]; + /*FALLTHROUGH*/ + case 5: + hval += asso_values[static_cast(str[4] + 2)]; + /*FALLTHROUGH*/ + case 4: + hval += asso_values[static_cast(str[3] + 1)]; + /*FALLTHROUGH*/ + case 3: + hval += asso_values[static_cast(str[2])]; + /*FALLTHROUGH*/ + case 2: + hval += asso_values[static_cast(str[1])]; + /*FALLTHROUGH*/ + case 1: + hval += asso_values[static_cast(str[0] + 20)]; + break; + } + return hval + 
asso_values[static_cast(str[len - 1])]; +} + +const char* TopLevelDomainLookupHash::is_valid(const char* str, size_t len) { + static const char* const wordlist[] = {"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","co.tm","","\340\270\227\340\270\253\340\270\262\340\270\243.\340\271\204\340\270\227\340\270\242","","","","com.mu","","","","","com.so","","\340\270\243\340\270\261\340\270\220\340\270\232\340\270\262\340\270\245.\340\271\204\340\270\227\340\270\242","","co.cm","com.mo","","","","","","","\340\270\230\340\270\270\340\270\243\340\270\201\340\270\264\340\270\210.\340\271\204\340\270\227\340\270\242","","","com.bo","","","","","com.sd","","","","","","","","","","","","","","","","","","","","","","","\327\231\327\251\327\225\327\221.\327\231\327\251\327\250\327\220\327\234","","com.bm","","","","","com.km","","","","","","","\327\220\327\247\327\223\327\236\327\231\327\224.\327\231\327\251\327\250\327\220\327\234","","","","","","com.io","","edu.so","","","","","edu.mo","","","","","com.mw","","","","","edu.bo","","","","","edu.sd","","","","","com.kw","","","com.im","","","","","","","","","","","","","","","\327\236\327\236\327\251\327\234.\327\231\327\251\327\250\327\220\327\234","","edu.bm","","","","","edu.km","","","","","com.cu","com.mk","","","","","","","","","com.co","","","","","","","","","","edu.mw","","","","","","","","","","","","","","","edu.kw","","","","","com.cm","","","","","com.ru","","","","","","","","","","com.ro","","co.dk","","","","","","","","edu.cu","edu.mk","","","","","","","","","edu.co","","","","","com.cw","","","","","","","","","","","","","","","","","","","","com.sy","","","","","com.my","","","","\340\270\250\340\270\266\340\270\201\340\270\251\340\270\262.\340\271\204\340\270\227\340\270\242","edu.ru","","","","","com.by","","","","","com.ky","","","","","","","","","","com.se","","","","","nom.km","","","","","com.uy","","","","","edu.cw","","","","","","","","","","","","","","","c66.me","","","","","edu.sy","","","","","edu.my","","","","","","","","","","","","","","","edu.ky","","","","","","","","","","","","","","","edu.me","","","","","edu.uy","","","","","","","","","","nom.co","","","","","com.st","","","","","com.mt","","","","","com.cy","","","","","com.bt","","","","","","","","","","","","","","","","","","","","","","","","","nom.ro","","","","","","","","","","","","","","","","","","","","","","","","","edu.st","","","","","edu.mt","","","","","","","","","","edu.bt","","","","","com.re","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","edu.it","","","","","","","","","","","","net.mu","","","","","net.so","","","","","net.mo","","","","","","","","","","net.bo","","","","","net.sd","","","","","n4t.co","","","","","","","","","","","","","","","","","","","","net.bm","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","net.mw","","","","","","","","net.id","","","","","","","net.kw","","","net.im","","","","","","","","com.pk","","","","crd.co","","","","","","","","","","","","","","","net.cu","net.mk","","","","","","","","","net.co","","","","","","","","","co.cr","","","","","","nom.re","","","","","","","","","","","net.uk","","","","net.cm","","","","","net.ru","edu.pk","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","net.cw","","","","","com.py","","","","","","","","","","","","","","","net.sy","","","","ed.cr","net.my","","","","","","","","","","com.pe","","","","","net.ky","","","","","net.rw","","","","","","","","","","net.me","","","","","net.uy","","","","","","","","","","edu.py","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","edu.pe","","","","","","","","","","","","","","","","","","","","","","","","","com.pt","","","","","com.to","","","","","","","","","","net.st","","","","","net.mt","","","","","net.cy","","","","","net.bt","","","","","com.tm","","","","","","","","","","","","","","","","","co.uk","","","","","","","","","","","","","edu.pt","","","","","edu.to","","","","","com.tw","","","","","","","","","","","","","","","","","","","","","","","","","edu.tm","","","","b\303\241l\303\241t.no","adm.br","","","","","","","","","","nom.pe","","","","","","","","","","","","","","","","","","emr.it","","com.br","","","","","edu.tw","","","","co.tt","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","nov.su","","","","","","","","","","","","","","","","","","","","edu.br","","","","","","","","","co.zm","","","","","g\303\241ls\303\241.no","","","","","","","","","","","","","","","","","","","","","","","","","","nom.tm","","","","","","","","","","ato.br","","","","","","","","","","","net.pk","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","*.mm","co.st","","","","","","","","","","","","","","","","","","","","co.zw","","","","","","com.tt","","","","","","","","","","nov.ru","","","","co.mu","","","","","","net.py","","","","","","","","","","adult.ht","","","","","","","","","","","","","","","","","","","cn.vu","net.pe","","","","","","","","","","","","","","","edu.tt","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","n\303\270tter\303\270y.no","","","","","","","","","","","","","","","","","","com.ye","com.lk","","","co.mw","","","","","","","","","","","net.pt","","","","","net.to","","","","","not.br","","","","","","","","","","","","","","","","","","","","","","","","","net.tm","","","","","","","","","","com.au","","","","","edu.ye","edu.lk","","","","","","","","","","","","","","","","","","","","","","","","net.tw","","ac.lk","","","","","","","av.tr","com.am","","","","","com.ly","","","","","","","","","","","","","","","com.pr","","","","","edu.au","","","","","","","","","","","","","","","com.aw","","","","","","","","","","net.br","","","","","","","","","","","","","","","","","","","","edu.ly","","","","","","","","","","","","","","co.lc","edu.pr","","","","co.bw","","","","","","","","","net.ir","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","art.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","nom.ad","","","","","","","","","","","","","","","","","","","","","","","","0e.vc","","","","","","","","","","","","","","","go.cr","","","","","gok.pk","com.sv","","","","","com.mv","","","","","","","","","","","","","","","net.tt","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","edu.sv","","","","","edu.mv","","","","","","","","","","jor.br","","","","","","","","","","adv.br","","","","","","","","","","","","","","","","","","","","bmd.br","","","","","","","","","","","","","","","com.tr","","","","","com.cv","","","","","","","","","","","","","","","net.ye","net.lk","","","","ntr.br","","","","","","","","","","gov.mu","","","","","gov.so","","","","","gov.mo","","","","","","","","","","","","","","","gov.sd","","","","","edu.tr","","","","","edu.cv","","","","","","","","","","net.au","","","","","gov.bm","","","","","gov.km","","","","","","","","","","","","","","","","","","","","","","","","","gov.mw","","","","","net.am","","","","","net.ly","","","","","gov.kw","","","","","","","","","","net.pr","","","","","","","","","","","","","","","","","","","","gov.cu","gov.mk","","","","","","","","","gov.co","","","","","","","","","","","","","","ac.cr","","","","","am.br","gov.cd","","","","","","gov.uk","","","","gov.cm","","","","","gov.ru","","","","","","","","","","","","","","co.ve","","","","","","","","","","","","","","","","","","","","","","","co.je","","","","","","","","","","","","","","","","","","gov.sy","","","","","gov.my","","","","","","","","","","gov.by","","","","","","","","","","gov.rw","","","","","","","","","","gov.me","","","","","geo.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","com.ee","","","","","net.ae","","","","","","","","","","com.lr","","","gov.ie","","","","","","","","","","","","net.mv","","","","","","","","","","","","","","","","","","","","","","","","","gov.cy","","","","","gov.bt","","","","","edu.ee","","","","","","","","","","","jur.pro","","","nc.tr","edu.lr","","","","","","","ac.uk","","","com.et","","","","","","","","","","","","","","","","","","gov.it","","","","","","","","","","","","com.gu","","","","","","","","","","","","","","co.hu","","","","","","","","","","","","","edu.scot","","","net.tr","","","","","edu.et","","","","","","","co.ni","","","","","","","","","","co.ci","","","","","","","","cnt.br","","","","","edu.gu","","","","","","","","","","","","co.vi","","","com.ar","","","","","","","","","","","","","","","edu.gd","","","","co.ls","","","","","","","","","","","","","","","","","","","","","com.sn","","","","ac.zm","","","","","","","","","","","com.bn","","ed.ci","","","","","","","","edu.ar","","","","","eco.br","","","","","","","","uk.net","","","","","","","","","","","","","gov.pk","","","","","","","","","gru.br","","","com.in","","edu.sn","","","","","edu.mn","edu.krd","","","","","","","","","edu.bn","","","","","edu.kn","","","","","","","","","","","","","","","","","","","","","","","","ac.cy","com.gy","","","","co.rw","","","","","","","","","","","","","","edu.in","","com.cn","","","","ac.zw","","","","","","com.ge","","","","","","","","nid.io","","","","","","ac.mu","gov.py","","","","","","","","","","","","","","","","","","","","edu.gy","","","","","","","","","","","","","","","","","","","","edu.cn","","","","","etc.br","","","","","edu.ge","","","","","","","","","","","","","","","","","","","","","","","","","com.gt","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","net.lr","","","","","","","","","ac.mw","","","","","","gov.pt","","","","","gov.to","","","","","","","","","","","","","","","edu.gt","","","","","","","","","","","","","","","gov.tm","","","","","","","","","","","","","","","","","","","","","","","","","net.et","","","","","","","
","","","","","","","","gov.tw","","","","","","","","","","","","","","","net.gu","","","","","com.sc","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gov.mr","gon.pk","","","","","","","","","gov.br","","","","","","","","","","","","","","","","","","","","","","","","","edu.sc","","","","","","","","","","net.ar","","","","","","","","","","","","","gov.ir","","","","","","","com.lv","","","","","","","","","","","","","","","act.au","","","","","","","","","","","","","","","","","","","","net.bn","","","","","net.kn","","","","","","","","","","","","","*.er","","","","","","","","","","","","edu.lv","","","","","edu.pn","","","","","","","","","","erotika.hu","","","net.in","","","","","","","","","","","","","","","","bd.se","","","","","","","","","","","","","","","","art.sn","","","","","","","","","","","","","","","net.gy","","","","","","","","","","","","","","","","","","","","net.cn","","","","","gov.tt","","","","","net.ge","","","","","","","","","","","","","","","","","","","","","","","cam.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","net.gt","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gov.ye","gov.lk","","","","","","","","","","","","","","","","","","","","","","","co.me","","","","","co.us","","","","","","","","","","","","","","","","","","ak.us","","","","","","","","","","","","","gov.au","","","","","","","","","","com.tn","","","","","com.gr","","","","","","","","","","cim.br","","","","","","","","","","","","","","","","","","","","gov.ly","","","","","net.sc","","","","","agr.br","","","","","gov.pr","","","","","com.jo","","","","","","","","","","","","","","","edu.gr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","go.ci","","biev\303\241t.no","","","","","","","","","","","edu.jo","","","","","","","","","ct.us","","","","","","net.lv","","","","","net.pn","","","","","","","","","gv.vc","","","","","","gov.lt","","","","","udi.br","ngo.lk","","","","","","","","","","","","","nd.us","","","","","nm.us","","","","","","","","","","","","","","","","","","","","","bet.ar","","","","","","","","","","","","","","","","","","","","","","","","","co.krd","","","","","","","","","","","","","","","","","","","","eti.br","","","","","gov.ae","","","","","","","","","","","","","","","","","","","ac.se","","","","","ny.us","gov.mv","","","","ut.us","","","","","","","","","","ne.us","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ecn.br","","","","","","","","","ar.us","","","","","","","","","","","gov.tr","","","","","","","","","","jus.br","","","","","","","ac.ni","","","","","","","","","","ac.ci","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","co.bi","","","","","","","","bir.ru","","","","","cri.br","","","","ac.ls","","","","","","","","","","","","","","","","net.tn","","","","ac.ru","net.gr","","","","","","","","","","","","","","","","","","","","","","","","","com.sg","","","","","com.mg","","","","","","","","","","","","","","","com.kg","","","","","net.jo","","","","","","","","","","","","","","","com.
ug","","","","","","","","","","","","","","","","","","","","","","","","","edu.sg","","","","","edu.mg","","","","","","","","","","","","","","","edu.kg","","","","ac.rw","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","co.kr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","com.lc","","","","","nom.mg","","","","","gov.ee","","","","","","gos.pk","","","","nsw.au","","","","","gov.lr","","","","","","","","","co.na","","","","","","","","","","co.ca","","","","","","","","","","","bio.br","","","","","cng.br","","","","","edu.lc","","","","","net.je","","","","","","","","","","","","","","","est.pr","","yk.ca","","","","","gov.scot","","","","","","","","gov.et","","","","","","","","","","","","","","","nyc.mn","","","","","","","","","","","","","","","gov.gu","","","","","eng.br","","","","","","","","","","","","","","","gen.tr","","","","","","","","","","gov.gd","","","","","","","","","","","","","","","","","","","","","","","","","com.ac","","","","","","","","","","","","","","","","","","","","","","","","","gov.ar","","","","","","","","","","","","","","gu.us","","","","","","","","","","","","","","","","","","","","nu.ca","","","","","","edu.ac","","","","nv.us","gov.mn","","","","","","","","","","gov.bn","","","","","gov.kn","","","","ne.kr","","","","","","net.sg","","","","","","","","","","","","","","","","","erotica.hu","","","net.kg","","","","","","","","","","","","","gov.in","","","","","","at.vg","gets-it.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gov.gy","","","","","","","","","","","","","","","","","","","","gov.cn","","","","","","","","","","gov.ge","","","","","","","","","","","","","","","","ky.us","","","","","","","","","","","","","nt.ca","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","co.nl","","","","","","","","","","co.cl","","","","","","net.lc","","","","","grosseto.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","com.fm","","","","ac.me","","","","","","","","","","","","","","","","","","","","","notteroy.no","","","","","","","","","","com.bi","","","","","com.ki","","","","","","","","","","","","","","","","","","","","","eng.pro","","","","edu.fm","","","","","","","","","","","","","","","","","","","","gov.sc","","","","","","","","","","","","","","","edu.bi","","","","","edu.ki","","","","","","","","","","","","","","","","","","","","","","","","","com.ec","","","","","net.ac","*.ck","","","","com.gn","","","","","","","","","","com.ci","","","","","","","","","","","","","","","qld.au","","","","","","","","","","","","","","","gov.lv","","","","","gov.pn","","","","ac.be","","","","","","edu.ec","","","","","","","","","co.rs","edu.gn","","","","jolster.no","","","","","","edu.ci","","","","","","","","","","ass.km","","","","","com.ss","","","","","com.ms","","","","","","","","","","com.bs","","","","","","","","gen.in","nc.us","","","","","","","","","","","","","","","","","","","","","","","","","","url.tw","","","","","","","","","","","","","com.is","","edu.ss","","","","aremark.no","edu.ms","","","","","","","","","
co.ua","edu.bs","","","","","","","","","","za.net","","","","","","","","","","","","","","","","","","","","","","","","","com.ws","","","","","","","","","","","","","edu.is","","","","","","","asn.au","","ck.ua","","","","","","","","","","","","","","","","","aurland.no","","","","","","","","","","","","","","","go.kr","","","","","","anani.br","","","","","edu.ws","","","","","","","","","","","","","","","","","","","co.ug","","","","","\303\270rland.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","edu.rs","","","","zt.ua","","","","","","","","","","","net.fm","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","co.za","net.ki","","","","","gov.tn","","","","","gov.gr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gov.jo","","","","","","","","","","","","","","","","","barsy.ro","","","","","","","","com.ag","","","","","net.ec","","","","","","","","","","net.gn","","","","","","","bukhara.su","","","net.ci","","","","","","","","","","","","","","","","","","","","","","","","co.ma","","","","","","","","","","","","","","","","","","","","","","","","","","bloxcms.com","","","","","","","","","","","","","","","com.ps","","","","","","","","","","","","","","ac.kr","net.ss","","","","","net.ms","","","","co.ke","","","","","","net.bs","","","","","","","","","","com.sa","","","","","","","","","","","","","","","com.ba","","","","ne.ug","","","","","","edu.ps","","","","","","","","","cr.ua","","","","net.is","co.mg","com.ua","","","","","","","","","","","","","","group.aero","","","","","","","","","","","edu.sa","bar.pro","","","","","","","","","nom.ag","","","","","edu.ba","","","","","","","","","","net.ws","","","","","","","","","","","","","","cc.na","edu.ua","","","","","","","","","","","","","","","","","educator.aero","","","","","","","","","aca.pro","","","","","","","","","","","","","","","","enebakk.no","","qc.ca","","","","","","","","","","","com.fr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","kvits\303\270y.no","","","","","","","","","","","","","","","","","as.us","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","co.im","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gov.sg","","","","","gov.mg","","","","ne.ke","","","","","","","","","","","gov.kg","","","","","","","","","","","","","","co.gy","","","","","","","","","","","com.eg","","","","","net.ag","","","","","","","","","","nom.fr","","","","","","","","","","","","","","cv.ua","","","","","","","","","","","","com.hk","","","","","","","","","","","","","","","","","","","","","","","","edu.eg","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","net.ps","","","","","","","","","","","edu.hk","","","","","","","","","eidsberg.no","","","","","com.pa","","","","","","","","","","","","","","","net.sa","","","","","net.ma","","","","","","","","","","net.ba","","","","","","","","","","gov.lc","","nl.ci","","","","","","","","","","","","","net.ua","","","","ca.us","","","","","","edu.pa","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","
","","","","","","","","","","","","jeonbuk.kr","","","","","","","","","","","","","","","","","","","","","","","","","","com.ht","","","","","","","","","","","","","","","com.ai","","","","","","","jdevcloud.com","","","","km.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","ch.tc","","","j\303\270rpeland.no","","","nom.pa","","","","go.ug","edu.ht","","","","","bel.tr","","","","","","","","","","gov.ac","","","","","","","","","","","","co.no","","","","","","","","","","","4u.com","","edu.ls","","","","","","","","","cn.ua","","","","","","","","","","","","","","","","","","","","","","","","co.com","","","","","","","","","","com.om","","","","","ktistory.com","ac.rs","","","","","","","","","","","","","","","","com.do","","","","","","","","","","uwu.ai","","","","","","","","","","zlg.br","","","","","","","","","","com.dm","","","\303\245mli.no","","","","","eu.com","","","","","edu.om","","","","","","","net.eg","","","","","","","","","","","","","","","edu.do","","co.pw","","","","","","","","","","","","","","","","","","","","","","","","net.hk","","","","edu.dm","","","","co.ir","","","","","bc.ca","","","","","ciencia.bo","","","","","","","","","","","","","","bas.it","","","","","","cc.ua","","","","","123sait.ru","","","greta.fr","","","","","","","","nat.tn","","","","\303\270rskog.no","","","ed.pw","","","","","","","","","","","","","","","","","","net.pa","","","no.com","","","uk.com","","","","","","","","","","","","","go.ke","","","","","co.om","","","","","","","","","","","","","","nic.in","ac.ug","","","","","","","","","","","","","","cal.it","","","","","uy.com","","","","","","","","","","","","","","","","","","","","","","com.de","","","","","","audnedaln.no","","","","","","","","","","","","","es.kr","grimstad.no","","","","","","","","","","","","","","","asn.lv","","","","","","","","","","","","nt.no","","","gov.ki","","","","ac.za","","kr.ua","","","","","","","","","net.ht","","","","gc.ca","","","","","al.us","","","","","","net.ai","","","","","","","","","","","","","","","","","","","","","","","","","net.ls","","","","","","","","","","","","","","","aju.br","git-repos.de","","","","","","","","","com.la","","","","","","","","","","gov.ec","","","ar.com","","","","ne.pw","","","gov.gn","","","","ao.it","","","","","","art.ht","","","","","","","fuossko.no","","","com.hr","","","","","","","","","","net.gg","","","","us.na","","","","","ac.ma","","","","","co.it","edu.la","","","","","","","","","be.gy","com.sl","","","","","com.ml","nord-odal.no","","net.om","","","","","","","","","","","","","","","","","","","","","","net.do","","","","","gov.ss","","","","","gov.ms","","","","ns.ca","","","","","ac.ke","gov.bs","","","","","","","","","","net.dm","","","","","edu.sl","","","","","edu.ml","","","","","","","","","","com.es","","","","ca.na","","","","","","qsl.br","ks.us","","","","","","","gov.is","alesund.no","\303\241k\305\213oluokta.no","","","","","","","","","","","aaa.pro","","","ce.it","j\303\270lster.no","","","","at.it","","","","","","art.do","","","","","","","","","","","","","","","","","","*.jm","","edu.es","","","","ud.it","gov.ws","","","","","","","","","ct.it","","","","","","","","","","","","","","","","","","","","adygeya.su","namdinh.vn","","","","","","*.kh","","","nu.it","","","","","no.it","","kv.ua","","","","","","","","","","","","","","gov.rs","","","","","","","ad.jp","","","","","","","","","","caa.aero","","","","","","","","coz.br","","bu.no","","","","","","","","",""
,"co.jp","","","com.gi","","","","","","","","","","","","","","","","","","","","bod\303\270.no","","","","","","","","","ga.us","","","","","","nom.es","","","","","","","","","","ens.tn","","","","","","","","","","","","","","","","kg.kr","","","","edu.gi","","","","","","","","","","eun.eg","","","","ac.im","","","ed.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","fot.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","net.la","","","","ar.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","cr.it","","","","","","","","","","","","","","","","com.pl","","","","","","","","","","","","","","","net.sl","","","","","net.ml","","","","","","","","","","","","","","","gov.ps","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","edu.pl","","","","","","","","","","gov.sa","","","net.il","","gov.ma","","","","","","","","","","gov.ba","","","","","","","","","","","","","","","","","ne.jp","","","","","","","","gov.ua","","","","","forum.hu","","","","","","","","","","atm.pl","","go.pw","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","nhs.uk","","","","","","","","","","","","","","","","","","","berlevag.no","","","","","","","","","","","","","cn.com","","nom.pl","","","","","com.nr","","","","","","","","","","","","","","bygland.no","","","","","","","","barsy.de","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","edu.nr","","","","","","","","br.com","","","","","","av.it","","","aosta.it","","bo.it","emp.br","","","","","","","","","","","","","","","","","","","","jdf.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","at.md","","","","","","","","","","","","","","","","","","","","","","","","arq.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","k\303\245fjord.no","","","","","","","","","","","qc.com","","gov.eg","","","","","","","","","","","","","","","","","","","","","","","","nl.ca","","","\303\270ksnes.no","","","","","","","","","","","","bt.it","","gov.hk","","","ua.rs","","","","","","from-or.com","","","","ac.ir","","","","","","net.pl","","","","","","","","","","","","","gr.com","","","","","","","from-ma.com","","","","go.it","from-ut.com","","","","","","","","","","","","","","","from-wa.com","","","","","from-mt.com","","","","","","","","from-oh.com","","","","","","","","","","","","","","","","","","","","\340\270\255\340\270\207\340\270\204\340\271\214\340\270\201\340\270\243.\340\271\204\340\270\227\340\270\242","","","","uk.kg","","","kr.com","","","","","","","","","","","","","","","art.pl","","","","","","","","","","","","","","","","","","","","","","","","","com.qa","neat-url.com","","","","s\303\241l\303\241t.no","","","","","","","","","","","","","","","from-wv.com","","","","","","","","","ge.it","","","","","an.it","","","","","nm.cn","","","","","","","","","","","","","","","","","","","","","","","","","","edu.qa","","","","","","","","","cn.it","","","","","","","","","","","","","","","","net.nr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gov.ls","","","","","","","","","","","","","","",
"","","","","","","","","","en.it","","","","","br.it","","","","","","","","go.jp","","","","","aoste.it","","fm.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","nh.us","","","","","","","","","","","","","","","","","","","krellian.net","","","gop.pk","","gov.om","","","","","","","","","","","","","","","","","","","","","","gov.do","","","","","","","","","","","","udine.it","","","","","","","","com.al","","","","","com.hn","","","","","gov.dm","","","","","","","","","","fnd.br","","","","","","","","","","","","","","","gov.as","","","","","","","","","","","","","","","","","","","","","","ac.pr","","","","","","","","edu.al","","","","","edu.hn","kommune.no","","","","","","","","","from-mo.com","","keymachine.de","","","","","","","","","","","","gr.it","","","","","","","","","","","","","","","","","","","","","","","","","natural.bo","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","nord-fron.no","","","","","kr.it","","","eidskog.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ac.jp","","","","","","","","net.qa","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","georgia.su","","","","","ju.mp","","","","","","","","","","","","","","","","","","","","","getmyip.com","","","","","","","","","","","","","","","","","","","","","","","","","","ks.ua","","","","","","gr.jp","","","enf.br","","","","","gov.la","","","","","","","","","","","","","","","","","","","","","","","skedsmo.no","","","","","","","","","","","","","","","","","","","","","","","","","","","naustdal.no","","","","","","","","","","from-va.com","","","","ag.it","from-ms.com","","co.ro","","","","","","","","gov.sl","","","","","gov.ml","","","","","from-vt.com","","","","","","","","","","","","","","","","","barsy.io","","","","","","from-ks.com","","","","","","","","","","","","","","","","","net.al","","","","","net.hn","","4lima.at","","","","","","gov.il","","","","","","","","","","","","www.ro","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gd.cn","","","","","barsy.site","","","","","","","","","a.se","","","","","","","gov.cl","","","","","","","","","bn.it","","","","","","","","","y.se","","","","","","","","","","z.se","","","","","c.se","","nsn.us","","","","","bacninh.vn","","","x.se","","","","","","","","","","","","123minsida.se","","","us.com","","","","","","","","","","","","","","","","","","","aa.no","","co.gg","","","","","","","","","","","","","","","","","","","","","","","","","","","","","e.se","","eid.no","","","","","","","","","","gov.gi","","","","yn.cn","","","","","","","","","","","\303\245s.no","","","","","","","","b\303\241jddar.no","","","","","","","","","","","","","","","za.com","","","","","","","","","","","","","","","","","","","","","","","","","u.se","fi.cr","","","","","","","","","","","from-ok.com","","","","","eidfjord.no","","","","","","","","","","from-nj.com","","","","","","","","","","","","","","","","","","","","from-tx.com","","","","","cya.gg","","nt.ro","n.se","","","","","","","","","","","","","","","from-nh.com","","","","","","","","","","","","","","kustanai.ru","","","","","","","co.tj","","",""
,"","ci.it","","","","","ac.cn","jp.net","","","","","","","","","","com.gl","","","","","","","","","","from-nv.com","","","","ac.vn","now-dns.net","","","","co.bn","","","","","","","","","cechire.com","","","","","","","","","","","","","","","","","gov.pl","","","","","","","","","","","","","bykle.no","","","","","jevnaker.no","","edu.gl","","","","","","","","","","","","","","","","","","fvg.it","","","","","","cs.it","","","","","","from-ca.com","","","","co.gl","bnr.la","","","","","","","","","adygeya.ru","","","","","","","","","","nj.us","from-ct.com","","","","","","","","","","com.sh","","","","","biz.mw","","","","","","","","biz.id","","com.bh","","","","az.us","","","","","","","bounceme.net","","","","","","","","","","","","chungnam.kr","","","","","","","","","","","","","","","","","","","","","","\303\245lg\303\245rd.no","","barsy.bg","","","","","","","","","","","","","nittedal.no","","","","","edu.bh","fam.pk","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","cci.fr","","","","","","","","","","gov.nr","grp.lk","akamaihd.net","","","","","","","","","","","","","","","","","","","qa2.com","","","","evenassi.no","","","","","biz.my","","","","","b\303\241hccavuotna.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ca.it","","","","","","","","","","","now.sh","","","","","","","","","","","","","","","","","","","","","","","","bg.it","","","","","","","","","","","gov.tl","","","","","","","","","","","","al.no","","","","","","","","biz.cy","","","","","","","","","","aid.pl","","","","","","","","","","","","","","","nom.nc","","","","","","","","","","","","kep.tr","","","bryne.no","","b\303\245tsfjord.no","","arendal.no","","","","","","","","","","","","","","","","fst.br","","","","","","","","","","","","","","","net.gl","","","","","","","","","","","","","","","","","","","","","","","","","","*.fk","","b.se","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","na.it","","","","","fetsund.no","quangnam.vn","","","","","","","","","","","","","","","","","","","","from-ga.com","","","","","from-md.com","","","","","","","","","","","","","","","","","","","","com.ph","","","","","","","","","","","","","","","net.sh","","","","","","n\303\245\303\245mesjevuemie.no","","","","","","","","","net.bh","","","","","","","","","","","","","","","","biz.pk","nl.no","","","gov.qa","","","","","","","","","","","","","","","edu.ph","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","norddal.no","","autocode.dev","","","","","","","","","","","","","","\303\245l.no","","","","","","","","","","","","","g.se","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","bi.it","","","barsy.eu","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","k.se","bitbucket.io","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\303\270ygarden.no","","","","","","","","","","","","","","","","","","","","","","","","esp.br","","","","","","kristiansund.no","","","","","","","","bs.it","","","","","","","","","","ai.vn","","","","","","","","","","","","co.am","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","al.it","gov.al","","ghost.io","","","","",""
,"","","grong.no","srv.br","","","","games.hu","","","aukra.no","","from-il.com","","","","","","","","","","from-ia.com","","","","","","","","","cl.it","*.bd","berlev\303\245g.no","","","go.tj","","","","","","from-co.net","","","","","","","","","","","","","","we.tc","","","","","","","","","","","","","","","","","se.net","","","","","","","","","","","","","","far.br","","","","","","","","","","","","","","","","","","","","","","","","brescia.it","","","","","","","","","","","","","emb.kw","","","","","","","","","","","","","","","","","us.kg","ggf.br","","","","","","","","","","net.ph","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ba.it","","","","","","","","","","","","","","","","","","","","","","","cdn77-ssl.net","","co.il","","","","","co.th","","","","","","","austevoll.no","","","","","","","","","","","","","","","","","","\303\245krehamn.no","","","","","","","","","","","","","","","","com.ng","","","","komforb.se","","","","","","","","","","consultant.aero","","","","","","","","","","","biz.tt","","","","","","","","","","","","","","","","","","","","","","","","","bardu.no","","","","","","","","","","","","","","","edu.ng","","","","","nesna.no","","","","","","","","","","","","","","","","nt.au","","","","","","","","ac.tj","","","","","","","","","","","","","","","js.cn","from-ar.com","","","","","from-al.com","","","","","","","","","","","","","","","","","","","","","","","","co.bj","","","","","","","","","","","","","soc.lk","","","","","","","","","","","","","","","4lima.de","","","","","","","","","","","","","","","","","","","","","","","","blogsyte.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","eu.org","","","","","","","","","","","","","","","ae.org","","","","","net.th","","","","nf.ca","","","","","","","","","","","umb.it","","","","","","","","","","fie.ee","","","","","biz.pr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","cpa.pro","","","","","","","","","","","","","","","","","","","","","","","","","","","","","awsmppl.com","","","","","co.network","","","","","","kh.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gsm.pl","b\303\245d\303\245ddj\303\245.no","","elk.pl","","","barsy.online","","","","","","ah.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","co.pl","","","","","","","","gjerstad.no","seg.br","","","","","","","","","","","","","","","","","","","","","","","","","","cdn-edges.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","net.ng","","","","","","","","","","biz.mv","","","aostavalley.it","","","","","","","","","","","","gol.no","","","","","abr.it","","","","","","","","","","","","","","","","","","","","from-nd.com","","","","","","","","","","","","","","bl.it","quangtri.vn","","","","","","","","","","biz.at","","","","","","","","","","","","","","","aip.ee","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","biz.tr","","","","","gda.pl","","","","","","","kustanai.su","","","","","","","","","","","","","","","","","","sd.us","","","","","fed.us","","","","","","","","","","","co.at","","","","","","","narviika.no","","","","","","","","","","","","","","","","","","","","","froland.no","","","gob.bo","","","","","aircraft.aero","","","barum.no","","","","","k\3
03\241r\303\241\305\241johka.no","","","","","","","","","","","","","","","","","","","","gov.sh","","","","","sa.cr","","barsy.in","","","","","","","","gov.bh","","","","","","","","","","from-mi.com","","","","uz.ua","","","","","","com.ni","","","","","","","","","","from-wi.com","","","","","","","","","","navoi.su","","","","","","","","","","","","","","gs.cn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","edu.ni","","","","","","","","","","","","","","","","","","","forsand.no","","","","even\303\241\305\241\305\241i.no","","","","","","","","","kvafjord.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","entertainment.aero","","","caa.li","","","","","","","","","","","","gub.uy","","","","","","","","","","","","ch.it","","","","","go.th","","","","","","","","","","sar.it","","","","","","nom.ni","","","","","","","","","","","","","","","","","","","jl.cn","","","","","","","","","","","","","","","","","","brand.se","","","ynh.fr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","cc.hn","","co.bb","","","zp.ua","biz.et","","","","","com.bj","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","edu.bj","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","com.gh","","","","","","","","","","","","","","","","","","","","from-la.net","","","","","","","","","","","","","","","sc.ls","","","","","","","","","","gov.ph","","","","\303\245seral.no","from-ri.com","","","","","","","","","","","","","","","edu.gh","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","net.ni","","","","","","","","biz.in","","","","urn.arpa","","ac.il","","","","gob.pk","ac.th","","","","","","fhv.se","","","","","from-ak.com","","belem.br","","","","","ac.pa","","","","","","","","","","","","","","","","","","","","","","","","","","","consulting.aero","","","","","","","","","","","","","","","","","","","","","nissedal.no","","","","","","","","","","","","","","","","","g\303\274nstigliefern.de","","","","","","","","","","forte.id","","","","","","","","","","","","","jampa.br","","","","","","","","","","","barsy.net","","","","","","","","","","","","","","","","gob.pe","","","","","","","","","","","","barsy.info","","","","","","","","","","","","","","","","ngo.ph","","","","","","","","","wy.us","","kragero.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","barsy.menu","","","","","","","","","","sic.it","com.na","","","","","","","","","","","","","","","","","","sk.ca","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","co.id","net.bj","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","from-id.com","","","","","","","","","","","","barsy.me","","","com.bz","","g\303\241\305\213gaviika.no","","","com.kz","","","egersund.no","","","","","","","","","","","kommunalforbund.se","","","","","","com.uz","","","","","","","","","","","co.ae","","control.aero","","","","","","g\303\274nstigbestellen.de","","","","","","askvoll.no","","","","","edu.mz","","","ngrok.dev","","",
"","","","","edu.bz","","","","","edu.kz","","","","","","","","","","","","cbg.ru","","","","","","","","from-sd.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","kvalsund.no","","","","","","","fm.no","seoul.kr","","","","","","","","","","","","","","","","belluno.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","futurehosting.at","","","","","","","","","","aknoluokta.no","","gov.ng","","","","","","","","","","","","","","","","","","","","","","","","ah.cn","","","","","","","","","","","","","","","","","","","","","","\345\222\214\346\255\214\345\261\261.jp","","","","","","","store.ro","","","","","","","com.tj","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","qh.cn","","","","","","","","","","","","","","","","","","","","","","","","","","edu.tj","","","","","from-de.com","","","","","","","","","ac.fj","","","","","","grane.no","","","","","","","abc.br","","","","","","","","","sochi.su","","","","","","","","","","","","","","","","","","","bhz.br","ac.at","","","","","","","","","","","","","shw.io","sc.us","","","","we.bs","","","","","","","","","","","","","","","","","","","","","","","","","com.iq","","","","","","ngo.ng","","","","","","","","cagliari.it","","","amscompute.com","","","","","","","","","","","ngrok.io","","","","","","","","","","","","","","","","","","","","","com.zm","","","","","","","","","","","","net.mz","","","","edu.iq","","","","","","net.bz","","","","","net.kz","","","","","","","","","","","","","","farsund.no","","","","","","net.uz","","","","","","","","","","","","","","","nuoro.it","","","edu.zm","","","","","","","","","","","wv.us","","","","","","eu.int","","krager\303\270.no","","","","","","","fl.us","","","","","","","","","","","","","","","","","","agrar.hu","","","","","","","","","","","","","","","","","","azimuth.network","","","","","","","novecore.site","","","","nannestad.no","","","","","","","store.ve","","","","","","","","","","","","","","","","","","","","","","","","","aarborte.no","","","","","","","","kristiansand.no","","","","","","","","fm.it","","","","","","","","","","","sm.ua","","","","","gen.ng","","","","cz.it","","","","","","fet.no","","","","","","","","","","\303\270yer.no","","","","","","","","","","","","","","","","","gob.sv","","elementor.cloud","","","uri.arpa","","","","","","","","","","","","","","","","","","","","","","fh.se","","","","","builtwithdark.com","net.tj","","","","","","","","","","","","","barsy.org","","","","","","","adv.mz","","","","fe.it","fin.tn","","","","","","","","","","","","","n\303\241vuotna.no","","","","","","","","","","","","","","fuettertdasnetz.de","","","","","","","","","","","","","","","","","","","","","","","","","aq.it","","","","","","","","","","","","","","","","","co.financial","","","","","","","","svn-repos.de","","gausdal.no","","","","","","","","","","","","","","","","eco.bj","","","","","","","","jotelulu.cloud","","","gv.at","","*.pg","","","","","","","","","","","","foz.br","cloud66.zone","","","","com.kp","","","","","flesberg.no","","","sonla.vn","","","","","","net.iq","","","","","","","","","","","","","","","","","","","","","","","","","go.id","com.pf","","","","","","","","","","","","","","","","","","","","","","","net.zm","","edu.kp","","","","","","","","","","","","","","","","","","","kasserver.com","","","","","","","","","","","","","","","","bjark\303\270y.no
","","","","","edu.pf","","","","","","","","","","","","","","","","","","","barueri.br","","","","","","us.org","","","","","","","","","","","","","","","","","","","","","","","council.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ap.it","","","","","","","","","","","","","","","","","","","","","sc.kr","","","","","","","","","fr.it","","","","","","","","","","","","","","","","za.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","bomlo.no","","","","","","","","","","","","","","","","","js.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ac.id","","","","","","","","gb.net","","","","","","","","","","","","","","","","","am.in","","","","","","","","","","","","","","","","","","","","","certmgr.org","","","","co.in","","","","","zj.cn","","","","","","","","","","","","","","","xj.cn","","","feira.br","","","","edgestack.me","","","","","","","","","","","","","","","","","avellino.it","","com.az","","","","","gov.gh","ac.ae","nog.community","","","","","","","","","","","","","","","","","","","","gob.ar","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","store.st","","edu.az","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\303\245mot.no","","","","","","","","","bz.it","","","ekloges.cy","","","jessheim.no","","","","","","","","","","","","","","","","","","","","from-ne.com","","","","","","","","","","","","s\303\274dtirol.it","","","","","","","","","","","","","","","uk.in","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ac.gn","appspot.com","","","","","","","","","","","","","","","","","","","","","","","","","col.ng","","","","","","","","","xz.cn","","","","","","","","","","","","","","","","","","","","","","","suedtirol.it","","","","","","","","nordkapp.no","","","","","","blogsite.org","","","","","","","","","b.br","sch.so","gob.gt","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","biz.ki","*.np","","","","","","","","sch.id","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","co.pn","","","","","","","","app.br","","cq.cn","","","","","","","","","","","","","","","","","","gov.mz","","","","","","","","","","gov.bz","","","","","gov.kz","","","","","","","","","","","","","","","com.af","","","","","","","","","","","consulado.st","","geekgalaxy.com","","net.az","","","","","","","","from-mn.com","er.in","","","","","","","","","amot.no","","","","","","","","","","","","","","","","","","gratangen.no","","","","","","","","","from-fl.com","","gmina.pl","","","edu.af","","","","","from-dc.com","","","","","","","","emiliaromagna.it","","","slg.br","h\303\241bmer.no","brasilia.me","","","","genoa.it","","","biz.ss","","","","","","","","","b\303\241hcavuotna.no","","","jab.br","","","","","","","","","","","","","","","","","","","","","","bergamo.it","","","","","wi.us","","","","arts.ro","","","","","","","","","","","","","","","st.no","","","","","","","","","",""
,"","","ab.ca","","","","","","","","","","","","","fc.it","","","","from-tn.com","","","adobeaemcloud.com","","","","","","","","","","","","","keliweb.cloud","","","","","","","","","","","","","","","","","","bib.br","","","","","","","conference.aero","","","","","","","","","","","gov.tj","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","from-pr.com","","","","","","","","","bj.cn","","","","","","from-pa.com","","","","","","","","","","","","","","","","","","","","","","servemp3.com","","","","","","","","sc.ug","","","","jp.md","","co.ag","","","","","","bsb.br","","","","","","","","","","","","","","surnadal.no","","","","","","","","","fin.ec","","","fedje.no","","","","","","","","","","","","engerdal.no","","","","","fin.ci","","","","","","","","","","","karelia.su","","","","","","","","","","","","","store.dk","","","","","","","","","","gov.iq","yombo.me","","com.vu","","","","ntdll.top","","","","","nb.ca","","","","","","","askim.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gov.zm","jp.kg","","","","","","gov.bf","","","","","","","","","","","","edu.vu","","","","","","kvanangen.no","","","","","","","","","","","","","","","gov.zw","","net.af","","","","wa.us","","adobeaemcloud.net","","","","","","","","","so.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","bolzano.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","b\303\241id\303\241r.no","","","","","","","","","","","","","","","","","","","","","","","","","sc.ke","","","","","","","","","","","","","","","","","","kvinesdal.no","","","","","","","","","","","","","","","","","","","namdalseid.no","","","biz.ua","","","","cn.in","","","","","","","","","","fg.it","","","com.ve","","","","jpn.com","","","","","","","","","","","","","","","","","kv\303\246fjord.no","","","","","karacol.su","","","sec.ps","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","edu.ve","barsy.pro","","","","","","","","","","hu.net","","","","","","","forde.no","","","","","","","","","","","","","","","co.technology","","","","gz.cn","","","","","","","","","","med.sd","","","","","","","","","","","","","","","","","","","f.se","ac.in","","","","","","","","","me.vu","","","","","","","","","","","","","conn.uk","","","","","","","","sch.ir","","","","googlecode.com","","","","","","","","","","","","","","","","","","","","","empresa.bo","","","","","","","","","","","","","","","","","","nom.ve","","","gov.kp","","","","","","bremanger.no","klabu.no","","","","codespot.com","","","","","","net.vu","","","","","","","","","","","","","","bokn.no","","muos\303\241t.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","works.aero","","","","","","","","","","jorpeland.no","galsa.no","","","","","","","","","","","","","","","andoy.no","","","","niteroi.br","\303\245fjord.no","","","","","sr.it","","","","","","","","","","","","","","","","","zgora.pl","","","aeroport.fr","","","","","","","","","","","","","","","","","","","","","","","","caobang.vn","","","","arts.co","","","","","","","","","","","","","","","","ws.na","","","","","fi.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","asker.no","","","",
"","","","","","","","","gov.nl","","sch.lk","","","","","","","","servecounterstrike.com","","bbs.tr","","","","","","","","","","","","","","","","","","","","","","","","","net.ve","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","n\303\246r\303\270y.no","","","","","","","","","","","","","","","","","","","sch.ly","","","","","","","","","from-nc.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","biz.ls","","","","","","","","","","","","","","","com.gp","sorum.no","","","","","","","elementor.cool","","","","","","","","","","","","","svalbard.no","jgora.pl","","","bluebite.io","","","","","","","","","","","","","","","sv.it","","","","6g.in","gov.az","","","","","","","","","","","quangninh.vn","","","","","","","","5g.in","edu.gp","","","siena.it","","","","","","camau.vn","","","","","","","","","","","","somna.no","cremona.it","","","","","","","","","","","","","","smola.no","","","","","","","","","siellak.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","cloudns.cc","sd.cn","","","","","","","","","","from-nm.com","sch.ae","","","","","","","","","","","","","","","dr.tr","","","","","","","","","","","","","natal.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","biz.dk","","","","","","adobeioruntime.net","","","","","","","","","","serveirc.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gjemnes.no","","","","","","","","freeboxos.com","","","","","","","","","","","","","","","","","","","","det.br","","agdenes.no","","","","","","","","","","","","","","","","","","","","","","me.uk","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ai.in","","","","","","","krokstadelva.no","","","","","","","","","","","","","","","","","","","","","","","armenia.su","","","","","","","","bearalv\303\241hki.no","","","from-hi.com","","","","med.br","","","","","","","","","","","","","","","","","","","","","","","","","authgear-staging.com","","","","","","","","","","nalchik.su","","","","","","","","","","","","","","","","","","","","","","","","","","net.gp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gov.af","","","","","","","","","cs.in","","","gob.ec","","","","","","","","","","","","","","broke-it.net","","","","","sjc.br","","","","","","","","","","","","","","qcx.io","","","","","","","","","","","","gildesk\303\245l.no","","","","","abo.pa","","","","","","","","","","","","","","","","","","","","","","","","","editorx.io","","","","","aver\303\270y.no","","","","","","","","","","","","senseering.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","us.in","blogspot.md","","","","","","","","nyan.to","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","com.fj","","","","","","s\303\270rfold.no","adobeio-static.net","","gjesdal.no","","","","","","","","","nieruchomosci.pl","","","","","","","","","","","","asnes.no","","balestrand.no","","","c.la","","","","","","","s\303\270r-odal.no","","","","blogspot.dk","","","","","blo
gspot.mk","","","","","","","","ca.in","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","avocats.bj","","","","","","","","","","","blogspot.td","","","","","kalmykia.ru","","","","\346\224\277\345\272\234.\351\246\231\346\270\257","","","","","","","","","","\347\266\262\347\265\241.\351\246\231\346\270\257","","","","","","","","","","","","","","","med.ly","","","","kiengiang.vn","co.ao","","","","","","blogspot.my","","","","","blogspot.tw","","","askoy.no","","med.pro","","","","\345\205\254\345\217\270.\351\246\231\346\270\257","","","","","","","","","giize.com","","blogspot.de","","","","","","","","","","","","","","","nic.tj","","","","","","","","","","","br\303\270nn\303\270y.no","","me.tc","","","","","","","","","","","","","","","from-in.com","","","","","","","","","","","ed.ao","","sor-fron.no","","","","","","dev.br","","","","","","","","","","","","","ulvik.no","","","","","","","","","","","","","","","","","","","","cherkasy.ua","","","","","","","","","","","","","","","","","","","","","","czest.pl","","","","","","","","","","","","","","","","","","","","","","","biz.pl","","","\327\246\327\224\327\234.\327\231\327\251\327\250\327\220\327\234","","","","","","","","","","","","","","","","","","","","spdns.de","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","blogspot.ru","","","","","from-sc.com","","","","","blogspot.ro","","","","","","","","","","","gildeskal.no","","","","","","","","","","sauherad.no","","","","","","","","","","","","","","","","","","","a.bg","","","","","2.bg","","","","","4.bg","","","","","3.bg","","","","","1.bg","","","","","y.bg","","gov.ve","","","0.bg","cafjs.com","","","","z.bg","","","","","c.bg","","","","s.se","6.bg","","","","","x.bg","","","","","9.bg","","","","","net.fj","","","","","5.bg","","","","","8.bg","","","","","q.bg","","","eek.jp","","7.bg","batsfjord.no","","","","","","","","","","servers.run","","","ski.no","","austrheim.no","","","","","","","","","biz.nr","","","","","","","cargo.aero","","kicks-ass.net","e.bg","","","","","","","","","","","","","","","","","","","","","","ybo.science","","","","","","","","","gentapps.com","","sk\303\241nit.no","","sn.cn","","gonna.jp","","","co.place","","","","","","ballangen.no","","","","","","","","","","","kutno.pl","","sa.com","augustow.pl","","","","","","","","","","","","","","","u.bg","","","","","","","","","","","","","","","","","","","","blogspot.re","","","","etnedal.no","","","","","","akita.jp","","","","","","","","","","","","","","","","","","","","","","","gouv.ml","","n.bg","cb.it","","","","","","ustka.pl","","","","","","","","","","bytom.pl","","","","","","sokndal.no","","","","","","","","","","","kvitsoy.no","","","","","","frana.no","","","","","si.it","","aurskog-h\303\270land.no","","","","","","","","","fly.dev","","","","","","","","","","","com.vn","","karlsoy.no","syno-ds.de","","","","","","","","","","","","","","","","","","","","sc.cn","","","","m\303\245lselv.no","","","","","","j.bg","","","","","","","","","","flakstad.no","","","","","","","","","","","","namsskogan.no","","","","","","","","","de.ls","edu.vn","","","","","","","belau.pw","","","","","","","","","","","ss.it","","","","","","","","","","","","","","","","","","","med.ee","","","","","","","","","","","","","","","1kapp.com","","","","","","","","","","","blogspot.mr","","","","","","","","","","","","barsy.ca","","","","","","","","","","gob.pa","","",""
,"","","","","","","","","","","md.ci","","kviteseid.no","","","","","blogspot.kr","","","","","","","","","","","","","","","","","","","","","grondar.za","","sld.pa","","","","","","","","","","","en-root.fr","","","","","","","ferrara.it","kautokeino.no","","","","","","","","","","","no-ip.net","","","","","","","","","","","","","","","","","\303\245lesund.no","","","","msk.su","","","","","","","no-ip.info","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","boo.jp","","","","","","sch.jo","","","","","","","","","","","","","","","","","","","","","","chimkent.su","","","","","","","","","","","","","","","","","","","","","","naroy.no","","","","","sa.it","commune.am","","","","blogspot.no","","","","","","","","","arkhangelsk.su","blogdns.com","","","","","","","","","","","","","","","","","com.vc","","elverum.no","","","","","","","","","","holt\303\245len.no","","","","","","sykkylven.no","","","","","boy.jp","","","","","","","","cloudfront.net","journalist.aero","","","","","","","","","","","","","","","","","","msk.ru","w.se","","","","","","","","sci.eg","","","","","","edu.vc","","","","","","","","","","","","","but.jp","","","","","unj\303\241rga.no","","","","","","","","","","","kharkov.ua","gob.do","","","","","","","","giske.no","","","","","","","","me.ss","","","","net.vn","yolasite.com","","","","","","","","","","","","at-band-camp.net","","","","","b.bg","","","","","","sld.do","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","blogspot.com","","","","cuneo.it","sos.pl","","","","","","","","","","akamaiedge.net","","","","","","","","","","","","","","","","","","","","","","","","","","assur.bj","skierva.no","","","","","","","gloppen.no","","","","","sn\303\245ase.no","","","","","","","","","","fyresdal.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","br\303\270nn\303\270ysund.no","","","","","klepp.no","","","","","","","does-it.net","","","","","","","","","","","","mo\303\245reke.no","","","","","","","","","","","","","kapsi.fi","","","","","","","","","","","","","","sport.hu","","","","","","","","","","","","","","","","","","","","","","","","g.bg","","","","","","aurskog-holand.no","","","","","","","","","","","","kinghost.net","","workers.dev","","","","","","","","","","","","","","","","","","","","","","","","","","","","amusement.aero","","","","","","","","","","","","blogdns.net","","","","","","","k.bg","","","","alstahaug.no","","","","","","","","","","","","","","","","s\303\270r-fron.no","","","","","","","","","","","","","","","","","","","","","","","","sel.no","","","","","","","","","","","","","","","","","","","","","bielawa.pl","","","","","net.vc","","","","de.us","","","","","","","","","","","","","","fermo.it","","","","","","","","","","","","","","","","","","","","","","","","","ybo.trade","","","","","","","","","","","","","","","","","","","","","sogne.no","","","","","","","","","","","","","","","","codeberg.page","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","barsy.mobi","","","","","caserta.it","","","","","","","","gunma.jp","mo.us","","","","gob.es","","","","","des.br","md.us","","","","kochi.jp","","","","","","","","","","cistron.nl","","","","","ardal.no","","","","","","","","","","","","","","","","","","","","girly.jp","","ddnss.de","gov.fj","","gob.cl","","","sunndal.n
o","","","","","","","","","","","","","","","","","","","","krym.ua","","","","","","","","","","caltanissetta.it","","","barletta-trani-andria.it","","","","","","","","","","edu.za","","","","","","","","","","","","","","","","","","","","","","","","","me.us","","","","","","","","","","","","","","soundcast.me","","mus.br","nerdpol.ovh","","","","nalchik.ru","","","","","","","","","","konskowola.pl","","","","","","","","hi.us","","","","","","mt.us","","","","","","eurodir.ru","","","","","","","","","","","","","","no-ip.org","","","","","","karm\303\270y.no","","","","","","","","","","","","slz.br","","","","","","","","gv.ao","","","","","","","","","","kr\303\245anghke.no","kalmykia.su","","","","","","","","","","","","","nom.za","","","","","","","brindisi.it","","","notaires.km","","","","","","","","","","","","","","carrd.co","","","","","","","","","","","","","","","","","","","","","","","","","","","","sondrio.it","","","gouv.ht","","","","","","","","sauda.no","","","","","","","awsglobalaccelerator.com","","goupile.fr","","","","","","","","","","","","mar.it","","","biz.gl","","","","","","","","","vet.br","","","g12.br","nordreisa.no","","","","","","","","","","","","","","skedsmokorset.no","","","","","","gran.no","","","","","","","","","","","","com.dz","","","","","akamaihd-staging.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","cloudns.us","","","","","","","","","","folldal.no","air-surveillance.aero","","","","","edu.dz","","","","","","","","","","","","","","bajddar.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","mol.it","","","","","","naklo.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","net.za","\304\215\303\241hcesuolo.no","","","2ix.at","","frosinone.it","","","","","","waw.pl","","","","","","","","","","","","","","","","","co.tz","","","","","co.nz","","","","","","","x0.to","","","co.cz","","","birkenes.no","","","","mat.br","","","","","","","","","","","","sakuratan.com","","","","","","","","","","servesarcasm.com","","","","","","","","","fla.no","","","","","","","","","","x0.com","blogspot.sk","","","hoylandet.no","","","","","","","","","","","","","","","","","","","","","","","","","","blogspot.ie","","","","","","","","","","","","","","","","","e4.cz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","crotone.it","","","kyiv.ua","","","","","","","","","","","","web.bo","","","kiev.ua","","","","","","","","","","hm.no","","","","bmoattachments.org","","sch.ss","co.sz","","","sogndal.no","","","","","blogspot.it","","","","","","eero-stage.online","","","","","hu.com","","","","blogspot.se","","","","","","","","","","","","","","","","","","","","","","","arts.ve","skierv\303\241.no","web.id","","","","","","","","gaivuotna.no","","blogspot.am","","ven.it","","","","","gov.vn","","","","","","","","","","","","","","","","","hk.com","","","","","","","","blackbaudcdn.net","","","","","web.co","","dy.fi","","","","","","","","","","","","","","","","","","","","","","","","","","baria-vungtau.vn","","kirkenes.no","","","","dr\303\270bak.no","kafjord.no","","","net.dz","","","","","","","","","","\347\275\221\347\273\234.cn","","","","","","","","","","","","ne.tz","spdns.eu","","","","","","","","","","","cloudns.eu","","","","","","bip.sh","","","","","","","","","","\347\266\262\347\265\241.cn","","","","","","","","
","","","","cloudns.pw","","","","","","","","","","","","","","","","","","","","","","","blogspot.gr","","","","","\345\205\254\345\217\270.cn","coop.tt","","faststacks.net","","art.dz","","","","kicks-ass.org","","","","akamaiorigin.net","","","selbu.no","","","","","","","h\303\245.no","","","","hs.kr","","","","","","","","","","","","","","","","","","","","","","","","dr.na","","","","blogspot.ae","","","","","","","","","","","","","","","","","","","government.aero","","","","","","","","","","","","","zarow.pl","","","","","","","","","","","","","com.nf","","","","","","","","","","","","bitbridge.net","","","net.nz","","","","","","","","","","","","","","","","","","","bahccavuotna.no","","dc.us","","","","","","","","","","bjerkreim.no","com.vi","","","","","","","","","","","","","","","","","dyn53.io","","","","","","","","","","sor-odal.no","aichi.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","blogspot.cv","","","","","","","gov.vc","mn.us","","","","","","","","ybo.review","","","","","","","","","edgeapp.net","","","","","","","","","","","","","","","","","","","","","","","","curv.dev","","","","","k12.tr","","","","","brumunddal.no","","","","","","","","","","","","","","","","","","","","","barsy.pub","sch.sa","","","","","","","","jelastic.team","","","","","","barsy.support","","","","","","sampa.br","","","","","campinagrande.br","","","","","","","","","","","","","","kids.us","","","web.pk","","","","gob.hn","","","","","","","","","","","","","","","","","","","","","","","fjell.no","unjarga.no","","","union.aero","econo.bj","","","","","","","","","","blogspot.vn","","","cloudns.pro","","","","","","","sandnes.no","","","","","bambina.jp","","","","","","","aland.fi","","","","","","stavern.no","","","","","","","","","","","","","","","","","blogspot.lu","","co.uz","","","","","","","","","","shopselect.net","","","","cx.ua","","","","stordal.no","","","hemsedal.no","","","","","","","","","","","","","","","","","","airline.aero","","","","","","\303\241laheadju.no","","fj.cn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","flora.no","","","","","","","","","","","","","","","","","","","","","","","","selje.no","now-dns.org","","","","","","","","aivencloud.com","","","","","","","","","","","","","","serveexchange.com","","","","","","","","","","","","","","","","","","","coop.mv","","","","","snasa.no","","","","fhs.no","","","","","","","","withgoogle.com","","","sandcats.io","","","","net.nf","","","","","enscaled.sg","","go.tz","","","","","","","med.ec","","","","","","ybo.party","","","","","","","","","","","","","","","","","","","","","","","","","","","net.vi","","","","","","","","","","","","","kopervik.no","","","","","","","drr.ac","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","angry.jp","","","","","","","","","","","","","","","","","","","","","","sortland.no","","","","","","","","","","","wedeploy.me","mk.ua","","","","","","","","","","kharkiv.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","edgesuite.net","","","","","smushcdn.com","","","blogspot.lt","","co.mz","","","","","","","angiang.vn","","","chernovtsy.ua","","","","","","","","","","","","","","","","","","","","","","","grajewo.pl","","","","","","","","aero.tt","","","","","","","","","","","","","","","","","","","","","","","sorreisa.no","","","","","","","","","","balsfjord.no","","","","barsy.uk","
","csx.cc","","","","","","","","","","","conf.lv","","","","","","","","","","","","","","","gov.za","","","","","hs.run","","","services.aero","","","","blogspot.ug","","","software.aero","","","","","","a\303\251roport.ci","","","","","","","","","","","","","ac.tz","","","","","ac.nz","","","","","","","","","","","","","boavista.br","jeju.kr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","fukui.jp","","","","","","","","","","","","","gouv.km","","","","","","","","","kvam.no","hzc.io","","","","","","","","","","seljord.no","","","","","","","","","","","","","","","","","","","","","","","","","","","web.lk","","","","\351\271\277\345\205\220\345\263\266.jp","","","","","","","","","","","","","","","","hurum.no","","","","","nfshost.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","servegame.com","up.in","","siiites.com","ac.sz","","","","","stj\303\270rdal.no","","","sh.cn","","","net-freaks.com","","","","","","","","","","ngo.za","com.sb","","","","","","","","","berg.no","","","","vao.it","","com.bb","","","","","","","","vda.it","","","","","","","","","de.gt","","","","froya.no","","","","","","","","","","","","","","","","","","gouv.fr","","","","","no-ip.biz","","","","","","edu.sb","","","","","","","","","","","","","hornindal.no","","edu.bb","","","","","","","","","","","","","","me.ke","","","","","hotel.hu","","","","","","med.sa","gov.dz","","","","","","","","sf.no","","","","","","","","","","","","","","","","","","","","","","hk.cn","naamesjevuemie.no","","","","","","","","","","","","","","","","","mi.us","","","","","","","notodden.no","","","","aosta-valley.it","","hotel.lk","","","","","","contagem.br","","","","","","","gyeongbuk.kr","","","","","","","","","","","","","","","","","","","","","sk\303\245nland.no","","he.cn","from-wy.com","","","","","","","","","bronnoy.no","","","","","gangwon.kr","","","","","","","","biz.ni","from-ky.com","","","","","","","","","","","","","","","","","","mil.bo","","","","","","","","","","bologna.it","","","","ms.us","","","","","aero.mv","","","","","midsund.no","","","","","","","","","freesite.host","","mil.km","","","","","","","","","","","","","","","","","","","32-b.it","","","","","","forl\303\254cesena.it","woltlab-demo.com","","","","nesoddtangen.no","","","mil.id","campania.it","","","","kyoto.jp","16-b.it","","","","","vt.us","","","","","","","","","","","","2-d.jp","","","","","","","","64-b.it","","","","","","","","","","","","","","","","mil.co","","","","hoabinh.vn","","","","","","","","","","","","","","","","","","","web.tr","","","","","","","","","","","","mil.ru","","","","","","","","","","jeonnam.kr","","","","","","","","sa.au","","","","","sells-it.net","gouv.ci","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","krasnodar.su","mil.sy","","dn.ua","","","mil.my","","","","","","","","","","mil.by","","","","","","","","","","mil.rw","","","","","","blogspot.hu","","","","net.sb","","","","","mil.uy","","","","","","","","","","net.bb","","","enterprisecloud.nu","","","","","","","","","mytis.ru","","ma.us","","","","","","","gorlice.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mil.st","","","","","med.pa","","","","","mil.cy","","","","","","","","","","","","","","","","","","","","","de.com","","kontum.vn","","","","blogspot.hk","","","","","","","
","","","","","aure.no","","","","","","","","","","","","","","","hemne.no","","","","","","","","","","","","","","","","","","","","","","","","floripa.br","","cloudns.in","blogspot.in","","","","","","","","","","","","","","","","","","","","","","","","","","","","","securitytactics.com","bentre.vn","","","","","","","under.jp","","","","","sytes.net","","kherson.ua","","","","","","","","","","","","","","","med.ht","","","","","","","","","","","","north-kazakhstan.su","","","","","","","","jan-mayen.no","","","","","","","","","","","","","","","","","","","","","","","","","","blogspot.sn","","","","","","","","","","","","","","","","","","","","gen.nz","","","","","","freeboxos.fr","","","","","","","","","","","","","","","","","","","chu.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","web.gu","","","sp.it","","","","","","","","","","","","","","","","","","","","","","","","","","","med.om","","","","","","","","","","","","","xii.jp","","","","","123miweb.es","","","","","","","","","","","","","","","h\303\241pmir.no","","","","","","","","","","","","","ddnslive.com","","","","","","mil.py","carrara-massa.it","","","","","","","","workisboring.com","","","","","","","","","","","","","","","","","","groundhandling.aero","","","mil.pe","","","","","","","","","salud.bo","","","","","ms.kr","","","","ac.mz","","","","","noticias.bo","","","","","","","","","","","","","","me.so","","","","","","","","","","","","","","","","","","","cleverapps.io","","","","barsycenter.com","aktyubinsk.su","","","","","","","","from.tv","","web.in","","","game-server.cc","","","","","","","","","","","","","","","","chernihiv.ua","slattum.no","","","","mil.to","","","","","","","","","","sardinia.it","","","","nome.cv","","","","","","","","","","","","","","","","mil.tm","hjelmeland.no","navuotna.no","","","","","","","","","filegear.me","","","","","","","","","","","","","","","kozow.com","wa.au","","","","gsj.bz","","","","","","","","","mil.tw","","","","","","","","","","","","","","","","","h.se","","","","","","","","","","","","","","","","","","","storj.farm","","","","","","mr.no","","","","","unusualperson.com","","","com.lb","","","","","","","","","snillfjord.no","mil.br","","","","","mil.kr","cri.nz","","","","","","","","bodo.no","","","","","","","","","","","","","","","como.it","","","","","","","","","","mo.it","","ha.no","","","","","blogspot.rs","","harstad.no","","","","","","","edu.lb","","","","","","","","hn.cn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","binhduong.vn","","","","","","","","","","","","alt.za","","","","","edgekey.net","","","","","charter.aero","","","","","","","","","","","","","","","","","biz.tj","","","","","","","","","","","","","","","","","","","","","","game-host.org","","","","","","me.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","krodsherad.no","","","","","kilatiron.com","","mt.it","","","","","","","","","","","","","","","","","","","","airkitapps.eu","","","","","","","now-dns.top","","","","","","","","","","blogspot.hr","","","","acct.pro","","","","","","","","","","","","","","","","","","","","","","","","","","nsupdate.info","","eidsvoll.no","","","","","","","","","familyds.com","","","","","","","","mod.gi","","","","","","","","","","","","","","","","","","","","","","","","","","","sch.qa","","","","","","mer\303\245ker.no","","","","","","","mil.ye","giehtavuoatn
a.no","","","","","","","","","","","","","biz.zm","","","","","","modum.no","","","","sorfold.no","","hammerfest.no","","","","","","","","","","","coop.ar","","","","wedeploy.io","","","","","","familyds.net","def.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","spdns.org","","","","etne.no","","","","","","","","","","","","","","","","","","","","3utilities.com","","","","","","","biz.wf","bryansk.su","","","","","","","","","","","","","","","assn.lk","","grue.no","net.lb","","","","qoto.io","","","","","","","","","","","","","","","","","","","","saotome.st","","","","","","","","","","","","","","","name.et","","","","","","","","","","","","","","","","","","","","","","","","karasjok.no","","","","","","aerobatic.aero","","","","","nic.za","","","","","","","","","","name.tt","","","","","","","","","","","","","","","","","","","","from-ny.net","student.aero","","","","","","","mcdir.ru","skanland.no","","","","","","","","","","","","","","de.md","","","media.hu","","","","","gov.sb","nore-og-uvdal.no","","","sandnessjoen.no","","","","","","","","cherkassy.ua","","","gov.bb","","","","","","","","","","busan.kr","","","","","med.pl","","simplesite.gr","","","","","","","","","","","","","mil.ae","","","","","","hl.no","","","","","","","","","","","","","","","","","","","mil.mv","","","","","","","","","","","","","","","","","","","","baclieu.vn","","","","","","","\303\270stre-toten.no","","","","","","","","","","","","","","","","","","","","","finn\303\270y.no","","","","monza.it","","","","","","","","","","","","","","","","alto-adige.it","","","","","","","k12.ec","","","","","vic.au","","scrysec.com","","","","","","","","","co.business","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mil.tr","","","","","","","","","","","","","","name.tr","","","","my-vigor.de","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gob.ni","","","","","","","","","","","wpmucdn.com","","","","","dynv6.net","","","","","","","","","","","uni5.net","","","no-ip.ca","naturbruksgymn.se","","","","simplesite.com","","","","","","","","","","","","","","","","","","","","wlocl.pl","","","","","","","","blogspot.sg","nx.cn","","","","","","","hol.no","","","","","","","","","buyshop.jp","","","","","","mo.cn","","","","","","","","","","","","","","academia.bo","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","123siteweb.fr","","","","shacknet.nu","murmansk.su","","","","","","","","","","","","jpn.org","","","endoftheinternet.org","","","","","","","bss.design","","","","","","","jx.cn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","fem.jp","","","","","","","","servegame.org","","","","","","","kuleuven.cloud","","","steigen.no","","","","","","","","","","","","","","","zhitomir.ua","","","","","","","","","hi.cn","","","","","","","service.one","","","","name.mv","","","","","","","","","","","","","","","sandefjord.no","","","","","","","","","","","","f.bg","","","","","dyn-ip24.de","","","","bar2.net","","","","","bar1.net","","","","","","","","","","","","","friulivenezia-giulia.it","","","","","","","","","","","vi.us","","","","","","","","quangngai.vn","","","","","","","mn.it","","","","","","","","","","","","","","","","","","familyds.org","gitpage.si","","","biz.az","","","","","
","","","","","","alessandria.it","za.bz","ninhbinh.vn","","","","","","","","","","","","","","","journal.aero","","nesodden.no","","","","","","","","","","","","","conf.se","","","","","","","","","","","","","","","","","","","","","","","","samnanger.no","","","","","","","skjervoy.no","","","","","","catering.aero","","","","","","","","","","","bialystok.pl","","","","","","","","","","","","","","blogspot.pe","","","","","","","","","mil.ar","","","","","","","","","","","","","","","","","","","","","","","","","","","","sb.ua","","","","","","","","","","chiba.jp","","","","","","","","","","","mc.it","","","","","","","","","","","","","alaheadju.no","","","","","","","","","","","","","","","","","andasuolo.no","","blogspot.pt","name.mk","","","airkitapps.com","saltdal.no","","","","","","","dni.us","","","","","","","","blogspot.ca","stord.no","mil.in","","","","","","","","","","","","","","","","","","community-pro.de","servehttp.com","","","","","","","","","","","ha.cn","","","","","","","","","","stavanger.no","","","","","","","","","","","","","","","","","","","","","","","","","","","mil.cn","","","","","","","","","nis.za","mil.ge","","","","","v\303\245gs\303\270y.no","","zapto.org","","","","","","","arts.nf","","","synology.me","","","","","","","","","","","","va.us","","","","","googleapis.com","","","","","","","","","bplaced.com","","","","","sellsyourhome.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mil.gt","","","","","","","","","","","","hk.org","","","","","","graphox.us","","","","","","for-better.biz","","","","","","","","","","","","","","","","","","","blush.jp","","","","","","","sandnessj\303\270en.no","","","storfjord.no","","","","","","santamaria.br","","","","","","","","\303\270ystre-slidre.no","","amli.no","hostedpi.com","sor-aurdal.no","community-pro.net","","","","blogspot.si","","","","","","","","","","","","","","","","","","","","","","","","sund.no","","gx.cn","","","","","","kepno.pl","","","","","","","","","","","","","","","","","","","","","","","d.se","","","asti.it","","","","ehime.jp","vn.ua","","","","","","","","","","mp.br","","","","","","","from-me.org","","","","","","","","","","dynns.com","","2ix.de","","gov.lb","","","donna.no","","","","","","","","blogspot.is","","cupcake.is","","","","","encoreapi.com","","","","","","","","blogspot.fr","","","slask.pl","","","","","","","jobs.tt","","myvnc.com","","","","","","","","","","","","","","","","","","","","","blog.gt","","","gouv.sn","","","mil.lv","ddns5.com","","","","","","","","","","","","","servepics.com","","","","","","","","","","","","ddnss.org","","curitiba.br","","","","","","","","","","bozen.it","","","","","","","","","bplaced.de","","firm.ro","","","","","club.tw","","","","","","","","sakuraweb.com","","","","","coop.ht","","","","","","","","","","","","","hokksund.no","","","","","","","","","","","gulen.no","","","","","","","","","","","","","fl\303\245.no","","","","","","","","","","","","","","","fredrikstad.no","","","","","","","","","","","","shopitsite.com","","","","","","","","","","","","bplaced.net","","","","","","","","","","","mayfirst.org","","better-than.tv","","m.se","kr\303\270dsherad.no","","cloudns.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",
"","","","hl.cn","","","","","","","","123hjemmeside.no","","","","soctrang.vn","","","","bashkiria.ru","","","","","ngrok-free.dev","","","","","","","","","engineer.aero","","","","","","","","","","","","","","","","","","","boyfriend.jp","","","","","xx.gl","","","","","davvenj\303\241rga.no","","","","","","","123hjemmeside.dk","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","firm.dk","","","","","","","","utwente.io","","","","","","","","","","exnet.su","","","","","","","","","","","","","","","","","","","","","","","","","","","kropyvnytskyi.ua","","","","","","","","","","","","mi.it","mil.jo","","zapto.xyz","","","","","cloudapp.net","","","","","","","","vp4.me","","","","","","business.in","","gangaviika.no","","","","","","","","","","","campinas.br","","","","","","","","","","","konin.pl","","","","","","","","","","","","","","","","","","","","","frogn.no","","","","","","","","","","","","","","","","","","","","cf-ipfs.com","","","","","","","","","","","","","","","","","","ms.it","","","","","","","","","","","bihar.in","","","4lima.ch","","","","","","ve.it","","","","","","","bjugn.no","bacgiang.vn","","","","","","badaddja.no","cyon.link","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","vt.it","","economia.bo","","","sejny.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","space-to-rent.com","","","","","","","","","","","","","","","songdalen.no","","","","","","","","blogspot.nl","","","","","","","","nikolaev.ua","","","","","","","","","","","","","","doomdns.com","","","","","","","","","","","","","","","","","","","","","","","","","","est-a-la-masion.com","","","mein-iserv.de","balat.no","","","","","","","","","","","","","blogspot.li","","","","","","","","","airtraffic.aero","","","","","","","","","","","","","","","","","","","","daegu.kr","","de.cool","","","","","","","","skaun.no","","","","","","florence.it","","","","","","","","fastlylb.net","","","myasustor.com","","","","","netgamers.jp","","","","","","","","","","","","","","","","","","","","","skiptvet.no","","","","","","est-a-la-maison.com","","","sch.ng","","","","","","","","","","","","","","","","","","","","","k12.il","","","","","niigata.jp","","","blogspot.cl","","est-mon-blogueur.com","","","","","","","","steinkjer.no","","","quangbinh.vn","","","","","akrehamn.no","","","","","","","","","","","","","","","vr.it","mil.mg","","","","","","","","","","","","","","","mil.kg","","","","floro.no","khmelnitskiy.ua","","","","\347\275\221\347\273\234.hk","","","","","\344\270\252\344\272\272.hk","","","","","","","","","bjarkoy.no","\347\266\262\347\273\234.hk","","","dedibox.fr","","spb.su","","","","shiftedit.io","\347\275\221\347\265\241.hk","","","","","\346\224\277\345\272\234.hk","","","","","firm.co","","","","","\347\266\262\347\265\241.hk","","","","","","","","flatanger.no","mosj\303\270en.no","","","","","","","","","","","","","","","chernivtsi.ua","","","","","","","","","","","","","","freemyip.com","","\345\205\254\345\217\270.hk","","","s.bg","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","noip.us","","","","","","","","","","ddnsgeek.com","","sand\303\270y.no","","herokussl.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","
","","","","","","","","","","spb.ru","servebbs.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","sassari.it","","","","hyllestad.no","","","","","","","","","","","","","","","","","chernigov.ua","","","","","","","","","","","","media.aero","","","","fylkesbibl.no","","","","","","","servebbs.net","","","","","","coolblog.jp","","","","","","meinforum.net","","","","","cloudcontrolapp.com","","","","","","forli-cesena.it","air-traffic-control.aero","","","","","vv.it","","","","","","","","","","","","","","","","","","coop.mw","","","mayfirst.info","","","","","g\303\241ivuotna.no","","gliding.aero","serveftp.com","","","","","","","","","","","","mil.ac","","","","","","","","","bari.it","","","","","","","","","","","","","","","","skr.jp","","","","","","","cloudycluster.net","","","","","","","","","alwaysdata.net","","","","","","","","","","","","","","","","","","","stat.no","","","","","","","goiania.br","","","","","","","","","","","","","","","","serveftp.net","","","","","","","","","","","","","square7.de","","","","","","","","","ulsan.kr","","","","","","","jele.cloud","","","","","","","","","","gj\303\270vik.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","servep2p.com","","","","","","","","","","","","","","messina.it","","","","","","","","","","","","","","","","","","","","","knx-server.net","","","","square7.net","","","","","","","","","","","","","","","","","","","","","","","","cyon.site","","","","","","","","","","","","","","","","","","","","yenbai.vn","","","","","","","","","","","","","","spacekit.io","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","web.do","","","","","","x443.pw","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","stjordal.no","","","","v\303\241rgg\303\241t.no","","","","","","","stranda.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","for-more.biz","","","","","","","","","","","","","","","","","","","","","h\303\270nefoss.no","","","","","","","","","","","w.bg","","","","","","","casacam.net","","","","","","","","","coop.rw","","hitra.no","","","","","","groks-this.info","","","","","coop.km","","","","","","","","","","","","co.education","","","","","","","","","","","","","","mytuleap.com","","","","","","","","from.hr","","","","","","mil.ec","","","","","","","","","","","","","","vc.it","","","","","","","","","","","drammen.no","","","","","","","","","","","","","","","","","clerk.app","","","","","","","","","","","","","","","","","","","","","","","","","dyndns1.de","","","","","akamaiorigin-staging.net","","","","","","","hof.no","","","","","","","","","","","","","","","","","","","","","","","","","barlettatraniandria.it","","biz.fj","","","jele.io","","","","","","","","","","","","","","","","","","","","","","","","","","","and\303\270y.no","","","cloudcontrolled.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","s\303\270r-varanger.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gyeonggi.kr","","notaires.fr","","","","","","","","","","","","","","stor-elvdal.no","","","khanhhoa.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","balashov.su","","","","","",
"","","","","","","","","","","","","","","","","","","","","","","","","","","","kongsvinger.no","","","","servebbs.org","","","","","","","","carbonia-iglesias.it","","","","","","","","","","","","","","","","","","meraker.no","","","","","","","calabria.it","","","blogspot.al","","","","","","","","","","framercanvas.com","","","","","","","","","","","","","name.na","myactivedirectory.com","","","","","","","","","","","","","","","","","","","nome.pt","","","","","","","","","girlfriend.jp","bashkiria.su","","","","","","","","","","","","","","","hanoi.vn","","","","","","","","","","","","","altoadige.it","","","boutir.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","serveftp.org","","","","","ashgabad.su","","","","","","","channelsdvr.net","","synology-diskstation.de","","","","","","","","","","","","","","","","","","","","","","","","","","accesscam.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","golffan.us","","","marnardal.no","squares.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gob.ve","","","siteleaf.net","id.ly","greater.jp","","","","","","","","","","","arna.no","","","","","","","","","","","","airkitapps-au.com","","","","","","","","","","","","","bulsan.it","zachpomor.pl","","","mil.ba","","","","","","","","","","","byen.site","","","","","","","dovre.no","","","","","","","","","","","","","","dp.ua","","","","","","","","","ecologia.bo","cloudns.info","","","","","","","","","","noip.me","vik.no","","","","","","","","","","","","ind.kw","","","","","","","","","","","","","","","","","","","","","","va.no","","","","","","","","","","","","","","","","","","","","","","","","","","","dnsup.net","for-our.info","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","int.bo","","","","","","","","","drangedal.no","","","","","","","","univ.sn","","","","","","","","","","","","","","","canva-apps.com","","","ascolipiceno.it","","","","","","","","","","agrigento.it","","","","","","","mycloud.by","","","","","","","","int.mw","biella.it","","","","","","","biz.vn","","","catfood.jp","","","","","","","","","","","","","cloudns.club","","","","","","","","","","","","","","2ix.ch","vi.it","","","feste-ip.net","","","","","12hp.at","","","","","","","","int.co","","","","","","","","","","","","","certification.aero","","","","","","","","","","","","","","","delhi.in","","","","","","","int.ru","damnserver.com","","","","","","","","","","","","campidanomedio.it","","","","","","","","","","","","","","","","","","","","","","mil.eg","chips.jp","","","","","","andriatranibarletta.it","","","","","","","","fuoisku.no","","","","vs.it","cahcesuolo.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","servehalflife.com","","","","fukuoka.jp","","","","","","","","","","","","","","","","","","","bialowieza.pl","","","","","ballooning.aero","","","","","","bearalvahki.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","molde.no","","","","","","","","","noho.st","","","","blogspot.be","","","","","","","","","","","","","","","","","","","","","","sch.zm","","","","","idv.tw","","","","","","","","","","","","","","","","","","","
malselv.no","","","","","","","","","","","","","","","frei.no","","","","","","","","","","","","","","","halsa.no","","","","","","","","","","","","","mydatto.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","moareke.no","","","airport.aero","","","","va.it","","","sch.wf","","altervista.org","","","","","","","","","","","","","","","","dyn-berlin.de","","","","","","","","","","","","","","","","","","","","","","","","","","","","","staba.jp","","","","","","","ngrok.pizza","","","","","","","","","","","","","homeftp.net","","","","","","","","","","","","","","","","","","","","","koobin.events","","","","","","","","","ask\303\270y.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","cloud.goog","","","","","","","","","","mil.do","","","cocotte.jp","","","","","","","","","","","","sardegna.it","","","","","","serveblog.net","","","","","","","","name.hr","dnsupdater.de","","","divttasvuotna.no","","","","","","","","","","","","","blogspot.ch","","","","","","","","","","","","","","","","goip.de","","","","","","","","","","","","","","","","","","vennesla.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","svelvik.no","my.id","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ind.br","","","","","","","","","","","","","","","","","","","","","","","","","int.pt","","","","","","","","","","","","","","","","","","","","","bulsan-s\303\274dtirol.it","","","","","grozny.ru","","","","mil.no","","","","","","","","","","","","","","","","","","","","","","mydatto.net","kv\303\246nangen.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","sveio.no","","","","","","\345\214\227\346\265\267\351\201\223.jp","","","","","","","","","","","mcpre.ru","ullensvang.no","","","","","","","","sicilia.it","","","","","","","","","","","","","","","blogspot.fi","","gyeongnam.kr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","vikna.no","","","","","","","","","","","","","","","","","","","","","","","","","","vestnes.no","","","","","","zgorzelec.pl","","","","","","","","","","","","","","siracusa.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ddnsfree.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","stryn.no","","","","","","","","","","","","","","","sklep.pl","","","int.tt","","","","","","","","","","","","","","","","","","","","","","sch.tf","","","","","","","firm.ht","","canva-apps.cn","","","","accident-investigation.aero","","","dagestan.ru","","","","","","","","","","","","","","","","","granvin.no","","","","","","","","","","mil.cl","","","","","","","","","","","b\303\270mlo.no","","","","","","","","","","","","","","","","","","","","","","","","","s\303\270ndre-land.no","","","","","","","","","","","","","","","","","","","navigation.aero","b\303\246rum.no","","","mi.th","","int.lk","","","","","flynnhosting.net","","merseine.nu","saitama.jp","","","","","","","","","","","","","","","","","","","","","","","moonscale.net","","","cosenza.it","friulivgiulia.it","","","","ya
lta.ua","","","","","","","","","small-web.org","","","wpmudev.host","","","","","","","","","","","","","","","","","salvador.br","","","","","","","","m\303\245s\303\270y.no","basilicata.it","","","","","","","","","","","","","","","","","","","","","","","","","stathelle.no","","","","","","sanok.pl","","","","","","","2038.io","","","","","\303\241lt\303\241.no","mytabit.com","","","its.me","","","","","","","","","","","","","","","","","","","","","","","dep.no","","","enna.it","","","name.tj","","","","","","","","","","syncloud.it","","","","","","","","soc.dz","","","nesseby.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","konyvelo.hu","","quicksytes.com","","filegear-de.me","","","","","","","","","","","","","","","","krasnik.pl","","narvik.no","","","","","","","","","","","","","kuron.jp","wellbeingzone.eu","","","","","","","","","","","","","","","","","","","","","","","","mil.pl","","","","nedre-eiker.no","","alta.no","","","","","","","","","","","","","","","","","","","","","avocat.pro","","","","","","","","","","","","","","","","","","s\303\270rreisa.no","","","","","","","","","blogspot.qa","","","","","","","","bozen-sudtirol.it","vladimir.ru","","int.mv","","","","","","","","","","","","","","","","","","","sc.tz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","streamlitapp.com","","","","","","","","","","ddnsking.com","","holtalen.no","","","","","name.jo","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","simplesite.pl","","","int.cv","","","","","","","","","","","","","","","","bulsan-suedtirol.it","","","","","","forlicesena.it","","","","","","","","","","","","","schokokeks.net","","","","id.us","vgs.no","","","appspacehosted.com","","","","","","","","","","","","","","","","","bib.ve","","","","","","","","","","","","","","","","","","","","","","catania.it","","","","","","","","","avocat.fr","12hp.de","","","","ybo.faith","","","","dscloud.me","","","","","","","gamvik.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","skydiving.aero","","","","","","","","","","","gujarat.in","","","","","","","","","","","","firm.ve","","","","","","","","","","","","","","","","vercelli.it","","","","sex.hu","","kongsberg.no","","","","","","","","","federation.aero","","","","","","cantho.vn","","","","","","","","voorloper.cloud","","","","","","","","","","","","","","","","","","","","","","","","","","","blogdns.org","","","","","","","","","in.net","","","freebox-os.fr","","","","","","","","","","","","","","","","","","","","","","","","","","uzhgorod.ua","","","","","","","","","dielddanuorri.no","","","mintere.site","equipment.aero","","\303\270vre-eiker.no","","","","","","","","bolivia.bo","","","","","","","","","skjerv\303\270y.no","easypanel.host","","","volda.no","salerno.it","","","","","","","","","","","","","","","","","","","","","","","","","","in.ni","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","odo.br","","","","me.in","","","","hs.zone","","","","","","","","","","","","","","","valleeaoste.it","","","","","","","","\303\270rsta.no","","","","","gialai.vn","","gorizia.it","","","app.gp","","","","mil.qa","campobasso.it","","","","mo-siemens.io","","","","","","","","","","","","","","","","hungyen.v
n","","","","","","","","","","","","","cloudjiffy.net","","","dr.in","","","cheap.jp","","","","","","","","","","","","","","","","","","ind.in","","","","","","","","","","","","coop.in","","","","","","","","","","","","","","freebox-os.com","","","","","","","","","","","","azure-mobile.net","","","","","","","","or.cr","","","ngrok.app","","","","","vinnica.ua","","binhphuoc.vn","int.ar","","","guovdageaidnu.no","","","","","","","","","","dienbien.vn","","","mydobiss.com","","","","","","","","","","","","","","","","","","","","","","nowaruda.pl","","","","","","","","","","","","","abkhazia.su","","","","","cloudns.biz","","","","","","andriabarlettatrani.it","","","","","","","knowsitall.info","","","neko.am","kurgan.su","","","","","","","","","","","","","","","","","","","","","","","","","kl\303\246bu.no","","","","","int.in","","","","","","","ind.gt","","","","","","","","","","","","","","","","","","","asso.ht","","","carraramassa.it","","","","jambyl.su","","","","","","","","name.pr","","","","","","","balsan.it","","","","","","","authgearapps.com","","mil.al","","","","","mil.hn","","","","","","","emergency.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","geometre-expert.fr","","","","","","","","","","","","myspreadshop.no","","","","","","","","","","","","","","","","","","","","","","game.tw","","","","myspreadshop.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","jeez.jp","","kaas.gg","","","","","","","","","","","","","","","","","","","clickrising.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mcdir.me","","","","","","","","","","","","","","","","","","","","","dyroy.no","","","","","","","","","","myspreadshop.net","","","","","","","","","","","","myspreadshop.dk","","","","","","grozny.su","","","","","","","","","","","ciscofreak.com","","auto.pl","com.mx","","","","","","","","","","","","","","","","","","","","","mb.ca","","","","sumy.ua","","","","","","","","","","","","","","","","","","","","","","myspreadshop.de","","","","","","","","","","blog.kg","","oy.lc","","","","","","edu.mx","","","","","","","","","","","","","","hjartdal.no","musician.io","","","","","","","","","","","","myspreadshop.se","","","","bievat.no","","","asso.re","","","","","","","","","","","","","","","","","","","","or.mu","","","sunnyday.jp","","","","","","","","","","vaksdal.no","mordovia.ru","basicserver.io","","","","","","","","","","","","","architectes.bj","","wmcloud.org","","","","","","","","","","","","","","","","own.pm","","","","","","ullensaker.no","","","","","","","","","","","","","","","","","","","1337.pictures","","","","","","","","","","","","","","","","","180r.com","","","","","","","","","","","","","","","","","","","","","","","","","","hanam.vn","h.bg","","","","","","","","","myftp.org","in.us","","nesset.no","","","coop.br","","","","","","","","","","","","","","","","","","","","","","servebeer.com","","","","","","","","","ind.tn","gentlentapis.com","","","","","","","","","","","","","","","","","","","","","","","","skien.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","honefoss.no","","","","","","","","","dagestan.su","","","","","","","","","","","","","","","","","","","","","","","","","","","","","haiduong.vn","","","","","","","","","beagleboard.io","","","campidan
o-medio.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","12hp.ch","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","h\303\241mm\303\241rfeasta.no","","","biz.bb","","","","","","","","","fashionstore.jp","","","","","","","","","","","","","divtasvuodna.no","dynserv.org","","","","","","","","","","","","net.mx","","","","","","","","","","","","","","","","","","","name.eg","","","","","","","backan.vn","","","","","","","","","","","","","","","","","","aus.basketball","","","","","","","","","","","","","","","","","","","","vologda.su","","","","","","","","","","","","","bolzano-altoadige.it","","","","\303\245rdal.no","","","","","","","","","","","","","m\303\241latvuopmi.no","","","zhytomyr.ua","","","","evenes.no","","","","","","","","","","","movimiento.bo","friulive-giulia.it","","","heavy.jp","vf.no","","her.jp","","","","","","","","","","","","","","","supabase.co","","","","","!www.ck","","blogspot.cz","","","","","","","","bedzin.pl","","","","","","","","beep.pl","","","","","","","","","","","","","","","","","","stj\303\270rdalshalsen.no","","","","","","","","","","","","","","binhdinh.vn","","","","","","","","","","sex.pl","","","","","","","","","","","","","","","","","","","","","","balsan-s\303\274dtirol.it","","","","","","","","","","","","","","","","","","","","","","","","supabase.net","","","","","mil.sh","","","","","","","","","","","myeffect.net","","","","","","","","","org.mu","","","","","org.so","","","","","org.mo","","","","","","","","vladimir.su","","org.bo","","","","","org.sd","","","","","","","","","","","","","","","exchange.aero","","","","noor.jp","","","","","","org.bm","","","","","org.km","","","","ie.ua","","gaular.no","","","copro.uk","civilaviation.aero","","","","","","","","","","","","","","","org.mw","","","","","","","","","","org.bw","","","","","org.kw","","123website.lu","org.im","","","","","","","v.ua","","","","","","","","mosjoen.no","","","","","","","dev.vu","","","","","org.cu","org.mk","","","","endofinternet.net","","","","","org.co","utazas.hu","","","","","","","","","","arte.bo","","","","","","","","","","","","","","","org.uk","","","","","","","","","org.ru","","","","","","","","","serveminecraft.net","org.ro","","","","","","","","daejeon.kr","","","","","","","","","","virtualuser.de","","","","","","","org.cw","","skjak.no","web.ni","","","","","","co.events","","","cloud66.ws","","","","","","","","org.sy","","name.vn","","","org.my","","","","","","","","","","","","","sx.cn","","org.ky","sosnowiec.pl","","","filegear-ie.me","org.rw","","","","","org.se","","","","","org.me","","","","","org.uy","","","","","","","","","","","","","","","","","","","in.na","","","","","","","","","","","","","","blogspot.bg","varggat.no","","","","","","","","","","","","","","","","","","","","","kumamoto.jp","","","xnbay.com","","","","","","","","","","barsyonline.com","","","","","","","","blogspot.cf","","lk3.ru","gdansk.pl","org.st","","","","","org.mt","","","","","org.cy","","123website.be","","","org.bt","","","","","","homelink.one","","","","","association.aero","","","","","","boxfuse.io","","","","","","","","","","","","","","","","","edugit.io","","","","","","","","wnext.app","","","","","chambagri.fr","","","","","","","","","","","","","","","cable-modem.org","","","","","","","","","","","","","","","","","","","","","","","","","or.ci","","","mil.ph","","","","","","ltd.uk","","","gdynia.pl","","","","","","","",""
,"","","","","","","","","","","","filegear-au.me","","","","","","","","boomla.net","","","","","","","","","drud.io","","","","","","","","homelinux.com","","futuremailing.at","","","","","","myspreadshop.be","","","","","","","","","","","","iveland.no","","","","","","ecommerce-shop.pl","","","","","","jele.host","","lom.it","cloudns.asia","","","","sor-varanger.no","","","","","","","","","","","","","","","","","","","","","","","","","","githubpreview.dev","","","","","","","","","","","","","","","","teo.br","","asso.km","gotpantheon.com","","hb.cn","org.pk","","","","","","","","","","","","","","","","","","","","","","","","barsy.shop","","","","","","","","","eigersund.no","","","","","homelinux.net","","","","","","freeddns.org","","","","","","","","","","","","","","","ltd.cy","alpha-myqnapcloud.com","","","","","","","","free.hr","","","","","","","","","","tm.cy","","bamble.no","","","","","","","","","","wixsite.com","","","","","","","","my-router.de","","","","","","org.py","","","","aerodrome.aero","","","","","","","","","","championship.aero","","","","","","","","","","","","","","","","org.pe","","","","","","","","","","","","","","","dedyn.io","","","","","zagan.pl","","","","","","","mydissent.net","","","","","","","","","","","","sells-for-less.com","","","","","asso.fr","","","","","","","","","","","","","","","name.ng","","","","","virtual-user.de","","","","","","","","kraanghke.no","","","","","","","","santoandre.br","jondal.no","","","","","","","","","org.pt","","","","","org.to","","","","","mypep.link","","","","","","","name.qa","","","","","","","","","","","","","","","","","","org.tm","","","","","verbania.it","","","","","","","","","eu.ax","","","","","","","","","","","","","","","","","","ok.us","","","","balsan-suedtirol.it","","","","org.tw","","","","ia.us","","","mein-vigor.de","","","","","","frenchkiss.jp","","","","","","","schulserver.de","","","","xy.ax","","","","","","tur.br","","","","","","","","","","","","","","","trd.br","","friuliveneziagiulia.it","","","","drud.us","","","","","","","","tv.tr","org.br","","budejju.no","","","","","","","azurewebsites.net","","","kvinnherad.no","","","","","","","","","","","","","","","","","","","","","","","","","gwangju.kr","","in.rs","","","","","","","","","org.ir","","","","","","","","dnsdojo.com","","","","","","","","","","klodzko.pl","","valledaosta.it","","","","","","","gov.sx","","","","","","","","","","","","","","","mil.ng","","","","","h\303\270yanger.no","","","","","","","","","","","","","","","","","","","","","","","","","","mjondalen.no","","","","","","","","","","","","","","","","","","","int.ci","","","","in.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","sopot.pl","","","","","","","","","","asso.ci","","\303\245snes.no","","","","","","","123paginaweb.pt","","","","","\347\266\262\350\267\257.tw","","","","","","troms\303\270.no","gov.cx","","","","ninhthuan.vn","org.tt","","","","","","","","","","namaste.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","diskstation.eu","vindafjord.no","","","oya.to","","","","","","","","","","","","","","","","","","mordovia.su","upli.io","","","gjovik.no","","","","","","int.is","","","","","","","","","","","","","","","","","","","","","","","","","d.bg","id.ir","","","","","","","","","","","","","","","","","","","","","davvesiida.no","","","","","org.ye","org.lk","","it.com","or.us","","","","web.tj","salat.no","e12.ve","","","","","m
oo.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","myspreadshop.fr","","","","na4u.ru","","","","","","","","","org.au","","","","","","","","","","","","","","","","hacca.jp","","","","","","","","agro.pl","","","","","","org.yt","","","","","","","","","","org.am","","store.nf","","","org.ly","","","agents.aero","","","","","","","","","","","","org.pr","wedeploy.sh","","myftp.biz","boleslawiec.pl","","","","","","","","","freedesktop.org","","","","","","","","","","shopware.store","cesenaforli.it","","dnsdojo.net","","","","","","","","","","","","","","","","","flier.jp","","","","media.pl","","","","","","khmelnytskyi.ua","","","","","","","","","","","","","","","","","","","","","wroc.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","swinoujscie.pl","il.us","","","","","","m.bg","","","","","","","","","","","","","","","","","","","","","","","","","valle.no","","","","","","","","","","","","","","firm.ng","","idv.hk","","","","","","","","","","","","","","","","","","","","ltd.lk","","","","myamaze.net","","","","","","","","","","","","","","","","","","","","org.ae","","","moss.no","im.it","","","","","","","","myspreadshop.ca","","","","","couchpotatofries.org","","","org.sv","","","","","org.mv","","","","","","","","","","","","karasjohka.no","","tm.se","ong.br","","","","","","","","","","","","","","","","","","","","","","","","asso.mc","","","","","","minisite.ms","","","","","","","","","","","","","","","","","","","be.ax","","","","","","","","","","","","","","","","","","","","","","","","","","","khakassia.su","","","","","mb.it","","","binhthuan.vn","","andebu.no","stargard.pl","","","","","","","","","club.aero","","","","","","","","","org.tr","","","","","org.cv","","","","tm.hu","","","","","","","","","","","","","","","","","","","blogspot.ba","","","","or.bi","","","","","","","sorocaba.br","","","","","","","","","","","mil.ni","","","","for-some.biz","","","","","","","avoues.fr","","","","","","","","","democracia.bo","","","","","","","","","myiphost.com","","","","","","","","","","","","","","","","","","","","","","","","","","synology-ds.de","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","static-access.net","","","","","","","","","","","","","","","","","sakura.tv","","","","","","","","","","","","","","","dnshome.de","","","","","","","","","","tec.br","hamar.no","","","","","","","","","","","","","","","","","","","","blog.br","","","","","blog.bo","","","","","","","","webspace.rocks","","","","","","azerbaijan.su","","","","","","","","","","","","","","tm.fr","","","","","","","","","","or.kr","","","","","","","","","","os\303\270yro.no","","agro.bo","","","","","","","","","","","","","","","","","","","","","","","","","","","","tysv\303\246r.no","monzaebrianza.it","","","","","","","mantova.it","","","","","","","","","","","","filegear-sg.me","","","","","","","","","gjerdrum.no","","","","","","sinaapp.com","","","","","","","","","","","","","","","or.na","org.ee","","","","","","","","","","","","","","","org.lr","","","","","","","","","","","","","","","","","","","wafflecell.com","akamai.net","","","","","","","","video.hu","","","","","","","","","","","","","univ.bj","","","","","","","","blogsite.xyz","","","","","tm.mc","","","","shop.ro","","","news.hu","","","","","baidar.no","","","","","","","","","org.et","","","","","","","","","hopto.me","","","","","","mil.gh","","","
","","","","","","","meloy.no","","","","","org.gu","","","wroclaw.pl","","","haram.no","","","","","","","","","moskenes.no","","","","","","","myspreadshop.nl","tos.it","","","","edgekey-staging.net","","","","","vald-aosta.it","","","","","","","","","","","","","","","","","","tur.ar","","v\303\246r\303\270y.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.ar","","","myds.me","","","","","","","\347\256\207\344\272\272.hk","","","","tv.br","","","","","","","","","","","","","","","","","vevelstad.no","","","","","","","","","org.sn","","","forl\303\254-cesena.it","","org.mn","","","","","","","","","","org.bn","","","","","org.kn","","zakopane.pl","","","","","","","","","","","","","","ebiz.tw","","","","","","","","","","","","","","","","","","","","","","","","","","","org.in","","","","","","","","","firm.in","","","","","","","","","","","","","inf.cu","inf.mk","","","","nobushi.jp","","","","","cesenaforl\303\254.it","","","","","","","","","io.kg","","","","","","org.gy","","","","od.ua","","","","","","","","","","firm.nf","","","","","","org.cn","","","","","","","","","","org.ge","","","","","","","","","","","hotel.tz","","","","","","","","","","","","","","","","","","","","","","","","","","mediatech.dev","","","","","","","dyn-o-saur.com","","","","","","","","","","","","","","","io.vn","","","","","modalen.no","","","","","id.vn","","","","","","","","","","","","sells-for-u.com","","i.ng","","","","","","","org.gt","","","vadso.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","beskidy.pl","","","","","","","","","","","","","draydns.de","","","","","","","","","","","","","","stuff-4-sale.us","","","","","","","","","mil.mz","","","","","cnpy.gdn","for-the.biz","","","","bulsan-sudtirol.it","","","","","mil.kz","","","","base.ec","","","","","","","","","","","","","","","","","","","","aejrie.no","","","","","","","","","","","","nord-aurdal.no","","","","","","","","","nz.basketball","","","","","","","","","","org.sc","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tm.km","","","","","","","","","","","","","","","","","","","","","","fusa.no","","","","","","","","","","","","","","","chungbuk.kr","","","","","","","","","","","","","","","","","store.bb","homelinux.org","","","","","","","","","","on.ca","org.lv","","","","","org.pn","dray-dns.de","","upper.jp","","","","","","","","","","","","","","","","asso.nc","","naples.it","","","","","conf.au","","","","","","","","","","","","","tc.br","","","","","","","","bronnoysund.no","","shiftcrypto.dev","int.la","stufftoread.com","","","","","","","","","","","","katowice.pl","","","","","","","","","","unicloud.pl","","","","","","","","","masfjorden.no","","","","","","","","mil.tj","","","","","log.br","","medecin.fr","","","","","","","","","","","","","","","","","","","fakefur.jp","","kaluga.su","","","","","","","","","","","","","","verse.jp","","","","","donetsk.ua","","","hopto.org","","","","","","","","","","","","","","","","","","","","","","","","freeddns.us","","","","","","","buyshouses.net","","vardo.no","","","","","","","","","","","","","","","","","","","","","","","fuel.aero","","","","","","","","","","","","novara.it","","","","","","","","","","","or.ug","leg.br","","","","","","","","","","","","hicam.net","","","","","","","","","","","daknong.vn","","","","","","supabase.in","","","","","mil.iq","","","","","","","","","","","","","","","","","","","","","","","my
spreadshop.fi","","","","ddns.me","","broker.aero","","","","","","","","","gotdns.org","","","","","","","","","","","mil.zm","","","","","hostyhosting.io","","","","","","","","","","","","","","","","","worse-than.tv","","","","","","","","","","","","","","","","","","mil.zw","","org.tn","","","","","org.gr","","","","arezzo.it","","","akamaiedge-staging.net","","","","","","","","","","","miniserver.com","","","","","","","","","","","","","","","dsmynas.com","es.ax","","","","","fool.jp","","","","","","","","","","","org.jo","fauske.no","","","","","","vpnplus.to","","","","","","","","","","","","","","","","","","","","","","uber.space","","","","","","","","","","","inf.br","","","","","","","","","","","inc.hk","","","","","","","","us.ax","","","","","","","","","","","","","","","","","","","","","","","","","","","here-for-more.info","","","","","","","","","","","","","","","alvdal.no","","","","","","","","","","","","","or.ke","cat.ax","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","medecin.km","","","","mx.na","","","","virtualserver.io","","","","","","","","h\303\244kkinen.fi","","","","","","","","","","","","","","","","","","","","","","","bergen.no","","","","","","","","dnsiskinky.com","","","","","","","","myspreadshop.ch","","ham-radio-op.net","","cutegirl.jp","","","","","","","","","","","","","","","","","","friulivegiulia.it","","","","","","mil.tz","","","","","endofinternet.org","dynalias.com","","","","org.je","","","","","","","","","","","","","","","","","me.tz","","","dreamhosters.com","","","","","","","","","","","","","","","","bindal.no","","","","","","","","swidnik.pl","","","","","","","","","","","is-leet.com","","","","","web.ve","","","","","","","","","","611.to","","","","","bookonline.app","","","","","","","","","","","","","","","","dynalias.net","","","","","","","","","","","s\303\241lat.no","akamai-staging.net","","","","","i.se","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dsmynas.net","","","","","","gg.ax","","","","","","macerata.it","","vaapste.no","","","","","","","","","","","tsk.tr","","mie.jp","","","","","","","","","","","","","","","","","","","","","","","","","org.sg","","","","","org.mg","","azurestaticapps.net","","","","","","","","","","","","","org.kg","","","","","","","","","","","","","","","","","","","","org.ug","","","","","terni.it","","","","","","","","","","","","","","","hammarfeasta.no","","","","","","","","","name.fj","v.bg","","","","iz.hr","","","","","","","","","","","forgeblocks.com","","","","","","","","schulplattform.de","","","square7.ch","","","","","","","","","","kikirara.jp","","","","masoy.no","","","","","inder\303\270y.no","","","","","","","","","noticeable.news","","","","","tn.us","","","","","","","","mircloud.ru","","instantcloud.cn","","","","","","","","","","","ing.pa","guam.gu","","","","","","","voss.no","","","","veg\303\245rshei.no","","","","","","","","","","","","","","","","","","","","","","","ath.cx","","","","","","","","","","","","","","","","","","","averoy.no","org.lc","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","fldrv.com","","","is.it","","","","","shiga.jp","","omg.lol","","hagiang.vn","","","","","","","","aseral.no","","","","","","","","","","vb.it","","","hobby-site.com","","","","","","","","","","","","",
"","","herokuapp.com","","","valleaosta.it","","","","","","","","","","","","","","","","","","mil.az","","","","mysecuritycamera.com","","","","","","","","","","","","","","","","ts.net","kill.jp","","","","tonsberg.no","","","","","","","workinggroup.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","deporte.bo","","","","","","","","org.ac","","","","","","","","","","","skygearapp.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","misconfused.org","","","","mypsx.net","","","","","","","","","varoy.no","law.pro","","","mysecuritycamera.net","","","","","","","","","","","","","","","","","","","","loab\303\241t.no","","","","","","","","","","","","","","","","","","","","","","","","","","mydrobo.com","","or.pw","","","","nghean.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","sarpsborg.no","foggia.it","haugiang.vn","","","","tv.na","","","","","","","","","","","","","","","","","","","","","","","","","","","","","uh-oh.jp","ot.it","","","","","","","","","","","","","","","","","","","","sub.jp","","","","","","","","","","","","","","","","","","","","","","nohost.me","","","","","","","","","","","","","","","glitch.me","name.my","","","","","","hra.health","","","","","","streamlit.app","","","","","","akamaized.net","frosta.no","","hlx.live","","","edgesuite-staging.net","","","","","","","","","spjelkavik.no","umbria.it","","","te.ua","","","s\303\270rum.no","","","","","","","","","","","","","","dynalias.org","","","","lel.br","vps-host.net","","","","","fr\303\270ya.no","","","","","","","","","","","","","","org.fm","","","","","","","","webhosting.be","","","","diskstation.org","","","","","","appspaceusercontent.com","","","","fh-muenster.io","","","","","","","","","","","","","org.bi","","","shiftcrypto.io","","org.ki","","","","","prd.km","","ddns.net","","","","","","","","","","","bozen-s\303\274dtirol.it","123homepage.it","","","","","tm.za","","","","","","","in-butter.de","","","","","","","","","","","","","","","","","","lind\303\245s.no","","","","","","","","","","","","","","","","","","","","ftpaccess.cc","","","","","","","","","","","","","","","","","","","","","org.ec","","","","","","","","evje-og-hornnes.no","","org.gn","","","","","","","","","","org.ci","","","","","","","","lig.it","or.it","","","","","maringa.br","","","","","","","","myspreadshop.ie","","fitjar.no","tel.tr","","","","","","","","","","","","","","","","myforum.community","","","","","","","","cieszyn.pl","","","","","","","","","","","","","","","","","","myspreadshop.it","","","","","gotdns.com","","","","","","","andria-trani-barletta.it","org.ss","","","","","org.ms","","","","","","","","","","org.bs","","","","name.pm","","","","hagebostad.no","asso.gp","","","","","","","","","","","","","","","","","","","","","","","","","jellybean.jp","","","","","","","","","","","","","","org.is","","","","","","","","","","","","","","","","","","","huissier-justice.fr","","","","","","","tm.mg","","","","","aquila.it","","","","","\345\276\263\345\263\266.jp","","id.au","","","firewalledreplit.co","pro.cy","","","","\345\263\266\346\240\271.jp","","","","","\345\261\261\345\275\242.jp","org.ws","","","","\345\261\261\346\242\250.jp","","","","","\345\261\261\345\217\243.jp","","nyaa.am","","","\345\256\256\345\264\216.jp","isa.us"
,"","","","\345\262\241\345\261\261.jp","","","\330\247\333\214\330\261\330\247\331\206.ir","","\351\263\245\345\217\226.jp","","","","khplay.nl","\345\257\214\345\261\261.jp","","","","","\347\276\244\351\246\254.jp","","","","","\345\256\256\345\237\216.jp","","","","","\347\246\217\344\272\225.jp","","","","","\347\246\217\345\263\266.jp","","","","","\346\235\261\344\272\254.jp","","","","","\351\225\267\345\264\216.jp","org.rs","","","","\346\226\260\346\275\237.jp","backplaneapp.io","blogspot.jp","vix.br","","\345\244\247\351\230\252.jp","","","or.jp","","\347\246\217\345\262\241.jp","","","","","cooperativa.bo","","","","","\345\262\220\351\230\234.jp","","","","","\344\272\254\351\203\275.jp","","","","","\345\205\265\345\272\253.jp","","","","","\351\246\231\345\267\235.jp","","","","hosp.uk","to.gt","","","","skanit.no","\347\237\263\345\267\235.jp","","","","","\351\235\231\345\262\241.jp","","","","nombre.bo","\351\253\230\347\237\245.jp","","","","","\351\235\222\346\243\256.jp","","","","","\350\214\250\345\237\216.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","valle-aosta.it","","","","money.bj","","","","","","doomdns.org","","","","","","","","","","","","","","urbinopesaro.it","","","","","","","","the.br","jele.site","","","","","","","","","","","","","","","","","","","","","","","undo.jp","","","","","\345\237\274\347\216\211.jp","","","","","","","","","","","","","","","","","","","","","","capoo.jp","flap.id","","","","est-le-patron.com","","","","","","","","lt.ua","org.ag","","","","","","","","","","","author.aero","","","\351\225\267\351\207\216.jp","","","","","","","gotdns.ch","","","","","","","","","","","","","","","","","mykolaiv.ua","massacarrara.it","","","","taa.it","","","","","","","","","","","","","","","","","","","","","","","","","","catanzaro.it","","","","","","","","","","","","","","","","","","","","tt.im","org.ps","","mil.ve","","","","nysa.pl","","","","","","","","","","","ufcfan.org","","","","","","","","","","","","","","","","","","","homesklep.pl","","","","org.sa","","","","","org.ma","","","","","","","","","","org.ba","","","","","","","","","\344\270\211\351\207\215.jp","","","","","jcloud.kz","","","","","","","","","","","org.ua","","","","oh.us","","","dynu.net","","","","","","","groks-the.info","","","","","","","mazeplay.com","fastly-edge.com","","","","","","","","balsan-sudtirol.it","","","","","","","","","","","","","","","","","","","h\303\270ylandet.no","","","","","","","","lenug.su","","","","","clerkstage.app","","","","","","","","","","","","","","","vaporcloud.io","","","","","","","","","","","","","","chieti.it","","","","","","","","","","","","","","","","","","","","","","","","","","sula.no","","","","","sola.no","","","","","","","","","","","","","","","","","","","","wloclawek.pl","","","","","homebuilt.aero","","jaworzno.pl","pro.br","","","","","","","","","","","","","","","clan.rip","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dongnai.vn","","","","","tm.no","","","","","","","","","","","","","","","","jele.club","","org.hu","","","","","","","","","","","","","","","rec.co","","","","","sellfy.store","","","","","","","","","","","","","","","","fnwk.site","","","","","","","","","","","","","","rdv.to","utsira.no","","","","","","","","","rec.ro","","","","","","","","","","ltd.ua","","","","","","","","","","","","","","","org.eg","","","","","skodje.no","","sto
kke.no","pmn.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.hk","","","","","","","","","pro.tt","","","","","","","","","","","","","","suwalki.pl","","","","","","","","","","","","","dynamisches-dns.de","","","","","","","","red.sv","","","","web.app","","","","","","from-az.net","bolt.hu","","","","","","","dongthap.vn","","","","","","","","","","","","","","","","","org.pa","","","","","","","","","","","","","","","","","","","","","","of.je","","","","","","","","","","","","","","dnsalias.com","","","lv.ua","","","","","","","","imb.br","","","","","","","","","","","vinhlong.vn","","","","","","","","","development.run","","","","","","","","","herad.no","","","","","","","","","","","","","","","","","","","","","","","","encr.app","","","","","","","","","","trogstad.no","","","","","","","sn\303\245sa.no","","","","","","","","tas.au","","","","tv.im","","dnsalias.net","sm\303\270la.no","","","","","press.se","","","","","","","","org.ht","","","","","","ketrzyn.pl","","","","","","","","","org.ai","","","","","","","","","","","","","","","pro.pr","","","","mysecuritycamera.org","","","","shop.ht","fastvps.site","org.ls","","","","la.us","","","","","","","","","","","","","","","namsos.no","monza-brianza.it","shop.th","","","","","","","","","kirara.st","ltd.hk","","","","","","","","","","","tr.no","","","","","","","","","","","mex.com","if.ua","tayninh.vn","","","","","","","","","","","","","","","","flor\303\270.no","","","","org.gg","","","","","","","","","flekkefjord.no","","","","","","","","","","","","","","","to.it","","","","","","","","","org.om","","","karmoy.no","","","","","","","","","","","ivanovo.su","","og.it","","","","kartuzy.pl","","org.do","","","","","","","","","","","","","nordre-land.no","","","","","","","","","","","","","knightpoint.systems","","","","org.dm","","","","barsy.club","","","","","","akadns.net","","","","","","","","","","","","","","","ru.net","","","","","","","","","fhsk.se","","","","","","","","","","","","","bounty-full.com","","","","","","","","pro.mv","","","","","","","","","te.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","o.se","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hepforge.org","","","","","","","","","","","","","","","","","","","","","of.by","","","","","","","","","seidat.net","","","","","","","","","","","","","","","","in.th","","","","","","","","","","","","","","pug.it","","","","","","","","","","","","","","","","cloudflare-ipfs.com","","","","","","","","","","","vanylven.no","","","daa.jp","","","","torsken.no","","","","","","","","","","hole.no","","","","","","storebase.store","","","","","","","","","","","","","","","","","","","","","","","","browsersafetymark.io","","","","","","","","","","","","","","","","","","rec.br","","","","","travinh.vn","","","","","","","","","","","","","","","","","","iki.fi","","lom.no","","","","","","","","","","","","","","","","","","","","vefsn.no","","","","","","","","","shimane.jp","","in-berlin.de","","","","","","","","","","","","","","org.la","","","","","","","","","messwithdns.com","","","","","","","","","","","","diskstation.me","zombie.jp","","","","","","","","","","","","","","","","","","","","","123webseite.at","","","","","","","","","lucca.it","","tr.it","","","","","","","eero.online","","","","","beardu.no","","","","","","","","","org.sl","","","","","org.ml","","","","","","",""
,"","","","","","","","","blog.vu","","","","","","","","","","","","","tv.sd","","","","","","","","crap.jp","","hyogo.jp","","","","","","","","","","","mil.fj","","","","","","","k12.vi","org.il","","","","","","","","","","","","","","","","","org.es","hobol.no","","","","","","","","","","","","","","","","","","","","weblike.jp","123website.nl","","","","","","","","abruzzo.it","","","","lu.it","","","","","lo.it","","","","hattfjelldal.no","","","","","","","","","dnepropetrovsk.ua","","","","","","","","","collegefan.org","","","","","","","","","","","","","","","","","","","","","","","","","","123kotisivu.fi","","","","","","","","","","","","","","bahcavuotna.no","","","","","","","","","","easypanel.app","","","","","dnsalias.org","","","","","","","","","","","","","","","","","","to.md","tmp.br","","","","","","","","","","","","","","","","web.za","","","","","","","","","","","","","le.it","","","","","","org.gi","","","deno-staging.dev","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lg.ua","","torproject.net","","","lt.it","","","","","","","homesecuritypc.com","","","","","","","","","","","","","","","","","","","","","","","","cloudfunctions.net","","","","tv.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ol.no","","","","","","","","","","","pro.in","","","","","","","","fosnes.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","cloudapps.digital","","","","","","","","","","","","","","flog.br","","","","","","","","","","","","","tw.cn","","","","","","","","","","","","","","","","org.pl","","","","","","","","","","","","","","homeftp.org","","","","","","","","","","qbuser.com","","","","","","","","","noop.app","","","","","","","","","","","","","","","","","","","","","","","","nhlfan.net","","","","","","","","","","","","","","","","","","ltd.gi","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","agro.bj","","","daklak.vn","","","lecce.it","","","rio.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","nflfan.org","","","szczytno.pl","","","","","","","","","","","","","","","","","crafting.xyz","","","","","","","","","","","","","","ancona.it","","","","","","","","tn.it","","","","","","org.nr","","","","","","","stange.no","","","","","","","","","","","","","simple-url.com","","","","","int.ni","","","","","fedorainfracloud.org","","","","chirurgiens-dentistes.fr","","","","","","","","","","chirurgiens-dentistes-en-france.fr","","","","","","scrapper-site.net","","","","","","","","","","","","","","","","","","","","","","home-webserver.de","","","","","","","","","","vestre-slidre.no","","press.aero","","","","","boldlygoingnowhere.org","","","","dev-myqnapcloud.com","","","","fukushima.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","myphotos.cc","","","","","","","","","","","","","wphostedmail.com","","","","","","","","friuliv-giulia.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","myspreadshop.es","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","sicily.it
","","","","candypop.jp","","","","","","","","","","","","","","","","","vossevangen.no","","","","","","","","","","","","","","","","","","","","","","pvt.ge","","","","","","","","","","","","webthings.io","","","","","","","","","hoyanger.no","","","","","","","","","","","","","","r2.dev","","","","","","","","","","","","","","","spydeberg.no","","","","","","","","","","","","","","","","","","","","poa.br","","","","","","","","","","","","stuff-4-sale.org","","","","","","","gifu.jp","","","","blogspot.bj","","org.qa","","","","","","","","","","inf.ua","","","","","","","","","","","","szczecin.pl","","","","","","mo-i-rana.no","","","","","","","","","","","pr.us","","","","","","","","","","","","","","","","","","","","","","","","skj\303\245k.no","","","","","","","","","tv.bo","","","","","","","","","","","","tv.kg","","","mil.vc","","","","","srht.site","","","","","","","","","","","","","","","","","","","","","","","","","carboniaiglesias.it","","","","","","idf.il","","oster\303\270y.no","","","","","coop.py","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","web.nf","","","","","","","","","","","","","","","","","dnsking.ch","","","urown.cloud","vladikavkaz.ru","","","","","","","","","","","","","","","","","","","","","","","","crimea.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tm.ro","","","","","","","","orsta.no","","mymediapc.net","","","orsites.com","","mj\303\270ndalen.no","","","","","","","","","","onavstack.net","","","","","","vinnytsia.ua","","org.al","","","","","org.hn","","res.aero","","","","","","","","","","","jelenia-gora.pl","","","","","","","","","","","re.kr","","","accident-prevention.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","usercontent.jp","","","","","","","","","","","","","","fastvps.host","lombardy.it","","","","","","messerli.app","","","","","","","","","","","","","","","","","","","","","aeroclub.aero","","","","","","","lc.it","","","","","","","","","t.se","","","","","","pe.kr","","","","","","","","","","","","","","","","","","","","","","","","hamaroy.no","","","","","i.ph","","","","","","io.in","","","","","","","","","","","","","","","homedns.org","","","","wmflabs.org","","","","","","","","","","","","","","","","or.th","","","","","","","","","","","","","","","cranky.jp","","","","","","","","","edgecompute.app","","","","mircloud.us","","pe.ca","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","gniezno.pl","val-daosta.it","","swiebodzin.pl","","","","","","","","","","","","","","maori.nz","","","","","","mircloud.host","","","","","","prd.mg","","","","","","","","","","","","","","","","","","","","","","","","daynight.jp","parti.se","","","","","","","","","","cloudsite.builders","","","","","myspreadshop.pl","karaganda.su","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","forumz.info","","","","","","","","","","","","","","","","","tysfjord.no","napoli.it","","","","","","","","","per.sg","","","","","","","","valdaosta.it","discourse.team","int.tj","","","","","","","agric.za","","","","","","","l\303\270dingen.no","","","","","","","","","","","","","","","","","","","mil.za","ts.it","","","","","","","","","","","firenze.it","","","","","","",
"","","","","","","","","","","","","isa-geek.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","wielun.pl","","","","","","","","","","","","","","","","","duckdns.org","","","","","","","","","","","","","","benevento.it","","","","","","","","","","","","","","","","","","","","","","","","","","","isa-geek.net","","","","","","","","","","","","","","","","","","","","","","","","","","chowder.jp","","","","","","","","","","","","","","ambulance.aero","servequake.com","","","","","httpbin.org","","","","","","","","","","","","penza.su","","","","","","","","","","","","","","","","","","","","shizuoka.jp","","","","","","","","","tr\303\270gstad.no","","","ciao.jp","","","fastly-terrarium.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","l.se","ta.it","","","","","","","","","","","","","","","","","","","","","","","","ooguy.com","","","","","","","","","","","","","","test-iserv.de","","","","","","","","","","","","","","","","","","","","","r\303\270mskog.no","","psc.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ln.cn","","","","","","","","","","","","","","","","","","lg.jp","","","","","","","","org.gl","","dnipropetrovsk.ua","outsystemscloud.com","","","","","beiarn.no","","","","","","","","plc.uk","","","","","","","","","","","","","","","davvenjarga.no","","","","","","","","","","or.at","","","","saobernardo.br","","","res.in","","","","","","","","","","","","","","","","","","chicappa.jp","","","","","","","east-kazakhstan.su","","","","","","","pri.ee","","","","","","f\303\270rde.no","","","","scrapping.cc","","bar0.net","","","","","","","myspreadshop.at","","","","","","","","","","","","","","","li.it","","","","","","","","","","","","","","","","org.sh","","","","","","","","","","","","","","","org.bh","","","","","","","treviso.it","","","mil.nz","","","","","","","","","","","","","","","","","","","","","vegarshei.no","","laz.it","","","","","","","","","","","","","storipress.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","emilia-romagna.it","","","123website.ch","","","","","","","","ote.bj","","","","","","","","","","","aparecida.br","","","","","","","","","pol.tr","","","","","","","","","","ras.ru","friuli-ve-giulia.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pro.ec","","","","","","","","dyr\303\270y.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","bona.jp","","","","","","","","","","","","","","","","wodzislaw.pl","","","","","","","","","casino.hu","","","","","","","","","","","","val-d-aosta.it","","","","","","","","","","","sdscloud.pl","","","","","","","","","","","","","","","","","","","","","","","","","health.vn","","","","","","","","","","","monzaedellabrianza.it","","","","","","","","","","","","","","","","","","","","","","","","","","vestre-toten.no","","","","","","","","","","","","","","","diadem.cloud","","","","","","","","","","wpdevcloud.com","dontexist.com","","","","","","","","","","webredirect.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","eating-organic.net","","homeunix.com","","","","","","","","","","","","","","","","","ri.us","","","","","","","","","","","","","","","","",""
,"istmein.de","","","","","","yamagata.jp","","","","","","isa-geek.org","","","","","","","","","","","","","","","","","","","","","skoczow.pl","","","","","org.ph","","","","","","express.aero","","","","","","dontexist.net","","","","","","","","","","homeunix.net","","","","","","","","","filegear-gb.me","","","","","","","","ro.im","","","","","","","","","","","","","","","","","","","","","","","","","","int.az","folkebibl.no","","","","","","","","","","","","","salangen.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hlx.page","","","","","","wiki.br","","","","","wiki.bo","","","","algard.no","","","","","","","","","","","","","","sirdal.no","","","","","","","","","","africa.com","","","","","","","rv.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","psi.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hobby-site.org","","","","","","","","","vang.no","","","","","","","","","","","","","","","","","","joinville.br","","","filegear-jp.me","","","","","romskog.no","","","","","","","framer.wiki","","muni.il","","pleskns.com","","dynathome.net","","","","saves-the-whales.com","","","","","","","","dattoweb.com","","fr\303\246na.no","","","","","","","","","","","of.no","","","","","","","vladikavkaz.su","","","","","","","","","","or.id","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pa.us","","","","","","","","","","","","","","","","","","","","","","","","","","","","base.shop","","","","id.lv","","","","cloudaccess.net","","","","","","","","","","","dojin.com","","","","","","","","","","","","","","","","","pyatigorsk.ru","","","","","123webseite.de","finnoy.no","","","","","","","","","","","","","","","","","city.hu","","","","prd.fr","","","","","","","","","","","","gob.mx","","","","wolomin.pl","","","","","","","","","","","","ru.com","","","","","","","","","","","","","","","","","","","tm.pl","","","","","","","","rieti.it","","","nagasaki.jp","","","","transporte.bo","","","","","","","","","","","","","","","","","","trana.no","","","","","","","","","","","","","","internet-dns.de","org.ng","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","drayddns.com","","","museum.tt","","","","","scientist.aero","","","","","","","","","","","","","lesja.no","","plc.ly","","suldal.no","","dscloud.mobi","","","","","","","","","jogasz.hu","troitsk.su","","","","","","","","","","","","","","","","matta-varjjat.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","horten.no","","krakow.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dynvpn.de","","","","","gitlab.io","","","","","","","","","","","","","","","","","","","","","","siljan.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ascoli-piceno.it","","","","","monza-e-della-brianza.it","","","","","maintenance.aero","","","","","","","","","","","","","","","","my-gateway.de","","","fbxos.fr","","","pvh.br","","","","","","","","","","","","","","","","","homeunix.org","gliwice.pl","","","","","","","","","","","","","","","","","","","","","",
"","","","","","ltd.ng","","","","","","","","","","","","","","","","","","","","","kilo.jp","","","","mangyshlak.su","","","","","","","","","","","","","","","","","","","toscana.it","","","","","camdvr.org","","","hokkaido.jp","","","pro.ht","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","muosat.no","","","","","","","","","","","","","","","","","","","","","","","","ro.it","","","","","","is-certified.com","","","","","","","","","rm.it","","","","","","","ibestad.no","","","","","","","","bitter.jp","","","","","","","","","demon.nl","","","","","","","","nara.jp","","","","","","","","","","","","","framer.media","","","","","","fastvps-server.com","","","","","","","","","","","","","","","","","","pro.om","","","","","","","","","","","pu.it","","","","fortal.br","po.it","","","","","","","","","","pd.it","","","","","","","","","","","","","firewall-gateway.com","","re.it","","apigee.io","","","","","","","","","","","","","","","","","","hosting-cluster.nl","museum.mv","","","","","","","","","half.host","","","","","","","","","","","","","","","","","","","bozen-suedtirol.it","","","","","","","","","","","","","","int.ve","","","","","vall\303\251edaoste.it","","","","","","","","","","","","milan.it","","","","","","wpenginepowered.com","","","","","","","","","","","","","","pe.it","","","","","","","firewall-gateway.de","","","","","","","","","","","","","","","","","","","","","","","","","is-saved.org","","mediatech.by","","","","","tokke.no","","","","","","pt.it","","","","","","","","firewall-gateway.net","","","komvux.se","","","","","","","","","","","","","","","","","","","","","","","","","barrell-of-knowledge.info","babymilk.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","shop.pl","","","","","","","org.ni","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","backdrop.jp","","","","","","","","indie.porn","","","","","","","","","","","","","","","","","fjaler.no","","","","","","","","","","","","","","","","","","","","","","","","","","ip6.arpa","","","","","","","","","","deno.dev","","","","","","","","","","","","","","","microlight.aero","","","museum.om","","","","","","","","","","","","","","","r\303\270st.no","","","","","","","","","","","","","","","","","sigdal.no","","","","","","","","","","","","","","","","","","","","","","halfmoon.jp","","","","","","","","","","it.ao","haiphong.vn","","","","hemnes.no","","","","","","","","","","tempurl.host","","","","","","","dnsupdate.info","","","","","","","","","","","","","","","","","","pr.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","holmestrand.no","","","","","","","","","","","","","","","","per.la","","","","","","","","github.io","","","","press.cy","","","","","","","","org.bj","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","try-snowplow.com","","","","","","arvo.network","i.bg","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.gh","","","","","","memset.net","","","l\303\246rdal.no","","","haugesund.no","mypi.co","","","ownip.net","","perso.ht","","","mosvik.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","stjordalshalsen.no","","",
"","","","","","","","","","","","","","lecco.it","","","","","rep.br","","","","","rennesoy.no","","","","","","sevastopol.ua","","","","","","","","","","","","","","","","","","","","\347\245\236\345\245\210\345\267\235.jp","","","","","","","","nyanta.jp","","","","","","","","","","","","","","","","","","","","","","","","engine.aero","","","","","","","","","swidnica.pl","","","","","","","","","","","","","9guacu.br","","","","","","","","","","","","","","pv.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tv.bb","","","glogow.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dnsdojo.org","lomza.pl","","","","","","","","","","","","","","","","","","romsa.no","","hotelwithflight.com","","","","","","chillout.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.na","","","","","","","","","","","","","","","","","","","","","","","","","kazimierz-dolny.pl","","","","","","","","","","","","","","","","","","","hippy.jp","","","","","","","","","","","","","","","","","","","","","","","impertrix.com","","","","","","","","","","","","","impertrixcdn.com","","","i234.me","","","","","","","","","dontexist.org","","rn.it","","","","","","org.sz","","","","","org.mz","","","","","","","","","","org.bz","","","","rs.ba","org.kz","","","crew.aero","homesecuritymac.com","","","","","","","","","","","","","","","","org.uz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","drobak.no","","","","","","ngrok-free.app","","","","","","","pn.it","","","","","","","","","","","","","","","","","","","","","hasvik.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","s\303\270r-aurdal.no","","","","","","mods.jp","","","","","trentino-suedtirol.it","","","","","trentino-s\303\274dtirol.it","","","","","","","","","","","","","","","","rc.it","","","","","warmia.pl","","","","","","","","","","","","","","","","","","","trentino.it","hurdal.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mail.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.tj","","","","","","","","","","","","","","pc.it","r\303\245de.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","medicina.bo","","","","","","","","","","","","","","","","","","","revista.bo","","","","","","","","","","","","","","","","","","","","","","","","","","dvrcam.info","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","handcrafted.jp","","incheon.kr","","","","","","","","","","","","","","","","","r\303\246lingen.no","","","","","","","","","","","","","","dyndns.tv","org.iq","","","","mincom.tn","","","","","","","","reg.dk","","","","","","","meteorapp.com","","","","","","","","","","r\303\241hkker\303\241vju.no","pl.ua","sandoy.no","","","","","","","","","","","","","","","","","","org.zm","","","","s\303\270mna.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.zw","","mragowo.pl","","","","","","","","","","","","","","","","","","","fantasyleague.cc","","dscloud.biz","iwate.jp","","","","","","","","mel\303\270y.no","","","","","",""
,"","ivgu.no","","","","","","togliatti.su","","","","","","","","","","","","","","","trentinostirol.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trentino-sued-tirol.it","","","","","trentino-s\303\274d-tirol.it","","","","","","","","","","","","selfip.org","","","","","","","","tp.it","","","","","","","","","","rg.it","","","","","","","","","","","","","","","","friuli-v-giulia.it","","","","","","","","","","","","","","","","","","","","","","","","","mutual.ar","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","schoolbus.jp","","","","","","","","egoism.jp","","","","","","","","","","","","","","","","","","","","pg.it","","","solund.no","","","","","","","hvaler.no","","","ditchyourip.com","","","","","","","l\303\241hppi.no","","","","","","","","","","pp.se","marche.it","lillesand.no","","r.se","","","","","","","","","","","","githubusercontent.com","","","","h\303\246gebostad.no","","","","","","","","","","","","","","","","","","padua.it","","","","","","","","plo.ps","dattolocal.net","","","tj.cn","","","","","","","","","","","","","","","","","kagoshima.jp","flop.jp","","","","","","","","babia-gora.pl","","","","","","","","diskussionsbereich.de","","","","hashbang.sh","","","","","","","","","viterbo.it","saogonca.br","","","","","","p.se","","","afjord.no","","","","org.kp","likescandy.com","","","","","","","","","","","","","","lakas.hu","","","","","","","","","applinzi.com","","thruhere.net","","","","molise.it","","","","","","","","","","org.pf","","","","","","","tksat.bo","","","","","","","","","","","","","","","","","","","","","","","","","","","is-slick.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","folionetwork.site","","veterinaire.km","","","","","pp.ru","","","","oristano.it","","","","","","","","","","","","","","","","","","","","","","","","","","ri.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","int.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pi.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","fedorapeople.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","kaszuby.pl","","s\303\270gne.no","","","","","","","","","","","","","","","","","","","","","mond.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tourism.tn","elblag.pl","","","","","","","","","","","","","","","","","framer.photos","","","","","","","","","","","","","","","","ilovecollege.info","malopolska.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","static.land","org.az","","","","","malbork.pl","","","","","","","","","","venice.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mobi.tt","","","","ra.it","vega.no","","","","\347\247\213\347\224\260.jp","","","","","","","","","","","massa-carrara.it","","","","","mycd.eu","","\330\247\331\212\330\261\330\247\331\206.ir","","","","","","","","","","","","","","","","","","","","","","","ppg.br","","","","","","","","","","","","","","","","","","","","","",
"","","","","","dyn-vpn.de","","","","","","","","","","","","","","","","","","pol.ht","","","","","","","rl.no","","pa.it","","","","","","","","","","","","","","","","rel.ht","","","","","internet.in","","","","","writesthisblog.com","","","","","","oz.au","","","","","","tychy.pl","","","","","","","","perso.tn","","","","","","","","","","","","","","","","","","andria-barletta-trani.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","parma.it","","","","","","","tv.in","","","","","deta.dev","","","","","lcube-server.de","discourse.group","","","","","","dattorelay.com","","","","","mywire.org","","","pointto.us","","","","","","","cloudaccess.host","","","","","","","","","","leadpages.co","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","qualifioapp.com","","","","","","dattolocal.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","liguria.it","","","","","","","","","","","experts-comptables.fr","","","","","","","","slupsk.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.af","","","","","","","lea\305\213gaviika.no","","","","","","desa.id","","","","","","","","","","","lelux.site","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","vestby.no","","","","","","","","","aomori.jp","","","","","","","","","","","","","","b-data.io","","","","","trentinos\303\274d-tirol.it","","","","","","","","","","","","","","","dyndns.ws","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","o.bg","","","","","","","","","","vestv\303\245g\303\270y.no","","tgory.pl","dsmynas.org","","","","","","","","myshopblocks.com","","","senasa.ar","","","","","","","","","","","","","","trentinosued-tirol.it","starachowice.pl","","","","osen.no","","","","","","discordsez.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","selfip.net","","","","","","","","","","","","","","","","","","","","","","","","","","vall\303\251e-d-aoste.it","","","vads\303\270.no","","","","","","","","","","","","","","sweetpepper.org","","","","","","","","","","","","","","","","","","","oyer.no","","","","","","","","","","","","","tselinograd.su","","","","","selfip.com","","","yamaguchi.jp","","","","","","","","","","","","","","","","","","","","","","","vxl.sh","","","","","","","vipsinaapp.com","","","","","","","","","dellogliastra.it","","","","org.vu","","","","","","","","","","","","","","","","","","luroy.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","porsangu.no","","","","","","dyndns-remote.com","","","","","","","","","","","","","","","","","","","","","","","","perso.sn","","","","","okinawa.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","sondre-land.no","","","appudo.net","","","","","","","","","","","","","","","","","","",
"","","","","","","","","","","","","","","tjome.no","","","","","","","","","","","tiengiang.vn","","","","","","","","","veneto.it","","","","","","","","","","","","","","","","","","","","","","","","","","film.hu","yandexcloud.net","","","","","","","","","","","","","","","org.ve","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","langevag.no","","","","","","","","","","","","lib.ee","","","","","","servehumour.com","","vard\303\270.no","","","","","","trentinosudtirol.it","","","","","","","","","","","","","","cuiaba.br","","","","","","","","","","","","","","","","","","iglesiascarbonia.it","","","","","","","","","vagan.no","","","","","","","","","","","","pescara.it","","","","","","","","","","","","","","","","","","","","","rel.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","modelling.aero","","principe.st","","","","","","","","","","","","","","","","","","","","","","","","isernia.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","balena-devices.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","selfip.info","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hadsel.no","","","","","","","","","","","","","","","","","","","","","","","","","","dynamic-dns.info","","","betainabox.com","","","","","","","","","","","","","","","","lyngdal.no","","barrel-of-knowledge.info","","","","","","","","","","","","","","l\303\270renskog.no","","","","","","","","","","tolga.no","","vall\303\251e-aoste.it","","","","","","","","","","","","","","","inderoy.no","","","","","","","","","","","","","","","","","","","","","genova.it","","","","","","","","","","","","","","","","","","","","","","","","","","","blogspot.mx","","","","","","","","","","","","","","","","","","","","","","","","","","","opole.pl","","","","","","","","","","","","","","","","","","","","","","","","malvik.no","mediocampidano.it","","","","","","","","","","","","valleedaoste.it","","","tran\303\270y.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.gp","","prequalifyme.today","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tickets.io","","","","","","","","","","","","","stripper.jp","","","","","","","","","","","","","","","","fireweb.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","supersale.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","turek.pl","","","","","","","","","","","kalisz.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lund.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","czeladz.pl","","","ovre-eiker.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-student.com","","","","","","","","","","","","","","","","","","","","","","pp.
ua","","","","","","rauma.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tra.kp","","","","","","","","","","","","","","","","","tec.ve","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","varese.it","","szex.hu","","","","","","","","","","","","","","","","","og.ao","","","","","","","","","","","","","","","wegrow.pl","","","","","","","","","","","","","","","","","","","","health.nz","","","","","","","","","r\303\270yrvik.no","yamanashi.jp","","","","","","","","","","","","","","","","","","","","","iwi.nz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","matera.it","","","","mozilla-iot.org","","","","","","","","","linkyard.cloud","","","","","piedmont.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","vall\303\251eaoste.it","","","","","","","","turin.it","","","","","","","","reservd.com","","","","","","","","","","","","","","","","","venezia.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","v\303\245g\303\245.no","","temp-dns.com","","lindesnes.no","lukow.pl","","","","","","","","tashkent.su","","","","","","","","","","","vicenza.it","","","","","","","","","","","","","r\303\245holt.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lpusercontent.com","","","","t.bg","","","","","","","","","","org.fj","","","","","","","","","","","","","","","","","","","gloomy.jp","","","","","snoasa.no","","","","","","vaga.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","poltava.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","savona.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","off.ai","","","","","","","toolforge.org","","halden.no","","","","","","","","","","snaase.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mcpe.me","","","","","","","","intl.tn","","","","","","","","","","","","","","","","","","","","","","","","","","","trentino-sud-tirol.it","","","","","","imperia.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hareid.no","","","","","pc.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ownprovider.com","","","","","","","perugia.it","","","","","","","","","govt.nz","","","","r\303\270yken.no","","","","","","","","","","","","","","","","","loginto.me","","","","","","","","","","","","","","","definima.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","friuli-vgiulia.it","","","","","","","","","","","","","","pro.na","","","","","","","or.tz","","","","","","","","","","","","","targi.pl","","","","","","","","","","","","","","","","","is-a-chef.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rissa.no","","","","","","","","myfirewall.org","","","","","","prvcy.p
age","","","","shoparena.pl","","legnica.pl","","","","","","","","","","","","","","","","","","","","","","","","","l.bg","","","","","","","","","","","","","","","","","","","","","","","","xs4all.space","","","","","","","","is-a-chef.net","","","","","r\303\270d\303\270y.no","","","","","","","","","","","","","","","","","","","","","","elasticbeanstalk.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","daplie.me","","","","","","","","","","","","","","","","","","","","ox.rs","","","","","","org.vn","","","","","","","","","","","","","","","","","","","","","","","","","press.ma","","","","","","","","verdal.no","","","mc.ax","","","","","","","","","","","","","","","","","","","","","","","","","","pz.it","","","","","","raisa.no","","","","","heteml.net","","","","","","","","","","","","","","","","","","","","","","loisirs.bj","","","","","","","","","","","","kirovograd.ua","","","oygarden.no","","","","","","","","","","","","","","","nikita.jp","","","","","","","","","trentins\303\274dtirol.it","","","","mielec.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","porsanger.no","","","","","","friuli-veneziagiulia.it","","","","","","","","verran.no","","","main.jp","","","rennes\303\270y.no","","","","","","","","","mobi.na","","","","","","","","","","","","","","","","","","tinn.no","","","","","","","","","tydal.no","","","","","","","","","","","","","","","","","pokrovsk.su","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tx.us","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","roros.no","","","","","","","","","","","","org.vc","","","","","","","","","","","","","","","","","","","","","definima.io","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","prato.it","","","","","","","","","","melhus.no","","","","","iserv.dev","","","","","","","","","","","","","","","","","","","","","","","","","","","stackhero-network.com","","","","langev\303\245g.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","telebit.io","","","","","","","","","","","","","","","","","","","","","","","langson.vn","","","","","","","","","","","","","","","","","","meldal.no","","","t3l3p0rt.net","","","","","vallee-d-aoste.it","","","","","","","rendalen.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dyndns-ip.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","vinhphuc.vn","","","","","","","","","","","","","","vercel.dev","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","isteingeek.de","","","","","cesena-forl\303\254.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","my-firewall.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","issmarterthanyou.com","",""
,"","","","","","","","","","","","","","","","","","","","","","","dyndns.org","","","","","dvrdns.org","","","","gitapp.si","","","","","embaixada.st","","museum.no","","oystre-slidre.no","","","","","","","","","leirfjord.no","","","","","africa.bj","","health-carereform.com","","","","","","","","","","","","","","lab.ms","","","","","","","","","","","","","","","","","","","","","","","","","hapmir.no","","livorno.it","","","","","","","","","podzone.net","","","","","","","","","","","","","","","","","rodoy.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mandal.no","","pstmn.io","ostre-toten.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","d\303\270nna.no","lolipop.io","","","","","","","","","mobi.ke","plesk.page","","","secaas.hk","","","","","","","","","","","","","","","","org.za","","","","","","","","","","","","musica.ar","","","","","","","","","","","","","","","","","","","","","","","","","","trentinsuedtirol.it","","","","","","","","","","","","","","","","","","","marker.no","","","","","","","","","","","","","","","","operaunite.com","","","","kanagawa.jp","","","","","","","","deatnu.no","","","","","pages.dev","","","","pors\303\241\305\213gu.no","","","","","","","","gleeze.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pro.az","","","","","","","","","","","","","","","","dyndns-server.com","","","","","","","","","","","","","","midtre-gauldal.no","","","","","","","tm.dz","","","","","","","","","","","","","","","","","","kawaiishop.jp","","","","","","","","","","","","","","","marine.ru","","","","trentino-alto-adige.it","","","","","","","","","piemonte.it","","","leikanger.no","","trentin-s\303\274d-tirol.it","","","","","","","","","","","","vlog.br","","","","","","","","","","","","","","","","","","","","","dyndns-office.com","","","","","","","recht.pro","","","","","","","","","","","","","","ingatlan.hu","","","","","","is-gone.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dyndns-at-work.com","","omasvuotna.no","org.dz","","","","","","","","rhcloud.com","","","","","","","","","","","","","","","trentin-sued-tirol.it","","","","","","","","","is-found.org","","","","","","","","","","","","","","","","flight.aero","","","","","","","","","","","","","","","","","","","","","","","","","redirectme.net","","","","","","","","","","","","","","","","withyoutube.com","","","","lviv.ua","","","","","","","","l-o-g-i-n.de","","","","","","","","","","","","","","","","","","","","","is-a-conservative.com","","","","","","","","","","","","","","","","","","","","","","hanggliding.aero","","","","","","","","","","","","","","","","","pordenone.it","","","leksvik.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.nz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","suli.hu","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","v\303\245gan.no","","","","","","","","","","","","","","","","","","","","","","pavia.it","","","","","","","","","","","","","","","","","","","onrender.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lamdong.vn","","","","","",
"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rakkestad.no","","","","","","","","","","","","","","","","","","","","","walbrzych.pl","","","","","radio.am","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mongolian.jp","","","","","","","","","","","","","","","","","","","","tempioolbia.it","","","","","","","","","","","","","","","","","","","","","","","","","","oxa.cloud","","tv.tz","","","","","","","stalowa-wola.pl","","trentin-sudtirol.it","","","","","","","","","rygge.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","secret.jp","","","","","","","","","production.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","onporter.run","","","","","","","","","","","","","","","deci.jp","","","","","","is-a-chef.org","","","friuli-vegiulia.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","akamaized-staging.net","","","","","","","","","org.vi","","","","","","","","","","","","","","","","lier.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","odesa.ua","","","","","","","remotewd.com","","","","","","osteroy.no","","","","","","","","","","","","","","","myfritz.net","","","","","","","","","","","","","","","","","","","","trentinos\303\274dtirol.it","","","","","","","","","dell-ogliastra.it","trentinosud-tirol.it","geek.nz","","","","","","","","","holy.jp","","","","","","","","","","","protonet.io","","","","","","","","","","","","","","","","valle-daosta.it","","","\321\217.\321\200\321\203\321\201","","","modena.it","","","","","loten.no","","","","","","","","","","","","","","","","","","","netlify.app","","","","","","","lavagis.no","","","","","","","","","","","","","","","","","","strand.no","","","","","","","","","","","","","","","","","","","","","","","trentinosuedtirol.it","","","","","","","","","","","","","","","","","","","m\303\241tta-v\303\241rjjat.no","","","","","","","","","","","","","","","","","","","","","","","","","","","friuli-venezia-giulia.it","","","","","","","vapor.cloud","","","","","","","","","","","","","","","","","","","","","","","","","","","myhome-server.de","","","","","","","","","","","","","","","","","","","","","","","","","thick.jp","","","","","","","","","","","","","laquila.it","","","","","","","","","trieste.it","","","","","","","","","","","","","","","","","","","","warszawa.pl","","","","","","","","","","pg.in","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hasura-app.io","independent-inquest.uk","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pub.sa","","","","","","is-a-cpa.com","","","","","","","","","","","","","","","","","","","","","","industria.bo","","","","","","","","","","pymnt.uk","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","trento.it","dnsfor.me","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rotorcraft.aero","","","","","","","","","","","myshopify.com","appchizi.com","","","","","","","","","","","","","","","","","","","","","","homeip.net","","","","","","","","","","","","","","","","pilot.aero","","","saga.jp","","","","","leitungsen.de","","","","","","","","radio.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","onred.one","","miasta.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rocky.page","","","","","","","ringsaker.no","","","","","","","","","","","","","isa-hockeynut.com","dyndns.info","","","","","","","","","","","","","","","","","","","translated.page","","","","vestvagoy.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.sb","","","","","","","","","","","","","","","org.bb","","","","","","","","","","","","","","","safety.aero","mypets.ws","","","","","mielno.pl","","","","","","","","","","","","is-a-caterer.com","","","","","","","","","","","","","","","","","","","","onthewifi.com","","","","","","","","","","","","","","","","","","","","firebaseapp.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","jozi.biz","","","","hob\303\270l.no","","","","","","","","","","","rec.ve","","","","","","","","","","","","","voagat.no","","","","tunk.org","","trani-andria-barletta.it","","","","","","","","","","","","","","","","","","","shop.hu","","","test.tj","","","trondheim.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","lutsk.ua","","","","","","","","","","","","","","lombardia.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\347\265\204\347\271\224.\351\246\231\346\270\257","","","","","","","","","","\346\225\231\350\202\262.\351\246\231\346\270\257","","","","","","","","","","","","","","","","","","","","","","","","","","","","perma.jp","","","","","","","","","","","","","","","penne.jp","","is-uberleet.com","","","","","","","","","","","","","","","","","","","","","","","","loppa.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","t\303\270nsberg.no","","","","","","","rdy.jp","","","","","","","","","","","","","rost.no","","","","","","","","","osaka.jp","","","","trading.aero","","","","","","","","","","","","is-a-rockstar.com","","","","","","","radoy.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pimienta.org","","","","","","","dd-dns.de","","r.bg","","","","","","","","","","","","","","","","","tech.orange","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","myjino.ru","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pro.fj","","","","","","","","","","","","","",""
,"p.bg","","","","","","","","lodi.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-painter.com","","","","","","","","traeumtgerade.de","","","","","szkola.pl","","","","","","","","","","","","","","","","","","roan.no","","","","","","","","","","","","","ravendb.community","","","","","","","law.za","","","","","","","","","","","","","","","","","","","","orangecloud.tn","","","","","sebastopol.ua","","","","","","","","","","","","","","","","","","leasing.aero","","","","okayama.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hatinh.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","realestate.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","veterinaire.fr","","","","","","","","","","","","","","e164.arpa","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mazury.pl","oppeg\303\245rd.no","trapani.it","","","","","","translate.goog","","","","valle-d-aosta.it","priv.no","","","","","","","","","","","","","","","","","","","","","","","","","","","radio.fm","","","kolobrzeg.pl","","","","","","","","","vibo-valentia.it","is-lost.org","","","","","","","","","","","","","trentino-s-tirol.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","discordsays.com","","","teaches-yoga.com","","","","","","","","","","","","","","","","","","","","","","floppy.jp","","","","","","","","","","","","","","","","","is-a-nascarfan.com","","","","","","itigo.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.lb","","","","","","","","","","","","kobierzyce.pl","","","","","","","","","","","","","","","","karpacz.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lodingen.no","","","","","","myddns.rocks","","","","framer.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","sadist.jp","","","","","","","","","","","","","","","","","","point2this.com","","","","","","","","","trentino-sudtirol.it","","","","","","","","","","","","","","","pro.vn","","","","","trentins\303\274d-tirol.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tecnologia.bo","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.et","","","","","","","","","","","","","","","","bydgoszcz.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.tt","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tochigi.jp","","","","","","","","","","odda.no","","","","","","","","","","trentino-stirol.it","","","","","","","","","","","","","","","","","","","","risor.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","independent-inquiry.uk","","","","",
"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-into-cars.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ostroda.pl","","","","","","","","","","","","","","","","","","","","","","","info.tr","","","","","","","","","","","","","","rar.ve","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-doctor.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trentinsud-tirol.it","","","","","","","","","","","","","","mobi.ng","","","","","","","","","","","","","","","","","","","","","","","","ravendb.me","","","","","","","","","","","","","","","","","","","lillehammer.no","","","","","","","","","","","","","","","","","","opencraft.hosting","","","\345\262\251\346\211\213.jp","","","","","","","","","","","","","","","","","","","","","","","","","laakesvuemie.no","","","","","","","","","","","","","","","","","run.app","","","","","","","","","bieszczady.pl","","","","","","","","","","","","","","","","","","time.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.ro","","","","","info.mv","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","priv.at","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ostrowiec.pl","","","","","","","","","","","","","","","","","","","","","","trentino-a-adige.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","design.aero","","","","","","","","","","","","","","","","","troandin.no","","","","","","","","","","","","","","","","","","is-a-green.com","","","","","","","","","","","","","","","","","restaurant.bj","","","","","","","","","","","","","","","","","","verona.it","","","","","","","","","","","","","","","","","","","","","","hiroshima.jp","","","","","radom.pl","","","","","","","","","","","","","","","","","","","","trentinsued-tirol.it","vagsoy.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dyndns-work.com","","","","","","","","","","","","","","websozai.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trentinsudtirol.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","platterp.us","","","presse.km","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","logistics.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tottori.jp","","","","","mine.nu","","","","","","","","","","","","","","","","","","","taifun-dns.de","rgr.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-llama.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lorenskog.no","","","","","","","","","","","","","","","","","","","","","","info.nr","","","","","","","","","","","","","","","","","orkanger.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","leirvik.no","","","","lebtimnetz.de","","","","","","","","","","","","","","","","","","","","","","whitesnow.jp","","","","","","","","","","","","","","","","","","","","","","","","","","plurinacional.bo","volyn.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-geek.com","","","","","","","","","","","","","","","","","","","","","oppegard.no","","","","","","","","","","","","","info.co","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tuscany.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lovepop.jp","","is-a-geek.net","","habmer.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hiho.jp","","","","","","","","","","","","","","","","","","","","","","","","","","primetel.cloud","palermo.it","","","","","","","","","","","","","v-info.info","","is-by.us","","","cesena-forli.it","deca.jp","","pya.jp","","","","","","","","","","","","","","","","","trentin-s\303\274dtirol.it","","","","","","","","","","trentin-sud-tirol.it","","","","","","","","","","myqnapcloud.com","","","indigena.bo","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","wakayama.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trentin-suedtirol.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","doesntexist.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trentinos-tirol.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pistoia.it","","","","","","","","","","dyndns-wiki.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rep.kp","","","","","","","","","","","","","","","","","","","","","","","","","","","hungry.jp","","","","","","","","","","","","","","","","","","","","","lima-city.de","","","","","","","","","","","","","","","port.fr","","","","","","","","","","kagawa.jp","","","","","","","thanhhoa.vn","","","","","","","","","","","","","","","","","","","","","","","","","kiwi.nz","","","","","urbino-pesaro.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","poivron.org","","per.nf","","","","","","","","","show.aero","magazine.aero","","","","","","","","","","","","","","","","","tourism.bj","","snowflake.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","iamallama.com","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-teacher.com","
","","tourism.pl","","","","","","","","","","","","","","","","","","oslo.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ravendb.cloud","","","","","","","","","","","","","","independent-review.uk","","","","","","","","","mobi.gp","","","trycloudflare.com","","","","","","","","","","","","londrina.br","","","","","riopreto.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","watson.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","priv.me","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","overhalla.no","pantheonsite.io","","","","","","","","","","","","","","","","","","","","","","","","info.at","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","monzabrianza.it","","","","","","","","","","","","","","","","","","","","leka.no","","","perspecta.cloud","","","","","","","","","","","punyu.jp","","","resto.bj","","","","dyndns-free.com","","","","","","","lolipopmc.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tcp4.me","","","","","","","trentino-aadige.it","","museum.mw","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","porsgrunn.no","","","","","","","","","","","","lazio.it","","","","","","","","","","","","","","","","","","","","tvedestrand.no","","","","","","","","","","","","","","","","","","","","","royrvik.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rec.nf","","","","","","","","","","","in-vpn.org","","","","","","","","","","","","","","","","","","","travel.tt","","","asso.dz","","","","","","","olsztyn.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lur\303\270y.no","","","","","","","","","","","","","","","official.ec","","","","","in-vpn.de","","","","","","","","","","","","","","","","","","","","maceio.br","","","","","presse.ml","","","","","","","","","","","","","","","","","","","","","","","tokyo.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","traniandriabarletta.it","","","dgca.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","podlasie.pl","","","","","","","","","","","","","","","school.nz","","","","","","","","","","","","","","","","","","","\345\200\213\344\272\272.\351\246\231\346\270\257","","","","is-a-democrat.com","","","","","","","","","","","","","","","","","","","","","","","","","framer.website","","","","","","","","","","","","","","","repl.co","","","","","","","","is-a-hard-worker.com","","","","","obninsk.su","","","","","iobb.net","","","","","","","","","","","","","","","","","","","","","",""
,"townnews-staging.com","laocai.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ternopil.ua","","","","sblo.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","itcouldbewor.se","","","is-very-sweet.org","","","","","","","","","","","","","","","","","","","","","","linkyard-cloud.ch","","","","","","","","","dyndns-web.com","","","","","playstation-cloud.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tromso.no","","","","","","","","","","","","","","","","","","","","","","","info.na","","","","","","","","","panel.gg","rome.it","","","","","","","","","","","","","","","","","","","realm.cz","","","","","","","","","","","","","","","","","","","","","name.az","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dyndns-at-home.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","versus.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","recreation.aero","","","","","","","","","","","","","","","","","","","","","","","","","","ravendb.run","","","","","","","","","","","","","","","","","","","","likes-pie.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","danang.vn","","","","","","","","","","","","","","","","valled-aosta.it","","","","","","","","is-a-geek.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","deta.app","","","","","","medio-campidano.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ogliastra.it","","","lenvik.no","","mlbfan.org","","","","","","ringerike.no","","","","info.sd","","","","","","","","","","","is-a-candidate.org","","vallee-aoste.it","theshop.jp","","","","","","","","","","","","","","","","","","","","","","","","","prochowice.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trainer.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pol.dz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","prof.pr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","levanger.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","taranto.it","","","","","insurance.aero","","","","","","","","","","","","","","","","","","","","","milano.it","","","","","","","","","info.k
e","","","","","","","","","","","","","ilawa.pl","","","","","","","","","info.ht","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-therapist.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ipifony.net","","","","","","","readmyblog.org","","","","","","","","in-vpn.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trentinoaltoadige.it","caxias.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","in-dsl.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lima-city.at","","","","","","","in-dsl.de","","","","","","nagano.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lucania.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","babyblue.jp","","","","","","","","","","","","","","","","","","","","","readymade.jp","","","","","","webhop.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dyndns-home.com","","","tingvoll.no","","","idrett.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","webhop.me","","","","","leangaviika.no","","","","","","","","","","","","is-an-actress.com","","","","","","","","","","","","","","","","magnet.page","tranby.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ivano-frankivsk.ua","","","","","","","","info.ve","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dlugoleka.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","longan.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.ki","","","","","","","","","","","","","","","","","","","","","","orkdal.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","doesntexist.com","","","","","","","","","","","","","","","","","","","rade.no","","","","","","","tynset.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tranibarlettaandria.it","","","","","","","","","","","","","","","","","","","","","","","","","","","",
"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","olecko.pl","","","","","","","","","","l\303\270ten.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","independent-commission.uk","","","info.la","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trani-barletta-andria.it","","","","","","","","","","","","","","","reklam.hu","","","debian.net","","","","","","","","","","","","","","","","","","","","dyndns-blog.com","","","","","","","","","","","","","","","","","","","","larvik.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tana.no","","","","","","","","","","","","is-an-artist.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","other.nf","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","puglia.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trysil.no","","","","","","","","","","","","","","","","","","","","velvet.jp","","","","","","","","","","","","","","potager.org","","","","","","","ravenna.it","","","","","","","","","","","","","","","","","","","","","lunner.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hlx3.page","","","","","","","","","","","","","","trentino-altoadige.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","travel.in","","","","","","","","","","","","","","","","","","","","tromsa.no","","","","","","","","","","","","","","","","","","","","","","","","","","in-dsl.net","","info.ls","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tysvar.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ibaraki.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-designer.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.ni","","","","","podzone.org","","","","","","","","","","","","","","","","","","","webhop.net","info.pr","","pussycat.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","vpndns.net","","","","","","","","","","","tananger.no","","","","","","","","","","","",""
,"","","","laspezia.it","","","","","","","","","","","","","","","","","","","","","is-into-cartoons.com","","","","","","lolitapunk.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-blogger.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-into-games.com","","","","","","","","","","","","oksnes.no","","","","","\347\273\204\347\271\224.hk","","","","","\347\265\204\347\271\224.hk","","","","","","","","","","\346\225\231\350\202\262.hk","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\346\225\216\350\202\262.hk","","","","","","","","","","","","","","","","","","","on-aptible.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","littlestar.jp","","lugansk.ua","","","","","","research.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","post.in","","","","","","","pgfog.com","olawa.pl","","","","","","","","","","","","","","","","","opensocial.site","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","org.mx","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","musica.bo","","","","","","","","","","","","","","","","","","termez.su","","","","","","info.tn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","podhale.pl","","","","","","","","","","teramo.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","webhop.info","","luster.no","","","","","","","","","","","","","","","","","","","","roma.it","","","","","","","my-wan.de","","","","","","","","","","","","","","","","","","","info.pk","","","","","pagespeedmobilizer.com","","","rivne.ua","","","","","","","","","","","parliament.nz","","","","","","","","","","","","meland.no","","","","","","","","","thuathienhue.vn","","","","","","","info.ec","","","","","","","","","","","","","","","","","","","","thaibinh.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ondigitalocean.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","priv.pl","","","","","","","","","is-very-bad.org","","","","","","","potenza.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","test.ru","","","","","\340\271\200\340\270\231\340\271\207\340\270\225.\340\271\204\340\270\227\340\270\242","","","","","","","is-a-nurse.com","","","","","lavangen.no","","","","","","","","","","","","","","","","","","","","","riik.ee","","","","","","","tj\303\270me.no","is-wi
th-theband.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dopaas.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","zaporizhzhia.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.vn","","is-a-photographer.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ibxos.it","","","","","","","lima-city.ch","","","is-a-lawyer.com","","","","","","","","","","","","","digick.jp","","","","","","","","odessa.ua","","","","","","","","","","","","","","","","","","","","","","","ruovat.no","","","","","lerdal.no","","","","","","","","","","","","","","","","","","","","","","","","","","","omniwe.site","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","loabat.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hasura.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tonkotsu.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","isla.pr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","osoyro.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","morena.br","","","iopsys.se","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-socialist.com","","","","","","","","","","","","","","","","","","loginline.dev","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","activetrail.biz","","","","","","","","","","","","","","","","","","","is-a-personaltrainer.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tysnes.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pb.ao","","","","","","","","","","","","","","","","","","","","","
","","","","","","","","","","","","","","","","","","read-books.org","","","","","","","","","","","","","","","","","","","","","","","","","","","school.na","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","laichau.vn","","","","","","","","","","","","","","","is-a-techie.com","","","","","","","","","","","","","","","","","randaberg.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tokushima.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-guru.com","","","","","","","","","","","","","manaus.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","presse.ci","","","","","","","","myfast.space","","","","","","","","","","","","","","","","","","","","","","rad\303\270y.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","loginline.io","","","","","","","","","","","","","","","","","","","","trentinoaadige.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","torino.it","","rovno.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trader.aero","","","","","","","","","info.in","","","","","","","","","","","","","","","","","","","","","","","","ragusa.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.nf","","","","","","","","","","","","","","","","","","","","tjeldsund.no","","","","","info.pl","","","","","","","","","","","","","","","","","","","","","","","\320\260\320\272.\321\201\321\200\320\261","","","","","\320\276\320\264.\321\201\321\200\320\261","","","","lohmus.me","","","","","","","","","","","","","rahkkeravju.no","","","","","","","","","","","","","","","","","","","","thingdustdata.com","","","","","","","","","","","","pagefrontapp.com","","","","","","","","","","","pruszkow.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","oops.jp","","","dyndns-mail.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","onflashdrive.app","","","","","","","","","","","","","","","","","","\320\276\321\200\320\263.\321\201\321\200\320\261","","","","","","","","","","","","opoczno.pl","","","","","","","","","","","","","","","","","","","","","","","","","","lima-city.rocks","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ralingen.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","vercel.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","dyndns-p
ics.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lubin.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","macapa.br","lindas.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rovigo.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.bo","","","","","","","","","","","","","","","","","","","","","","","","","","","","rennebu.no","","","","","","myfast.host","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pigboat.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","vibovalentia.it","","","","","","","","","","","","","","","","","","onfabrica.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","postman-echo.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-an-actor.com","","","","rana.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","twmail.cc","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pisz.pl","","","","","","","","","","","","","","","","","","","","\345\272\203\345\263\266.jp","","","","","","","","","iris.arpa","","","lardal.no","","","","","","","","","","","","","\346\240\203\346\234\250.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","official.academy","","","","","","","","","","","","osasco.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","thanhphohochiminh.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-into-anime.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\345\215\203\350\221\211.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pdns.page","","","","","","","","","","","","","","","","","","","","","przeworsk.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","reggioemilia.it","","","","","","","","phutho.vn","","","","","","","","","","","","","","","","","","saloon.jp","","","","","is-very-nice.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\345\200\213\344\272\272.hk","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pythonanywhere.com","","","","","","","","","","","","","","","","","","tranoy.no","","","","","","","","","","","","","pomorskie.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","reggio-emilia.it","","","","","","","","","","tr\303\246na.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","poniatowa.pl","","","","","","","","","","","","","","","","","","","","","","","","","police.uk","","","","","","","","","","","","","","","","","lyngen.no","","","","","publ.pt","","","","","","","","","","","","","info.fj","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","office-on-the.net","","","","","","","","","","","m\304\201ori.nz","ris\303\270r.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ladesk.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","malatvuopmi.no","","","lomo.jp","","","","","","","","","","vivian.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-very-good.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.zm","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pharmacien.fr","","","","","","","","","","","tank.jp","","","","","","","","","","","","tozsde.hu","","","","",
"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pisa.it","","","","","","","","","","","","","","","","","","","","","","","","","","","privatizehealthinsurance.net","","","","","","","","","","","","","","","","","","","","","","","","","","is-an-accountant.com","","","","","","","","","","","tuxfamily.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","olkusz.pl","","","","is-a-knight.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","olbiatempio.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","loginline.services","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","travel.pl","","","","","","","","","","","","","","","","","","","","","","","selfip.biz","","","","","","","","","","","","","","","","","","","is-a-anarchist.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ribeirao.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lubartow.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-landscaper.com","","","","","","","","","","","","","","","","","pomorze.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","theworkpc.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","in-brb.de","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lovesick.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","royken.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ponpes.id","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rollag.no","","","","","","","","","","","","","","","","","","\344\275\220\350\263\200.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\345\225\206\346\245\255.tw","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","fbx-os.fr","","","iglesias-carbonia.it","","","","","","","","","",""
,"","","","","","","","","ostrowwlkp.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\347\265\204\347\271\224.tw","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","miyazaki.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","land-4-sale.us","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","school.za","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-player.com","","","","","","","","","","","","","","","riobranco.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","politica.bo","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","twmail.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lierne.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-patsfan.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","trentinoa-adige.it","","","","","","","","","","","icurus.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","orskog.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","rawa-maz.pl","","","","","","","","","","","","","","","","","","","","","tlon.network","","","","","","","","","","","","","","","","","","is-a-bruinsfan.org","","","","","","","","","","","","","","","","","","","","","","","\347\273\204\347\273\207.hk","","","","","\347\265\204\347\273\207.hk","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","phuyen.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","readthedocs.io","lezajsk.pl","","","","","","","","","","","","","","","","in-addr.arpa","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mmafan.biz","","","","","","","","","","","","zaporizhzhe.ua","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","r\303\241isa.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rindal.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pharmaciens.km","","","tempio-olbia.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","latina.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","ostroleka.pl","","","","","","","","","","","","","","","","miyagi.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","independent-panel.uk","","","","","","","","","","","","","","","","","","","","","","","profesional.bo","","","","","info.bj","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","oita.jp","","","","","","","","","","","","","","","","twmail.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",
"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pgafan.net","","","","","","","","","","","","","","","","","lapy.pl","","","","","","","","","","","","","","","","","","","","","","info.gu","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","loginline.site","","","","","","","lublin.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","powiat.pl","","","","","","","","","","","","is-an-entertainer.com","","","","","","","","","","","001www.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","royal-commission.uk","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","logoip.de","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","hoplix.shop","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-liberal.com","","","","","","online.th","","","","","","","","","","ishikawa.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","leczna.pl","","","","","","","","","polkowice.pl","","","","","","","","","","","","","","","","","","","","","info.cx","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lowicz.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","loseyourip.com","","","","","","","","","","","","","","","","","","","","","","","","","info.au","","","","","priv.hu","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","olbia-
tempio.it","","","","","","","","","","","","pupu.jp","","","","","","","","","","","","router.management","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","iservschule.de","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-very-evil.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pixolino.com","","ringebu.no","","","","","","","","","","","","","","parasite.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","turystyka.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-financialadvisor.com","","","","","","","","","","\346\262\226\347\270\204.jp","","","","","","","","","","\346\204\233\345\252\233.jp","","","","","","","","","","","","","","","\346\204\233\347\237\245.jp","","","","","","","","","","","","","","","","","","","","","is-an-engineer.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","poznan.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lima.zone","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","r\303\270ros.no","","","","","","","","","","","","","lebesby.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rulez.jp","","","","","","","","","pp.az","","","","","","","","","","","","","","","","","on-the-web.tv","","is-a-soxfan.org","","","","","","","","","","","","is-a-musician.com","dyndns.biz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","logoip.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-an-anarchist.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","oppdal.no","","","","","","","","","","","","","","","","","","","","","","","","","","","trentinoalto-adige.it","","","","","","","","","","","","","","","","","","","","","","","","","","matrix.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tuva.su","","","","","","","","passenger-association.aero","","","","","","","","
","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rimini.it","","pueblo.bo","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-bookkeeper.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tarnobrzeg.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tuyenquang.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","publishproxy.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.hu","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","repl.run","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-hunter.com","","","","","trafficplex.cloud","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pubtls.org","","","","","","","","","","","","","","","rybnik.pl","","","","","","","","","","","","rackmaze.com","","","lahppi.no","","","","","resindevice.io","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rackmaze.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-not-certified.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lanbib.se","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mazowsze.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","limanowa.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","telebit.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","reserve-online.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.bb","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","repbody.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","raindrop.jp","","","","","","","","","","","","","","","","","","is-a-libertarian.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","reserve-online.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","rzeszow.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","orland.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tula.su","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pila.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","piacenza.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lpages.co","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","2000.hu","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\345\245\210\350\211\257.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pesarourbino.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-celticsfan.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","lebork.pl",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","public-inquiry.uk","","","raholt.no","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\346\273\213\350\263\200.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","typedream.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mimoza.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","mobi.tz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pulawy.pl","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","loginline.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pesaro-urbino.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","paragliding.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","palmas.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","patria.bo","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","taxi.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","recife.br","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","reggiocalabria.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","orx.biz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","thainguyen.vn","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","platter-app.dev","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pcloud.host","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\320\277\321\200.\321\201\321\200\320\261","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","platter-app.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\320\276\320\261\321\200.\321\201\321\200\320\261","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",
"","","","","","","","","","","","","","","","platform0.app","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","toyama.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","la-spezia.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","in-the-band.net","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","webhop.biz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pagexl.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","parallel.jp","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-bulls-fan.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pinoko.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","padova.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","
","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pecori.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","reggio-calabria.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-linux-user.org","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-republican.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","tuleap-partners.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","iliadboxos.it","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",
"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.tz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","o0o0.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","is-a-cubicle-slave.com","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","localzone.xyz","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","pepper.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","info.az","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","parachuting.aero","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","on-web.fr","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",
"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","peewee.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\345\244\247\345\210\206.jp","","","","","","","","","","\347\206\212\346\234\254.jp","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\321\201\320\260\320\274\320\260\321\200\320\260.\321\200\321\203\321\201","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\320\261\320\270\320\267.\321\200\321\203\321\201","","","","","\320\272\320\276\320\274.\321\200\321\203\321\201","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",
"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\320\274\321\201\320\272.\321\200\321\203\321\201","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\321\201\320\276\321\207\320\270.\321\200\321\203\321\201","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\320\272\321\200\321\213\320\274.\321\200\321\203\321\201","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\320\276\321\200\320\263.\321\200\321\203\321\201","","","","","\321\201\320\277\320\261.\321\200\321\203\321\201","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",
"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\321\203\320\277\321\200.\321\201\321\200\320\261","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""
,"","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","\320\274\320\270\321\200.\321\200\321\203\321\201"}; + if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH) { + unsigned int key = hash(str, len); + + if (key <= MAX_HASH_VALUE) { + const char* s = wordlist[key]; + + if (*str == *s && !strncmp(str + 1, s + 1, len - 1) && s[len] == '\0') + return s; + } + } + return nullptr; +} +#line 5060 "tldLookup.gperf" \ No newline at end of file diff --git a/be/src/vec/functions/url/tldLookup.h b/be/src/vec/functions/url/tldLookup.h new file mode 100644 index 00000000000000..9be88890c14bdd --- /dev/null +++ b/be/src/vec/functions/url/tldLookup.h @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// This file is copied from +// https://github.com/ClickHouse/ClickHouse/blob/master/src/Functions/URL/tldLookup.h +// and modified by Doris + +#pragma once + +#include + +// Definition of the class generated by gperf, present on gperf/tldLookup.gperf +class TopLevelDomainLookupHash { +private: + static inline unsigned int hash(const char* str, size_t len); + +public: + static const char* is_valid(const char* str, size_t len); +}; + +using tldLookup = TopLevelDomainLookupHash; \ No newline at end of file diff --git a/be/src/vec/io/io_helper.h b/be/src/vec/io/io_helper.h index d5ca522146a1cb..221beeccbb3be9 100644 --- a/be/src/vec/io/io_helper.h +++ b/be/src/vec/io/io_helper.h @@ -22,6 +22,7 @@ #include #include +#include #include "common/exception.h" #include "util/binary_cast.hpp" @@ -168,7 +169,9 @@ void read_float_binary(Type& x, BufferReadable& buf) { read_pod_binary(x, buf); } -inline void read_string_binary(std::string& s, BufferReadable& buf, +template + requires(std::is_same_v || std::is_same_v>) +inline void read_string_binary(Type& s, BufferReadable& buf, size_t MAX_STRING_SIZE = DEFAULT_MAX_STRING_SIZE) { UInt64 size = 0; read_var_uint(size, buf); @@ -178,7 +181,7 @@ inline void read_string_binary(std::string& s, BufferReadable& buf, } s.resize(size); - buf.read(s.data(), size); + buf.read((char*)s.data(), size); } inline void read_string_binary(StringRef& s, BufferReadable& buf, @@ -225,7 +228,9 @@ void read_vector_binary(std::vector& v, BufferReadable& buf, } } -inline void read_binary(String& x, BufferReadable& buf) { +template + requires(std::is_same_v || std::is_same_v>) +inline void read_binary(Type& x, BufferReadable& buf) { read_string_binary(x, buf); } diff --git a/be/src/vec/olap/vertical_block_reader.cpp b/be/src/vec/olap/vertical_block_reader.cpp index 5367729f637e15..369bf04459a8f2 100644 --- a/be/src/vec/olap/vertical_block_reader.cpp +++ b/be/src/vec/olap/vertical_block_reader.cpp @@ -76,7 +76,6 @@ Status VerticalBlockReader::_get_segment_iterators(const ReaderParams& read_para << ", version:" << read_params.version; return res; } - _reader_context.is_vertical_compaction = true; for (const auto& rs_split : read_params.rs_splits) { // segment iterator will be inited here // In vertical compaction, every group will load segment so we should cache diff --git a/be/src/vec/sink/vdata_stream_sender.cpp b/be/src/vec/sink/vdata_stream_sender.cpp index ac820bcab2929a..d21b87561b5da7 100644 --- a/be/src/vec/sink/vdata_stream_sender.cpp +++ b/be/src/vec/sink/vdata_stream_sender.cpp @@ -95,7 +95,7 @@ Status Channel::open(RuntimeState* state) { } _be_number = state->be_number(); - _brpc_timeout_ms = std::min(3600, state->execution_timeout()) * 1000; + _brpc_timeout_ms = get_execution_rpc_timeout_ms(state->execution_timeout()); _serializer.set_is_local(_is_local); @@ -238,14 +238,13 @@ Status BlockSerializer::next_serialized_block(Block* block, PBlock* dest, size_t } { + SCOPED_TIMER(_parent->merge_block_timer()); if (rows) { if (!rows->empty()) { - SCOPED_TIMER(_parent->split_block_distribute_by_channel_timer()); const auto* begin = rows->data(); 
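The io_helper.h hunk above generalizes read_string_binary from std::string to any string-like target through a C++20 requires-clause, casting data() to char* so non-char containers also work. A rough, self-contained sketch of that pattern follows; BufferLike, the accepted target types and the one-byte length prefix (the real code reads a var-uint) are simplifying assumptions.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <vector>

// Hypothetical stand-in for BufferReadable: a flat in-memory buffer.
struct BufferLike {
    const char* data = nullptr;
    std::size_t size = 0;
    std::size_t pos = 0;

    void read(char* dst, std::size_t n) {
        if (pos + n > size) throw std::runtime_error("short read");
        std::memcpy(dst, data + pos, n);
        pos += n;
    }
};

// One reader for every permitted "string-like" target, constrained by a
// requires-clause in the spirit of the change above.
template <typename T>
    requires(std::is_same_v<T, std::string> || std::is_same_v<T, std::vector<std::uint8_t>>)
void read_string_binary(T& s, BufferLike& buf, std::size_t max_string_size = 1 << 20) {
    std::uint8_t size = 0;                        // length prefix (var-uint in the real code)
    buf.read(reinterpret_cast<char*>(&size), 1);
    if (size > max_string_size) throw std::runtime_error("string size is too large");
    s.resize(size);
    buf.read(reinterpret_cast<char*>(s.data()), size); // cast covers non-char element types
}

int main() {
    const char payload[] = {5, 'h', 'e', 'l', 'l', 'o'};
    BufferLike buf {payload, sizeof(payload)};
    std::string s;
    read_string_binary(s, buf); // s == "hello"
}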
RETURN_IF_ERROR(_mutable_block->add_rows(block, begin, begin + rows->size())); } } else if (!block->empty()) { - SCOPED_TIMER(_parent->merge_block_timer()); RETURN_IF_ERROR(_mutable_block->merge(*block)); } } diff --git a/be/src/vec/sink/vdata_stream_sender.h b/be/src/vec/sink/vdata_stream_sender.h index 88bb804fd8004f..024d87ab32f49c 100644 --- a/be/src/vec/sink/vdata_stream_sender.h +++ b/be/src/vec/sink/vdata_stream_sender.h @@ -86,6 +86,7 @@ class BlockSerializer { void reset_block() { _mutable_block.reset(); } void set_is_local(bool is_local) { _is_local = is_local; } + bool is_local() const { return _is_local; } private: pipeline::ExchangeSinkLocalState* _parent; diff --git a/be/src/vec/sink/vrow_distribution.cpp b/be/src/vec/sink/vrow_distribution.cpp index 3a4c7e911f4c14..b79df49f0626d6 100644 --- a/be/src/vec/sink/vrow_distribution.cpp +++ b/be/src/vec/sink/vrow_distribution.cpp @@ -23,7 +23,7 @@ #include #include -#include +#include #include "common/logging.h" #include "common/status.h" @@ -50,7 +50,7 @@ VRowDistribution::_get_partition_function() { Status VRowDistribution::_save_missing_values( std::vector>& col_strs, // non-const ref for move - int col_size, Block* block, std::vector filter, + int col_size, Block* block, const std::vector& filter, const std::vector& col_null_maps) { // de-duplication for new partitions but save all rows. RETURN_IF_ERROR(_batching_block->add_rows(block, filter)); @@ -116,6 +116,10 @@ Status VRowDistribution::automatic_create_partition() { if (result.status.status_code == TStatusCode::OK) { // add new created partitions RETURN_IF_ERROR(_vpartition->add_partitions(result.partitions)); + for (const auto& part : result.partitions) { + _new_partition_ids.insert(part.id); + VLOG_TRACE << "record new id: " << part.id; + } RETURN_IF_ERROR(_create_partition_callback(_caller, &result)); } @@ -134,7 +138,7 @@ static TCreatePartitionResult cast_as_create_result(TReplacePartitionResult& arg // use _partitions and replace them Status VRowDistribution::_replace_overwriting_partition() { - SCOPED_TIMER(_add_partition_request_timer); + SCOPED_TIMER(_add_partition_request_timer); // also for replace_partition TReplacePartitionRequest request; TReplacePartitionResult result; request.__set_overwrite_group_id(_vpartition->get_overwrite_group_id()); @@ -144,16 +148,20 @@ Status VRowDistribution::_replace_overwriting_partition() { // only request for partitions not recorded for replacement std::set id_deduper; for (const auto* part : _partitions) { - if (part == nullptr) [[unlikely]] { - return Status::InternalError( - "Cannot found origin partitions in auto detect overwriting, stop processing"); - } - if (_new_partition_ids.contains(part->id)) { - // this is a new partition. dont replace again. - } else { - // request for replacement - id_deduper.insert(part->id); - } + if (part != nullptr) { + if (_new_partition_ids.contains(part->id)) { + // this is a new partition. dont replace again. + VLOG_TRACE << "skip new partition: " << part->id; + } else { + // request for replacement + id_deduper.insert(part->id); + } + } else if (_missing_map.empty()) { + // no origin partition. and not allow to create. + return Status::InvalidArgument( + "Cannot found origin partitions in auto detect overwriting, stop " + "processing"); + } // else: part is null and _missing_map is not empty. dealed outside using auto-partition way. nothing to do here. 
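A condensed view of the partition-filtering loop above, with simplified stand-in types: partitions created earlier in this load are skipped, the remaining ones are queued for replacement, and a null partition is only an error when there is no pending missing-key handling. This is a sketch of the control flow, not the real member functions.

#include <cstdint>
#include <optional>
#include <set>
#include <vector>

// Simplified stand-in for the real partition object.
struct Part {
    int64_t id;
};

std::optional<std::set<int64_t>> collect_ids_to_replace(const std::vector<const Part*>& partitions,
                                                        const std::set<int64_t>& new_partition_ids,
                                                        bool missing_map_empty) {
    std::set<int64_t> id_deduper;
    for (const auto* part : partitions) {
        if (part != nullptr) {
            if (!new_partition_ids.contains(part->id)) {
                id_deduper.insert(part->id); // request replacement
            }                                // else: newly created, do not replace it again
        } else if (missing_map_empty) {
            return std::nullopt;             // no origin partition and creation not allowed
        }                                    // else: handled later by the auto-partition path
    }
    return id_deduper;
}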
} if (id_deduper.empty()) { return Status::OK(); // no need to request @@ -182,6 +190,7 @@ Status VRowDistribution::_replace_overwriting_partition() { // record new partitions for (const auto& part : result.partitions) { _new_partition_ids.insert(part.id); + VLOG_TRACE << "record new id: " << part.id; } // replace data in _partitions RETURN_IF_ERROR(_vpartition->replace_partitions(request_part_ids, result.partitions)); @@ -304,6 +313,52 @@ Status VRowDistribution::_generate_rows_distribution_for_non_auto_partition( return Status::OK(); } +Status VRowDistribution::_deal_missing_map(vectorized::Block* block, + const std::vector& partition_cols_idx, + int64_t& rows_stat_val) { + // for missing partition keys, calc the missing partition and save in _partitions_need_create + auto [part_ctxs, part_exprs] = _get_partition_function(); + auto part_col_num = part_exprs.size(); + // the two vectors are in column-first-order + std::vector> col_strs; + std::vector col_null_maps; + col_strs.resize(part_col_num); + col_null_maps.reserve(part_col_num); + + for (int i = 0; i < part_col_num; ++i) { + auto return_type = part_exprs[i]->data_type(); + // expose the data column. the return type would be nullable + const auto& [range_left_col, col_const] = + unpack_if_const(block->get_by_position(partition_cols_idx[i]).column); + if (range_left_col->is_nullable()) { + col_null_maps.push_back(&( + assert_cast(range_left_col.get())->get_null_map_data())); + } else { + col_null_maps.push_back(nullptr); + } + for (auto row : _missing_map) { + col_strs[i].push_back( + return_type->to_string(*range_left_col, index_check_const(row, col_const))); + } + } + + // calc the end value and save them. in the end of sending, we will create partitions for them and deal them. + RETURN_IF_ERROR( + _save_missing_values(col_strs, part_col_num, block, _missing_map, col_null_maps)); + + size_t new_bt_rows = _batching_block->rows(); + size_t new_bt_bytes = _batching_block->bytes(); + rows_stat_val -= new_bt_rows - _batching_rows; + _state->update_num_rows_load_total(_batching_rows - new_bt_rows); + _state->update_num_bytes_load_total(_batching_bytes - new_bt_bytes); + DorisMetrics::instance()->load_rows->increment(_batching_rows - new_bt_rows); + DorisMetrics::instance()->load_bytes->increment(_batching_bytes - new_bt_bytes); + _batching_rows = new_bt_rows; + _batching_bytes = new_bt_bytes; + + return Status::OK(); +} + Status VRowDistribution::_generate_rows_distribution_for_auto_partition( vectorized::Block* block, const std::vector& partition_cols_idx, bool has_filtered_rows, std::vector& row_part_tablet_ids, @@ -311,7 +366,7 @@ Status VRowDistribution::_generate_rows_distribution_for_auto_partition( auto num_rows = block->rows(); std::vector partition_keys = _vpartition->get_partition_keys(); - auto partition_col = block->get_by_position(partition_keys[0]); + auto& partition_col = block->get_by_position(partition_keys[0]); _missing_map.clear(); _missing_map.reserve(partition_col.column->size()); bool stop_processing = false; @@ -329,63 +384,64 @@ Status VRowDistribution::_generate_rows_distribution_for_auto_partition( RETURN_IF_ERROR(_filter_block(block, row_part_tablet_ids)); if (!_missing_map.empty()) { - // for missing partition keys, calc the missing partition and save in _partitions_need_create - auto [part_ctxs, part_exprs] = _get_partition_function(); - auto part_col_num = part_exprs.size(); - // the two vectors are in column-first-order - std::vector> col_strs; - std::vector col_null_maps; - col_strs.resize(part_col_num); - 
col_null_maps.reserve(part_col_num); - - for (int i = 0; i < part_col_num; ++i) { - auto return_type = part_exprs[i]->data_type(); - // expose the data column. the return type would be nullable - const auto& [range_left_col, col_const] = - unpack_if_const(block->get_by_position(partition_cols_idx[i]).column); - if (range_left_col->is_nullable()) { - col_null_maps.push_back(&(assert_cast(range_left_col.get()) - ->get_null_map_data())); - } else { - col_null_maps.push_back(nullptr); - } - for (auto row : _missing_map) { - col_strs[i].push_back( - return_type->to_string(*range_left_col, index_check_const(row, col_const))); - } - } - - // calc the end value and save them. in the end of sending, we will create partitions for them and deal them. - RETURN_IF_ERROR( - _save_missing_values(col_strs, part_col_num, block, _missing_map, col_null_maps)); - - size_t new_bt_rows = _batching_block->rows(); - size_t new_bt_bytes = _batching_block->bytes(); - rows_stat_val -= new_bt_rows - _batching_rows; - _state->update_num_rows_load_total(_batching_rows - new_bt_rows); - _state->update_num_bytes_load_total(_batching_bytes - new_bt_bytes); - DorisMetrics::instance()->load_rows->increment(_batching_rows - new_bt_rows); - DorisMetrics::instance()->load_bytes->increment(_batching_bytes - new_bt_bytes); - _batching_rows = new_bt_rows; - _batching_bytes = new_bt_bytes; + RETURN_IF_ERROR(_deal_missing_map(block, partition_cols_idx, rows_stat_val)); } return Status::OK(); } Status VRowDistribution::_generate_rows_distribution_for_auto_overwrite( - vectorized::Block* block, bool has_filtered_rows, - std::vector& row_part_tablet_ids) { + vectorized::Block* block, const std::vector& partition_cols_idx, + bool has_filtered_rows, std::vector& row_part_tablet_ids, + int64_t& rows_stat_val) { auto num_rows = block->rows(); + // for non-auto-partition situation, goes into two 'else' branch. just find the origin partitions, replace them by rpc, + // and find the new partitions to use. + // for auto-partition's, find and save origins in _partitions and replace them. at meanwhile save the missing values for auto + // partition. then we find partition again to get replaced partitions in _partitions. this time _missing_map is ignored cuz + // we already saved missing values. bool stop_processing = false; - RETURN_IF_ERROR(_tablet_finder->find_tablets(_state, block, num_rows, _partitions, - _tablet_indexes, stop_processing, _skip)); + if (_vpartition->is_auto_partition() && + _state->query_options().enable_auto_create_when_overwrite) { + // allow auto create partition for missing rows. + std::vector partition_keys = _vpartition->get_partition_keys(); + auto partition_col = block->get_by_position(partition_keys[0]); + _missing_map.clear(); + _missing_map.reserve(partition_col.column->size()); + + RETURN_IF_ERROR(_tablet_finder->find_tablets(_state, block, num_rows, _partitions, + _tablet_indexes, stop_processing, _skip, + &_missing_map)); + + // allow and really need to create during auto-detect-overwriting. 
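The _deal_missing_map helper factored out above also rolls back the load statistics for rows deferred into the batching block, since those rows are only counted once the batched block is replayed. A hypothetical, condensed sketch of that bookkeeping, with illustrative names:

#include <cstdint>

struct BatchingStats {
    int64_t rows = 0;
    int64_t bytes = 0;
};

void account_deferred_rows(BatchingStats& prev, int64_t new_rows, int64_t new_bytes,
                           int64_t& rows_stat_val, int64_t& rows_load_total,
                           int64_t& bytes_load_total) {
    rows_stat_val -= new_rows - prev.rows;   // rows moved out of the current block
    rows_load_total += prev.rows - new_rows; // negative delta undoes the earlier count
    bytes_load_total += prev.bytes - new_bytes;
    prev.rows = new_rows;                    // remember the batching block's new size
    prev.bytes = new_bytes;
}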
+ if (!_missing_map.empty()) { + RETURN_IF_ERROR(_deal_missing_map(block, partition_cols_idx, rows_stat_val)); + } + } else { + RETURN_IF_ERROR(_tablet_finder->find_tablets(_state, block, num_rows, _partitions, + _tablet_indexes, stop_processing, _skip)); + } RETURN_IF_ERROR(_replace_overwriting_partition()); // regenerate locations for new partitions & tablets _reset_find_tablets(num_rows); - RETURN_IF_ERROR(_tablet_finder->find_tablets(_state, block, num_rows, _partitions, - _tablet_indexes, stop_processing, _skip)); + if (_vpartition->is_auto_partition() && + _state->query_options().enable_auto_create_when_overwrite) { + // here _missing_map is just a placeholder + RETURN_IF_ERROR(_tablet_finder->find_tablets(_state, block, num_rows, _partitions, + _tablet_indexes, stop_processing, _skip, + &_missing_map)); + if (VLOG_TRACE_IS_ON) { + std::string tmp; + for (auto v : _missing_map) { + tmp += std::to_string(v).append(", "); + } + VLOG_TRACE << "Trace missing map of " << this << ':' << tmp; + } + } else { + RETURN_IF_ERROR(_tablet_finder->find_tablets(_state, block, num_rows, _partitions, + _tablet_indexes, stop_processing, _skip)); + } if (has_filtered_rows) { for (int i = 0; i < num_rows; i++) { _skip[i] = _skip[i] || _block_convertor->filter_map()[i]; @@ -456,10 +512,11 @@ Status VRowDistribution::generate_rows_distribution( } Status st = Status::OK(); - if (_vpartition->is_auto_detect_overwrite()) { + if (_vpartition->is_auto_detect_overwrite() && !_deal_batched) { // when overwrite, no auto create partition allowed. - st = _generate_rows_distribution_for_auto_overwrite(block.get(), has_filtered_rows, - row_part_tablet_ids); + st = _generate_rows_distribution_for_auto_overwrite(block.get(), partition_cols_idx, + has_filtered_rows, row_part_tablet_ids, + rows_stat_val); } else if (_vpartition->is_auto_partition() && !_deal_batched) { st = _generate_rows_distribution_for_auto_partition(block.get(), partition_cols_idx, has_filtered_rows, row_part_tablet_ids, diff --git a/be/src/vec/sink/vrow_distribution.h b/be/src/vec/sink/vrow_distribution.h index fffe0e3f7f1887..88002c3c21139d 100644 --- a/be/src/vec/sink/vrow_distribution.h +++ b/be/src/vec/sink/vrow_distribution.h @@ -143,7 +143,7 @@ class VRowDistribution { std::pair _get_partition_function(); Status _save_missing_values(std::vector>& col_strs, int col_size, - Block* block, std::vector filter, + Block* block, const std::vector& filter, const std::vector& col_null_maps); void _get_tablet_ids(vectorized::Block* block, int32_t index_idx, @@ -162,14 +162,19 @@ class VRowDistribution { vectorized::Block* block, const std::vector& partition_col_idx, bool has_filtered_rows, std::vector& row_part_tablet_ids, int64_t& rows_stat_val); + // the whole process to deal missing rows. 
will call _save_missing_values + Status _deal_missing_map(vectorized::Block* block, + const std::vector& partition_cols_idx, + int64_t& rows_stat_val); Status _generate_rows_distribution_for_non_auto_partition( vectorized::Block* block, bool has_filtered_rows, std::vector& row_part_tablet_ids); Status _generate_rows_distribution_for_auto_overwrite( - vectorized::Block* block, bool has_filtered_rows, - std::vector& row_part_tablet_ids); + vectorized::Block* block, const std::vector& partition_cols_idx, + bool has_filtered_rows, std::vector& row_part_tablet_ids, + int64_t& rows_stat_val); Status _replace_overwriting_partition(); void _reset_row_part_tablet_ids(std::vector& row_part_tablet_ids, diff --git a/be/src/vec/sink/writer/vjdbc_table_writer.cpp b/be/src/vec/sink/writer/vjdbc_table_writer.cpp index d54768e58fe3c6..8c24f4746adf83 100644 --- a/be/src/vec/sink/writer/vjdbc_table_writer.cpp +++ b/be/src/vec/sink/writer/vjdbc_table_writer.cpp @@ -52,7 +52,6 @@ JdbcConnectorParam VJdbcTableWriter::create_connect_param(const doris::TDataSink jdbc_param.connection_pool_max_wait_time = t_jdbc_sink.jdbc_table.connection_pool_max_wait_time; jdbc_param.connection_pool_max_life_time = t_jdbc_sink.jdbc_table.connection_pool_max_life_time; jdbc_param.connection_pool_keep_alive = t_jdbc_sink.jdbc_table.connection_pool_keep_alive; - jdbc_param.enable_connection_pool = t_jdbc_sink.jdbc_table.enable_connection_pool; return jdbc_param; } diff --git a/be/src/vec/sink/writer/vtablet_writer.cpp b/be/src/vec/sink/writer/vtablet_writer.cpp index ebd6f67e2af5d2..504ffb9cb749bf 100644 --- a/be/src/vec/sink/writer/vtablet_writer.cpp +++ b/be/src/vec/sink/writer/vtablet_writer.cpp @@ -1422,7 +1422,7 @@ Status VTabletWriter::_send_new_partition_batch() { Block tmp_block = _row_distribution._batching_block->to_block(); // Borrow out, for lval ref - // these order is only. + // these order is unique. // 1. clear batching stats(and flag goes true) so that we won't make a new batching process in dealing batched block. // 2. deal batched block // 3. now reuse the column of lval block. cuz write doesn't real adjust it. it generate a new block from that. diff --git a/be/src/vec/sink/writer/vtablet_writer_v2.cpp b/be/src/vec/sink/writer/vtablet_writer_v2.cpp index 6b1423b125767a..96dfd85d297208 100644 --- a/be/src/vec/sink/writer/vtablet_writer_v2.cpp +++ b/be/src/vec/sink/writer/vtablet_writer_v2.cpp @@ -531,7 +531,7 @@ Status VTabletWriterV2::_send_new_partition_batch() { Block tmp_block = _row_distribution._batching_block->to_block(); // Borrow out, for lval ref - // these order is only. + // these order is unique. // 1. clear batching stats(and flag goes true) so that we won't make a new batching process in dealing batched block. // 2. deal batched block // 3. now reuse the column of lval block. cuz write doesn't real adjust it. it generate a new block from that. 
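The comment above pins down the ordering _send_new_partition_batch must follow: borrow the batched rows out, clear the batching state and set the dealing-batched flag before writing, so the nested write cannot start another batching round, and only then reuse the borrowed block. An illustrative-only sketch of that ordering, with all names hypothetical:

#include <utility>
#include <vector>

struct Block {
    std::vector<int> rows;
};

struct WriterSketch {
    Block batching_block;
    bool deal_batched = false;

    void write(Block&& block) {
        // The real write() may route rows with still-missing partitions back
        // into batching_block; with deal_batched already true it will not.
        (void)block;
    }

    void send_new_partition_batch() {
        Block tmp = std::move(batching_block); // borrow out, for lvalue ref
        batching_block = Block {};             // 1. clear batching stats, flag goes true
        deal_batched = true;
        write(std::move(tmp));                 // 2. deal the batched block
        deal_batched = false;                  // 3. the columns of tmp can now be reused
    }
};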
diff --git a/be/src/vec/spill/spill_reader.cpp b/be/src/vec/spill/spill_reader.cpp index f0320ee9b490bb..3c2f00273a4b5d 100644 --- a/be/src/vec/spill/spill_reader.cpp +++ b/be/src/vec/spill/spill_reader.cpp @@ -19,6 +19,7 @@ #include +#include "common/cast_set.h" #include "common/exception.h" #include "io/file_factory.h" #include "io/fs/file_reader.h" @@ -27,6 +28,7 @@ #include "util/slice.h" #include "vec/core/block.h" namespace doris { +#include "common/compile_check_begin.h" namespace io { class FileSystem; } // namespace io @@ -113,7 +115,7 @@ Status SpillReader::read(Block* block, bool* eos) { if (bytes_read > 0) { { SCOPED_TIMER(deserialize_timer_); - if (!pb_block_.ParseFromArray(result.data, result.size)) { + if (!pb_block_.ParseFromArray(result.data, cast_set(result.size))) { return Status::InternalError("Failed to read spilled block"); } RETURN_IF_ERROR(block->deserialize(pb_block_)); diff --git a/be/src/vec/spill/spill_reader.h b/be/src/vec/spill/spill_reader.h index 6694bf91572e7c..6dea8376206df9 100644 --- a/be/src/vec/spill/spill_reader.h +++ b/be/src/vec/spill/spill_reader.h @@ -28,6 +28,7 @@ #include "util/runtime_profile.h" namespace doris::vectorized { +#include "common/compile_check_begin.h" class Block; class SpillReader { public: @@ -78,4 +79,5 @@ class SpillReader { using SpillReaderUPtr = std::unique_ptr; -} // namespace doris::vectorized \ No newline at end of file +} // namespace doris::vectorized +#include "common/compile_check_end.h" diff --git a/be/src/vec/spill/spill_stream.cpp b/be/src/vec/spill/spill_stream.cpp index 7189fad262c465..d83cbabb1893d9 100644 --- a/be/src/vec/spill/spill_stream.cpp +++ b/be/src/vec/spill/spill_stream.cpp @@ -34,6 +34,7 @@ #include "vec/spill/spill_writer.h" namespace doris::vectorized { +#include "common/compile_check_begin.h" SpillStream::SpillStream(RuntimeState* state, int64_t stream_id, SpillDataDir* data_dir, std::string spill_dir, size_t batch_rows, size_t batch_bytes, RuntimeProfile* profile) diff --git a/be/src/vec/spill/spill_stream.h b/be/src/vec/spill/spill_stream.h index ad30a0bbd1d21b..a95a4760322703 100644 --- a/be/src/vec/spill/spill_stream.h +++ b/be/src/vec/spill/spill_stream.h @@ -24,6 +24,7 @@ #include "vec/spill/spill_writer.h" namespace doris { +#include "common/compile_check_begin.h" class RuntimeProfile; class ThreadPool; @@ -109,4 +110,5 @@ class SpillStream { }; using SpillStreamSPtr = std::shared_ptr; } // namespace vectorized -} // namespace doris \ No newline at end of file +} // namespace doris +#include "common/compile_check_end.h" diff --git a/be/src/vec/spill/spill_stream_manager.cpp b/be/src/vec/spill/spill_stream_manager.cpp index 61e96559d23628..89bd09b7fadd06 100644 --- a/be/src/vec/spill/spill_stream_manager.cpp +++ b/be/src/vec/spill/spill_stream_manager.cpp @@ -41,6 +41,7 @@ #include "vec/spill/spill_stream.h" namespace doris::vectorized { +#include "common/compile_check_begin.h" SpillStreamManager::SpillStreamManager( std::unordered_map>&& @@ -350,8 +351,8 @@ Status SpillDataDir::update_capacity() { &_available_bytes)); spill_disk_capacity->set_value(_disk_capacity_bytes); spill_disk_avail_capacity->set_value(_available_bytes); - auto disk_use_max_bytes = (int64_t)(_disk_capacity_bytes * - config::storage_flood_stage_usage_percent / (double)100); + auto disk_use_max_bytes = + (int64_t)(_disk_capacity_bytes * config::storage_flood_stage_usage_percent / 100); bool is_percent = true; _spill_data_limit_bytes = ParseUtil::parse_mem_spec(config::spill_storage_limit, -1, _disk_capacity_bytes, 
&is_percent); @@ -363,9 +364,8 @@ Status SpillDataDir::update_capacity() { return Status::InvalidArgument(err_msg); } if (is_percent) { - _spill_data_limit_bytes = - (int64_t)(_spill_data_limit_bytes * config::storage_flood_stage_usage_percent / - (double)100); + _spill_data_limit_bytes = (int64_t)(_spill_data_limit_bytes * + config::storage_flood_stage_usage_percent / 100); } if (_spill_data_limit_bytes > disk_use_max_bytes) { _spill_data_limit_bytes = disk_use_max_bytes; diff --git a/be/src/vec/spill/spill_stream_manager.h b/be/src/vec/spill/spill_stream_manager.h index 66c71724f162fe..7bcfe9500979b9 100644 --- a/be/src/vec/spill/spill_stream_manager.h +++ b/be/src/vec/spill/spill_stream_manager.h @@ -28,6 +28,7 @@ #include "util/threadpool.h" #include "vec/spill/spill_stream.h" namespace doris { +#include "common/compile_check_begin.h" class RuntimeProfile; namespace vectorized { @@ -77,7 +78,7 @@ class SpillDataDir { double _get_disk_usage(int64_t incoming_data_size) const { return _disk_capacity_bytes == 0 ? 0 - : (_disk_capacity_bytes - _available_bytes + incoming_data_size) / + : (double)(_disk_capacity_bytes - _available_bytes + incoming_data_size) / (double)_disk_capacity_bytes; } @@ -146,4 +147,5 @@ class SpillStreamManager { std::atomic_uint64_t id_ = 0; }; } // namespace vectorized -} // namespace doris \ No newline at end of file +} // namespace doris +#include "common/compile_check_end.h" diff --git a/be/src/vec/spill/spill_writer.cpp b/be/src/vec/spill/spill_writer.cpp index 46a97285802ea2..bf755e76452f5d 100644 --- a/be/src/vec/spill/spill_writer.cpp +++ b/be/src/vec/spill/spill_writer.cpp @@ -27,6 +27,7 @@ #include "vec/spill/spill_stream_manager.h" namespace doris::vectorized { +#include "common/compile_check_begin.h" Status SpillWriter::open() { if (file_writer_) { return Status::OK(); diff --git a/be/src/vec/spill/spill_writer.h b/be/src/vec/spill/spill_writer.h index d77bbd6908c4ab..921bd8ea874c7f 100644 --- a/be/src/vec/spill/spill_writer.h +++ b/be/src/vec/spill/spill_writer.h @@ -25,6 +25,7 @@ #include "util/runtime_profile.h" #include "vec/core/block.h" namespace doris { +#include "common/compile_check_begin.h" class RuntimeState; namespace vectorized { @@ -86,3 +87,5 @@ class SpillWriter { using SpillWriterUPtr = std::unique_ptr; } // namespace vectorized } // namespace doris + +#include "common/compile_check_end.h" diff --git a/be/test/olap/delete_bitmap_calculator_test.cpp b/be/test/olap/delete_bitmap_calculator_test.cpp index 7e527078613cd2..ee54a061363d5c 100644 --- a/be/test/olap/delete_bitmap_calculator_test.cpp +++ b/be/test/olap/delete_bitmap_calculator_test.cpp @@ -103,7 +103,8 @@ class DeleteBitmapCalculatorTest : public testing::Test { io::FileWriterPtr file_writer; Status st = fs->create_file(path, &file_writer); EXPECT_TRUE(st.ok()); - SegmentWriter writer(file_writer.get(), segment_id, build_schema, nullptr, nullptr, opts); + SegmentWriter writer(file_writer.get(), segment_id, build_schema, nullptr, nullptr, opts, + nullptr); st = writer.init(); EXPECT_TRUE(st.ok()); @@ -127,7 +128,8 @@ class DeleteBitmapCalculatorTest : public testing::Test { EXPECT_NE("", writer.min_encoded_key().to_string()); EXPECT_NE("", writer.max_encoded_key().to_string()); - st = segment_v2::Segment::open(fs, path, segment_id, rowset_id, query_schema, + int64_t tablet_id = 100; + st = segment_v2::Segment::open(fs, path, tablet_id, segment_id, rowset_id, query_schema, io::FileReaderOptions {}, res); EXPECT_TRUE(st.ok()); EXPECT_EQ(nrows, (*res)->num_rows()); diff --git 
a/be/test/olap/rowset/segment_v2/inverted_index/compaction/index_compaction_test.cpp b/be/test/olap/rowset/segment_v2/inverted_index/compaction/index_compaction_test.cpp new file mode 100644 index 00000000000000..922a77fcaa4e47 --- /dev/null +++ b/be/test/olap/rowset/segment_v2/inverted_index/compaction/index_compaction_test.cpp @@ -0,0 +1,443 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include + +#include "olap/cumulative_compaction.h" +#include "olap/rowset/beta_rowset_writer.h" +#include "olap/rowset/rowset_factory.h" +#include "olap/rowset/segment_v2/inverted_index/query/query_factory.h" +#include "olap/rowset/segment_v2/inverted_index_file_reader.h" +#include "olap/storage_engine.h" + +namespace doris { + +using namespace doris::vectorized; + +constexpr static uint32_t MAX_PATH_LEN = 1024; +constexpr static std::string_view dest_dir = "./ut_dir/inverted_index_test"; +constexpr static std::string_view tmp_dir = "./ut_dir/tmp"; +static int64_t inc_id = 1000; + +struct DataRow { + int key; + std::string word; + std::string url; + int num; +}; + +class IndexCompactionTest : public ::testing::Test { +protected: + void SetUp() override { + // absolute dir + char buffer[MAX_PATH_LEN]; + EXPECT_NE(getcwd(buffer, MAX_PATH_LEN), nullptr); + _curreent_dir = std::string(buffer); + _absolute_dir = _curreent_dir + std::string(dest_dir); + EXPECT_TRUE(io::global_local_filesystem()->delete_directory(_absolute_dir).ok()); + EXPECT_TRUE(io::global_local_filesystem()->create_directory(_absolute_dir).ok()); + + // tmp dir + EXPECT_TRUE(io::global_local_filesystem()->delete_directory(tmp_dir).ok()); + EXPECT_TRUE(io::global_local_filesystem()->create_directory(tmp_dir).ok()); + std::vector paths; + paths.emplace_back(std::string(tmp_dir), 1024000000); + auto tmp_file_dirs = std::make_unique(paths); + EXPECT_TRUE(tmp_file_dirs->init().ok()); + ExecEnv::GetInstance()->set_tmp_file_dir(std::move(tmp_file_dirs)); + + // storage engine + doris::EngineOptions options; + auto engine = std::make_unique(options); + _engine_ref = engine.get(); + _data_dir = std::make_unique(*_engine_ref, _absolute_dir); + static_cast(_data_dir->update_capacity()); + ExecEnv::GetInstance()->set_storage_engine(std::move(engine)); + + // tablet_schema + TabletSchemaPB schema_pb; + schema_pb.set_keys_type(KeysType::DUP_KEYS); + schema_pb.set_inverted_index_storage_format(InvertedIndexStorageFormatPB::V2); + + construct_column(schema_pb.add_column(), schema_pb.add_index(), 10000, "key_index", 0, + "INT", "key"); + construct_column(schema_pb.add_column(), schema_pb.add_index(), 10001, "v1_index", 1, + "STRING", "v1"); + construct_column(schema_pb.add_column(), schema_pb.add_index(), 10002, "v2_index", 2, + "STRING", "v2", true); + construct_column(schema_pb.add_column(), 
schema_pb.add_index(), 10003, "v3_index", 3, "INT", + "v3"); + + _tablet_schema.reset(new TabletSchema); + _tablet_schema->init_from_pb(schema_pb); + + // tablet + TabletMetaSharedPtr tablet_meta(new TabletMeta(_tablet_schema)); + + _tablet.reset(new Tablet(*_engine_ref, tablet_meta, _data_dir.get())); + EXPECT_TRUE(_tablet->init().ok()); + config::inverted_index_compaction_enable = true; + } + void TearDown() override { + EXPECT_TRUE(io::global_local_filesystem()->delete_directory(_tablet->tablet_path()).ok()); + EXPECT_TRUE(io::global_local_filesystem()->delete_directory(_absolute_dir).ok()); + EXPECT_TRUE(io::global_local_filesystem()->delete_directory(tmp_dir).ok()); + _engine_ref = nullptr; + ExecEnv::GetInstance()->set_storage_engine(nullptr); + } + + void construct_column(ColumnPB* column_pb, TabletIndexPB* tablet_index, int64_t index_id, + const std::string& index_name, int32_t col_unique_id, + const std::string& column_type, const std::string& column_name, + bool parser = false) { + column_pb->set_unique_id(col_unique_id); + column_pb->set_name(column_name); + column_pb->set_type(column_type); + column_pb->set_is_key(false); + column_pb->set_is_nullable(true); + tablet_index->set_index_id(index_id); + tablet_index->set_index_name(index_name); + tablet_index->set_index_type(IndexType::INVERTED); + tablet_index->add_col_unique_id(col_unique_id); + if (parser) { + auto* properties = tablet_index->mutable_properties(); + (*properties)[INVERTED_INDEX_PARSER_KEY] = INVERTED_INDEX_PARSER_UNICODE; + } + } + + RowsetWriterContext rowset_writer_context() { + RowsetWriterContext context; + RowsetId rowset_id; + rowset_id.init(inc_id); + context.rowset_id = rowset_id; + context.rowset_type = BETA_ROWSET; + context.data_dir = _data_dir.get(); + context.rowset_state = VISIBLE; + context.tablet_schema = _tablet_schema; + context.tablet_path = _tablet->tablet_path(); + context.version = Version(inc_id, inc_id); + context.max_rows_per_segment = 200; + inc_id++; + return context; + } + + IndexCompactionTest() = default; + ~IndexCompactionTest() override = default; + +private: + TabletSchemaSPtr _tablet_schema = nullptr; + StorageEngine* _engine_ref = nullptr; + std::unique_ptr _data_dir = nullptr; + TabletSharedPtr _tablet = nullptr; + std::string _absolute_dir; + std::string _curreent_dir; +}; + +std::vector read_data(const std::string file_name) { + std::ifstream file(file_name); + EXPECT_TRUE(file.is_open()); + + std::string line; + std::vector data; + + while (std::getline(file, line)) { + std::stringstream ss(line); + std::string item; + DataRow row; + EXPECT_TRUE(std::getline(ss, item, ',')); + row.key = std::stoi(item); + EXPECT_TRUE(std::getline(ss, item, ',')); + row.word = item; + EXPECT_TRUE(std::getline(ss, item, ',')); + row.url = item; + EXPECT_TRUE(std::getline(ss, item, ',')); + row.num = std::stoi(item); + data.emplace_back(std::move(row)); + } + + file.close(); + return data; +} + +bool query_bkd(const TabletIndex* index, + std::shared_ptr& inverted_index_file_reader, + const std::vector& query_data, const std::vector& query_result) { + const auto& idx_reader = BkdIndexReader::create_shared(index, inverted_index_file_reader); + const auto& index_searcher_builder = std::make_unique(); + auto dir = inverted_index_file_reader->open(index); + EXPECT_TRUE(dir.has_value()); + auto searcher_result = index_searcher_builder->get_index_searcher(dir.value().release()); + EXPECT_TRUE(searcher_result.has_value()); + auto bkd_searcher = std::get_if(&searcher_result.value()); + 
EXPECT_TRUE(bkd_searcher != nullptr); + idx_reader->_type_info = get_scalar_type_info((FieldType)(*bkd_searcher)->type); + EXPECT_TRUE(idx_reader->_type_info != nullptr); + idx_reader->_value_key_coder = get_key_coder(idx_reader->_type_info->type()); + + for (int i = 0; i < query_data.size(); i++) { + vectorized::Field param_value = Int32(query_data[i]); + std::unique_ptr query_param = nullptr; + EXPECT_TRUE(segment_v2::InvertedIndexQueryParamFactory::create_query_value( + PrimitiveType::TYPE_INT, ¶m_value, query_param) + .ok()); + auto result = std::make_shared(); + EXPECT_TRUE(idx_reader + ->invoke_bkd_query(query_param->get_value(), + InvertedIndexQueryType::EQUAL_QUERY, *bkd_searcher, + result) + .ok()); + EXPECT_EQ(query_result[i], result->cardinality()) << query_data[i]; + } + return true; +} + +bool query_string(const TabletIndex* index, + std::shared_ptr& inverted_index_file_reader, + const std::string& column_name, const std::vector& query_data, + const std::vector& query_result) { + const auto& idx_reader = + StringTypeInvertedIndexReader::create_shared(index, inverted_index_file_reader); + const auto& index_searcher_builder = std::make_unique(); + auto dir = inverted_index_file_reader->open(index); + EXPECT_TRUE(dir.has_value()); + auto searcher_result = index_searcher_builder->get_index_searcher(dir.value().release()); + EXPECT_TRUE(searcher_result.has_value()); + auto string_searcher = std::get_if(&searcher_result.value()); + EXPECT_TRUE(string_searcher != nullptr); + std::wstring column_name_ws = StringUtil::string_to_wstring(column_name); + + for (int i = 0; i < query_data.size(); i++) { + TQueryOptions queryOptions; + auto query = QueryFactory::create(InvertedIndexQueryType::EQUAL_QUERY, *string_searcher, + queryOptions); + EXPECT_TRUE(query != nullptr); + InvertedIndexQueryInfo query_info; + query_info.field_name = column_name_ws; + query_info.terms.emplace_back(query_data[i]); + query->add(query_info); + auto result = std::make_shared(); + query->search(*result); + EXPECT_EQ(query_result[i], result->cardinality()) << query_data[i]; + } + return true; +} + +bool query_fulltext(const TabletIndex* index, + std::shared_ptr& inverted_index_file_reader, + const std::string& column_name, const std::vector& query_data, + const std::vector& query_result) { + const auto& idx_reader = FullTextIndexReader::create_shared(index, inverted_index_file_reader); + const auto& index_searcher_builder = std::make_unique(); + auto dir = inverted_index_file_reader->open(index); + EXPECT_TRUE(dir.has_value()); + auto searcher_result = index_searcher_builder->get_index_searcher(dir.value().release()); + EXPECT_TRUE(searcher_result.has_value()); + auto string_searcher = std::get_if(&searcher_result.value()); + EXPECT_TRUE(string_searcher != nullptr); + std::wstring column_name_ws = StringUtil::string_to_wstring(column_name); + + for (int i = 0; i < query_data.size(); i++) { + TQueryOptions queryOptions; + auto query = QueryFactory::create(InvertedIndexQueryType::MATCH_ANY_QUERY, *string_searcher, + queryOptions); + EXPECT_TRUE(query != nullptr); + InvertedIndexQueryInfo query_info; + query_info.field_name = column_name_ws; + query_info.terms.emplace_back(query_data[i]); + query->add(query_info); + auto result = std::make_shared(); + query->search(*result); + EXPECT_EQ(query_result[i], result->cardinality()) << query_data[i]; + } + return true; +} + +TEST_F(IndexCompactionTest, write_index_test) { + EXPECT_TRUE(io::global_local_filesystem()->delete_directory(_tablet->tablet_path()).ok()); + 
EXPECT_TRUE(io::global_local_filesystem()->create_directory(_tablet->tablet_path()).ok()); + std::string data_file1 = + _curreent_dir + "/be/test/olap/rowset/segment_v2/inverted_index/data/data1.csv"; + std::string data_file2 = + _curreent_dir + "/be/test/olap/rowset/segment_v2/inverted_index/data/data2.csv"; + + std::vector> data; + data.emplace_back(read_data(data_file1)); + data.emplace_back(read_data(data_file2)); + + std::vector rowsets(data.size()); + for (int i = 0; i < data.size(); i++) { + const auto& res = + RowsetFactory::create_rowset_writer(*_engine_ref, rowset_writer_context(), false); + EXPECT_TRUE(res.has_value()) << res.error(); + const auto& rowset_writer = res.value(); + + Block block = _tablet_schema->create_block(); + auto columns = block.mutate_columns(); + for (const auto& row : data[i]) { + vectorized::Field key = Int32(row.key); + vectorized::Field v1 = row.word; + vectorized::Field v2 = row.url; + vectorized::Field v3 = Int32(row.num); + columns[0]->insert(key); + columns[1]->insert(v1); + columns[2]->insert(v2); + columns[3]->insert(v3); + } + EXPECT_TRUE(rowset_writer->add_block(&block).ok()); + EXPECT_TRUE(rowset_writer->flush().ok()); + const auto& dst_writer = dynamic_cast(rowset_writer.get()); + + // inverted index file writer + for (const auto& [seg_id, idx_file_writer] : dst_writer->_idx_files.get_file_writers()) { + EXPECT_TRUE(idx_file_writer->_closed); + } + + EXPECT_TRUE(rowset_writer->build(rowsets[i]).ok()); + EXPECT_TRUE(_tablet->add_rowset(rowsets[i]).ok()); + EXPECT_TRUE(rowsets[i]->num_segments() == 5); + + // check rowset meta and file + for (int seg_id = 0; seg_id < rowsets[i]->num_segments(); seg_id++) { + const auto& index_info = rowsets[i]->_rowset_meta->inverted_index_file_info(seg_id); + EXPECT_TRUE(index_info.has_index_size()); + const auto& fs = rowsets[i]->_rowset_meta->fs(); + const auto& file_name = fmt::format("{}/{}_{}.idx", rowsets[i]->tablet_path(), + rowsets[i]->rowset_id().to_string(), seg_id); + int64_t file_size = 0; + EXPECT_TRUE(fs->file_size(file_name, &file_size).ok()); + EXPECT_EQ(index_info.index_size(), file_size); + + const auto& seg_path = rowsets[i]->segment_path(seg_id); + EXPECT_TRUE(seg_path.has_value()); + const auto& index_file_path_prefix = + InvertedIndexDescriptor::get_index_file_path_prefix(seg_path.value()); + auto inverted_index_file_reader = std::make_shared( + fs, std::string(index_file_path_prefix), + _tablet_schema->get_inverted_index_storage_format(), index_info); + EXPECT_TRUE(inverted_index_file_reader->init().ok()); + const auto& dirs = inverted_index_file_reader->get_all_directories(); + EXPECT_TRUE(dirs.has_value()); + EXPECT_EQ(dirs.value().size(), 4); + } + } + + CumulativeCompaction compaction(*_engine_ref, _tablet); + compaction._input_rowsets = std::move(rowsets); + compaction.build_basic_info(); + + std::vector input_rs_readers; + input_rs_readers.reserve(compaction._input_rowsets.size()); + for (auto& rowset : compaction._input_rowsets) { + RowsetReaderSharedPtr rs_reader; + EXPECT_TRUE(rowset->create_reader(&rs_reader).ok()); + input_rs_readers.push_back(std::move(rs_reader)); + } + + RowsetWriterContext ctx; + EXPECT_TRUE(compaction.construct_output_rowset_writer(ctx).ok()); + + // col word + EXPECT_TRUE(ctx.columns_to_do_index_compaction.contains(1)); + // col url + EXPECT_TRUE(ctx.columns_to_do_index_compaction.contains(2)); + + compaction._stats.rowid_conversion = compaction._rowid_conversion.get(); + EXPECT_TRUE(Merger::vertical_merge_rowsets(_tablet, compaction.compaction_type(), + 
*(compaction._cur_tablet_schema), input_rs_readers, + compaction._output_rs_writer.get(), 100000, 5, + &compaction._stats) + .ok()); + const auto& dst_writer = + dynamic_cast(compaction._output_rs_writer.get()); + for (const auto& [seg_id, idx_file_writer] : dst_writer->_idx_files.get_file_writers()) { + EXPECT_FALSE(idx_file_writer->_closed); + } + auto st = compaction.do_inverted_index_compaction(); + EXPECT_TRUE(st.ok()) << st.to_string(); + + st = compaction._output_rs_writer->build(compaction._output_rowset); + EXPECT_TRUE(st.ok()) << st.to_string(); + + for (const auto& [seg_id, idx_file_writer] : dst_writer->_idx_files.get_file_writers()) { + EXPECT_TRUE(idx_file_writer->_closed); + } + EXPECT_TRUE(compaction._output_rowset->num_segments() == 1); + + const auto& output_rowset = compaction._output_rowset; + + // check rowset meta and file + for (int seg_id = 0; seg_id < output_rowset->num_segments(); seg_id++) { + // meta + const auto& index_info = output_rowset->_rowset_meta->inverted_index_file_info(seg_id); + EXPECT_TRUE(index_info.has_index_size()); + const auto& fs = output_rowset->_rowset_meta->fs(); + const auto& file_name = fmt::format("{}/{}_{}.idx", output_rowset->tablet_path(), + output_rowset->rowset_id().to_string(), seg_id); + int64_t file_size = 0; + EXPECT_TRUE(fs->file_size(file_name, &file_size).ok()); + EXPECT_EQ(index_info.index_size(), file_size); + + // file + const auto& seg_path = output_rowset->segment_path(seg_id); + EXPECT_TRUE(seg_path.has_value()); + const auto& index_file_path_prefix = + InvertedIndexDescriptor::get_index_file_path_prefix(seg_path.value()); + auto inverted_index_file_reader = std::make_shared( + fs, std::string(index_file_path_prefix), + _tablet_schema->get_inverted_index_storage_format(), index_info); + EXPECT_TRUE(inverted_index_file_reader->init().ok()); + const auto& dirs = inverted_index_file_reader->get_all_directories(); + EXPECT_TRUE(dirs.has_value()); + EXPECT_EQ(dirs.value().size(), 4); + + // read col key + const auto& key = _tablet_schema->column_by_uid(0); + const auto* key_index = _tablet_schema->get_inverted_index(key); + EXPECT_TRUE(key_index != nullptr); + std::vector query_data {99, 66, 56, 87, 85, 96, 20000}; + std::vector query_result {21, 25, 22, 18, 14, 18, 0}; + EXPECT_TRUE(query_bkd(key_index, inverted_index_file_reader, query_data, query_result)); + + // read col v3 + const auto& v3_column = _tablet_schema->column_by_uid(3); + const auto* v3_index = _tablet_schema->get_inverted_index(v3_column); + EXPECT_TRUE(v3_index != nullptr); + std::vector query_data3 {99, 66, 56, 87, 85, 96, 10000}; + std::vector query_result3 {12, 20, 25, 23, 16, 24, 0}; + EXPECT_TRUE(query_bkd(v3_index, inverted_index_file_reader, query_data3, query_result3)); + + // read col v1 + const auto& v1_column = _tablet_schema->column_by_uid(1); + const auto* v1_index = _tablet_schema->get_inverted_index(v1_column); + EXPECT_TRUE(v1_index != nullptr); + std::vector query_data1 {"good", "maybe", "great", "null"}; + std::vector query_result1 {197, 191, 194, 0}; + EXPECT_TRUE(query_string(v1_index, inverted_index_file_reader, "1", query_data1, + query_result1)); + + // read col v2 + const auto& v2_column = _tablet_schema->column_by_uid(2); + const auto* v2_index = _tablet_schema->get_inverted_index(v2_column); + EXPECT_TRUE(v2_index != nullptr); + std::vector query_data2 {"musicstream.com", "http", "https", "null"}; + std::vector query_result2 {191, 799, 1201, 0}; + EXPECT_TRUE(query_fulltext(v2_index, inverted_index_file_reader, "2", query_data2, + 
query_result2)); + } +} + +} // namespace doris diff --git a/be/test/olap/rowset/segment_v2/inverted_index/index_compaction_test.cpp b/be/test/olap/rowset/segment_v2/inverted_index/compaction/index_compaction_write_index_test.cpp similarity index 96% rename from be/test/olap/rowset/segment_v2/inverted_index/index_compaction_test.cpp rename to be/test/olap/rowset/segment_v2/inverted_index/compaction/index_compaction_write_index_test.cpp index 1343cfd6e34612..ad24b174e7529f 100644 --- a/be/test/olap/rowset/segment_v2/inverted_index/index_compaction_test.cpp +++ b/be/test/olap/rowset/segment_v2/inverted_index/compaction/index_compaction_write_index_test.cpp @@ -51,7 +51,7 @@ CL_NS_USE(util) namespace doris::segment_v2 { -class IndexCompactionTest : public testing::Test { +class IndexCompactionWriteIndexTest : public testing::Test { public: const std::string kTestDir = "./ut_dir/index_compress_test"; @@ -65,8 +65,8 @@ class IndexCompactionTest : public testing::Test { EXPECT_TRUE(io::global_local_filesystem()->delete_directory(kTestDir).ok()); } - IndexCompactionTest() = default; - ~IndexCompactionTest() override = default; + IndexCompactionWriteIndexTest() = default; + ~IndexCompactionWriteIndexTest() override = default; static constexpr int32_t doc_count = 100000; }; @@ -167,7 +167,7 @@ static void index_compaction(const std::string& path, }) } -TEST_F(IndexCompactionTest, test_compaction_exception) { +TEST_F(IndexCompactionWriteIndexTest, test_compaction_exception) { std::srand(getDaySeed()); std::string name = "field_name"; diff --git a/be/test/olap/rowset/segment_v2/inverted_index/data/data1.csv b/be/test/olap/rowset/segment_v2/inverted_index/data/data1.csv new file mode 100644 index 00000000000000..fa4e2129584999 --- /dev/null +++ b/be/test/olap/rowset/segment_v2/inverted_index/data/data1.csv @@ -0,0 +1,1000 @@ +89,fine,https://musicstream.com,97 +44,good,https://yourblog.net,74 +72,ok,https://musicstream.com,80 +14,yes,https://shoponline.com,50 +47,maybe,https://github.com,16 +97,maybe,http://forum.com,88 +17,yes,https://musicstream.com,13 +87,good,https://musicstream.com,53 +6,excellent,http://forum.com,49 +4,terrible,https://yourblog.net,58 +56,bad,https://google.com,10 +5,no,http://example.com,93 +30,bad,http://news.com,28 +30,maybe,https://musicstream.com,13 +83,bad,https://yourblog.net,48 +41,great,https://videosite.com,73 +10,terrible,http://news.com,6 +80,no,https://shoponline.com,21 +92,fine,https://github.com,20 +71,terrible,http://forum.com,8 +84,good,http://mysite.org,77 +25,no,http://example.com,71 +52,maybe,http://mysite.org,0 +99,great,http://news.com,74 +59,ok,http://mysite.org,46 +47,ok,http://mysite.org,26 +77,bad,https://musicstream.com,73 +53,excellent,https://github.com,97 +97,ok,http://example.com,69 +88,great,https://musicstream.com,47 +31,fine,https://videosite.com,1 +66,great,https://google.com,67 +16,yes,http://forum.com,43 +31,yes,http://news.com,49 +35,good,https://github.com,48 +11,terrible,https://google.com,56 +53,maybe,http://mysite.org,87 +16,bad,https://google.com,77 +55,terrible,http://news.com,33 +51,yes,https://google.com,68 +71,good,https://yourblog.net,71 +32,great,https://github.com,37 +22,maybe,http://example.com,16 +36,ok,http://forum.com,52 +97,good,https://yourblog.net,100 +54,no,https://videosite.com,90 +38,terrible,https://shoponline.com,17 +81,excellent,https://yourblog.net,11 +66,great,http://mysite.org,4 +27,fine,https://shoponline.com,88 +48,good,https://musicstream.com,16 +78,good,https://shoponline.com,63 
+34,bad,https://shoponline.com,77 +19,yes,https://shoponline.com,98 +39,terrible,https://github.com,86 +83,excellent,http://news.com,63 +38,no,https://google.com,64 +43,terrible,https://yourblog.net,30 +30,fine,https://videosite.com,72 +73,great,http://mysite.org,31 +96,good,http://example.com,16 +25,yes,http://mysite.org,80 +98,fine,https://shoponline.com,46 +60,fine,https://musicstream.com,66 +90,yes,http://news.com,80 +41,excellent,https://github.com,56 +86,no,http://forum.com,31 +81,ok,http://forum.com,71 +42,bad,https://musicstream.com,97 +90,terrible,https://videosite.com,57 +22,yes,http://forum.com,15 +34,excellent,https://shoponline.com,77 +30,no,https://yourblog.net,85 +51,ok,http://news.com,52 +28,terrible,http://example.com,37 +23,yes,http://forum.com,83 +42,no,http://forum.com,15 +83,maybe,http://forum.com,10 +59,ok,https://github.com,100 +83,maybe,http://mysite.org,78 +4,good,https://videosite.com,68 +63,good,http://forum.com,71 +61,ok,http://mysite.org,66 +83,great,http://example.com,75 +82,no,https://google.com,95 +71,terrible,https://musicstream.com,19 +42,bad,http://mysite.org,66 +14,great,https://google.com,83 +4,yes,https://yourblog.net,84 +49,great,https://github.com,2 +2,yes,http://mysite.org,84 +33,excellent,http://forum.com,78 +5,good,http://example.com,53 +5,great,https://yourblog.net,86 +80,yes,http://mysite.org,85 +8,great,https://shoponline.com,84 +26,no,https://github.com,56 +82,fine,https://shoponline.com,39 +0,bad,https://videosite.com,49 +30,terrible,https://videosite.com,41 +76,maybe,http://news.com,60 +44,maybe,http://example.com,33 +9,terrible,http://forum.com,93 +25,ok,https://yourblog.net,62 +45,great,https://google.com,17 +5,terrible,http://news.com,95 +49,good,http://example.com,97 +29,great,https://shoponline.com,69 +73,good,https://google.com,23 +94,yes,http://forum.com,25 +58,bad,https://videosite.com,32 +98,excellent,https://musicstream.com,36 +25,good,http://example.com,56 +14,ok,https://yourblog.net,34 +30,fine,http://example.com,38 +47,yes,http://mysite.org,71 +20,fine,http://news.com,1 +89,bad,https://google.com,50 +8,fine,https://google.com,64 +21,maybe,https://musicstream.com,11 +12,fine,https://videosite.com,48 +27,maybe,https://musicstream.com,2 +76,ok,https://musicstream.com,30 +38,excellent,https://videosite.com,54 +57,terrible,https://yourblog.net,22 +21,good,https://google.com,39 +32,good,https://yourblog.net,68 +47,terrible,https://google.com,49 +90,good,http://mysite.org,21 +5,yes,http://news.com,33 +49,no,http://news.com,54 +76,maybe,http://news.com,16 +24,maybe,http://forum.com,17 +77,good,http://example.com,92 +28,excellent,http://example.com,27 +27,no,https://github.com,52 +55,ok,http://forum.com,97 +85,bad,https://shoponline.com,43 +24,terrible,http://news.com,39 +30,excellent,http://mysite.org,72 +70,maybe,http://mysite.org,76 +14,good,https://videosite.com,32 +73,no,http://news.com,25 +29,terrible,http://mysite.org,45 +52,great,http://news.com,11 +26,yes,http://forum.com,17 +30,excellent,http://news.com,88 +95,great,https://musicstream.com,63 +33,great,https://github.com,70 +26,great,https://github.com,17 +76,great,https://shoponline.com,94 +93,bad,https://google.com,29 +48,no,https://yourblog.net,93 +42,yes,https://videosite.com,71 +4,fine,https://github.com,62 +34,maybe,http://forum.com,78 +36,yes,https://google.com,39 +48,bad,https://shoponline.com,17 +84,no,https://videosite.com,11 +87,ok,https://musicstream.com,51 +7,ok,https://google.com,73 +81,bad,https://github.com,91 +92,ok,https://google.com,82 
+15,terrible,http://forum.com,64 +6,excellent,https://google.com,70 +35,bad,http://example.com,48 +96,maybe,https://shoponline.com,96 +25,fine,http://news.com,43 +9,no,http://mysite.org,14 +66,yes,https://videosite.com,87 +76,maybe,https://musicstream.com,13 +51,good,http://forum.com,1 +31,great,http://news.com,51 +72,fine,http://news.com,70 +63,no,http://forum.com,5 +73,great,https://yourblog.net,81 +13,terrible,https://github.com,27 +98,excellent,https://videosite.com,29 +71,good,http://news.com,68 +89,no,https://musicstream.com,60 +68,excellent,http://news.com,54 +76,good,https://google.com,79 +69,good,http://mysite.org,99 +35,good,http://news.com,70 +97,yes,https://yourblog.net,63 +44,ok,https://yourblog.net,4 +73,terrible,http://news.com,36 +37,yes,https://github.com,61 +26,fine,https://videosite.com,41 +37,excellent,https://musicstream.com,9 +18,yes,https://github.com,81 +54,excellent,https://shoponline.com,52 +73,great,https://yourblog.net,67 +19,bad,http://example.com,86 +1,terrible,http://mysite.org,16 +62,fine,https://musicstream.com,64 +61,excellent,https://shoponline.com,93 +38,bad,https://videosite.com,61 +55,great,http://news.com,35 +47,great,https://yourblog.net,42 +36,good,https://yourblog.net,40 +26,ok,http://example.com,58 +40,great,https://github.com,0 +81,fine,https://musicstream.com,92 +50,yes,https://github.com,21 +98,maybe,https://google.com,2 +37,bad,https://videosite.com,90 +39,excellent,https://yourblog.net,31 +81,ok,https://musicstream.com,90 +10,bad,http://news.com,18 +93,good,http://example.com,93 +65,fine,http://news.com,72 +10,yes,https://yourblog.net,77 +5,good,http://forum.com,52 +75,fine,https://github.com,67 +40,excellent,http://example.com,27 +21,good,http://example.com,0 +70,fine,http://forum.com,71 +72,no,https://videosite.com,87 +86,no,http://example.com,63 +5,yes,http://example.com,3 +5,fine,http://example.com,68 +36,excellent,https://videosite.com,77 +3,bad,https://github.com,12 +40,bad,http://example.com,53 +33,maybe,https://shoponline.com,3 +80,maybe,https://musicstream.com,49 +42,ok,https://musicstream.com,53 +17,ok,https://yourblog.net,54 +94,yes,https://musicstream.com,20 +16,great,https://videosite.com,42 +32,great,https://shoponline.com,96 +48,ok,http://mysite.org,34 +30,no,http://forum.com,99 +17,excellent,http://forum.com,57 +73,bad,http://forum.com,55 +24,no,https://videosite.com,97 +99,no,http://forum.com,96 +91,excellent,https://musicstream.com,85 +72,excellent,https://videosite.com,60 +74,maybe,http://mysite.org,1 +65,good,https://videosite.com,16 +58,maybe,http://forum.com,9 +97,good,http://example.com,68 +39,good,http://news.com,29 +30,no,http://forum.com,96 +99,maybe,http://mysite.org,64 +66,terrible,https://yourblog.net,0 +72,good,http://example.com,59 +85,terrible,https://yourblog.net,69 +41,great,https://github.com,97 +95,good,https://yourblog.net,42 +56,good,https://shoponline.com,43 +57,good,http://news.com,9 +68,good,http://example.com,80 +80,yes,https://videosite.com,14 +95,great,https://musicstream.com,43 +2,terrible,http://mysite.org,23 +94,terrible,https://shoponline.com,12 +45,maybe,https://github.com,67 +61,maybe,https://shoponline.com,46 +28,excellent,https://musicstream.com,10 +9,no,http://example.com,36 +75,great,https://google.com,43 +54,maybe,http://news.com,16 +49,bad,https://google.com,62 +33,fine,http://example.com,13 +93,excellent,http://example.com,11 +37,ok,https://videosite.com,67 +92,ok,http://forum.com,33 +1,excellent,https://shoponline.com,100 +87,no,http://mysite.org,0 +89,terrible,http://example.com,11 
+26,yes,https://github.com,79 +44,terrible,https://yourblog.net,98 +69,excellent,http://news.com,71 +76,excellent,https://yourblog.net,26 +19,fine,https://google.com,16 +71,great,http://mysite.org,100 +99,no,http://forum.com,20 +98,fine,https://google.com,53 +48,fine,http://news.com,42 +29,maybe,http://news.com,29 +6,terrible,https://videosite.com,40 +77,great,http://forum.com,13 +34,terrible,https://yourblog.net,86 +46,terrible,https://musicstream.com,86 +80,great,https://videosite.com,31 +33,fine,https://google.com,80 +49,excellent,http://example.com,70 +80,ok,http://news.com,49 +72,yes,https://musicstream.com,78 +95,ok,https://videosite.com,4 +70,no,https://github.com,100 +1,yes,https://musicstream.com,35 +26,no,http://mysite.org,45 +72,good,https://videosite.com,28 +23,bad,http://mysite.org,56 +4,great,http://example.com,56 +15,great,https://github.com,52 +89,good,http://example.com,77 +85,ok,http://forum.com,86 +50,fine,http://mysite.org,7 +74,bad,https://videosite.com,78 +1,no,https://github.com,43 +71,maybe,https://github.com,11 +79,no,https://musicstream.com,57 +90,maybe,https://github.com,68 +19,bad,https://github.com,53 +53,good,https://google.com,59 +5,fine,http://mysite.org,67 +34,maybe,http://example.com,76 +40,terrible,https://google.com,50 +99,excellent,http://news.com,9 +77,bad,https://google.com,90 +36,bad,https://videosite.com,75 +85,excellent,https://github.com,40 +8,good,http://mysite.org,17 +43,good,http://mysite.org,79 +15,yes,https://musicstream.com,86 +22,maybe,https://yourblog.net,88 +86,yes,https://github.com,65 +18,terrible,http://news.com,48 +4,terrible,https://yourblog.net,85 +100,fine,https://yourblog.net,3 +41,good,http://forum.com,65 +64,terrible,https://github.com,34 +92,bad,https://google.com,21 +87,excellent,http://mysite.org,37 +30,excellent,https://github.com,14 +39,good,http://example.com,44 +68,yes,https://videosite.com,38 +31,maybe,https://yourblog.net,97 +36,bad,https://shoponline.com,20 +69,yes,https://shoponline.com,94 +45,good,http://mysite.org,30 +84,bad,http://news.com,73 +71,excellent,https://musicstream.com,26 +46,bad,http://mysite.org,4 +98,fine,https://yourblog.net,51 +17,excellent,https://videosite.com,77 +32,excellent,https://yourblog.net,23 +37,good,http://news.com,98 +60,ok,https://musicstream.com,24 +68,ok,https://videosite.com,44 +96,fine,https://musicstream.com,44 +65,yes,https://github.com,81 +25,terrible,https://musicstream.com,61 +54,terrible,https://shoponline.com,72 +5,fine,https://yourblog.net,93 +27,terrible,http://example.com,3 +30,bad,https://google.com,9 +99,excellent,http://forum.com,6 +31,yes,https://google.com,93 +82,good,https://google.com,56 +38,fine,https://google.com,56 +29,ok,https://yourblog.net,42 +91,no,https://google.com,62 +58,good,http://example.com,80 +75,fine,http://example.com,97 +59,maybe,https://google.com,13 +23,ok,http://mysite.org,38 +50,great,https://shoponline.com,43 +0,no,http://forum.com,98 +4,no,https://github.com,47 +20,fine,https://shoponline.com,52 +38,fine,http://example.com,21 +43,bad,http://mysite.org,15 +39,great,https://google.com,65 +92,yes,https://videosite.com,10 +35,terrible,http://mysite.org,53 +58,terrible,https://videosite.com,92 +56,yes,https://musicstream.com,49 +30,ok,https://shoponline.com,45 +24,excellent,https://github.com,68 +33,bad,http://forum.com,3 +87,no,http://forum.com,88 +31,fine,http://mysite.org,32 +40,yes,http://mysite.org,44 +17,yes,http://mysite.org,53 +4,fine,https://shoponline.com,31 +89,bad,https://musicstream.com,86 +24,excellent,https://github.com,97 
+58,no,https://github.com,49 +26,ok,http://example.com,62 +59,great,https://shoponline.com,72 +22,yes,https://videosite.com,82 +27,great,http://example.com,24 +1,terrible,https://github.com,25 +2,bad,https://videosite.com,94 +39,good,http://news.com,16 +90,no,http://news.com,25 +6,no,https://yourblog.net,68 +73,ok,http://mysite.org,27 +68,no,https://google.com,64 +26,maybe,http://forum.com,63 +32,excellent,https://musicstream.com,58 +63,fine,https://videosite.com,35 +94,great,https://videosite.com,6 +8,ok,https://yourblog.net,16 +50,ok,https://videosite.com,46 +15,terrible,http://mysite.org,6 +53,excellent,https://github.com,30 +83,maybe,https://github.com,62 +78,good,https://shoponline.com,10 +96,fine,http://news.com,7 +73,ok,https://google.com,90 +95,great,https://shoponline.com,45 +61,excellent,https://github.com,26 +80,no,https://google.com,89 +80,great,http://forum.com,87 +12,excellent,https://musicstream.com,35 +59,great,https://musicstream.com,60 +27,ok,http://example.com,94 +68,maybe,https://google.com,25 +19,terrible,https://yourblog.net,77 +59,yes,http://mysite.org,67 +65,great,https://musicstream.com,70 +38,good,https://google.com,63 +2,no,http://example.com,40 +7,great,http://forum.com,32 +93,great,https://yourblog.net,47 +66,excellent,http://mysite.org,89 +28,excellent,https://shoponline.com,42 +28,no,https://google.com,71 +83,terrible,https://videosite.com,82 +33,good,https://musicstream.com,87 +24,terrible,http://news.com,30 +87,yes,http://example.com,25 +0,ok,http://example.com,87 +93,bad,https://musicstream.com,24 +16,yes,http://mysite.org,63 +81,yes,http://example.com,0 +85,yes,https://videosite.com,96 +62,bad,http://mysite.org,86 +43,great,https://shoponline.com,56 +62,no,https://musicstream.com,45 +35,terrible,http://forum.com,66 +65,ok,https://videosite.com,95 +70,excellent,https://yourblog.net,13 +50,bad,https://google.com,94 +54,no,http://forum.com,44 +1,bad,https://videosite.com,4 +82,yes,https://yourblog.net,9 +96,maybe,https://videosite.com,3 +87,excellent,https://musicstream.com,14 +99,fine,https://google.com,36 +38,bad,http://example.com,71 +21,terrible,http://mysite.org,91 +23,ok,https://yourblog.net,17 +88,good,http://news.com,23 +83,great,https://musicstream.com,68 +25,good,http://news.com,93 +60,fine,https://google.com,89 +45,excellent,http://example.com,21 +59,bad,https://google.com,38 +22,bad,https://shoponline.com,11 +54,maybe,http://example.com,97 +18,great,http://forum.com,35 +37,ok,https://google.com,11 +59,no,http://news.com,68 +5,great,http://example.com,12 +32,yes,https://yourblog.net,15 +91,fine,http://forum.com,12 +40,no,http://news.com,40 +87,terrible,https://videosite.com,28 +15,no,https://videosite.com,52 +63,yes,http://forum.com,39 +81,terrible,http://news.com,36 +53,good,https://google.com,45 +12,excellent,https://shoponline.com,42 +20,ok,http://news.com,78 +65,excellent,http://forum.com,65 +64,maybe,https://shoponline.com,100 +73,no,https://yourblog.net,86 +44,great,http://example.com,58 +7,good,https://yourblog.net,21 +10,good,https://google.com,33 +11,no,https://musicstream.com,24 +59,no,https://github.com,12 +35,yes,http://news.com,65 +78,good,http://example.com,97 +22,no,https://google.com,2 +64,excellent,http://mysite.org,17 +18,ok,https://shoponline.com,21 +91,fine,https://musicstream.com,50 +92,great,http://news.com,9 +13,terrible,https://github.com,17 +13,excellent,https://yourblog.net,43 +93,no,http://mysite.org,80 +90,fine,http://forum.com,59 +33,great,https://videosite.com,54 +94,bad,http://news.com,87 
+52,maybe,https://videosite.com,63 +55,terrible,http://forum.com,4 +12,yes,https://videosite.com,5 +50,terrible,http://forum.com,35 +42,bad,https://google.com,77 +69,great,https://shoponline.com,57 +74,maybe,https://google.com,66 +52,ok,http://example.com,86 +18,yes,https://google.com,7 +7,fine,http://forum.com,94 +11,great,https://google.com,69 +76,excellent,https://shoponline.com,79 +20,excellent,https://yourblog.net,98 +60,no,https://videosite.com,21 +52,no,http://forum.com,33 +52,excellent,http://forum.com,57 +48,ok,https://github.com,33 +32,excellent,https://musicstream.com,74 +39,ok,https://yourblog.net,88 +98,excellent,http://example.com,0 +3,excellent,http://example.com,97 +76,excellent,http://mysite.org,16 +44,good,https://videosite.com,45 +51,great,http://mysite.org,96 +44,ok,http://news.com,62 +26,excellent,http://news.com,54 +69,fine,https://shoponline.com,12 +16,excellent,http://forum.com,24 +36,bad,https://musicstream.com,54 +12,good,http://news.com,58 +72,bad,https://videosite.com,87 +2,yes,http://mysite.org,62 +38,yes,https://shoponline.com,29 +31,good,https://yourblog.net,36 +43,bad,https://google.com,34 +72,fine,https://shoponline.com,96 +8,fine,https://yourblog.net,94 +29,ok,https://github.com,90 +53,ok,http://mysite.org,12 +12,excellent,http://forum.com,41 +25,terrible,http://mysite.org,19 +97,bad,https://yourblog.net,32 +2,good,http://mysite.org,3 +96,bad,https://github.com,35 +67,yes,http://forum.com,15 +37,fine,https://google.com,74 +57,no,https://google.com,62 +50,excellent,http://forum.com,21 +40,terrible,https://google.com,29 +68,good,https://shoponline.com,63 +51,maybe,http://news.com,35 +0,great,http://example.com,96 +71,great,http://mysite.org,46 +33,yes,http://news.com,23 +51,great,https://videosite.com,21 +16,excellent,http://example.com,69 +88,no,https://github.com,73 +7,maybe,https://github.com,76 +33,terrible,https://musicstream.com,39 +92,great,https://yourblog.net,47 +57,maybe,https://videosite.com,32 +37,ok,https://videosite.com,20 +12,maybe,https://musicstream.com,84 +72,bad,https://google.com,44 +35,ok,http://mysite.org,4 +17,yes,http://mysite.org,22 +62,excellent,http://news.com,35 +33,bad,https://yourblog.net,66 +45,excellent,https://shoponline.com,38 +99,fine,https://yourblog.net,93 +83,terrible,https://videosite.com,55 +61,fine,https://videosite.com,56 +91,yes,http://forum.com,72 +91,yes,http://news.com,15 +89,fine,http://example.com,17 +32,ok,http://example.com,86 +50,bad,http://mysite.org,65 +12,fine,https://musicstream.com,30 +45,no,http://news.com,75 +42,terrible,https://shoponline.com,95 +48,excellent,http://mysite.org,50 +30,terrible,https://yourblog.net,34 +54,good,http://forum.com,58 +2,excellent,http://mysite.org,89 +15,no,http://news.com,61 +88,fine,https://google.com,76 +96,no,https://musicstream.com,26 +98,fine,https://yourblog.net,18 +52,maybe,https://videosite.com,17 +34,great,https://yourblog.net,20 +80,ok,https://shoponline.com,70 +13,ok,http://news.com,17 +2,terrible,http://forum.com,3 +86,good,http://news.com,68 +51,bad,http://example.com,7 +29,maybe,https://videosite.com,85 +5,no,http://mysite.org,53 +90,ok,http://forum.com,33 +80,fine,https://google.com,62 +52,yes,http://forum.com,2 +38,yes,http://news.com,90 +96,ok,https://videosite.com,9 +13,ok,https://github.com,98 +18,maybe,http://example.com,59 +22,yes,https://videosite.com,40 +36,great,http://example.com,24 +66,terrible,https://shoponline.com,46 +99,maybe,http://example.com,92 +85,good,http://forum.com,96 +14,fine,https://google.com,68 +7,good,http://news.com,75 
+37,no,https://shoponline.com,47 +86,excellent,https://yourblog.net,100 +70,no,http://mysite.org,7 +40,yes,http://mysite.org,36 +92,good,https://shoponline.com,92 +79,excellent,http://example.com,80 +58,no,https://yourblog.net,99 +28,yes,https://yourblog.net,36 +81,great,http://mysite.org,36 +14,excellent,https://musicstream.com,23 +18,no,https://musicstream.com,54 +17,great,https://musicstream.com,3 +21,great,https://shoponline.com,15 +10,terrible,https://musicstream.com,68 +94,terrible,http://example.com,13 +2,good,https://google.com,55 +76,yes,https://musicstream.com,84 +10,maybe,https://google.com,99 +97,yes,http://forum.com,42 +63,excellent,https://videosite.com,11 +13,ok,http://example.com,87 +76,great,http://news.com,40 +75,good,https://yourblog.net,98 +35,great,https://shoponline.com,94 +71,maybe,http://mysite.org,6 +34,no,http://forum.com,59 +29,fine,http://example.com,4 +1,yes,http://mysite.org,27 +74,maybe,https://shoponline.com,14 +26,great,http://news.com,84 +30,no,https://videosite.com,1 +21,excellent,https://google.com,81 +74,yes,http://example.com,24 +23,no,https://videosite.com,88 +46,good,https://musicstream.com,77 +62,ok,https://shoponline.com,97 +37,ok,https://yourblog.net,36 +48,excellent,https://yourblog.net,4 +27,excellent,https://videosite.com,33 +24,terrible,https://google.com,17 +1,yes,http://news.com,28 +57,ok,http://example.com,93 +84,good,https://yourblog.net,88 +92,great,https://musicstream.com,61 +64,yes,https://musicstream.com,85 +94,ok,https://yourblog.net,96 +67,ok,http://forum.com,29 +66,great,https://github.com,67 +91,bad,http://example.com,19 +95,great,http://mysite.org,0 +68,ok,http://example.com,67 +3,excellent,https://github.com,49 +61,maybe,https://yourblog.net,100 +19,bad,https://videosite.com,100 +58,excellent,http://forum.com,25 +45,ok,https://yourblog.net,57 +43,maybe,https://shoponline.com,52 +60,terrible,http://example.com,97 +19,bad,https://yourblog.net,76 +26,fine,https://musicstream.com,76 +51,excellent,https://videosite.com,88 +84,fine,http://mysite.org,31 +99,great,https://github.com,30 +60,terrible,http://forum.com,48 +28,ok,https://yourblog.net,58 +23,terrible,https://musicstream.com,37 +41,great,https://shoponline.com,1 +18,ok,https://github.com,6 +39,ok,http://mysite.org,65 +14,terrible,https://google.com,96 +86,fine,http://forum.com,15 +24,yes,https://yourblog.net,1 +86,good,https://github.com,9 +20,good,http://news.com,3 +94,maybe,https://github.com,52 +19,yes,http://forum.com,88 +0,great,https://google.com,37 +5,ok,https://yourblog.net,69 +29,terrible,https://google.com,56 +26,great,https://musicstream.com,89 +64,fine,https://yourblog.net,5 +51,yes,https://yourblog.net,15 +14,excellent,https://google.com,89 +48,ok,https://github.com,60 +77,great,https://musicstream.com,80 +28,bad,https://musicstream.com,46 +8,no,https://videosite.com,32 +48,great,https://google.com,9 +91,ok,https://github.com,12 +82,great,https://videosite.com,2 +99,terrible,http://forum.com,46 +82,maybe,http://forum.com,21 +73,good,http://forum.com,51 +32,bad,https://google.com,30 +52,maybe,http://mysite.org,72 +53,maybe,https://github.com,100 +100,bad,http://example.com,53 +8,ok,http://example.com,28 +62,bad,http://example.com,56 +61,yes,https://shoponline.com,17 +44,bad,http://mysite.org,32 +54,good,https://shoponline.com,16 +4,bad,http://forum.com,75 +65,excellent,http://news.com,4 +72,yes,https://google.com,25 +68,ok,https://shoponline.com,72 +81,excellent,https://yourblog.net,19 +99,good,https://shoponline.com,9 +21,ok,http://news.com,86 
+14,maybe,https://videosite.com,83 +63,bad,http://example.com,20 +14,bad,http://example.com,66 +9,no,https://shoponline.com,61 +53,fine,https://shoponline.com,6 +6,no,https://github.com,86 +69,good,https://videosite.com,23 +59,fine,https://musicstream.com,30 +35,maybe,https://github.com,44 +87,fine,https://musicstream.com,17 +55,excellent,https://musicstream.com,82 +22,fine,https://yourblog.net,88 +65,no,https://musicstream.com,22 +90,great,https://google.com,14 +49,good,https://github.com,77 +84,great,https://musicstream.com,10 +31,great,https://github.com,82 +48,terrible,http://news.com,74 +12,fine,http://mysite.org,74 +13,great,http://forum.com,28 +92,no,https://videosite.com,81 +6,no,https://videosite.com,100 +0,good,http://news.com,73 +33,good,https://google.com,38 +51,terrible,https://shoponline.com,17 +72,terrible,https://yourblog.net,52 +47,bad,https://shoponline.com,59 +41,ok,https://yourblog.net,98 +68,great,https://videosite.com,94 +77,good,http://news.com,87 +39,ok,http://example.com,89 +47,excellent,http://example.com,56 +31,bad,http://mysite.org,56 +53,fine,https://shoponline.com,18 +8,yes,https://github.com,63 +99,yes,http://example.com,70 +7,ok,http://example.com,85 +35,bad,https://shoponline.com,14 +73,terrible,https://yourblog.net,84 +26,no,https://musicstream.com,84 +43,bad,https://videosite.com,44 +10,great,https://google.com,48 +53,terrible,https://github.com,55 +58,good,http://forum.com,14 +27,fine,http://news.com,67 +73,ok,http://example.com,9 +28,ok,https://google.com,84 +78,maybe,https://yourblog.net,59 +79,great,https://shoponline.com,64 +16,ok,https://yourblog.net,67 +75,yes,https://musicstream.com,3 +33,good,https://shoponline.com,12 +78,great,https://yourblog.net,30 +90,yes,https://videosite.com,17 +29,ok,http://example.com,40 +94,good,http://example.com,88 +30,maybe,https://videosite.com,93 +57,no,https://google.com,35 +50,no,https://yourblog.net,55 +65,maybe,https://videosite.com,47 +85,maybe,https://google.com,74 +5,maybe,https://yourblog.net,4 +16,terrible,https://google.com,50 +69,yes,https://musicstream.com,45 +47,terrible,http://example.com,4 +27,ok,http://example.com,60 +17,yes,http://example.com,43 +8,yes,https://musicstream.com,98 +75,no,https://github.com,78 +41,great,https://google.com,34 +88,bad,http://example.com,78 +94,good,http://news.com,41 +26,fine,http://forum.com,37 +23,excellent,https://shoponline.com,72 +76,fine,https://yourblog.net,97 +72,terrible,https://shoponline.com,56 +57,fine,https://videosite.com,80 +42,excellent,https://musicstream.com,40 +96,fine,https://shoponline.com,69 +13,fine,https://yourblog.net,22 +2,fine,https://shoponline.com,43 +39,excellent,https://yourblog.net,41 +18,excellent,http://example.com,55 +14,no,https://google.com,52 +30,no,https://yourblog.net,44 +42,good,https://videosite.com,37 +98,yes,https://videosite.com,44 +77,ok,https://musicstream.com,48 +39,ok,https://yourblog.net,41 +60,yes,http://news.com,41 +30,good,https://musicstream.com,61 +33,fine,https://github.com,98 +25,fine,https://videosite.com,45 +43,yes,http://mysite.org,29 +69,maybe,https://musicstream.com,9 +56,bad,https://github.com,83 +29,ok,https://yourblog.net,43 +57,no,https://musicstream.com,99 +72,bad,http://mysite.org,33 +0,terrible,https://videosite.com,70 +43,ok,https://shoponline.com,79 +97,terrible,http://example.com,2 +45,bad,https://musicstream.com,98 +52,no,https://google.com,11 +69,bad,https://videosite.com,14 +52,yes,https://google.com,17 +89,fine,https://github.com,36 +6,yes,https://videosite.com,67 
+72,excellent,http://mysite.org,68 +0,maybe,https://google.com,24 +95,yes,https://shoponline.com,62 +40,maybe,https://yourblog.net,14 +41,no,https://videosite.com,87 +58,fine,https://yourblog.net,43 +27,no,http://news.com,49 +46,great,http://example.com,1 +91,maybe,http://news.com,58 +34,no,https://github.com,19 +63,no,http://example.com,71 +100,good,https://musicstream.com,94 +81,yes,https://shoponline.com,46 +95,great,http://forum.com,5 +84,yes,https://shoponline.com,45 +72,excellent,https://github.com,73 +40,no,https://videosite.com,6 +6,excellent,http://news.com,10 +62,great,https://musicstream.com,82 +76,terrible,http://forum.com,4 +13,no,http://mysite.org,86 +36,great,http://example.com,83 +67,maybe,https://yourblog.net,43 +22,excellent,http://example.com,35 +18,great,https://google.com,15 +65,terrible,https://github.com,30 +44,fine,http://forum.com,18 +3,excellent,http://forum.com,37 +10,bad,http://forum.com,65 +17,ok,https://shoponline.com,66 +3,maybe,https://shoponline.com,13 +51,maybe,https://yourblog.net,28 +42,bad,http://news.com,23 +70,excellent,https://yourblog.net,91 +94,bad,https://videosite.com,20 +5,terrible,http://example.com,85 +21,excellent,https://yourblog.net,66 +71,fine,https://yourblog.net,59 +33,terrible,http://news.com,16 +36,bad,https://shoponline.com,86 +45,terrible,https://github.com,94 +62,excellent,https://github.com,42 +80,terrible,http://mysite.org,49 +81,bad,http://news.com,41 +68,bad,http://forum.com,54 +80,bad,https://shoponline.com,15 +63,good,https://videosite.com,34 +67,ok,http://example.com,59 +40,terrible,https://google.com,55 +34,bad,https://yourblog.net,49 +58,no,https://yourblog.net,37 +70,terrible,https://yourblog.net,19 +90,great,http://example.com,78 +5,maybe,https://github.com,96 +34,bad,http://example.com,100 +21,ok,http://news.com,32 +0,no,https://musicstream.com,92 +44,yes,https://musicstream.com,27 +71,bad,https://videosite.com,79 +6,terrible,http://mysite.org,46 +72,excellent,https://musicstream.com,50 +20,maybe,https://google.com,37 +56,great,http://mysite.org,22 +35,maybe,https://shoponline.com,18 +40,terrible,http://mysite.org,33 +40,yes,http://forum.com,39 +95,excellent,https://google.com,58 +31,great,https://musicstream.com,40 +45,good,http://forum.com,46 +59,excellent,http://example.com,44 +11,no,https://videosite.com,33 +78,terrible,https://google.com,92 +70,terrible,https://videosite.com,18 +38,excellent,https://videosite.com,97 +21,yes,http://forum.com,22 +30,fine,http://example.com,46 +64,fine,https://github.com,83 +22,no,https://shoponline.com,92 +14,good,http://forum.com,90 +93,maybe,https://google.com,22 +68,no,https://videosite.com,79 +13,maybe,https://videosite.com,47 +98,great,http://news.com,16 +3,great,https://videosite.com,72 +90,good,https://yourblog.net,78 +85,terrible,http://forum.com,92 +1,fine,https://videosite.com,74 +90,yes,http://forum.com,90 +86,yes,http://example.com,31 +40,terrible,https://github.com,84 +71,yes,https://shoponline.com,69 +4,terrible,https://shoponline.com,15 +74,excellent,http://news.com,97 +97,great,https://github.com,79 +67,great,https://videosite.com,76 +94,bad,https://github.com,51 +26,good,https://github.com,33 +52,fine,https://musicstream.com,17 +43,yes,https://github.com,12 +71,excellent,https://shoponline.com,53 +51,no,http://mysite.org,75 +46,excellent,https://shoponline.com,84 +84,excellent,https://videosite.com,79 +33,maybe,http://mysite.org,45 +17,yes,https://yourblog.net,71 +0,maybe,https://shoponline.com,32 +77,no,https://google.com,24 +5,fine,http://news.com,51 
+20,great,https://shoponline.com,58 +26,fine,https://google.com,61 +11,terrible,http://forum.com,85 +14,fine,https://github.com,84 +54,ok,https://google.com,60 +7,excellent,https://google.com,47 +53,no,https://shoponline.com,38 +56,good,https://yourblog.net,35 +60,bad,https://shoponline.com,63 +62,great,https://videosite.com,21 +58,maybe,https://yourblog.net,19 +59,good,https://yourblog.net,11 +40,bad,https://github.com,61 +4,yes,http://example.com,81 +70,great,https://yourblog.net,6 +53,great,https://yourblog.net,37 +50,no,http://example.com,72 +97,ok,https://google.com,16 +83,fine,https://yourblog.net,84 +44,maybe,http://forum.com,38 +77,yes,https://github.com,55 +1,no,https://musicstream.com,22 +35,no,https://google.com,99 +88,excellent,https://yourblog.net,45 +62,ok,http://news.com,16 +51,fine,http://news.com,93 +17,fine,https://musicstream.com,44 +44,maybe,https://videosite.com,32 +6,terrible,https://videosite.com,39 +79,terrible,http://example.com,89 +41,great,https://shoponline.com,95 +63,yes,http://forum.com,92 +34,yes,http://example.com,28 +35,excellent,https://musicstream.com,25 +95,bad,https://shoponline.com,22 +47,maybe,http://example.com,50 +5,terrible,https://musicstream.com,6 +37,fine,http://forum.com,92 +94,good,https://github.com,33 +14,no,http://example.com,95 +63,excellent,http://forum.com,36 +24,fine,https://videosite.com,98 +7,fine,https://yourblog.net,71 +48,good,http://mysite.org,34 +32,fine,https://yourblog.net,31 +48,great,http://news.com,14 +35,no,http://example.com,77 +72,fine,https://google.com,37 +55,excellent,https://google.com,46 +63,great,http://mysite.org,60 +1,excellent,https://github.com,44 +37,fine,https://shoponline.com,72 +97,no,https://yourblog.net,88 +76,excellent,http://mysite.org,11 +88,ok,https://github.com,92 +83,no,http://news.com,44 +44,fine,https://videosite.com,64 +66,fine,https://google.com,87 +31,ok,http://mysite.org,64 +20,good,https://yourblog.net,69 +68,fine,http://forum.com,63 +41,terrible,http://example.com,47 +81,maybe,https://shoponline.com,33 +60,maybe,http://mysite.org,59 +26,good,http://news.com,87 +1,excellent,https://google.com,84 +79,ok,http://example.com,56 +72,excellent,http://mysite.org,64 +0,good,https://videosite.com,1 +97,no,http://news.com,91 +34,terrible,http://forum.com,36 diff --git a/be/test/olap/rowset/segment_v2/inverted_index/data/data2.csv b/be/test/olap/rowset/segment_v2/inverted_index/data/data2.csv new file mode 100644 index 00000000000000..0c8a15bafdd575 --- /dev/null +++ b/be/test/olap/rowset/segment_v2/inverted_index/data/data2.csv @@ -0,0 +1,1000 @@ +37,no,http://example.com,34 +59,ok,https://shoponline.com,9 +44,ok,http://news.com,40 +64,no,https://github.com,37 +61,no,https://musicstream.com,42 +68,great,http://mysite.org,49 +23,good,http://example.com,90 +49,fine,https://yourblog.net,23 +34,excellent,https://musicstream.com,69 +23,maybe,https://yourblog.net,15 +19,great,http://example.com,61 +68,excellent,https://github.com,78 +65,yes,https://shoponline.com,67 +84,excellent,https://videosite.com,8 +45,terrible,http://news.com,96 +13,bad,https://videosite.com,100 +8,great,http://mysite.org,66 +15,fine,http://news.com,18 +35,bad,https://videosite.com,27 +42,bad,https://videosite.com,0 +100,fine,http://forum.com,89 +94,yes,https://shoponline.com,28 +26,good,https://google.com,6 +55,yes,https://google.com,0 +66,excellent,https://google.com,30 +78,terrible,https://github.com,57 +31,excellent,http://news.com,61 +86,fine,http://mysite.org,30 +8,maybe,https://shoponline.com,15 +43,yes,http://mysite.org,57 
+79,bad,http://forum.com,26 +28,no,http://mysite.org,64 +29,good,http://mysite.org,25 +17,excellent,https://shoponline.com,87 +72,good,http://forum.com,33 +89,excellent,https://google.com,100 +7,bad,https://google.com,22 +33,excellent,https://github.com,23 +42,fine,https://shoponline.com,79 +96,fine,https://github.com,94 +91,maybe,http://news.com,61 +53,good,http://forum.com,12 +96,yes,http://news.com,33 +20,bad,https://shoponline.com,9 +86,fine,https://musicstream.com,48 +76,maybe,https://google.com,38 +33,yes,https://videosite.com,35 +73,ok,https://videosite.com,40 +41,no,http://news.com,96 +15,bad,http://example.com,44 +18,excellent,https://shoponline.com,11 +46,excellent,https://yourblog.net,71 +87,bad,http://news.com,37 +50,yes,http://news.com,94 +80,excellent,https://musicstream.com,91 +95,maybe,https://google.com,45 +48,terrible,https://github.com,58 +91,ok,http://news.com,90 +42,yes,http://mysite.org,45 +27,excellent,https://github.com,50 +11,ok,https://github.com,61 +61,excellent,https://shoponline.com,54 +1,bad,http://mysite.org,20 +9,yes,http://example.com,30 +81,good,http://news.com,47 +24,ok,https://yourblog.net,52 +1,great,https://yourblog.net,34 +78,ok,http://mysite.org,51 +45,fine,http://news.com,46 +46,excellent,http://mysite.org,63 +18,bad,https://musicstream.com,9 +88,fine,https://github.com,65 +45,terrible,https://videosite.com,36 +79,no,http://forum.com,47 +59,great,https://videosite.com,20 +9,good,http://example.com,38 +17,maybe,https://yourblog.net,55 +52,terrible,http://forum.com,99 +32,maybe,https://videosite.com,12 +81,great,http://example.com,10 +27,yes,https://shoponline.com,5 +29,maybe,https://shoponline.com,32 +80,maybe,https://musicstream.com,4 +15,yes,http://forum.com,26 +58,maybe,https://github.com,39 +13,no,https://shoponline.com,13 +76,great,http://example.com,97 +15,excellent,https://yourblog.net,6 +34,good,http://mysite.org,57 +79,no,https://musicstream.com,85 +49,terrible,https://github.com,90 +92,great,https://google.com,60 +41,great,https://shoponline.com,27 +54,good,http://news.com,2 +24,maybe,http://news.com,57 +3,fine,https://musicstream.com,32 +42,bad,https://shoponline.com,48 +67,good,http://mysite.org,67 +86,ok,https://yourblog.net,6 +19,ok,https://google.com,23 +60,great,http://forum.com,42 +20,good,https://musicstream.com,21 +96,no,http://news.com,94 +4,good,https://videosite.com,85 +4,no,https://github.com,87 +28,bad,https://google.com,21 +85,excellent,https://musicstream.com,78 +80,great,http://mysite.org,18 +22,fine,https://videosite.com,19 +76,maybe,https://github.com,5 +55,bad,https://yourblog.net,49 +50,ok,https://shoponline.com,79 +86,terrible,https://google.com,84 +52,excellent,https://yourblog.net,72 +23,ok,https://musicstream.com,30 +15,excellent,https://videosite.com,23 +1,terrible,https://google.com,23 +59,terrible,http://mysite.org,71 +66,fine,https://musicstream.com,49 +37,bad,http://forum.com,18 +35,ok,https://videosite.com,53 +83,bad,https://google.com,90 +54,excellent,http://news.com,7 +52,fine,http://example.com,71 +51,fine,https://musicstream.com,5 +88,good,http://example.com,5 +0,terrible,https://yourblog.net,91 +72,great,https://videosite.com,34 +8,no,http://forum.com,76 +97,bad,https://yourblog.net,20 +40,fine,https://yourblog.net,76 +29,terrible,http://mysite.org,29 +5,yes,https://shoponline.com,6 +66,maybe,https://musicstream.com,97 +65,fine,https://yourblog.net,70 +3,terrible,http://mysite.org,96 +56,excellent,http://example.com,7 +93,yes,https://google.com,33 +11,terrible,https://github.com,87 
+10,yes,http://forum.com,9 +28,terrible,http://mysite.org,40 +85,bad,https://google.com,71 +47,maybe,https://yourblog.net,45 +56,maybe,https://videosite.com,73 +30,terrible,http://mysite.org,22 +66,maybe,http://mysite.org,17 +34,good,https://google.com,56 +0,excellent,http://example.com,54 +24,yes,https://github.com,94 +50,good,https://videosite.com,70 +39,good,http://example.com,62 +41,terrible,http://forum.com,50 +26,great,http://example.com,79 +100,yes,https://google.com,30 +37,great,https://shoponline.com,45 +25,terrible,http://forum.com,72 +82,bad,https://yourblog.net,66 +62,ok,http://news.com,5 +67,excellent,https://musicstream.com,65 +26,maybe,http://mysite.org,37 +77,yes,http://forum.com,100 +32,terrible,https://musicstream.com,49 +20,great,https://yourblog.net,5 +45,maybe,https://google.com,30 +76,good,http://mysite.org,16 +79,great,https://github.com,91 +40,good,https://yourblog.net,83 +89,no,https://yourblog.net,92 +45,yes,http://mysite.org,23 +47,great,http://mysite.org,41 +80,terrible,https://musicstream.com,24 +49,good,https://shoponline.com,100 +85,terrible,https://yourblog.net,3 +31,great,https://github.com,53 +85,fine,https://videosite.com,53 +85,terrible,http://mysite.org,100 +42,bad,https://github.com,73 +93,no,http://forum.com,32 +64,excellent,http://example.com,92 +79,terrible,https://videosite.com,83 +83,bad,http://mysite.org,15 +41,yes,https://shoponline.com,73 +85,yes,http://forum.com,63 +43,yes,http://news.com,23 +27,bad,https://musicstream.com,57 +87,yes,https://google.com,82 +22,fine,https://musicstream.com,0 +68,good,https://shoponline.com,92 +79,excellent,https://videosite.com,39 +68,good,http://forum.com,18 +77,yes,https://github.com,13 +38,great,https://google.com,56 +91,maybe,https://yourblog.net,7 +59,bad,http://news.com,69 +24,great,http://forum.com,53 +91,great,http://news.com,12 +59,bad,https://musicstream.com,15 +14,excellent,https://musicstream.com,40 +79,maybe,http://mysite.org,96 +4,yes,http://forum.com,4 +15,bad,http://news.com,34 +87,good,http://news.com,79 +21,bad,https://github.com,21 +62,terrible,https://google.com,34 +54,ok,https://google.com,24 +57,no,https://videosite.com,9 +40,good,https://shoponline.com,60 +97,maybe,https://google.com,38 +99,no,https://yourblog.net,64 +24,no,https://shoponline.com,82 +17,fine,http://example.com,46 +55,no,https://google.com,47 +47,fine,http://forum.com,54 +17,yes,https://google.com,44 +28,ok,http://news.com,46 +57,excellent,https://google.com,79 +70,yes,https://google.com,36 +46,excellent,https://shoponline.com,71 +30,terrible,https://videosite.com,16 +55,excellent,https://shoponline.com,69 +90,maybe,https://videosite.com,73 +57,terrible,https://videosite.com,20 +7,ok,http://news.com,23 +53,yes,http://news.com,73 +16,bad,https://github.com,56 +35,ok,https://musicstream.com,97 +82,excellent,https://videosite.com,66 +22,yes,https://videosite.com,59 +70,no,http://mysite.org,57 +32,yes,https://shoponline.com,39 +78,yes,https://github.com,33 +70,ok,http://news.com,0 +38,fine,https://shoponline.com,58 +93,no,https://shoponline.com,34 +51,excellent,https://musicstream.com,11 +77,bad,http://example.com,52 +78,ok,http://forum.com,65 +30,bad,https://videosite.com,20 +41,ok,https://github.com,9 +68,yes,https://videosite.com,17 +88,terrible,http://news.com,99 +15,terrible,https://github.com,55 +38,ok,http://forum.com,78 +8,maybe,http://mysite.org,67 +77,bad,http://forum.com,46 +98,terrible,http://news.com,93 +42,maybe,https://videosite.com,57 +23,yes,https://musicstream.com,51 +79,great,http://mysite.org,27 
+83,fine,http://example.com,8 +4,yes,http://news.com,48 +99,yes,http://forum.com,51 +40,no,https://google.com,59 +5,terrible,http://example.com,80 +33,great,https://yourblog.net,4 +36,maybe,https://videosite.com,36 +66,maybe,https://shoponline.com,30 +46,maybe,https://musicstream.com,30 +36,great,http://news.com,3 +79,no,http://forum.com,31 +77,excellent,https://google.com,30 +66,terrible,https://google.com,19 +12,yes,https://google.com,28 +13,terrible,https://yourblog.net,40 +46,yes,http://example.com,59 +91,terrible,https://videosite.com,84 +99,maybe,https://videosite.com,29 +100,fine,https://yourblog.net,81 +35,no,http://mysite.org,19 +11,no,https://google.com,28 +71,terrible,http://forum.com,98 +91,great,https://musicstream.com,87 +89,bad,https://videosite.com,28 +79,yes,https://musicstream.com,88 +48,fine,https://yourblog.net,4 +26,ok,https://yourblog.net,66 +77,great,https://shoponline.com,8 +80,fine,https://musicstream.com,37 +63,fine,http://forum.com,100 +95,excellent,http://news.com,90 +65,great,http://mysite.org,71 +21,yes,http://mysite.org,76 +84,terrible,https://shoponline.com,100 +38,ok,https://videosite.com,80 +17,maybe,https://shoponline.com,62 +51,bad,http://mysite.org,18 +71,excellent,https://github.com,60 +77,yes,https://musicstream.com,18 +1,bad,https://musicstream.com,55 +48,terrible,https://google.com,80 +15,bad,https://shoponline.com,68 +68,ok,https://videosite.com,7 +54,bad,https://musicstream.com,72 +79,excellent,https://videosite.com,53 +69,maybe,http://mysite.org,52 +66,bad,https://github.com,18 +86,no,https://shoponline.com,27 +30,great,https://google.com,90 +44,maybe,http://forum.com,80 +90,maybe,https://google.com,49 +84,good,https://github.com,18 +51,good,http://forum.com,69 +68,bad,http://mysite.org,17 +50,good,https://github.com,8 +39,great,http://news.com,45 +53,bad,https://shoponline.com,75 +45,yes,https://github.com,59 +70,bad,http://news.com,4 +83,ok,http://forum.com,98 +33,great,https://shoponline.com,48 +83,bad,https://yourblog.net,97 +34,yes,http://example.com,41 +78,yes,https://shoponline.com,70 +49,yes,http://mysite.org,9 +59,maybe,http://mysite.org,88 +75,good,http://example.com,96 +94,fine,http://forum.com,57 +53,good,http://news.com,66 +18,fine,https://musicstream.com,54 +81,yes,http://mysite.org,0 +63,fine,https://musicstream.com,57 +9,good,http://example.com,35 +93,bad,http://news.com,84 +28,no,https://videosite.com,4 +66,ok,http://example.com,42 +12,terrible,https://github.com,20 +95,yes,http://news.com,82 +58,excellent,https://yourblog.net,26 +21,terrible,https://musicstream.com,16 +23,excellent,https://github.com,56 +21,bad,https://github.com,33 +90,excellent,http://forum.com,65 +39,yes,https://google.com,76 +18,good,http://mysite.org,81 +44,good,http://example.com,71 +98,fine,http://news.com,21 +20,ok,http://news.com,62 +25,bad,https://shoponline.com,72 +2,fine,https://yourblog.net,37 +47,terrible,https://videosite.com,3 +18,no,https://yourblog.net,54 +32,bad,http://news.com,58 +22,terrible,https://videosite.com,54 +56,yes,https://shoponline.com,17 +56,no,http://forum.com,96 +5,yes,https://github.com,10 +12,bad,http://example.com,41 +22,no,http://example.com,80 +45,good,https://shoponline.com,35 +42,yes,http://mysite.org,24 +36,ok,https://google.com,80 +28,excellent,http://example.com,4 +5,good,https://shoponline.com,81 +68,great,https://yourblog.net,0 +20,yes,http://example.com,30 +75,good,https://yourblog.net,45 +81,no,http://mysite.org,88 +8,no,https://github.com,78 +41,terrible,https://github.com,59 +10,terrible,https://videosite.com,38 
+42,fine,http://news.com,91 +27,terrible,https://shoponline.com,2 +9,terrible,http://mysite.org,30 +57,yes,http://forum.com,29 +67,excellent,https://github.com,92 +67,great,https://videosite.com,21 +88,bad,https://google.com,63 +86,terrible,https://github.com,30 +16,fine,http://news.com,80 +77,good,https://videosite.com,95 +16,ok,http://mysite.org,20 +8,terrible,http://example.com,34 +71,excellent,https://google.com,59 +31,maybe,https://shoponline.com,74 +57,no,http://mysite.org,21 +73,fine,https://shoponline.com,49 +88,great,http://forum.com,61 +89,fine,http://mysite.org,38 +84,fine,https://yourblog.net,24 +50,excellent,https://videosite.com,6 +82,good,https://github.com,96 +72,bad,https://yourblog.net,18 +55,no,http://example.com,22 +100,excellent,http://example.com,94 +50,maybe,http://mysite.org,14 +88,fine,http://forum.com,13 +56,ok,https://musicstream.com,69 +46,excellent,https://musicstream.com,14 +22,yes,https://github.com,57 +19,maybe,https://google.com,19 +68,excellent,http://example.com,11 +23,yes,https://videosite.com,64 +19,bad,https://shoponline.com,72 +91,yes,https://github.com,41 +99,good,http://mysite.org,91 +39,bad,https://github.com,46 +10,great,http://example.com,82 +72,bad,http://example.com,92 +63,bad,https://videosite.com,14 +33,great,http://example.com,37 +18,great,https://github.com,3 +92,ok,http://example.com,43 +37,excellent,http://mysite.org,92 +31,maybe,https://musicstream.com,24 +34,fine,https://yourblog.net,73 +27,good,http://mysite.org,32 +20,excellent,https://google.com,42 +56,ok,http://example.com,67 +80,fine,https://videosite.com,71 +94,great,http://forum.com,33 +89,no,https://videosite.com,19 +69,no,https://github.com,46 +10,excellent,https://yourblog.net,55 +31,no,https://musicstream.com,47 +76,bad,http://mysite.org,39 +77,excellent,https://yourblog.net,44 +28,maybe,https://google.com,14 +63,ok,http://news.com,69 +98,bad,https://musicstream.com,3 +52,maybe,https://google.com,70 +42,fine,http://mysite.org,90 +45,no,http://forum.com,69 +69,great,https://github.com,67 +41,maybe,http://mysite.org,100 +25,terrible,http://forum.com,55 +88,no,https://videosite.com,50 +27,good,http://example.com,45 +18,excellent,https://videosite.com,22 +52,ok,http://mysite.org,48 +25,bad,https://google.com,58 +42,maybe,https://musicstream.com,55 +17,no,http://forum.com,87 +57,maybe,https://shoponline.com,23 +5,terrible,https://musicstream.com,65 +15,maybe,http://news.com,63 +74,good,https://shoponline.com,45 +60,terrible,http://news.com,91 +37,fine,http://news.com,97 +57,good,https://github.com,6 +28,no,https://yourblog.net,15 +3,excellent,https://musicstream.com,60 +63,bad,https://yourblog.net,94 +10,terrible,http://example.com,31 +93,no,https://google.com,3 +97,yes,https://shoponline.com,30 +40,ok,http://mysite.org,51 +52,great,https://google.com,80 +25,maybe,https://yourblog.net,85 +48,no,https://musicstream.com,43 +39,bad,http://mysite.org,34 +71,no,https://shoponline.com,90 +30,great,https://yourblog.net,64 +28,maybe,https://shoponline.com,84 +82,excellent,http://example.com,27 +15,bad,http://mysite.org,84 +5,good,http://mysite.org,18 +32,good,http://example.com,58 +97,yes,https://google.com,86 +35,bad,https://shoponline.com,30 +4,excellent,https://videosite.com,31 +10,terrible,https://videosite.com,54 +28,terrible,https://musicstream.com,43 +45,excellent,https://musicstream.com,87 +57,great,http://mysite.org,67 +15,excellent,http://forum.com,5 +91,maybe,http://forum.com,78 +29,good,https://shoponline.com,89 +50,ok,http://mysite.org,4 +98,yes,http://example.com,41 
+20,no,https://videosite.com,68 +7,terrible,http://mysite.org,57 +59,yes,http://example.com,26 +87,good,https://github.com,77 +89,fine,http://forum.com,88 +37,no,https://github.com,19 +52,maybe,https://videosite.com,65 +43,maybe,http://example.com,54 +17,excellent,http://forum.com,67 +0,yes,http://example.com,6 +46,terrible,http://news.com,40 +92,great,http://news.com,54 +7,great,https://github.com,66 +94,terrible,https://videosite.com,9 +55,no,https://github.com,8 +99,ok,https://videosite.com,50 +69,fine,https://videosite.com,79 +56,no,https://google.com,87 +53,yes,https://videosite.com,15 +15,ok,http://example.com,59 +87,excellent,http://forum.com,63 +99,fine,https://shoponline.com,33 +59,maybe,https://google.com,90 +38,fine,https://videosite.com,67 +73,terrible,https://shoponline.com,52 +81,terrible,https://github.com,1 +65,excellent,http://mysite.org,74 +60,ok,https://musicstream.com,35 +40,excellent,https://yourblog.net,0 +64,no,http://news.com,83 +74,good,http://news.com,45 +93,no,https://google.com,80 +72,terrible,http://mysite.org,16 +28,excellent,http://mysite.org,26 +81,maybe,http://forum.com,20 +5,yes,https://shoponline.com,68 +61,terrible,https://yourblog.net,52 +90,ok,http://mysite.org,26 +97,no,http://mysite.org,6 +44,terrible,https://google.com,83 +49,no,https://github.com,64 +81,great,http://forum.com,30 +67,good,https://videosite.com,93 +66,ok,https://google.com,66 +20,fine,https://yourblog.net,45 +53,excellent,https://musicstream.com,20 +27,terrible,https://github.com,36 +90,yes,https://videosite.com,67 +72,ok,https://shoponline.com,41 +100,no,https://github.com,39 +59,yes,http://mysite.org,43 +98,good,https://musicstream.com,99 +96,bad,http://forum.com,17 +71,great,https://shoponline.com,50 +80,great,http://example.com,19 +30,excellent,https://github.com,85 +70,bad,http://news.com,61 +6,bad,https://shoponline.com,57 +51,yes,https://shoponline.com,51 +63,no,http://example.com,37 +24,great,http://forum.com,0 +10,yes,http://mysite.org,52 +38,maybe,http://mysite.org,41 +79,maybe,http://example.com,61 +99,fine,http://example.com,25 +76,ok,https://musicstream.com,73 +79,no,http://news.com,97 +32,yes,https://github.com,64 +29,fine,https://shoponline.com,98 +34,excellent,http://forum.com,74 +19,fine,http://example.com,37 +11,great,http://mysite.org,57 +48,great,http://mysite.org,70 +29,yes,http://forum.com,52 +69,no,https://shoponline.com,45 +39,no,https://musicstream.com,59 +42,fine,http://forum.com,50 +10,yes,http://forum.com,95 +1,bad,https://yourblog.net,95 +93,terrible,https://musicstream.com,24 +96,ok,http://example.com,70 +75,ok,https://google.com,45 +60,terrible,https://videosite.com,96 +77,excellent,http://mysite.org,69 +30,fine,https://yourblog.net,77 +13,maybe,https://shoponline.com,90 +25,good,https://github.com,41 +59,no,https://videosite.com,32 +43,excellent,https://github.com,12 +32,maybe,https://google.com,17 +94,maybe,https://shoponline.com,29 +5,great,http://example.com,94 +48,great,http://example.com,59 +98,terrible,https://yourblog.net,71 +9,excellent,https://github.com,85 +52,maybe,http://example.com,44 +25,excellent,https://musicstream.com,1 +88,bad,http://mysite.org,3 +83,bad,https://google.com,78 +86,fine,https://google.com,76 +8,maybe,https://yourblog.net,61 +95,great,https://shoponline.com,39 +86,terrible,https://github.com,31 +48,bad,https://videosite.com,36 +14,fine,https://google.com,61 +27,no,http://news.com,97 +96,ok,http://news.com,88 +38,no,https://musicstream.com,38 +86,no,https://shoponline.com,48 +90,excellent,https://yourblog.net,72 
+68,maybe,http://example.com,0 +100,good,http://news.com,68 +43,good,https://yourblog.net,12 +83,great,http://mysite.org,64 +84,bad,https://yourblog.net,20 +94,yes,https://github.com,15 +33,excellent,https://musicstream.com,53 +65,bad,http://forum.com,9 +25,yes,http://example.com,30 +8,maybe,https://videosite.com,83 +66,maybe,http://example.com,86 +27,terrible,https://yourblog.net,40 +7,good,http://news.com,29 +21,good,https://google.com,14 +35,maybe,https://videosite.com,45 +46,great,https://videosite.com,38 +18,yes,https://github.com,40 +90,good,https://videosite.com,84 +82,great,http://example.com,20 +5,excellent,http://forum.com,100 +30,terrible,http://example.com,63 +60,no,https://google.com,93 +72,maybe,https://google.com,99 +53,maybe,http://forum.com,52 +87,great,http://forum.com,71 +51,bad,https://shoponline.com,71 +22,good,http://forum.com,89 +87,great,http://forum.com,90 +58,excellent,https://github.com,93 +62,no,https://videosite.com,2 +86,maybe,https://shoponline.com,81 +51,excellent,http://news.com,1 +20,no,http://forum.com,72 +51,no,http://news.com,35 +57,good,https://yourblog.net,41 +71,great,http://news.com,33 +51,fine,https://videosite.com,2 +15,yes,https://musicstream.com,14 +74,yes,http://news.com,64 +41,ok,http://mysite.org,21 +58,fine,https://musicstream.com,29 +0,terrible,http://news.com,77 +94,fine,https://github.com,91 +66,maybe,http://news.com,47 +97,no,http://news.com,18 +25,maybe,https://github.com,26 +26,bad,https://yourblog.net,21 +25,great,https://google.com,33 +17,fine,https://google.com,83 +20,good,http://example.com,40 +3,fine,https://shoponline.com,16 +29,yes,http://news.com,23 +71,excellent,https://github.com,30 +61,ok,https://yourblog.net,24 +14,bad,http://mysite.org,73 +37,ok,http://news.com,52 +92,great,http://news.com,40 +70,bad,https://yourblog.net,29 +39,terrible,https://videosite.com,9 +23,ok,http://example.com,38 +52,excellent,https://yourblog.net,46 +56,ok,https://musicstream.com,76 +83,yes,http://forum.com,96 +1,no,http://news.com,27 +72,excellent,http://mysite.org,12 +75,no,http://forum.com,97 +21,ok,https://yourblog.net,87 +41,yes,http://forum.com,44 +89,excellent,http://mysite.org,40 +46,good,https://google.com,39 +75,terrible,https://shoponline.com,88 +51,terrible,https://shoponline.com,33 +15,terrible,http://mysite.org,20 +93,great,https://yourblog.net,92 +45,excellent,https://musicstream.com,7 +23,excellent,https://google.com,81 +54,no,http://example.com,90 +45,great,https://yourblog.net,33 +67,yes,https://github.com,52 +20,good,https://github.com,44 +39,excellent,https://musicstream.com,60 +20,maybe,https://google.com,20 +16,yes,http://forum.com,19 +52,bad,https://yourblog.net,35 +40,fine,https://google.com,86 +43,ok,https://videosite.com,17 +69,terrible,https://shoponline.com,8 +86,terrible,https://github.com,45 +17,no,https://google.com,29 +53,fine,https://shoponline.com,4 +31,great,https://shoponline.com,5 +51,maybe,https://videosite.com,92 +21,bad,https://videosite.com,0 +96,bad,http://example.com,82 +89,bad,http://forum.com,97 +73,terrible,http://mysite.org,17 +58,bad,https://videosite.com,18 +13,excellent,https://musicstream.com,47 +9,good,https://google.com,3 +46,ok,https://musicstream.com,75 +47,excellent,https://yourblog.net,15 +49,good,https://musicstream.com,16 +47,terrible,https://yourblog.net,60 +63,good,https://musicstream.com,19 +56,good,https://videosite.com,97 +88,great,http://news.com,75 +46,terrible,http://mysite.org,61 +51,ok,https://google.com,90 +67,excellent,http://example.com,56 +4,yes,https://musicstream.com,67 
+60,excellent,http://news.com,24 +29,no,https://musicstream.com,38 +37,ok,https://musicstream.com,51 +31,yes,http://mysite.org,66 +4,bad,https://google.com,90 +41,good,http://news.com,64 +19,excellent,https://musicstream.com,19 +79,maybe,https://musicstream.com,56 +100,no,http://news.com,97 +64,yes,https://yourblog.net,8 +45,fine,http://news.com,87 +10,terrible,https://yourblog.net,52 +50,excellent,https://musicstream.com,82 +45,great,http://mysite.org,96 +10,no,https://musicstream.com,0 +28,excellent,http://news.com,77 +100,good,https://shoponline.com,72 +3,fine,http://news.com,68 +87,good,https://shoponline.com,93 +16,ok,http://news.com,65 +41,maybe,http://forum.com,99 +81,maybe,http://mysite.org,33 +1,maybe,http://mysite.org,45 +33,great,https://google.com,64 +73,bad,https://google.com,56 +67,terrible,http://example.com,14 +0,no,https://yourblog.net,8 +25,good,https://google.com,65 +4,ok,http://example.com,74 +89,maybe,https://videosite.com,86 +32,great,http://forum.com,27 +58,no,http://forum.com,91 +64,good,https://github.com,45 +32,bad,https://github.com,92 +31,maybe,http://news.com,12 +70,good,https://yourblog.net,77 +25,bad,https://google.com,8 +25,bad,https://musicstream.com,82 +49,good,http://mysite.org,95 +11,excellent,http://forum.com,39 +28,no,https://yourblog.net,62 +48,maybe,http://example.com,12 +12,excellent,https://shoponline.com,44 +19,excellent,https://shoponline.com,22 +77,yes,https://yourblog.net,94 +24,fine,http://news.com,20 +95,great,http://forum.com,51 +21,maybe,http://example.com,58 +66,no,https://google.com,94 +32,great,https://videosite.com,92 +26,good,https://github.com,54 +52,terrible,https://videosite.com,54 +24,good,http://example.com,89 +67,fine,https://musicstream.com,24 +64,bad,http://news.com,79 +40,bad,https://yourblog.net,76 +2,excellent,https://videosite.com,51 +59,no,http://mysite.org,39 +64,ok,https://yourblog.net,82 +56,ok,http://example.com,75 +82,maybe,https://google.com,87 +14,good,http://news.com,24 +23,yes,http://forum.com,16 +45,great,https://shoponline.com,51 +91,fine,https://google.com,18 +60,bad,https://musicstream.com,64 +33,fine,https://shoponline.com,66 +83,fine,http://mysite.org,40 +7,yes,https://github.com,84 +9,no,https://musicstream.com,90 +51,ok,https://google.com,32 +83,bad,http://example.com,63 +2,terrible,https://videosite.com,2 +76,terrible,https://yourblog.net,43 +6,yes,https://videosite.com,26 +98,great,https://shoponline.com,34 +38,ok,http://example.com,14 +90,fine,http://mysite.org,44 +21,fine,http://forum.com,17 +20,yes,http://mysite.org,11 +47,no,https://shoponline.com,19 +69,bad,https://videosite.com,60 +30,excellent,http://forum.com,48 +43,great,https://yourblog.net,35 +94,yes,https://google.com,54 +97,good,http://forum.com,63 +62,excellent,https://videosite.com,44 +42,ok,https://shoponline.com,14 +11,no,http://example.com,80 +80,no,https://videosite.com,60 +2,excellent,https://musicstream.com,64 +49,maybe,https://videosite.com,18 +48,maybe,http://mysite.org,88 +46,bad,https://musicstream.com,65 +86,excellent,https://yourblog.net,90 +75,ok,https://google.com,54 +81,ok,http://mysite.org,64 +59,good,https://github.com,74 +24,good,http://mysite.org,84 +97,excellent,http://example.com,38 +73,excellent,http://mysite.org,98 +78,bad,http://example.com,57 +37,fine,https://videosite.com,84 +89,maybe,http://news.com,12 +89,bad,https://yourblog.net,36 +63,terrible,http://example.com,5 +91,no,https://videosite.com,68 +52,fine,http://forum.com,26 +34,no,https://shoponline.com,51 +38,no,https://google.com,66 
+78,ok,https://shoponline.com,59 +14,great,https://shoponline.com,90 +93,good,http://mysite.org,31 +96,ok,https://shoponline.com,93 +73,maybe,https://shoponline.com,9 +5,ok,https://shoponline.com,37 +73,excellent,http://example.com,49 +32,no,https://github.com,39 +61,bad,http://example.com,62 +20,ok,https://musicstream.com,55 +33,yes,https://shoponline.com,2 +31,terrible,https://google.com,58 +1,bad,http://forum.com,78 +62,great,https://yourblog.net,38 +80,great,http://mysite.org,54 +6,terrible,http://mysite.org,49 +45,yes,https://videosite.com,13 +97,fine,https://shoponline.com,71 +18,no,http://mysite.org,91 +94,bad,https://yourblog.net,52 +4,excellent,http://example.com,92 +95,great,http://forum.com,100 +99,no,https://yourblog.net,36 +13,terrible,https://github.com,55 +36,bad,https://yourblog.net,82 +18,fine,https://videosite.com,7 +43,good,https://github.com,60 +83,excellent,https://google.com,14 +0,fine,https://shoponline.com,34 +66,bad,https://videosite.com,14 +66,no,http://news.com,70 +29,good,http://news.com,65 +83,yes,http://news.com,49 +71,terrible,http://forum.com,85 +52,yes,http://example.com,55 +59,no,http://mysite.org,94 +30,no,http://forum.com,38 +98,good,http://example.com,89 +52,ok,https://yourblog.net,71 +100,bad,http://example.com,73 +43,maybe,https://musicstream.com,56 +56,no,https://yourblog.net,33 +30,yes,http://forum.com,71 +19,maybe,https://google.com,15 +33,ok,http://news.com,69 +84,excellent,https://shoponline.com,6 +75,fine,https://musicstream.com,70 +95,great,https://musicstream.com,16 +5,terrible,https://github.com,90 +90,yes,https://videosite.com,10 +58,excellent,https://yourblog.net,0 +4,bad,https://musicstream.com,30 +54,good,http://example.com,20 +56,excellent,https://videosite.com,48 +65,fine,http://example.com,16 +61,bad,http://example.com,69 +87,maybe,http://forum.com,0 +61,bad,https://github.com,16 +0,maybe,https://google.com,60 +88,terrible,http://mysite.org,24 +17,maybe,https://github.com,81 +61,excellent,https://musicstream.com,39 +84,excellent,https://yourblog.net,30 +51,yes,http://news.com,99 +84,excellent,http://news.com,79 +56,yes,https://videosite.com,51 +23,yes,https://github.com,75 +30,no,https://github.com,92 +81,excellent,https://videosite.com,67 +93,ok,http://example.com,13 +82,good,http://forum.com,75 +100,maybe,http://example.com,37 +52,terrible,https://musicstream.com,26 +90,yes,https://videosite.com,8 +7,yes,http://news.com,5 +35,excellent,https://github.com,90 +16,terrible,http://forum.com,82 +7,terrible,https://github.com,45 +18,great,http://example.com,18 +29,maybe,https://yourblog.net,3 +46,good,http://forum.com,83 +1,maybe,http://example.com,27 +31,fine,https://videosite.com,29 +10,bad,https://musicstream.com,86 +68,no,http://example.com,19 +17,great,http://forum.com,93 +17,good,https://videosite.com,54 +83,bad,https://musicstream.com,51 +89,yes,https://yourblog.net,58 +42,fine,https://github.com,19 +16,yes,https://google.com,93 +72,good,https://videosite.com,53 +56,yes,https://google.com,39 +70,maybe,https://musicstream.com,82 +3,fine,https://shoponline.com,25 +71,great,https://videosite.com,100 +8,yes,http://mysite.org,91 +20,yes,https://videosite.com,88 +40,fine,http://example.com,31 +65,good,http://forum.com,3 +23,yes,http://forum.com,69 +32,ok,http://mysite.org,94 +38,terrible,http://example.com,23 +80,good,https://github.com,63 +28,yes,https://videosite.com,11 +92,bad,http://forum.com,83 +67,maybe,https://musicstream.com,70 +98,maybe,http://forum.com,4 +34,maybe,https://videosite.com,68 +61,great,https://google.com,9 
+83,fine,https://yourblog.net,100 +35,bad,https://yourblog.net,65 +16,fine,https://videosite.com,5 +90,yes,http://mysite.org,8 +78,terrible,https://shoponline.com,95 +4,yes,https://google.com,47 +4,good,https://musicstream.com,13 +56,terrible,http://example.com,96 +97,no,http://example.com,95 +77,maybe,http://news.com,15 +88,maybe,https://google.com,60 +0,fine,https://shoponline.com,14 +72,no,https://musicstream.com,69 +61,terrible,http://example.com,31 +24,yes,https://github.com,85 +68,great,http://example.com,97 +50,great,https://videosite.com,19 +6,excellent,https://yourblog.net,91 +81,excellent,https://yourblog.net,98 +22,no,http://mysite.org,45 +67,yes,https://yourblog.net,53 +60,ok,https://yourblog.net,20 +44,good,https://yourblog.net,37 +73,ok,https://github.com,4 +54,good,http://example.com,14 +69,good,https://yourblog.net,39 +59,excellent,http://example.com,15 +53,terrible,https://google.com,29 +56,no,https://musicstream.com,46 +76,ok,https://shoponline.com,64 +2,good,https://musicstream.com,74 +51,excellent,http://forum.com,66 +17,terrible,http://news.com,8 +66,maybe,https://shoponline.com,92 +26,fine,http://mysite.org,51 +45,excellent,http://news.com,50 +92,yes,https://videosite.com,93 +34,terrible,https://google.com,50 +40,yes,https://shoponline.com,73 +50,bad,http://forum.com,10 +38,bad,https://google.com,28 +1,terrible,http://mysite.org,97 +7,fine,https://shoponline.com,18 +36,excellent,https://videosite.com,73 +44,no,https://yourblog.net,10 +60,ok,https://videosite.com,45 +59,ok,https://videosite.com,81 +93,yes,http://news.com,75 +73,no,https://github.com,44 +39,yes,https://musicstream.com,71 +46,excellent,https://musicstream.com,25 +0,maybe,http://mysite.org,82 +65,bad,http://mysite.org,27 +57,no,http://example.com,60 +29,maybe,https://google.com,48 +68,maybe,http://example.com,34 +33,excellent,http://forum.com,70 +12,maybe,https://yourblog.net,63 +83,maybe,http://forum.com,43 +25,yes,https://yourblog.net,74 +58,terrible,https://yourblog.net,79 +31,no,http://example.com,72 +21,great,http://mysite.org,74 +5,no,http://forum.com,41 +44,fine,https://github.com,75 +63,great,https://github.com,72 +9,good,https://github.com,93 +96,maybe,https://yourblog.net,83 +18,yes,http://example.com,14 +13,terrible,https://github.com,80 +30,bad,https://yourblog.net,65 +69,yes,http://news.com,30 +19,yes,http://mysite.org,96 +91,fine,http://example.com,46 +68,maybe,https://github.com,37 +2,terrible,https://yourblog.net,41 +33,ok,https://shoponline.com,39 +54,terrible,http://news.com,60 +60,maybe,http://forum.com,89 +76,great,http://mysite.org,37 +66,bad,https://google.com,16 +63,terrible,https://musicstream.com,60 +44,no,https://yourblog.net,61 +66,great,http://example.com,16 +4,good,https://github.com,67 +17,great,https://shoponline.com,100 +84,good,http://mysite.org,56 +29,maybe,https://musicstream.com,32 +19,fine,https://shoponline.com,51 +36,good,http://mysite.org,77 +37,terrible,http://mysite.org,95 diff --git a/be/test/vec/data_types/serde/data_type_serde_arrow_test.cpp b/be/test/vec/data_types/serde/data_type_serde_arrow_test.cpp deleted file mode 100644 index fc692b8f67569e..00000000000000 --- a/be/test/vec/data_types/serde/data_type_serde_arrow_test.cpp +++ /dev/null @@ -1,654 +0,0 @@ - -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "gtest/gtest_pred_impl.h" -#include "olap/hll.h" -#include "runtime/descriptors.cpp" -#include "runtime/descriptors.h" -#include "util/arrow/block_convertor.h" -#include "util/arrow/row_batch.h" -#include "util/bitmap_value.h" -#include "util/quantile_state.h" -#include "util/string_parser.hpp" -#include "vec/columns/column.h" -#include "vec/columns/column_array.h" -#include "vec/columns/column_complex.h" -#include "vec/columns/column_decimal.h" -#include "vec/columns/column_map.h" -#include "vec/columns/column_nullable.h" -#include "vec/columns/column_string.h" -#include "vec/columns/column_vector.h" -#include "vec/core/block.h" -#include "vec/core/field.h" -#include "vec/core/types.h" -#include "vec/data_types/data_type.h" -#include "vec/data_types/data_type_array.h" -#include "vec/data_types/data_type_bitmap.h" -#include "vec/data_types/data_type_date.h" -#include "vec/data_types/data_type_date_time.h" -#include "vec/data_types/data_type_decimal.h" -#include "vec/data_types/data_type_hll.h" -#include "vec/data_types/data_type_ipv4.h" -#include "vec/data_types/data_type_ipv6.h" -#include "vec/data_types/data_type_map.h" -#include "vec/data_types/data_type_nullable.h" -#include "vec/data_types/data_type_number.h" -#include "vec/data_types/data_type_quantilestate.h" -#include "vec/data_types/data_type_string.h" -#include "vec/data_types/data_type_struct.h" -#include "vec/data_types/data_type_time_v2.h" -#include "vec/io/io_helper.h" -#include "vec/runtime/vdatetime_value.h" -#include "vec/utils/arrow_column_to_doris_column.h" - -namespace doris::vectorized { - -template -void serialize_and_deserialize_arrow_test() { - vectorized::Block block; - std::vector> cols; - if constexpr (is_scalar) { - cols = { - {"k1", FieldType::OLAP_FIELD_TYPE_INT, 1, TYPE_INT, false}, - {"k7", FieldType::OLAP_FIELD_TYPE_INT, 7, TYPE_INT, true}, - {"k2", FieldType::OLAP_FIELD_TYPE_STRING, 2, TYPE_STRING, false}, - {"k3", FieldType::OLAP_FIELD_TYPE_DECIMAL128I, 3, TYPE_DECIMAL128I, false}, - {"k11", FieldType::OLAP_FIELD_TYPE_DATETIME, 11, TYPE_DATETIME, false}, - {"k4", FieldType::OLAP_FIELD_TYPE_BOOL, 4, TYPE_BOOLEAN, false}, - {"k5", FieldType::OLAP_FIELD_TYPE_DECIMAL32, 5, TYPE_DECIMAL32, false}, - {"k6", FieldType::OLAP_FIELD_TYPE_DECIMAL64, 6, TYPE_DECIMAL64, false}, - {"k12", FieldType::OLAP_FIELD_TYPE_DATETIMEV2, 12, TYPE_DATETIMEV2, false}, - {"k8", FieldType::OLAP_FIELD_TYPE_IPV4, 8, TYPE_IPV4, false}, - {"k9", FieldType::OLAP_FIELD_TYPE_IPV6, 9, TYPE_IPV6, false}, - }; - } else { - cols = {{"a", FieldType::OLAP_FIELD_TYPE_ARRAY, 6, TYPE_ARRAY, true}, - {"m", FieldType::OLAP_FIELD_TYPE_MAP, 8, TYPE_MAP, true}, - {"s", FieldType::OLAP_FIELD_TYPE_STRUCT, 5, 
TYPE_STRUCT, true}}; - } - - int row_num = 7; - // make desc and generate block - TupleDescriptor tuple_desc(PTupleDescriptor(), true); - for (auto t : cols) { - TSlotDescriptor tslot; - std::string col_name = std::get<0>(t); - tslot.__set_colName(col_name); - TypeDescriptor type_desc(std::get<3>(t)); - bool is_nullable(std::get<4>(t)); - switch (std::get<3>(t)) { - case TYPE_BOOLEAN: - tslot.__set_slotType(type_desc.to_thrift()); - { - auto vec = vectorized::ColumnVector::create(); - auto& data = vec->get_data(); - for (int i = 0; i < row_num; ++i) { - data.push_back(i % 2); - } - vectorized::DataTypePtr data_type(std::make_shared()); - vectorized::ColumnWithTypeAndName type_and_name(vec->get_ptr(), data_type, - col_name); - block.insert(std::move(type_and_name)); - } - break; - case TYPE_INT: - tslot.__set_slotType(type_desc.to_thrift()); - if (is_nullable) { - { - auto column_vector_int32 = vectorized::ColumnVector::create(); - auto column_nullable_vector = - vectorized::make_nullable(std::move(column_vector_int32)); - auto mutable_nullable_vector = std::move(*column_nullable_vector).mutate(); - for (int i = 0; i < row_num; i++) { - if (i % 2 == 0) { - mutable_nullable_vector->insert_default(); - } else { - mutable_nullable_vector->insert(int32(i)); - } - } - auto data_type = vectorized::make_nullable( - std::make_shared()); - vectorized::ColumnWithTypeAndName type_and_name( - mutable_nullable_vector->get_ptr(), data_type, col_name); - block.insert(type_and_name); - } - } else { - auto vec = vectorized::ColumnVector::create(); - auto& data = vec->get_data(); - for (int i = 0; i < row_num; ++i) { - data.push_back(i); - } - vectorized::DataTypePtr data_type(std::make_shared()); - vectorized::ColumnWithTypeAndName type_and_name(vec->get_ptr(), data_type, - col_name); - block.insert(std::move(type_and_name)); - } - break; - case TYPE_DECIMAL32: - type_desc.precision = 9; - type_desc.scale = 2; - tslot.__set_slotType(type_desc.to_thrift()); - { - vectorized::DataTypePtr decimal_data_type = - std::make_shared>(type_desc.precision, - type_desc.scale); - auto decimal_column = decimal_data_type->create_column(); - auto& data = ((vectorized::ColumnDecimal>*) - decimal_column.get()) - ->get_data(); - for (int i = 0; i < row_num; ++i) { - if (i == 0) { - data.push_back(Int32(0)); - continue; - } - Int32 val; - StringParser::ParseResult result = StringParser::PARSE_SUCCESS; - i % 2 == 0 ? val = StringParser::string_to_decimal( - "1234567.56", 11, type_desc.precision, type_desc.scale, - &result) - : val = StringParser::string_to_decimal( - "-1234567.56", 12, type_desc.precision, type_desc.scale, - &result); - EXPECT_TRUE(result == StringParser::PARSE_SUCCESS); - data.push_back(val); - } - - vectorized::ColumnWithTypeAndName type_and_name(decimal_column->get_ptr(), - decimal_data_type, col_name); - block.insert(type_and_name); - } - break; - case TYPE_DECIMAL64: - type_desc.precision = 18; - type_desc.scale = 6; - tslot.__set_slotType(type_desc.to_thrift()); - { - vectorized::DataTypePtr decimal_data_type = - std::make_shared>(type_desc.precision, - type_desc.scale); - auto decimal_column = decimal_data_type->create_column(); - auto& data = ((vectorized::ColumnDecimal>*) - decimal_column.get()) - ->get_data(); - for (int i = 0; i < row_num; ++i) { - if (i == 0) { - data.push_back(Int64(0)); - continue; - } - Int64 val; - StringParser::ParseResult result = StringParser::PARSE_SUCCESS; - std::string decimal_string = - i % 2 == 0 ? 
"-123456789012.123456" : "123456789012.123456"; - val = StringParser::string_to_decimal( - decimal_string.c_str(), decimal_string.size(), type_desc.precision, - type_desc.scale, &result); - EXPECT_TRUE(result == StringParser::PARSE_SUCCESS); - data.push_back(val); - } - vectorized::ColumnWithTypeAndName type_and_name(decimal_column->get_ptr(), - decimal_data_type, col_name); - block.insert(type_and_name); - } - break; - case TYPE_DECIMAL128I: - type_desc.precision = 27; - type_desc.scale = 9; - tslot.__set_slotType(type_desc.to_thrift()); - { - vectorized::DataTypePtr decimal_data_type( - doris::vectorized::create_decimal(27, 9, true)); - auto decimal_column = decimal_data_type->create_column(); - auto& data = ((vectorized::ColumnDecimal>*) - decimal_column.get()) - ->get_data(); - for (int i = 0; i < row_num; ++i) { - __int128_t value = __int128_t(i * pow(10, 9) + i * pow(10, 8)); - data.push_back(value); - } - vectorized::ColumnWithTypeAndName type_and_name(decimal_column->get_ptr(), - decimal_data_type, col_name); - block.insert(type_and_name); - } - break; - case TYPE_STRING: - tslot.__set_slotType(type_desc.to_thrift()); - { - auto strcol = vectorized::ColumnString::create(); - for (int i = 0; i < row_num; ++i) { - std::string is = std::to_string(i); - strcol->insert_data(is.c_str(), is.size()); - } - vectorized::DataTypePtr data_type(std::make_shared()); - vectorized::ColumnWithTypeAndName type_and_name(strcol->get_ptr(), data_type, - col_name); - block.insert(type_and_name); - } - break; - case TYPE_HLL: - tslot.__set_slotType(type_desc.to_thrift()); - { - vectorized::DataTypePtr hll_data_type(std::make_shared()); - auto hll_column = hll_data_type->create_column(); - std::vector& container = - ((vectorized::ColumnHLL*)hll_column.get())->get_data(); - for (int i = 0; i < row_num; ++i) { - HyperLogLog hll; - hll.update(i); - container.push_back(hll); - } - vectorized::ColumnWithTypeAndName type_and_name(hll_column->get_ptr(), - hll_data_type, col_name); - - block.insert(type_and_name); - } - break; - case TYPE_DATEV2: - tslot.__set_slotType(type_desc.to_thrift()); - { - auto column_vector_date_v2 = vectorized::ColumnVector::create(); - auto& date_v2_data = column_vector_date_v2->get_data(); - for (int i = 0; i < row_num; ++i) { - DateV2Value value; - value.from_date((uint32_t)((2022 << 9) | (6 << 5) | 6)); - date_v2_data.push_back(*reinterpret_cast(&value)); - } - vectorized::DataTypePtr date_v2_type( - std::make_shared()); - vectorized::ColumnWithTypeAndName test_date_v2(column_vector_date_v2->get_ptr(), - date_v2_type, col_name); - block.insert(test_date_v2); - } - break; - case TYPE_DATE: // int64 - tslot.__set_slotType(type_desc.to_thrift()); - { - auto column_vector_date = vectorized::ColumnVector::create(); - auto& date_data = column_vector_date->get_data(); - for (int i = 0; i < row_num; ++i) { - VecDateTimeValue value; - value.from_date_int64(20210501); - date_data.push_back(*reinterpret_cast(&value)); - } - vectorized::DataTypePtr date_type(std::make_shared()); - vectorized::ColumnWithTypeAndName test_date(column_vector_date->get_ptr(), - date_type, col_name); - block.insert(test_date); - } - break; - case TYPE_DATETIME: // int64 - tslot.__set_slotType(type_desc.to_thrift()); - { - auto column_vector_datetime = vectorized::ColumnVector::create(); - auto& datetime_data = column_vector_datetime->get_data(); - for (int i = 0; i < row_num; ++i) { - VecDateTimeValue value; - value.from_date_int64(20210501080910); - datetime_data.push_back(*reinterpret_cast(&value)); - } - 
vectorized::DataTypePtr datetime_type( - std::make_shared()); - vectorized::ColumnWithTypeAndName test_datetime(column_vector_datetime->get_ptr(), - datetime_type, col_name); - block.insert(test_datetime); - } - break; - case TYPE_DATETIMEV2: // uint64 - tslot.__set_slotType(type_desc.to_thrift()); - { - // 2022-01-01 11:11:11.111 - auto column_vector_datetimev2 = - vectorized::ColumnVector::create(); - // auto& datetimev2_data = column_vector_datetimev2->get_data(); - DateV2Value value; - string date_literal = "2022-01-01 11:11:11.111"; - value.from_date_str(date_literal.c_str(), date_literal.size()); - char to[64] = {}; - std::cout << "value: " << value.to_string(to) << std::endl; - for (int i = 0; i < row_num; ++i) { - column_vector_datetimev2->insert(value.to_date_int_val()); - } - vectorized::DataTypePtr datetimev2_type( - std::make_shared()); - vectorized::ColumnWithTypeAndName test_datetimev2( - column_vector_datetimev2->get_ptr(), datetimev2_type, col_name); - block.insert(test_datetimev2); - } - break; - case TYPE_ARRAY: // array - type_desc.add_sub_type(TYPE_STRING, true); - tslot.__set_slotType(type_desc.to_thrift()); - { - DataTypePtr s = - std::make_shared(std::make_shared()); - DataTypePtr au = std::make_shared(s); - Array a1, a2; - a1.push_back(String("sss")); - a1.push_back(Null()); - a1.push_back(String("clever amory")); - a2.push_back(String("hello amory")); - a2.push_back(Null()); - a2.push_back(String("cute amory")); - a2.push_back(String("sf")); - MutableColumnPtr array_column = au->create_column(); - array_column->reserve(2); - array_column->insert(a1); - array_column->insert(a2); - vectorized::ColumnWithTypeAndName type_and_name(array_column->get_ptr(), au, - col_name); - block.insert(type_and_name); - } - break; - case TYPE_MAP: - type_desc.add_sub_type(TYPE_STRING, true); - type_desc.add_sub_type(TYPE_STRING, true); - tslot.__set_slotType(type_desc.to_thrift()); - { - DataTypePtr s = - std::make_shared(std::make_shared()); - ; - DataTypePtr d = - std::make_shared(std::make_shared()); - DataTypePtr m = std::make_shared(s, d); - Array k1, k2, v1, v2; - k1.push_back("null"); - k1.push_back("doris"); - k1.push_back("clever amory"); - v1.push_back("ss"); - v1.push_back(Null()); - v1.push_back("NULL"); - k2.push_back("hello amory"); - k2.push_back("NULL"); - k2.push_back("cute amory"); - k2.push_back("doris"); - v2.push_back("s"); - v2.push_back("0"); - v2.push_back("sf"); - v2.push_back(Null()); - Map m1, m2; - m1.push_back(k1); - m1.push_back(v1); - m2.push_back(k2); - m2.push_back(v2); - MutableColumnPtr map_column = m->create_column(); - map_column->reserve(2); - map_column->insert(m1); - map_column->insert(m2); - vectorized::ColumnWithTypeAndName type_and_name(map_column->get_ptr(), m, col_name); - block.insert(type_and_name); - } - break; - case TYPE_STRUCT: - type_desc.add_sub_type(TYPE_STRING, "name", true); - type_desc.add_sub_type(TYPE_LARGEINT, "age", true); - type_desc.add_sub_type(TYPE_BOOLEAN, "is", true); - tslot.__set_slotType(type_desc.to_thrift()); - { - DataTypePtr s = - std::make_shared(std::make_shared()); - DataTypePtr d = - std::make_shared(std::make_shared()); - DataTypePtr m = - std::make_shared(std::make_shared()); - DataTypePtr st = - std::make_shared(std::vector {s, d, m}); - Tuple t1, t2; - t1.push_back(String("amory cute")); - t1.push_back(__int128_t(37)); - t1.push_back(true); - t2.push_back("null"); - t2.push_back(__int128_t(26)); - t2.push_back(false); - MutableColumnPtr struct_column = st->create_column(); - struct_column->reserve(2); - 
struct_column->insert(t1); - struct_column->insert(t2); - vectorized::ColumnWithTypeAndName type_and_name(struct_column->get_ptr(), st, - col_name); - block.insert(type_and_name); - } - break; - case TYPE_IPV4: - tslot.__set_slotType(type_desc.to_thrift()); - { - auto vec = vectorized::ColumnIPv4::create(); - auto& data = vec->get_data(); - for (int i = 0; i < row_num; ++i) { - data.push_back(i); - } - vectorized::DataTypePtr data_type(std::make_shared()); - vectorized::ColumnWithTypeAndName type_and_name(vec->get_ptr(), data_type, - col_name); - block.insert(std::move(type_and_name)); - } - break; - case TYPE_IPV6: - tslot.__set_slotType(type_desc.to_thrift()); - { - auto vec = vectorized::ColumnIPv6::create(); - auto& data = vec->get_data(); - for (int i = 0; i < row_num; ++i) { - data.push_back(i); - } - vectorized::DataTypePtr data_type(std::make_shared()); - vectorized::ColumnWithTypeAndName type_and_name(vec->get_ptr(), data_type, - col_name); - block.insert(std::move(type_and_name)); - } - break; - default: - break; - } - - tslot.__set_col_unique_id(std::get<2>(t)); - SlotDescriptor* slot = new SlotDescriptor(tslot); - tuple_desc.add_slot(slot); - } - - RowDescriptor row_desc(&tuple_desc, true); - // arrow schema - std::shared_ptr _arrow_schema; - EXPECT_EQ(convert_to_arrow_schema(row_desc, &_arrow_schema, "UTC"), Status::OK()); - - // serialize - std::shared_ptr result; - std::cout << "block data: " << block.dump_data(0, row_num) << std::endl; - std::cout << "_arrow_schema: " << _arrow_schema->ToString(true) << std::endl; - - cctz::time_zone timezone_obj; - TimezoneUtils::find_cctz_time_zone(TimezoneUtils::default_time_zone, timezone_obj); - static_cast(convert_to_arrow_batch(block, _arrow_schema, arrow::default_memory_pool(), - &result, timezone_obj)); - Block new_block = block.clone_empty(); - EXPECT_TRUE(result != nullptr); - std::cout << "result: " << result->ToString() << std::endl; - // deserialize - for (auto t : cols) { - std::string real_column_name = std::get<0>(t); - auto* array = result->GetColumnByName(real_column_name).get(); - auto& column_with_type_and_name = new_block.get_by_name(real_column_name); - if (std::get<3>(t) == PrimitiveType::TYPE_DATE || - std::get<3>(t) == PrimitiveType::TYPE_DATETIME) { - { - auto strcol = vectorized::ColumnString::create(); - vectorized::DataTypePtr data_type(std::make_shared()); - vectorized::ColumnWithTypeAndName type_and_name(strcol->get_ptr(), data_type, - real_column_name); - static_cast(arrow_column_to_doris_column( - array, 0, type_and_name.column, type_and_name.type, block.rows(), "UTC")); - { - auto& col = column_with_type_and_name.column.get()->assume_mutable_ref(); - auto& date_data = static_cast&>(col).get_data(); - for (int i = 0; i < strcol->size(); ++i) { - StringRef str = strcol->get_data_at(i); - VecDateTimeValue value; - value.from_date_str(str.data, str.size); - date_data.push_back(*reinterpret_cast(&value)); - } - } - } - continue; - } else if (std::get<3>(t) == PrimitiveType::TYPE_DATEV2) { - auto strcol = vectorized::ColumnString::create(); - vectorized::DataTypePtr data_type(std::make_shared()); - vectorized::ColumnWithTypeAndName type_and_name(strcol->get_ptr(), data_type, - real_column_name); - static_cast(arrow_column_to_doris_column( - array, 0, type_and_name.column, type_and_name.type, block.rows(), "UTC")); - { - auto& col = column_with_type_and_name.column.get()->assume_mutable_ref(); - auto& date_data = static_cast&>(col).get_data(); - for (int i = 0; i < strcol->size(); ++i) { - StringRef str = 
strcol->get_data_at(i); - DateV2Value value; - value.from_date_str(str.data, str.size); - date_data.push_back(*reinterpret_cast(&value)); - } - } - continue; - } else if (std::get<3>(t) == PrimitiveType::TYPE_DATETIMEV2) { - // now we only support read doris datetimev2 to arrow - block.erase(real_column_name); - new_block.erase(real_column_name); - continue; - } - static_cast(arrow_column_to_doris_column(array, 0, column_with_type_and_name.column, - column_with_type_and_name.type, block.rows(), - "UTC")); - } - - std::cout << block.dump_data() << std::endl; - std::cout << new_block.dump_data() << std::endl; - EXPECT_EQ(block.dump_data(), new_block.dump_data()); -} - -TEST(DataTypeSerDeArrowTest, DataTypeScalaSerDeTest) { - serialize_and_deserialize_arrow_test(); -} - -TEST(DataTypeSerDeArrowTest, DataTypeCollectionSerDeTest) { - serialize_and_deserialize_arrow_test(); -} - -TEST(DataTypeSerDeArrowTest, DataTypeMapNullKeySerDeTest) { - TupleDescriptor tuple_desc(PTupleDescriptor(), true); - TSlotDescriptor tslot; - std::string col_name = "map_null_key"; - tslot.__set_colName(col_name); - TypeDescriptor type_desc(TYPE_MAP); - type_desc.add_sub_type(TYPE_STRING, true); - type_desc.add_sub_type(TYPE_INT, true); - tslot.__set_slotType(type_desc.to_thrift()); - vectorized::Block block; - { - DataTypePtr s = std::make_shared(std::make_shared()); - ; - DataTypePtr d = std::make_shared(std::make_shared()); - DataTypePtr m = std::make_shared(s, d); - Array k1, k2, v1, v2, k3, v3; - k1.push_back("doris"); - k1.push_back("clever amory"); - v1.push_back(Null()); - v1.push_back(30); - k2.push_back("hello amory"); - k2.push_back("NULL"); - k2.push_back("cute amory"); - k2.push_back("doris"); - v2.push_back(26); - v2.push_back(Null()); - v2.push_back(6); - v2.push_back(7); - k3.push_back("test"); - v3.push_back(11); - Map m1, m2, m3; - m1.push_back(k1); - m1.push_back(v1); - m2.push_back(k2); - m2.push_back(v2); - m3.push_back(k3); - m3.push_back(v3); - MutableColumnPtr map_column = m->create_column(); - map_column->reserve(3); - map_column->insert(m1); - map_column->insert(m2); - map_column->insert(m3); - vectorized::ColumnWithTypeAndName type_and_name(map_column->get_ptr(), m, col_name); - block.insert(type_and_name); - } - - tslot.__set_col_unique_id(1); - SlotDescriptor* slot = new SlotDescriptor(tslot); - tuple_desc.add_slot(slot); - RowDescriptor row_desc(&tuple_desc, true); - // arrow schema - std::shared_ptr _arrow_schema; - EXPECT_EQ(convert_to_arrow_schema(row_desc, &_arrow_schema, "UTC"), Status::OK()); - - // serialize - std::shared_ptr result; - std::cout << "block structure: " << block.dump_structure() << std::endl; - std::cout << "_arrow_schema: " << _arrow_schema->ToString(true) << std::endl; - - cctz::time_zone timezone_obj; - TimezoneUtils::find_cctz_time_zone(TimezoneUtils::default_time_zone, timezone_obj); - static_cast(convert_to_arrow_batch(block, _arrow_schema, arrow::default_memory_pool(), - &result, timezone_obj)); - Block new_block = block.clone_empty(); - EXPECT_TRUE(result != nullptr); - std::cout << "result: " << result->ToString() << std::endl; - // deserialize - auto* array = result->GetColumnByName(col_name).get(); - auto& column_with_type_and_name = new_block.get_by_name(col_name); - static_cast(arrow_column_to_doris_column(array, 0, column_with_type_and_name.column, - column_with_type_and_name.type, block.rows(), - "UTC")); - std::cout << block.dump_data() << std::endl; - std::cout << new_block.dump_data() << std::endl; - // new block row_index 0, 2 which row has key null 
will be filter - EXPECT_EQ(new_block.dump_one_line(0, 1), "{\"doris\":null, \"clever amory\":30}"); - EXPECT_EQ(new_block.dump_one_line(2, 1), "{\"test\":11}"); - EXPECT_EQ(block.dump_data(1, 1), new_block.dump_data(1, 1)); -} - -} // namespace doris::vectorized diff --git a/bin/run-fs-benchmark.sh b/bin/run-fs-benchmark.sh index f4edd4117d01e8..552dfae05e953b 100755 --- a/bin/run-fs-benchmark.sh +++ b/bin/run-fs-benchmark.sh @@ -280,7 +280,7 @@ export LIBHDFS_OPTS="${final_java_opt}" #echo "LIBHDFS_OPTS: ${LIBHDFS_OPTS}" # see https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile -export JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:false,lg_prof_interval:-1" +export JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:true,prof_active:false,lg_prof_interval:-1" export AWS_EC2_METADATA_DISABLED=true export AWS_MAX_ATTEMPTS=2 diff --git a/bin/start_be.sh b/bin/start_be.sh index a410912ea06c1d..0ae0914d42ec20 100755 --- a/bin/start_be.sh +++ b/bin/start_be.sh @@ -407,7 +407,7 @@ export LIBHDFS_OPTS="${final_java_opt}" # log "LIBHDFS_OPTS: ${LIBHDFS_OPTS}" if [[ -z ${JEMALLOC_CONF} ]]; then - JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:false,lg_prof_interval:-1" + JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:true,prof_active:false,lg_prof_interval:-1" fi if [[ -z ${JEMALLOC_PROF_PRFIX} ]]; then diff --git a/build.sh b/build.sh index 1da5df76bb2fdd..35c989d0b0a4ec 100755 --- a/build.sh +++ b/build.sh @@ -271,12 +271,6 @@ else fi fi -ARCH="$(uname -m)" -if [[ "${ARCH}" == "aarch64" ]]; then - echo "WARNING: Cloud module is not supported on ARM platform, will skip building it." 
- BUILD_CLOUD=0 -fi - if [[ "${HELP}" -eq 1 ]]; then usage fi @@ -448,6 +442,10 @@ if [[ -z "${ENABLE_INJECTION_POINT}" ]]; then ENABLE_INJECTION_POINT='OFF' fi +if [[ -z "${ENABLE_CACHE_LOCK_DEBUG}" ]]; then + ENABLE_CACHE_LOCK_DEBUG='OFF' +fi + if [[ -z "${RECORD_COMPILER_SWITCHES}" ]]; then RECORD_COMPILER_SWITCHES='OFF' fi @@ -494,6 +492,7 @@ echo "Get params: USE_JEMALLOC -- ${USE_JEMALLOC} USE_BTHREAD_SCANNER -- ${USE_BTHREAD_SCANNER} ENABLE_INJECTION_POINT -- ${ENABLE_INJECTION_POINT} + ENABLE_CACHE_LOCK_DEBUG -- ${ENABLE_CACHE_LOCK_DEBUG} DENABLE_CLANG_COVERAGE -- ${DENABLE_CLANG_COVERAGE} DISPLAY_BUILD_TIME -- ${DISPLAY_BUILD_TIME} ENABLE_PCH -- ${ENABLE_PCH} @@ -580,6 +579,7 @@ if [[ "${BUILD_BE}" -eq 1 ]]; then -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" \ -DENABLE_INJECTION_POINT="${ENABLE_INJECTION_POINT}" \ + -DENABLE_CACHE_LOCK_DEBUG="${ENABLE_CACHE_LOCK_DEBUG}" \ -DMAKE_TEST=OFF \ -DBUILD_FS_BENCHMARK="${BUILD_FS_BENCHMARK}" \ ${CMAKE_USE_CCACHE:+${CMAKE_USE_CCACHE}} \ @@ -631,6 +631,7 @@ if [[ "${BUILD_CLOUD}" -eq 1 ]]; then -DCMAKE_MAKE_PROGRAM="${MAKE_PROGRAM}" \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" \ + -DENABLE_INJECTION_POINT="${ENABLE_INJECTION_POINT}" \ -DMAKE_TEST=OFF \ "${CMAKE_USE_CCACHE}" \ -DUSE_LIBCPP="${USE_LIBCPP}" \ diff --git a/cloud/CMakeLists.txt b/cloud/CMakeLists.txt index 32e60f7bfb5467..627e6f283b91d5 100644 --- a/cloud/CMakeLists.txt +++ b/cloud/CMakeLists.txt @@ -411,32 +411,48 @@ if (${MAKE_TEST} STREQUAL "ON") add_definitions(-DBE_TEST) endif () +if (ENABLE_INJECTION_POINT) + add_definitions(-DENABLE_INJECTION_POINT) +endif() + # Add libs if needed, download to current dir -- ${BUILD_DIR} set(FDB_LIB "fdb_lib_7_1_23.tar.xz") +if (ARCH_AARCH64) + set(FDB_LIB "fdb_lib_7_1_57.aarch64.tar.xz") +endif () file(GLOB RELEASE_FILE_LIST LIST_DIRECTORIES false "/etc/*release*") execute_process(COMMAND "cat" ${RELEASE_FILE_LIST} RESULT_VARIABLE CAT_RET_CODE OUTPUT_VARIABLE CAT_RET_CONTENT) string(TOUPPER "${CAT_RET_CONTENT}" CAT_RET_CONTENT) -if ("${CAT_RET_CONTENT}" MATCHES "UBUNTU") - message("Ubuntu OS") - SET(OS_RELEASE "Ubuntu") - set(FDB_LIB_URL "https://doris-build.oss-cn-beijing.aliyuncs.com/thirdparty/fdb/ubuntu/") - string(APPEND FDB_LIB_URL "${FDB_LIB}") - set(FDB_LIB_MD5SUM "a00fe45da95cfac4e0caffa274bb2b30") -else() - # If it is not ubuntu, it is regarded as centos by default - message("Centos OS") - SET(OS_RELEASE "Centos") - set(FDB_LIB_URL "https://doris-build.oss-cn-beijing.aliyuncs.com/thirdparty/fdb/centos/") - string(APPEND FDB_LIB_URL "${FDB_LIB}") - set(FDB_LIB_MD5SUM "f9839a564849c0232a351143b4340de0") + +if (ARCH_AARCH64) + message("Centos OS") + SET(OS_RELEASE "Centos") + set(FDB_LIB_URL "https://doris-build.oss-cn-beijing.aliyuncs.com/thirdparty/fdb/aarch64/") + string(APPEND FDB_LIB_URL "${FDB_LIB}") + set(FDB_LIB_MD5SUM "2d01a431b7a7465077e4ae5520f89693") +else () + if ("${CAT_RET_CONTENT}" MATCHES "UBUNTU") + message("Ubuntu OS") + SET(OS_RELEASE "Ubuntu") + set(FDB_LIB_URL "https://doris-build.oss-cn-beijing.aliyuncs.com/thirdparty/fdb/ubuntu/") + string(APPEND FDB_LIB_URL "${FDB_LIB}") + set(FDB_LIB_MD5SUM "a00fe45da95cfac4e0caffa274bb2b30") + else() + # If it is not ubuntu, it is regarded as centos by default + message("Centos OS") + SET(OS_RELEASE "Centos") + set(FDB_LIB_URL "https://doris-build.oss-cn-beijing.aliyuncs.com/thirdparty/fdb/centos/") + string(APPEND FDB_LIB_URL "${FDB_LIB}") + set(FDB_LIB_MD5SUM "f9839a564849c0232a351143b4340de0") + 
endif() endif() if (NOT EXISTS "${THIRDPARTY_SRC}/${FDB_LIB}") file(MAKE_DIRECTORY ${THIRDPARTY_SRC}) - execute_process(COMMAND "curl --retry 10 --retry-delay 2 --retry-max-time 30" "${FDB_LIB_URL}" - "-o" "${THIRDPARTY_SRC}/${FDB_LIB}" "-k" + execute_process(COMMAND curl --retry 10 --retry-delay 2 --retry-max-time 30 ${FDB_LIB_URL} + -o ${THIRDPARTY_SRC}/${FDB_LIB} -k RESULTS_VARIABLE DOWNLOAD_RET) if (NOT ${DOWNLOAD_RET} STREQUAL "0") execute_process(COMMAND "rm" "-rf" "${THIRDPARTY_SRC}/${FDB_LIB}") @@ -454,7 +470,7 @@ if (NOT EXISTS ${THIRDPARTY_DIR}/include/foundationdb) execute_process(COMMAND "rm" "-rf" "${THIRDPARTY_SRC}/${FDB_LIB}") message(FATAL_ERROR "${THIRDPARTY_SRC}/${FDB_LIB} md5sum check failed, remove it") endif () - execute_process(COMMAND "tar" "xf" "${THIRDPARTY_SRC}/${FDB_LIB}" "-C" "${THIRDPARTY_DIR}/") + execute_process(COMMAND tar xf ${THIRDPARTY_SRC}/${FDB_LIB} -C ${THIRDPARTY_DIR}/) endif () # enable glog custom prefix diff --git a/cloud/script/start.sh b/cloud/script/start.sh index 1b1a4a87e53945..1bce9813f4cf52 100644 --- a/cloud/script/start.sh +++ b/cloud/script/start.sh @@ -115,10 +115,10 @@ fi echo "LIBHDFS3_CONF=${LIBHDFS3_CONF}" -# to enable dump jeprof heap stats prodigally, change `prof:false` to `prof:true` +# to enable dump jeprof heap stats prodigally, change `prof_active:false` to `prof_active:true` or curl http://be_host:be_webport/jeheap/prof/true # to control the dump interval change `lg_prof_interval` to a specific value, it is pow/exponent of 2 in size of bytes, default 34 means 2 ** 34 = 16GB # to control the dump path, change `prof_prefix` to a specific path, e.g. /doris_cloud/log/ms_, by default it dumps at the path where the start command called -export JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof_prefix:ms_,prof:false,lg_prof_interval:34" +export JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof_prefix:ms_,prof:true,prof_active:false,lg_prof_interval:34" if [[ "${RUN_VERSION}" -ne 0 ]]; then "${bin}" --version diff --git a/cloud/src/common/config.h b/cloud/src/common/config.h index 7caba826520fb3..daeb5ddfee5d34 100644 --- a/cloud/src/common/config.h +++ b/cloud/src/common/config.h @@ -217,5 +217,4 @@ CONF_Int32(max_tablet_index_num_per_batch, "1000"); // Max aborted txn num for the same label name CONF_mInt64(max_num_aborted_txn, "100"); - } // namespace doris::cloud::config diff --git a/cloud/src/meta-service/CMakeLists.txt b/cloud/src/meta-service/CMakeLists.txt index c7c4887a0686c1..d11f87e7fa23d4 100644 --- a/cloud/src/meta-service/CMakeLists.txt +++ b/cloud/src/meta-service/CMakeLists.txt @@ -12,6 +12,7 @@ add_library(MetaService meta_server.cpp meta_service.cpp meta_service_http.cpp + injection_point_http.cpp meta_service_job.cpp meta_service_resource.cpp meta_service_schema.cpp diff --git a/cloud/src/meta-service/injection_point_http.cpp b/cloud/src/meta-service/injection_point_http.cpp new file mode 100644 index 00000000000000..80d1bcfdf2e4d8 --- /dev/null +++ b/cloud/src/meta-service/injection_point_http.cpp @@ -0,0 +1,226 @@ + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include +#include + +#include "common/config.h" +#include "common/logging.h" +#include "cpp/sync_point.h" +#include "meta-service/keys.h" +#include "meta-service/meta_service_helper.h" +#include "meta-service/txn_kv.h" +#include "meta-service/txn_kv_error.h" +#include "meta_service.h" +#include "meta_service_http.h" + +namespace doris::cloud { + +std::map> suite_map; +std::once_flag register_suites_once; + +inline std::default_random_engine make_random_engine() { + return std::default_random_engine( + static_cast(std::chrono::steady_clock::now().time_since_epoch().count())); +} + +static void register_suites() { + suite_map.emplace("test_txn_lazy_commit", [] { + auto sp = SyncPoint::get_instance(); + + sp->set_call_back("commit_txn_immediately::advance_last_pending_txn_id", [&](auto&& args) { + std::default_random_engine rng = make_random_engine(); + std::uniform_int_distribution u(100, 1000); + uint32_t duration_ms = u(rng); + LOG(INFO) << "commit_txn_immediately::advance_last_pending_txn_id sleep " << duration_ms + << " ms"; + std::this_thread::sleep_for(std::chrono::milliseconds(duration_ms)); + }); + + sp->set_call_back("commit_txn_eventually::txn_lazy_committer_submit", [&](auto&& args) { + std::default_random_engine rng = make_random_engine(); + std::uniform_int_distribution u(100, 1000); + uint32_t duration_ms = u(rng); + LOG(INFO) << "commit_txn_eventually::txn_lazy_committer_submit sleep " << duration_ms + << " ms"; + std::this_thread::sleep_for(std::chrono::milliseconds(duration_ms)); + }); + + sp->set_call_back("commit_txn_eventually::txn_lazy_committer_wait", [&](auto&& args) { + std::default_random_engine rng = make_random_engine(); + std::uniform_int_distribution u(100, 1000); + uint32_t duration_ms = u(rng); + LOG(INFO) << "commit_txn_eventually::txn_lazy_committer_wait sleep " << duration_ms + << " ms"; + std::this_thread::sleep_for(std::chrono::milliseconds(duration_ms)); + }); + + sp->set_call_back("convert_tmp_rowsets::before_commit", [&](auto&& args) { + std::default_random_engine rng = make_random_engine(); + std::uniform_int_distribution u(1, 50); + uint32_t duration_ms = u(rng); + std::this_thread::sleep_for(std::chrono::milliseconds(duration_ms)); + LOG(INFO) << "convert_tmp_rowsets::before_commit sleep " << duration_ms << " ms"; + if (duration_ms <= 25) { + MetaServiceCode* code = try_any_cast(args[0]); + *code = MetaServiceCode::KV_TXN_CONFLICT; + bool* pred = try_any_cast(args.back()); + *pred = true; + LOG(INFO) << "convert_tmp_rowsets::before_commit random_value=" << duration_ms + << " inject kv txn conflict"; + } + }); + }); +} + +HttpResponse set_sleep(const std::string& point, const brpc::URI& uri) { + std::string duration_str(http_query(uri, "duration")); + int64_t duration = 0; + try { + duration = std::stol(duration_str); + } catch (const std::exception& e) { + auto msg = fmt::format("invalid duration:{}", duration_str); + LOG(WARNING) << msg; + return 
http_json_reply(MetaServiceCode::INVALID_ARGUMENT, msg); + } + + auto sp = SyncPoint::get_instance(); + sp->set_call_back(point, [point, duration](auto&& args) { + LOG(INFO) << "injection point hit, point=" << point << " sleep ms=" << duration; + std::this_thread::sleep_for(std::chrono::milliseconds(duration)); + }); + return http_json_reply(MetaServiceCode::OK, "OK"); +} + +HttpResponse set_return(const std::string& point, const brpc::URI& uri) { + auto sp = SyncPoint::get_instance(); + sp->set_call_back(point, [point](auto&& args) { + try { + LOG(INFO) << "injection point hit, point=" << point << " return void"; + auto pred = try_any_cast(args.back()); + *pred = true; + } catch (const std::bad_any_cast& e) { + LOG(ERROR) << "failed to process `return` e:" << e.what(); + } + }); + + return http_json_reply(MetaServiceCode::OK, "OK"); +} + +HttpResponse handle_set(const brpc::URI& uri) { + const std::string point(http_query(uri, "name")); + if (point.empty()) { + return http_json_reply(MetaServiceCode::INVALID_ARGUMENT, "empty point name"); + } + + const std::string behavior(http_query(uri, "behavior")); + if (behavior.empty()) { + return http_json_reply(MetaServiceCode::INVALID_ARGUMENT, "empty behavior"); + } + if (behavior == "sleep") { + return set_sleep(point, uri); + } else if (behavior == "return") { + return set_return(point, uri); + } + + return http_json_reply(MetaServiceCode::INVALID_ARGUMENT, "unknown behavior: " + behavior); +} + +HttpResponse handle_clear(const brpc::URI& uri) { + const std::string point(http_query(uri, "name")); + auto* sp = SyncPoint::get_instance(); + LOG(INFO) << "clear injection point : " << (point.empty() ? "(all points)" : point); + if (point.empty()) { + // If point name is emtpy, clear all + sp->clear_all_call_backs(); + return http_json_reply(MetaServiceCode::OK, "OK"); + } + sp->clear_call_back(point); + return http_json_reply(MetaServiceCode::OK, "OK"); +} + +HttpResponse handle_apply_suite(const brpc::URI& uri) { + const std::string suite(http_query(uri, "name")); + if (suite.empty()) { + return http_json_reply(MetaServiceCode::INVALID_ARGUMENT, "empty suite name"); + } + + std::call_once(register_suites_once, register_suites); + if (auto it = suite_map.find(suite); it != suite_map.end()) { + it->second(); // set injection callbacks + return http_json_reply(MetaServiceCode::OK, "OK apply suite " + suite + "\n"); + } + + return http_json_reply(MetaServiceCode::INVALID_ARGUMENT, "unknown suite: " + suite + "\n"); +} + +HttpResponse handle_enable(const brpc::URI& uri) { + SyncPoint::get_instance()->enable_processing(); + return http_json_reply(MetaServiceCode::OK, "OK"); +} + +HttpResponse handle_disable(const brpc::URI& uri) { + SyncPoint::get_instance()->disable_processing(); + return http_json_reply(MetaServiceCode::OK, "OK"); +} + +// +// enable/disable injection point +// ``` +// curl "ms_ip:port/MetaService/http/v1/injection_point?token=greedisgood9999&op=enable" +// curl "ms_ip:port/MetaService/http/v1/injection_point?token=greedisgood9999&op=disable" +// ``` + +// clear all injection points +// ``` +// curl "ms_ip:port/MetaService/http/v1/injection_point?token=greedisgood9999&op=clear" +// ``` + +// apply/activate specific suite with registered action, see `register_suites()` for more details +// ``` +// curl "ms_ip:port/MetaService/http/v1/injection_point?token=greedisgood9999&op=apply_suite&name=${suite_name}" +// ``` + +// ``` +// curl "ms_ip:port/MetaService/http/v1/injection_point?token=greedisgood9999&op=set +// 
&name=${injection_point_name}&behavior=sleep&duration=${x_millsec}" # sleep x millisecs + +// curl "ms_ip:port/MetaService/http/v1/injection_point?token=greedisgood9999&op=set +// &name=${injection_point_name}&behavior=return" # return void +// ``` + +HttpResponse process_injection_point(MetaServiceImpl* service, brpc::Controller* ctrl) { + auto& uri = ctrl->http_request().uri(); + LOG(INFO) << "handle InjectionPointAction uri:" << uri; + const std::string op(http_query(uri, "op")); + + if (op == "set") { + return handle_set(uri); + } else if (op == "clear") { + return handle_clear(uri); + } else if (op == "apply_suite") { + return handle_apply_suite(uri); + } else if (op == "enable") { + return handle_enable(uri); + } else if (op == "disable") { + return handle_disable(uri); + } + + return http_json_reply(MetaServiceCode::INVALID_ARGUMENT, "unknown op:" + op); +} +} // namespace doris::cloud \ No newline at end of file diff --git a/cloud/src/meta-service/meta_service.cpp b/cloud/src/meta-service/meta_service.cpp index 7adbc8ccf12aab..10969a0cdb68af 100644 --- a/cloud/src/meta-service/meta_service.cpp +++ b/cloud/src/meta-service/meta_service.cpp @@ -1498,7 +1498,7 @@ void MetaServiceImpl::get_rowset(::google::protobuf::RpcController* controller, if (code != MetaServiceCode::OK) { LOG(WARNING) << "advance_last_txn failed last_txn=" << version_pb.pending_txn_ids(0) << " code=" << code - << "msg=" << msg; + << " msg=" << msg; return; } continue; @@ -2188,4 +2188,54 @@ std::pair MetaServiceImpl::get_instance_info( return {code, std::move(msg)}; } +std::pair init_key_pair(std::string instance_id, int64_t table_id) { + std::string begin_key = stats_tablet_key({instance_id, table_id, 0, 0, 0}); + std::string end_key = stats_tablet_key({instance_id, table_id + 1, 0, 0, 0}); + return std::make_pair(begin_key, end_key); +} + +MetaServiceResponseStatus MetaServiceImpl::fix_tablet_stats(std::string cloud_unique_id_str, + std::string table_id_str) { + // parse params + int64_t table_id; + std::string instance_id; + MetaServiceResponseStatus st = parse_fix_tablet_stats_param( + resource_mgr_, table_id_str, cloud_unique_id_str, table_id, instance_id); + if (st.code() != MetaServiceCode::OK) { + return st; + } + + std::pair key_pair = init_key_pair(instance_id, table_id); + std::string old_begin_key; + while (old_begin_key < key_pair.first) { + // get tablet stats + std::vector> tablet_stat_shared_ptr_vec_batch; + old_begin_key = key_pair.first; + + // fix tablet stats + size_t retry = 0; + do { + st = fix_tablet_stats_internal(txn_kv_, key_pair, tablet_stat_shared_ptr_vec_batch, + instance_id); + if (st.code() != MetaServiceCode::OK) { + LOG_WARNING("failed to fix tablet stats") + .tag("err", st.msg()) + .tag("table id", table_id) + .tag("retry time", retry); + } + retry++; + } while (st.code() != MetaServiceCode::OK && retry < 3); + if (st.code() != MetaServiceCode::OK) { + return st; + } + + // Check tablet stats + st = check_new_tablet_stats(txn_kv_, instance_id, tablet_stat_shared_ptr_vec_batch); + if (st.code() != MetaServiceCode::OK) { + return st; + } + } + return st; +} + } // namespace doris::cloud diff --git a/cloud/src/meta-service/meta_service.h b/cloud/src/meta-service/meta_service.h index f60d795949b366..edc2c97a3eaca8 100644 --- a/cloud/src/meta-service/meta_service.h +++ b/cloud/src/meta-service/meta_service.h @@ -40,6 +40,10 @@ class Transaction; constexpr std::string_view BUILT_IN_STORAGE_VAULT_NAME = "built_in_storage_vault"; +void internal_get_rowset(Transaction* txn, int64_t start, 
int64_t end, + const std::string& instance_id, int64_t tablet_id, MetaServiceCode& code, + std::string& msg, GetRowsetResponse* response); + class MetaServiceImpl : public cloud::MetaService { public: MetaServiceImpl(std::shared_ptr txn_kv, std::shared_ptr resource_mgr, @@ -298,6 +302,9 @@ class MetaServiceImpl : public cloud::MetaService { const std::string& cloud_unique_id, InstanceInfoPB* instance); + MetaServiceResponseStatus fix_tablet_stats(std::string cloud_unique_id_str, + std::string table_id_str); + private: std::pair alter_instance( const AlterInstanceRequest* request, diff --git a/cloud/src/meta-service/meta_service_http.cpp b/cloud/src/meta-service/meta_service_http.cpp index 9a9f6de97cc4dd..95907376dd28c0 100644 --- a/cloud/src/meta-service/meta_service_http.cpp +++ b/cloud/src/meta-service/meta_service_http.cpp @@ -170,6 +170,8 @@ static std::string_view remove_version_prefix(std::string_view path) { return path; } +HttpResponse process_injection_point(MetaServiceImpl* service, brpc::Controller* ctrl); + static HttpResponse process_alter_cluster(MetaServiceImpl* service, brpc::Controller* ctrl) { static std::unordered_map operations { {"add_cluster", AlterClusterRequest::ADD_CLUSTER}, @@ -468,6 +470,16 @@ static HttpResponse process_get_tablet_stats(MetaServiceImpl* service, brpc::Con return http_text_reply(resp.status(), body); } +static HttpResponse process_fix_tablet_stats(MetaServiceImpl* service, brpc::Controller* ctrl) { + auto& uri = ctrl->http_request().uri(); + std::string_view cloud_unique_id = http_query(uri, "cloud_unique_id"); + std::string_view table_id = http_query(uri, "table_id"); + + MetaServiceResponseStatus st = + service->fix_tablet_stats(std::string(cloud_unique_id), std::string(table_id)); + return http_text_reply(st, st.DebugString()); +} + static HttpResponse process_get_stage(MetaServiceImpl* service, brpc::Controller* ctrl) { GetStageRequest req; PARSE_MESSAGE_OR_RETURN(ctrl, req); @@ -575,11 +587,16 @@ void MetaServiceImpl::http(::google::protobuf::RpcController* controller, {"get_value", process_get_value}, {"show_meta_ranges", process_show_meta_ranges}, {"txn_lazy_commit", process_txn_lazy_commit}, + {"injection_point", process_injection_point}, + {"fix_tablet_stats", process_fix_tablet_stats}, {"v1/decode_key", process_decode_key}, {"v1/encode_key", process_encode_key}, {"v1/get_value", process_get_value}, {"v1/show_meta_ranges", process_show_meta_ranges}, {"v1/txn_lazy_commit", process_txn_lazy_commit}, + {"v1/injection_point", process_injection_point}, + // for get + {"get_instance", process_get_instance_info}, // for get {"get_instance", process_get_instance_info}, {"get_obj_store_info", process_get_obj_store_info}, diff --git a/cloud/src/meta-service/meta_service_resource.cpp b/cloud/src/meta-service/meta_service_resource.cpp index b8bef65c91b264..399e0964f4da1d 100644 --- a/cloud/src/meta-service/meta_service_resource.cpp +++ b/cloud/src/meta-service/meta_service_resource.cpp @@ -989,6 +989,7 @@ void MetaServiceImpl::alter_storage_vault(google::protobuf::RpcController* contr instance.set_default_storage_vault_id(vault.id()); instance.set_default_storage_vault_name(vault.name()); } + response->set_storage_vault_id(vault.id()); LOG_INFO("try to put storage vault_id={}, vault_name={}, vault_key={}", vault.id(), vault.name(), hex(vault_key)); } break; @@ -1006,6 +1007,7 @@ void MetaServiceImpl::alter_storage_vault(google::protobuf::RpcController* contr instance.set_default_storage_vault_id(*instance.resource_ids().rbegin()); 
instance.set_default_storage_vault_name(*instance.storage_vault_names().rbegin()); } + response->set_storage_vault_id(request->vault().id()); break; } case AlterObjStoreInfoRequest::ADD_BUILT_IN_VAULT: { diff --git a/cloud/src/meta-service/meta_service_tablet_stats.cpp b/cloud/src/meta-service/meta_service_tablet_stats.cpp index 501cecbab76d52..cecccbd67673ad 100644 --- a/cloud/src/meta-service/meta_service_tablet_stats.cpp +++ b/cloud/src/meta-service/meta_service_tablet_stats.cpp @@ -17,13 +17,22 @@ #include "meta-service/meta_service_tablet_stats.h" +#include #include +#include + +#include +#include +#include +#include #include "common/logging.h" #include "common/util.h" #include "meta-service/keys.h" +#include "meta-service/meta_service.h" #include "meta-service/meta_service_helper.h" #include "meta-service/txn_kv.h" +#include "meta-service/txn_kv_error.h" namespace doris::cloud { @@ -156,4 +165,240 @@ void internal_get_tablet_stats(MetaServiceCode& code, std::string& msg, Transact merge_tablet_stats(stats, detached_stats); } +MetaServiceResponseStatus parse_fix_tablet_stats_param( + std::shared_ptr resource_mgr, const std::string& table_id_str, + const std::string& cloud_unique_id_str, int64_t& table_id, std::string& instance_id) { + MetaServiceCode code = MetaServiceCode::OK; + std::string msg; + MetaServiceResponseStatus st; + st.set_code(MetaServiceCode::OK); + + // parse params + try { + table_id = std::stoll(table_id_str); + } catch (...) { + st.set_code(MetaServiceCode::INVALID_ARGUMENT); + st.set_msg("Invalid table_id, table_id: " + table_id_str); + return st; + } + + instance_id = get_instance_id(resource_mgr, cloud_unique_id_str); + if (instance_id.empty()) { + code = MetaServiceCode::INVALID_ARGUMENT; + msg = "empty instance_id"; + LOG(INFO) << msg << ", cloud_unique_id=" << cloud_unique_id_str; + st.set_code(code); + st.set_msg(msg); + return st; + } + return st; +} + +MetaServiceResponseStatus fix_tablet_stats_internal( + std::shared_ptr txn_kv, std::pair& key_pair, + std::vector>& tablet_stat_shared_ptr_vec_batch, + const std::string& instance_id, size_t batch_size) { + std::unique_ptr txn; + MetaServiceResponseStatus st; + st.set_code(MetaServiceCode::OK); + MetaServiceCode code = MetaServiceCode::OK; + std::unique_ptr it; + std::vector> tmp_tablet_stat_vec; + + TxnErrorCode err = txn_kv->create_txn(&txn); + if (err != TxnErrorCode::TXN_OK) { + st.set_code(cast_as(err)); + st.set_msg("failed to create txn"); + return st; + } + + // read tablet stats + err = txn->get(key_pair.first, key_pair.second, &it, true); + if (err != TxnErrorCode::TXN_OK) { + st.set_code(cast_as(err)); + st.set_msg(fmt::format("failed to get tablet stats, err={} ", err)); + return st; + } + + size_t tablet_cnt = 0; + while (it->has_next() && tablet_cnt < batch_size) { + auto [k, v] = it->next(); + key_pair.first = k; + auto k1 = k; + k1.remove_prefix(1); + std::vector, int, int>> out; + decode_key(&k1, &out); + + // 0x01 "stats" ${instance_id} "tablet" ${table_id} ${index_id} ${partition_id} ${tablet_id} -> TabletStatsPB + if (out.size() == 7) { + tablet_cnt++; + TabletStatsPB tablet_stat; + tablet_stat.ParseFromArray(v.data(), v.size()); + tmp_tablet_stat_vec.emplace_back(std::make_shared(tablet_stat)); + } + } + if (it->has_next()) { + key_pair.first = it->next().first; + } + + for (const auto& tablet_stat_ptr : tmp_tablet_stat_vec) { + GetRowsetResponse resp; + std::string msg; + // get rowsets in tablet and accumulate disk size + internal_get_rowset(txn.get(), 0, std::numeric_limits::max() - 1, 
instance_id, + tablet_stat_ptr->idx().tablet_id(), code, msg, &resp); + if (code != MetaServiceCode::OK) { + st.set_code(code); + st.set_msg(msg); + return st; + } + int64_t total_disk_size = 0; + for (const auto& rs_meta : resp.rowset_meta()) { + total_disk_size += rs_meta.total_disk_size(); + } + + // set new disk size to tabletPB and write it back + TabletStatsPB tablet_stat; + tablet_stat.CopyFrom(*tablet_stat_ptr); + tablet_stat.set_data_size(total_disk_size); + // record tablet stats batch + tablet_stat_shared_ptr_vec_batch.emplace_back(std::make_shared(tablet_stat)); + std::string tablet_stat_key; + std::string tablet_stat_value; + tablet_stat_key = stats_tablet_key( + {instance_id, tablet_stat.idx().table_id(), tablet_stat.idx().index_id(), + tablet_stat.idx().partition_id(), tablet_stat.idx().tablet_id()}); + if (!tablet_stat.SerializeToString(&tablet_stat_value)) { + st.set_code(MetaServiceCode::PROTOBUF_SERIALIZE_ERR); + st.set_msg("failed to serialize tablet stat"); + return st; + } + txn->put(tablet_stat_key, tablet_stat_value); + + // read num segs + // 0x01 "stats" ${instance_id} "tablet" ${table_id} ${index_id} ${partition_id} ${tablet_id} "num_segs" -> int64 + std::string tablet_stat_num_segs_key; + stats_tablet_num_segs_key( + {instance_id, tablet_stat_ptr->idx().table_id(), tablet_stat_ptr->idx().index_id(), + tablet_stat_ptr->idx().partition_id(), tablet_stat_ptr->idx().tablet_id()}, + &tablet_stat_num_segs_key); + int64_t tablet_stat_num_segs = 0; + std::string tablet_stat_num_segs_value(sizeof(tablet_stat_num_segs), '\0'); + err = txn->get(tablet_stat_num_segs_key, &tablet_stat_num_segs_value); + if (err != TxnErrorCode::TXN_OK && err != TxnErrorCode::TXN_KEY_NOT_FOUND) { + st.set_code(cast_as(err)); + } + if (tablet_stat_num_segs_value.size() != sizeof(tablet_stat_num_segs)) [[unlikely]] { + LOG(WARNING) << " malformed tablet stats value v.size=" + << tablet_stat_num_segs_value.size() + << " value=" << hex(tablet_stat_num_segs_value); + } + std::memcpy(&tablet_stat_num_segs, tablet_stat_num_segs_value.data(), + sizeof(tablet_stat_num_segs)); + if constexpr (std::endian::native == std::endian::big) { + tablet_stat_num_segs = bswap_64(tablet_stat_num_segs); + } + + if (tablet_stat_num_segs > 0) { + // set tablet stats data size = 0 + // 0x01 "stats" ${instance_id} "tablet" ${table_id} ${index_id} ${partition_id} ${tablet_id} "data_size" -> int64 + std::string tablet_stat_data_size_key; + stats_tablet_data_size_key( + {instance_id, tablet_stat.idx().table_id(), tablet_stat.idx().index_id(), + tablet_stat.idx().partition_id(), tablet_stat.idx().tablet_id()}, + &tablet_stat_data_size_key); + int64_t tablet_stat_data_size = 0; + std::string tablet_stat_data_size_value(sizeof(tablet_stat_data_size), '\0'); + memcpy(tablet_stat_data_size_value.data(), &tablet_stat_data_size, + sizeof(tablet_stat_data_size)); + txn->put(tablet_stat_data_size_key, tablet_stat_data_size_value); + } + } + + err = txn->commit(); + if (err != TxnErrorCode::TXN_OK) { + st.set_code(cast_as(err)); + st.set_msg("failed to commit txn"); + return st; + } + return st; +} + +MetaServiceResponseStatus check_new_tablet_stats( + std::shared_ptr txn_kv, const std::string& instance_id, + const std::vector>& tablet_stat_shared_ptr_vec_batch) { + std::unique_ptr txn; + MetaServiceResponseStatus st; + st.set_code(MetaServiceCode::OK); + + TxnErrorCode err = txn_kv->create_txn(&txn); + if (err != TxnErrorCode::TXN_OK) { + st.set_code(cast_as(err)); + st.set_msg("failed to create txn"); + return st; + } + + for 
(const auto& tablet_stat_ptr : tablet_stat_shared_ptr_vec_batch) { + // check tablet stats + std::string tablet_stat_key; + std::string tablet_stat_value; + tablet_stat_key = stats_tablet_key( + {instance_id, tablet_stat_ptr->idx().table_id(), tablet_stat_ptr->idx().index_id(), + tablet_stat_ptr->idx().partition_id(), tablet_stat_ptr->idx().tablet_id()}); + err = txn->get(tablet_stat_key, &tablet_stat_value); + if (err != TxnErrorCode::TXN_OK && err != TxnErrorCode::TXN_KEY_NOT_FOUND) { + st.set_code(cast_as(err)); + return st; + } + TabletStatsPB tablet_stat_check; + tablet_stat_check.ParseFromArray(tablet_stat_value.data(), tablet_stat_value.size()); + if (tablet_stat_check.DebugString() != tablet_stat_ptr->DebugString() && + // If anyone data size of tablet_stat_check and tablet_stat_ptr is twice bigger than another, + // we need to rewrite it this tablet_stat. + (tablet_stat_check.data_size() > 2 * tablet_stat_ptr->data_size() || + tablet_stat_ptr->data_size() > 2 * tablet_stat_check.data_size())) { + LOG_WARNING("[fix tablet stats]:tablet stats check failed") + .tag("tablet stat", tablet_stat_ptr->DebugString()) + .tag("check tabelt stat", tablet_stat_check.DebugString()); + } + + // check data size + std::string tablet_stat_data_size_key; + stats_tablet_data_size_key( + {instance_id, tablet_stat_ptr->idx().table_id(), tablet_stat_ptr->idx().index_id(), + tablet_stat_ptr->idx().partition_id(), tablet_stat_ptr->idx().tablet_id()}, + &tablet_stat_data_size_key); + int64_t tablet_stat_data_size = 0; + std::string tablet_stat_data_size_value(sizeof(tablet_stat_data_size), '\0'); + err = txn->get(tablet_stat_data_size_key, &tablet_stat_data_size_value); + if (err != TxnErrorCode::TXN_OK && err != TxnErrorCode::TXN_KEY_NOT_FOUND) { + st.set_code(cast_as(err)); + return st; + } + int64_t tablet_stat_data_size_check; + + if (tablet_stat_data_size_value.size() != sizeof(tablet_stat_data_size_check)) + [[unlikely]] { + LOG(WARNING) << " malformed tablet stats value v.size=" + << tablet_stat_data_size_value.size() + << " value=" << hex(tablet_stat_data_size_value); + } + std::memcpy(&tablet_stat_data_size_check, tablet_stat_data_size_value.data(), + sizeof(tablet_stat_data_size_check)); + if constexpr (std::endian::native == std::endian::big) { + tablet_stat_data_size_check = bswap_64(tablet_stat_data_size_check); + } + if (tablet_stat_data_size_check != tablet_stat_data_size && + // ditto + (tablet_stat_data_size_check > 2 * tablet_stat_data_size || + tablet_stat_data_size > 2 * tablet_stat_data_size_check)) { + LOG_WARNING("[fix tablet stats]:data size check failed") + .tag("data size", tablet_stat_data_size) + .tag("check data size", tablet_stat_data_size_check); + } + } + + return st; +} + } // namespace doris::cloud diff --git a/cloud/src/meta-service/meta_service_tablet_stats.h b/cloud/src/meta-service/meta_service_tablet_stats.h index 5726cf50b76652..a7aea5885a8e1a 100644 --- a/cloud/src/meta-service/meta_service_tablet_stats.h +++ b/cloud/src/meta-service/meta_service_tablet_stats.h @@ -19,6 +19,8 @@ #include +#include "resource-manager/resource_manager.h" + namespace doris::cloud { class Transaction; class RangeGetIterator; @@ -66,4 +68,17 @@ void internal_get_tablet_stats(MetaServiceCode& code, std::string& msg, Transact TabletStats& detached_stats); // clang-format on +MetaServiceResponseStatus parse_fix_tablet_stats_param( + std::shared_ptr resource_mgr, const std::string& table_id_str, + const std::string& cloud_unique_id_str, int64_t& table_id, std::string& instance_id); + 
+MetaServiceResponseStatus fix_tablet_stats_internal( + std::shared_ptr txn_kv, std::pair& key_pair, + std::vector>& tablet_stat_shared_ptr_vec_batch, + const std::string& instance_id, size_t batch_size = 20); + +MetaServiceResponseStatus check_new_tablet_stats( + std::shared_ptr txn_kv, const std::string& instance_id, + const std::vector>& tablet_stat_shared_ptr_vec_batch); + } // namespace doris::cloud diff --git a/cloud/src/meta-service/meta_service_txn.cpp b/cloud/src/meta-service/meta_service_txn.cpp index 03cb7866e1abea..32f6b56f51af4c 100644 --- a/cloud/src/meta-service/meta_service_txn.cpp +++ b/cloud/src/meta-service/meta_service_txn.cpp @@ -266,6 +266,7 @@ void MetaServiceImpl::begin_txn(::google::protobuf::RpcController* controller, } // clang-format on } + response->set_txn_status(cur_txn_info.status()); code = MetaServiceCode::TXN_LABEL_ALREADY_USED; ss << "Label [" << label << "] has already been used, relate to txn [" << cur_txn_info.txn_id() << "], status=[" << TxnStatusPB_Name(cur_txn_info.status()) @@ -1166,7 +1167,7 @@ void commit_txn_immediately( // Accumulate affected rows auto& stats = tablet_stats[tablet_id]; - stats.data_size += i.data_disk_size(); + stats.data_size += i.total_disk_size(); stats.num_rows += i.num_rows(); ++stats.num_rowsets; stats.num_segs += i.num_segments(); @@ -1893,7 +1894,7 @@ void commit_txn_eventually( std::pair ret = task->wait(); if (ret.first != MetaServiceCode::OK) { LOG(WARNING) << "txn lazy commit failed txn_id=" << txn_id << " code=" << ret.first - << "msg=" << ret.second; + << " msg=" << ret.second; } std::unordered_map tablet_stats; // tablet_id -> stats diff --git a/cloud/src/meta-service/txn_lazy_committer.cpp b/cloud/src/meta-service/txn_lazy_committer.cpp index fe13f7f8352bab..25d36faab06796 100644 --- a/cloud/src/meta-service/txn_lazy_committer.cpp +++ b/cloud/src/meta-service/txn_lazy_committer.cpp @@ -21,6 +21,7 @@ #include "common/logging.h" #include "common/util.h" +#include "cpp/sync_point.h" #include "meta-service/keys.h" #include "meta-service/meta_service_helper.h" #include "meta-service/meta_service_tablet_stats.h" @@ -128,6 +129,7 @@ void convert_tmp_rowsets( LOG(INFO) << "txn_id=" << txn_id << " key=" << hex(ver_key) << " version_pb:" << version_pb.ShortDebugString(); partition_versions.emplace(tmp_rowset_pb.partition_id(), version_pb); + DCHECK_EQ(partition_versions.size(), 1) << partition_versions.size(); } const VersionPB& version_pb = partition_versions[tmp_rowset_pb.partition_id()]; @@ -178,8 +180,6 @@ void convert_tmp_rowsets( stats.num_segs += tmp_rowset_pb.num_segments(); } - DCHECK(partition_versions.size() == 1); - for (auto& [tablet_id, stats] : tablet_stats) { DCHECK(tablet_ids.count(tablet_id)); auto& tablet_idx = tablet_ids[tablet_id]; @@ -189,6 +189,7 @@ void convert_tmp_rowsets( if (code != MetaServiceCode::OK) return; } + TEST_SYNC_POINT_RETURN_WITH_VOID("convert_tmp_rowsets::before_commit", &code); err = txn->commit(); if (err != TxnErrorCode::TXN_OK) { code = cast_as(err); @@ -489,7 +490,8 @@ std::pair TxnLazyCommitTask::wait() { sw.pause(); if (sw.elapsed_us() > 1000000) { LOG(INFO) << "txn_lazy_commit task wait more than 1000ms, cost=" << sw.elapsed_us() / 1000 - << " ms"; + << " ms" + << " txn_id=" << txn_id_; } return std::make_pair(this->code_, this->msg_); } diff --git a/cloud/test/meta_service_http_test.cpp b/cloud/test/meta_service_http_test.cpp index 20dee957126e4d..e49628fcb3a783 100644 --- a/cloud/test/meta_service_http_test.cpp +++ b/cloud/test/meta_service_http_test.cpp @@ -320,6 +320,8 
@@ static doris::RowsetMetaCloudPB create_rowset(int64_t txn_id, int64_t tablet_id, rowset.set_num_segments(1); rowset.set_num_rows(num_rows); rowset.set_data_disk_size(num_rows * 100); + rowset.set_index_disk_size(num_rows * 10); + rowset.set_total_disk_size(num_rows * 110); rowset.mutable_tablet_schema()->set_schema_version(0); rowset.set_txn_expiration(::time(nullptr)); // Required by DCHECK return rowset; @@ -1285,7 +1287,7 @@ TEST(MetaServiceHttpTest, GetTabletStatsTest) { stats_tablet_data_size_key({mock_instance, table_id, index_id, partition_id, tablet_id}, &data_size_key); ASSERT_EQ(txn->get(data_size_key, &data_size_val), TxnErrorCode::TXN_OK); - EXPECT_EQ(*(int64_t*)data_size_val.data(), 20000); + EXPECT_EQ(*(int64_t*)data_size_val.data(), 22000); std::string num_rows_key, num_rows_val; stats_tablet_num_rows_key({mock_instance, table_id, index_id, partition_id, tablet_id}, &num_rows_key); @@ -1306,7 +1308,7 @@ TEST(MetaServiceHttpTest, GetTabletStatsTest) { get_tablet_stats(meta_service.get(), table_id, index_id, partition_id, tablet_id, res); ASSERT_EQ(res.status().code(), MetaServiceCode::OK); ASSERT_EQ(res.tablet_stats_size(), 1); - EXPECT_EQ(res.tablet_stats(0).data_size(), 40000); + EXPECT_EQ(res.tablet_stats(0).data_size(), 44000); EXPECT_EQ(res.tablet_stats(0).num_rows(), 400); EXPECT_EQ(res.tablet_stats(0).num_rowsets(), 5); EXPECT_EQ(res.tablet_stats(0).num_segments(), 4); diff --git a/cloud/test/meta_service_test.cpp b/cloud/test/meta_service_test.cpp index 3baec482710bc4..ee90e604e1c5f6 100644 --- a/cloud/test/meta_service_test.cpp +++ b/cloud/test/meta_service_test.cpp @@ -178,6 +178,8 @@ static doris::RowsetMetaCloudPB create_rowset(int64_t txn_id, int64_t tablet_id, rowset.set_num_segments(1); rowset.set_num_rows(num_rows); rowset.set_data_disk_size(num_rows * 100); + rowset.set_index_disk_size(num_rows * 10); + rowset.set_total_disk_size(num_rows * 110); rowset.mutable_tablet_schema()->set_schema_version(0); rowset.set_txn_expiration(::time(nullptr)); // Required by DCHECK return rowset; @@ -4429,7 +4431,7 @@ TEST(MetaServiceTest, GetTabletStatsTest) { stats_tablet_data_size_key({mock_instance, table_id, index_id, partition_id, tablet_id}, &data_size_key); ASSERT_EQ(txn->get(data_size_key, &data_size_val), TxnErrorCode::TXN_OK); - EXPECT_EQ(*(int64_t*)data_size_val.data(), 20000); + EXPECT_EQ(*(int64_t*)data_size_val.data(), 22000); std::string num_rows_key, num_rows_val; stats_tablet_num_rows_key({mock_instance, table_id, index_id, partition_id, tablet_id}, &num_rows_key); @@ -4450,7 +4452,7 @@ TEST(MetaServiceTest, GetTabletStatsTest) { get_tablet_stats(meta_service.get(), table_id, index_id, partition_id, tablet_id, res); ASSERT_EQ(res.status().code(), MetaServiceCode::OK); ASSERT_EQ(res.tablet_stats_size(), 1); - EXPECT_EQ(res.tablet_stats(0).data_size(), 40000); + EXPECT_EQ(res.tablet_stats(0).data_size(), 44000); EXPECT_EQ(res.tablet_stats(0).num_rows(), 400); EXPECT_EQ(res.tablet_stats(0).num_rowsets(), 5); EXPECT_EQ(res.tablet_stats(0).num_segments(), 4); diff --git a/cloud/test/schema_kv_test.cpp b/cloud/test/schema_kv_test.cpp index 69ee9aba442209..07f658175c806f 100644 --- a/cloud/test/schema_kv_test.cpp +++ b/cloud/test/schema_kv_test.cpp @@ -293,6 +293,8 @@ static doris::RowsetMetaCloudPB create_rowset(int64_t txn_id, int64_t tablet_id, rowset.set_num_rows(100); rowset.set_num_segments(1); rowset.set_data_disk_size(10000); + rowset.set_index_disk_size(1000); + rowset.set_total_disk_size(11000); if (version > 0) { rowset.set_start_version(version); 
rowset.set_end_version(version); @@ -478,7 +480,7 @@ TEST(DetachSchemaKVTest, RowsetTest) { EXPECT_EQ(get_rowset_res.stats().num_rows(), 100); EXPECT_EQ(get_rowset_res.stats().num_rowsets(), 2); EXPECT_EQ(get_rowset_res.stats().num_segments(), 1); - EXPECT_EQ(get_rowset_res.stats().data_size(), 10000); + EXPECT_EQ(get_rowset_res.stats().data_size(), 11000); } // new MS read rowsets committed by both old and new MS @@ -527,7 +529,7 @@ TEST(DetachSchemaKVTest, RowsetTest) { EXPECT_EQ(get_rowset_res->stats().num_rows(), 2500); EXPECT_EQ(get_rowset_res->stats().num_rowsets(), 26); EXPECT_EQ(get_rowset_res->stats().num_segments(), 25); - EXPECT_EQ(get_rowset_res->stats().data_size(), 250000); + EXPECT_EQ(get_rowset_res->stats().data_size(), 275000); if (schema != nullptr) { auto schema_version = get_rowset_res->rowset_meta(10).schema_version(); get_rowset_res->mutable_rowset_meta(10)->mutable_tablet_schema()->set_schema_version(3); diff --git a/common/cpp/sync_point.h b/common/cpp/sync_point.h index f26e64fe7c3575..0378918f62753e 100644 --- a/common/cpp/sync_point.h +++ b/common/cpp/sync_point.h @@ -205,7 +205,7 @@ auto try_any_cast_ret(std::vector& any) { // TEST_SYNC_POINT is no op in release build. // Turn on this feature by defining the macro -#ifndef BE_TEST +#if !defined(BE_TEST) && !defined(ENABLE_INJECTION_POINT) # define TEST_SYNC_POINT(x) # define TEST_IDX_SYNC_POINT(x, index) # define TEST_SYNC_POINT_CALLBACK(x, ...) diff --git a/conf/be.conf b/conf/be.conf index fc23e21839507b..5ad5e07176d545 100644 --- a/conf/be.conf +++ b/conf/be.conf @@ -31,7 +31,7 @@ JAVA_OPTS_FOR_JDK_17="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile # https://jemalloc.net/jemalloc.3.html -JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:false,lg_prof_interval:-1" +JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:true,prof_active:false,lg_prof_interval:-1" JEMALLOC_PROF_PRFIX="jemalloc_heap_profile_" # ports for admin, web, heartbeat service diff --git a/docker/runtime/README.md b/docker/runtime/README.md index 71818b0823c630..fc79dd56d47b6c 100644 --- a/docker/runtime/README.md +++ b/docker/runtime/README.md @@ -42,33 +42,38 @@ your workspace should like this: 1. Go to the doris [official website](https://doris.apache.org/download) to download the binary package you need(pay attention to selecting the doris version and architecture you need), and extract archive binary -Here we take the build of x64 (avx2) and arm64 platforms of 2.1.5 version as an example. +Here we take the build of x64 (avx2) and arm64 platforms of 3.0.2 version as an example. ```shell -$ wget https://apache-doris-releases.oss-accelerate.aliyuncs.com/apache-doris-2.1.5-bin-x64.tar.gz && tar -zxvf apache-doris-2.1.5-bin-x64.tar.gz +$ wget https://apache-doris-releases.oss-accelerate.aliyuncs.com/apache-doris-3.0.2-bin-x64.tar.gz && tar -zxvf apache-doris-3.0.2-bin-x64.tar.gz # or -$ wget https://apache-doris-releases.oss-accelerate.aliyuncs.com/apache-doris-2.1.5-bin-arm64.tar.gz && tar -zxvf apache-doris-2.1.5-bin-arm64.tar.gz +$ wget https://apache-doris-releases.oss-accelerate.aliyuncs.com/apache-doris-3.0.2-bin-arm64.tar.gz && tar -zxvf apache-doris-3.0.2-bin-arm64.tar.gz ``` 2. 
You need to copy the corresponding directories to different directories in sequence as below in the table: +() amd64(avx2) platform: | doris type | doris package | docker file path | |:----------:|:--------------------------------------------------------:|:--------------------------------------------------------------------------------------:| -| fe | apache-doris-2.1.5-bin-x64/fe | runtime/fe/resource/amd64/apache-doris-2.1.5-bin-x64/fe | -| be | apache-doris-2.1.5-bin-x64/be | runtime/be/resource/amd64/apache-doris-2.1.5-bin-x64/be | -| ms | apache-doris-2.1.5-bin-x64/ms | runtime/ms/resource/amd64/apache-doris-2.1.5-bin-x64/ms | -| broker | apache-doris-2.1.5-bin-x64/extensions/apache_hdfs_broker | runtime/broker/resource/amd64/apache-doris-2.1.5-bin-x64/extensions/apache_hdfs_broker | +| fe | apache-doris-3.0.2-bin-x64/fe | runtime/fe/resource/amd64/apache-doris-3.0.2-bin-x64/fe | +| be | apache-doris-3.0.2-bin-x64/be | runtime/be/resource/amd64/apache-doris-3.0.2-bin-x64/be | +| ms | apache-doris-3.0.2-bin-x64/ms | runtime/ms/resource/amd64/apache-doris-3.0.2-bin-x64/ms | +| broker | apache-doris-3.0.2-bin-x64/extensions/apache_hdfs_broker | runtime/broker/resource/amd64/apache-doris-3.0.2-bin-x64/extensions/apache_hdfs_broker | arm64 platform: | doris type | doris package | docker file path | |:----------:|:----------------------------------------------------------:|:----------------------------------------------------------------------------------------:| -| fe | apache-doris-2.1.5-bin-arm64/fe | runtime/fe/resource/arm64/apache-doris-2.1.5-bin-arm64/fe | -| be | apache-doris-2.1.5-bin-arm64/be | runtime/be/resource/arm64/apache-doris-2.1.5-bin-arm64/be | -| ms | apache-doris-2.1.5-bin-arm64/ms | runtime/ms/resource/arm64/apache-doris-2.1.5-bin-arm64/ms | -| broker | apache-doris-2.1.5-bin-arm64/extensions/apache_hdfs_broker | runtime/broker/resource/arm64/apache-doris-2.1.5-bin-arm64/extensions/apache_hdfs_broker | +| fe | apache-doris-3.0.2-bin-arm64/fe | runtime/fe/resource/arm64/apache-doris-3.0.2-bin-arm64/fe | +| be | apache-doris-3.0.2-bin-arm64/be | runtime/be/resource/arm64/apache-doris-3.0.2-bin-arm64/be | +| ms | apache-doris-3.0.2-bin-arm64/ms | runtime/ms/resource/arm64/apache-doris-3.0.2-bin-arm64/ms | +| broker | apache-doris-3.0.2-bin-arm64/extensions/apache_hdfs_broker | runtime/broker/resource/arm64/apache-doris-3.0.2-bin-arm64/extensions/apache_hdfs_broker | + +**NOTICE** + +Only after doris 3.0, the storage-computing separation mode requires the ms (meta-service) component. ### Build base image @@ -100,12 +105,12 @@ ARG DORIS_VERSION="x.x.x" as the following commands, Docker will automatically confirm the architecture ```shell -$ cd doris/runtime/fe && docker build . -t doris.fe:2.1.5 -f Dockerfile --build-arg DORIS_VERSION=2.1.5 -$ cd doris/runtime/be && docker build . -t doris.be:2.1.5 -f Dockerfile --build-arg DORIS_VERSION=2.1.5 -$ cd doris/runtime/ms && docker build . -t doris.ms:2.1.5 -f Dockerfile --build-arg DORIS_VERSION=2.1.5 -$ cd doris/runtime/broker && docker build . -t doris.broker:2.1.5 -f Dockerfile --build-arg DORIS_VERSION=2.1.5 +$ cd doris/runtime/fe && docker build . -t doris.fe:3.0.2 -f Dockerfile --build-arg DORIS_VERSION=3.0.2 +$ cd doris/runtime/be && docker build . -t doris.be:3.0.2 -f Dockerfile --build-arg DORIS_VERSION=3.0.2 +$ cd doris/runtime/ms && docker build . -t doris.ms:3.0.2 -f Dockerfile --build-arg DORIS_VERSION=3.0.2 +$ cd doris/runtime/broker && docker build . 
-t doris.broker:3.0.2 -f Dockerfile --build-arg DORIS_VERSION=3.0.2 ``` ### Latest update time -2024-8-12 +2024-10-28 diff --git a/docker/runtime/doris-compose/command.py b/docker/runtime/doris-compose/command.py index 469437c28b2da6..1e55e74b8a6cb3 100644 --- a/docker/runtime/doris-compose/command.py +++ b/docker/runtime/doris-compose/command.py @@ -211,6 +211,7 @@ def run(self, args): LOG.info( utils.render_green("{} succ, total related node num {}".format( show_cmd, related_node_num))) + return "" if for_all: related_nodes = cluster.get_all_nodes() @@ -1255,6 +1256,20 @@ def run(self, args): return self._handle_data(header, rows) +class AddRWPermCommand(Command): + + def add_parser(self, args_parsers): + parser = args_parsers.add_parser( + "add-rw-perm", + help="Add read and write permissions to the cluster files") + parser.add_argument("NAME", help="Specify cluster name.") + self._add_parser_common_args(parser) + + def run(self, args): + utils.enable_dir_with_rw_perm(CLUSTER.get_cluster_path(args.NAME)) + return "" + + ALL_COMMANDS = [ UpCommand("up"), DownCommand("down"), @@ -1266,4 +1281,5 @@ def run(self, args): GetCloudIniCommand("get-cloud-ini"), GenConfCommand("config"), ListCommand("ls"), + AddRWPermCommand("add-rw-perm"), ] diff --git a/docker/runtime/doris-compose/utils.py b/docker/runtime/doris-compose/utils.py index bfe71dbbbfb135..4332ae6cf48d03 100644 --- a/docker/runtime/doris-compose/utils.py +++ b/docker/runtime/doris-compose/utils.py @@ -17,7 +17,7 @@ import contextlib import docker -import json +import jsonpickle import logging import os import pwd @@ -321,7 +321,7 @@ def write_compose_file(file, compose): def pretty_json(json_data): - return json.dumps(json_data, indent=4, sort_keys=True) + return jsonpickle.dumps(json_data, indent=4) def is_true(val): diff --git a/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/BaseJdbcExecutor.java b/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/BaseJdbcExecutor.java index 61382e6c2532f3..03e5ca1fa7c5d4 100644 --- a/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/BaseJdbcExecutor.java +++ b/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/BaseJdbcExecutor.java @@ -41,7 +41,6 @@ import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.Date; -import java.sql.Driver; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; @@ -52,7 +51,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.Properties; import java.util.function.Function; public abstract class BaseJdbcExecutor implements JdbcExecutor { @@ -94,8 +92,7 @@ public BaseJdbcExecutor(byte[] thriftParams) throws Exception { .setConnectionPoolMaxSize(request.connection_pool_max_size) .setConnectionPoolMaxWaitTime(request.connection_pool_max_wait_time) .setConnectionPoolMaxLifeTime(request.connection_pool_max_life_time) - .setConnectionPoolKeepAlive(request.connection_pool_keep_alive) - .setEnableConnectionPool(request.enable_connection_pool); + .setConnectionPoolKeepAlive(request.connection_pool_keep_alive); JdbcDataSource.getDataSource().setCleanupInterval(request.connection_pool_cache_clear_time); System.setProperty("com.zaxxer.hikari.useWeakReferences", "true"); init(config, request.statement); @@ -120,12 +117,10 @@ public void close() throws Exception { } } finally { closeResources(resultSet, stmt, conn); - if (config.isEnableConnectionPool()) { - if 
(config.getConnectionPoolMinSize() == 0 && hikariDataSource != null) { - hikariDataSource.close(); - JdbcDataSource.getDataSource().getSourcesMap().remove(config.createCacheKey()); - hikariDataSource = null; - } + if (config.getConnectionPoolMinSize() == 0 && hikariDataSource != null) { + hikariDataSource.close(); + JdbcDataSource.getDataSource().getSourcesMap().remove(config.createCacheKey()); + hikariDataSource = null; } } } @@ -147,12 +142,10 @@ protected void abortReadConnection(Connection connection, ResultSet resultSet) } public void cleanDataSource() { - if (config.isEnableConnectionPool()) { - if (hikariDataSource != null) { - hikariDataSource.close(); - JdbcDataSource.getDataSource().getSourcesMap().remove(config.createCacheKey()); - hikariDataSource = null; - } + if (hikariDataSource != null) { + hikariDataSource.close(); + JdbcDataSource.getDataSource().getSourcesMap().remove(config.createCacheKey()); + hikariDataSource = null; } } @@ -294,64 +287,51 @@ public boolean hasNext() throws JdbcExecutorException { private void init(JdbcDataSourceConfig config, String sql) throws JdbcExecutorException { ClassLoader oldClassLoader = Thread.currentThread().getContextClassLoader(); + String hikariDataSourceKey = config.createCacheKey(); try { ClassLoader parent = getClass().getClassLoader(); ClassLoader classLoader = UdfUtils.getClassLoader(config.getJdbcDriverUrl(), parent); Thread.currentThread().setContextClassLoader(classLoader); - if (config.isEnableConnectionPool()) { - String hikariDataSourceKey = config.createCacheKey(); - hikariDataSource = JdbcDataSource.getDataSource().getSource(hikariDataSourceKey); - if (hikariDataSource == null) { - synchronized (hikariDataSourceLock) { - hikariDataSource = JdbcDataSource.getDataSource().getSource(hikariDataSourceKey); - if (hikariDataSource == null) { - long start = System.currentTimeMillis(); - HikariDataSource ds = new HikariDataSource(); - ds.setDriverClassName(config.getJdbcDriverClass()); - ds.setJdbcUrl(SecurityChecker.getInstance().getSafeJdbcUrl(config.getJdbcUrl())); - ds.setUsername(config.getJdbcUser()); - ds.setPassword(config.getJdbcPassword()); - ds.setMinimumIdle(config.getConnectionPoolMinSize()); // default 1 - ds.setMaximumPoolSize(config.getConnectionPoolMaxSize()); // default 10 - ds.setConnectionTimeout(config.getConnectionPoolMaxWaitTime()); // default 5000 - ds.setMaxLifetime(config.getConnectionPoolMaxLifeTime()); // default 30 min - ds.setIdleTimeout(config.getConnectionPoolMaxLifeTime() / 2L); // default 15 min - setValidationQuery(ds); - if (config.isConnectionPoolKeepAlive()) { - ds.setKeepaliveTime(config.getConnectionPoolMaxLifeTime() / 5L); // default 6 min - } - hikariDataSource = ds; - JdbcDataSource.getDataSource().putSource(hikariDataSourceKey, hikariDataSource); - LOG.info("JdbcClient set" - + " ConnectionPoolMinSize = " + config.getConnectionPoolMinSize() - + ", ConnectionPoolMaxSize = " + config.getConnectionPoolMaxSize() - + ", ConnectionPoolMaxWaitTime = " + config.getConnectionPoolMaxWaitTime() - + ", ConnectionPoolMaxLifeTime = " + config.getConnectionPoolMaxLifeTime() - + ", ConnectionPoolKeepAlive = " + config.isConnectionPoolKeepAlive()); - LOG.info("init datasource [" + (config.getJdbcUrl() + config.getJdbcUser()) + "] cost: " + ( - System.currentTimeMillis() - start) + " ms"); + hikariDataSource = JdbcDataSource.getDataSource().getSource(hikariDataSourceKey); + if (hikariDataSource == null) { + synchronized (hikariDataSourceLock) { + hikariDataSource = 
JdbcDataSource.getDataSource().getSource(hikariDataSourceKey); + if (hikariDataSource == null) { + long start = System.currentTimeMillis(); + HikariDataSource ds = new HikariDataSource(); + ds.setDriverClassName(config.getJdbcDriverClass()); + ds.setJdbcUrl(SecurityChecker.getInstance().getSafeJdbcUrl(config.getJdbcUrl())); + ds.setUsername(config.getJdbcUser()); + ds.setPassword(config.getJdbcPassword()); + ds.setMinimumIdle(config.getConnectionPoolMinSize()); // default 1 + ds.setMaximumPoolSize(config.getConnectionPoolMaxSize()); // default 10 + ds.setConnectionTimeout(config.getConnectionPoolMaxWaitTime()); // default 5000 + ds.setMaxLifetime(config.getConnectionPoolMaxLifeTime()); // default 30 min + ds.setIdleTimeout(config.getConnectionPoolMaxLifeTime() / 2L); // default 15 min + setValidationQuery(ds); + if (config.isConnectionPoolKeepAlive()) { + ds.setKeepaliveTime(config.getConnectionPoolMaxLifeTime() / 5L); // default 6 min } + hikariDataSource = ds; + JdbcDataSource.getDataSource().putSource(hikariDataSourceKey, hikariDataSource); + LOG.info("JdbcClient set" + + " ConnectionPoolMinSize = " + config.getConnectionPoolMinSize() + + ", ConnectionPoolMaxSize = " + config.getConnectionPoolMaxSize() + + ", ConnectionPoolMaxWaitTime = " + config.getConnectionPoolMaxWaitTime() + + ", ConnectionPoolMaxLifeTime = " + config.getConnectionPoolMaxLifeTime() + + ", ConnectionPoolKeepAlive = " + config.isConnectionPoolKeepAlive()); + LOG.info("init datasource [" + (config.getJdbcUrl() + config.getJdbcUser()) + "] cost: " + ( + System.currentTimeMillis() - start) + " ms"); } } - conn = hikariDataSource.getConnection(); - } else { - Class driverClass = Class.forName(config.getJdbcDriverClass(), true, classLoader); - Driver driverInstance = (Driver) driverClass.getDeclaredConstructor().newInstance(); - - Properties info = new Properties(); - info.put("user", config.getJdbcUser()); - info.put("password", config.getJdbcPassword()); - - conn = driverInstance.connect(SecurityChecker.getInstance().getSafeJdbcUrl(config.getJdbcUrl()), info); - if (conn == null) { - throw new SQLException("Failed to establish a connection. The JDBC driver returned null. " - + "Please check if the JDBC URL is correct: " - + config.getJdbcUrl() - + ". 
Ensure that the URL format and parameters are valid for the driver: " - + driverInstance.getClass().getName()); - } } + long start = System.currentTimeMillis(); + conn = hikariDataSource.getConnection(); + LOG.info("get connection [" + (config.getJdbcUrl() + config.getJdbcUser()) + "] cost: " + ( + System.currentTimeMillis() - start) + + " ms"); + initializeStatement(conn, config, sql); } catch (MalformedURLException e) { diff --git a/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/JdbcDataSourceConfig.java b/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/JdbcDataSourceConfig.java index 30e94ddd37f49d..a99377add2532d 100644 --- a/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/JdbcDataSourceConfig.java +++ b/fe/be-java-extensions/jdbc-scanner/src/main/java/org/apache/doris/jdbc/JdbcDataSourceConfig.java @@ -35,7 +35,6 @@ public class JdbcDataSourceConfig { private int connectionPoolMaxWaitTime = 5000; private int connectionPoolMaxLifeTime = 1800000; private boolean connectionPoolKeepAlive = false; - private boolean enableConnectionPool = false; public String createCacheKey() { return catalogId + jdbcUrl + jdbcUser + jdbcPassword + jdbcDriverUrl + jdbcDriverClass @@ -168,13 +167,4 @@ public JdbcDataSourceConfig setConnectionPoolKeepAlive(boolean connectionPoolKee this.connectionPoolKeepAlive = connectionPoolKeepAlive; return this; } - - public boolean isEnableConnectionPool() { - return enableConnectionPool; - } - - public JdbcDataSourceConfig setEnableConnectionPool(boolean enableConnectionPool) { - this.enableConnectionPool = enableConnectionPool; - return this; - } } diff --git a/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonJniScanner.java b/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonJniScanner.java index 4ef40d9fa1a6be..f229134e9d8319 100644 --- a/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonJniScanner.java +++ b/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonJniScanner.java @@ -135,7 +135,7 @@ private int[] getProjected() { } private List getPredicates() { - List predicates = PaimonScannerUtils.decodeStringToObject(paimonPredicate); + List predicates = PaimonUtils.deserialize(paimonPredicate); if (LOG.isDebugEnabled()) { LOG.debug("predicates:{}", predicates); } @@ -143,7 +143,7 @@ private List getPredicates() { } private Split getSplit() { - Split split = PaimonScannerUtils.decodeStringToObject(paimonSplit); + Split split = PaimonUtils.deserialize(paimonSplit); if (LOG.isDebugEnabled()) { LOG.debug("split:{}", split); } @@ -224,7 +224,7 @@ private void initTable() { tableExt = PaimonTableCache.getTable(key); } this.table = tableExt.getTable(); - paimonAllFieldNames = PaimonScannerUtils.fieldNames(this.table.rowType()); + paimonAllFieldNames = PaimonUtils.getFieldNames(this.table.rowType()); if (LOG.isDebugEnabled()) { LOG.debug("paimonAllFieldNames:{}", paimonAllFieldNames); } diff --git a/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonScannerUtils.java b/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonUtils.java similarity index 72% rename from fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonScannerUtils.java rename to fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonUtils.java index 9b1a493874e998..44ffb298c98c33 100644 --- 
a/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonScannerUtils.java +++ b/fe/be-java-extensions/paimon-scanner/src/main/java/org/apache/doris/paimon/PaimonUtils.java @@ -25,22 +25,23 @@ import java.util.List; import java.util.stream.Collectors; -public class PaimonScannerUtils { - private static final Base64.Decoder BASE64_DECODER = Base64.getUrlDecoder(); +public class PaimonUtils { + private static final Base64.Decoder DECODER = Base64.getUrlDecoder(); - public static <T> T decodeStringToObject(String encodedStr) { - final byte[] bytes = BASE64_DECODER.decode(encodedStr.getBytes(java.nio.charset.StandardCharsets.UTF_8)); - try { - return InstantiationUtil.deserializeObject(bytes, PaimonScannerUtils.class.getClassLoader()); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - public static List<String> fieldNames(RowType rowType) { + public static List<String> getFieldNames(RowType rowType) { return rowType.getFields().stream() .map(DataField::name) .map(String::toLowerCase) .collect(Collectors.toList()); } + + public static <T> T deserialize(String encodedStr) { + try { + return InstantiationUtil.deserializeObject( + DECODER.decode(encodedStr.getBytes(java.nio.charset.StandardCharsets.UTF_8)), + PaimonUtils.class.getClassLoader()); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } } diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java index 97bcb39403a251..dd0aca5923e74a 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java @@ -1565,12 +1565,29 @@ public class Config extends ConfigBase { */ @ConfField(mutable = true, masterOnly = true) public static String multi_partition_name_prefix = "p_"; + /** * Control the max num of backup/restore job per db */ @ConfField(mutable = true, masterOnly = true) public static int max_backup_restore_job_num_per_db = 10; + /** + * An internal config to reduce the restore job size during serialization by compressing it. + * + * WARNING: Once this option is enabled and a restore is performed, the FE version cannot be rolled back. + */ + @ConfField(mutable = false) + public static boolean restore_job_compressed_serialization = false; + + /** + * An internal config to reduce the backup job size during serialization by compressing it. + * + * WARNING: Once this option is enabled and a backup is performed, the FE version cannot be rolled back. + */ + @ConfField(mutable = false) + public static boolean backup_job_compressed_serialization = false; + /** * Control the max num of tablets per backup job involved. */ @@ -2016,6 +2033,12 @@ public class Config extends ConfigBase { @ConfField(mutable = true, masterOnly = true) public static long max_backend_heartbeat_failure_tolerance_count = 1; + /** + * Even if a backend is healthy, still write a heartbeat editlog to update the backend's lastUpdateMs in the bdb image. + */ + @ConfField(mutable = true, masterOnly = true) + public static int editlog_healthy_heartbeat_seconds = 300; + /** * Abort transaction time after lost heartbeat. * The default value is 300s, which means transactions of be will be aborted after lost heartbeat 300s. 
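The restore_job_compressed_serialization / backup_job_compressed_serialization flags added above gate a gzip-compressed on-disk format for backup/restore jobs. The helpers they rely on, GsonUtils.toJsonCompressed and GsonUtils.fromJsonCompressed, are called later in this diff (AbstractJob.write, BackupJob.read, RestoreJob.read) but their bodies are not included here. The following is only a minimal sketch of what such gzip-over-JSON helpers could look like; the class name and the plain int length prefix are assumptions made for illustration (the real code stores the bytes in a Text field), not the actual implementation.

```java
import com.google.gson.Gson;
import java.io.*;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

// Sketch only: illustrates the gzip-over-JSON layout the readFields() paths above expect.
public final class GsonCompressedSketch {
    private static final Gson GSON = new Gson();

    // Serialize obj as JSON, gzip it, and write it as a length-prefixed byte block.
    public static void toJsonCompressed(DataOutput out, Object obj) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (Writer writer = new OutputStreamWriter(new GZIPOutputStream(buffer), "UTF-8")) {
            GSON.toJson(obj, writer); // closing the writer finishes the gzip stream
        }
        byte[] compressed = buffer.toByteArray();
        out.writeInt(compressed.length); // simplified framing for the sketch
        out.write(compressed);
    }

    // Mirror of the read path in BackupJob/RestoreJob.readFields: read bytes, gunzip, parse JSON.
    public static <T> T fromJsonCompressed(DataInput in, Class<T> clazz) throws IOException {
        int len = in.readInt();
        byte[] compressed = new byte[len];
        in.readFully(compressed);
        try (Reader reader = new InputStreamReader(
                new GZIPInputStream(new ByteArrayInputStream(compressed)), "UTF-8")) {
            return GSON.fromJson(reader, clazz);
        }
    }
}
```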
@@ -2709,6 +2732,13 @@ public class Config extends ConfigBase { }) public static int restore_download_task_num_per_be = 3; + @ConfField(mutable = true, masterOnly = true, description = { + "备份恢复过程中,单次 RPC 分配给每个be的任务最大个数,默认值为10000个。", + "The max number of batched tasks per RPC assigned to each be during the backup/restore process, " + + "the default value is 10000." + }) + public static int backup_restore_batch_task_num_per_rpc = 10000; + @ConfField(description = {"是否开启通过http接口获取log文件的功能", "Whether to enable the function of getting log files through http interface"}) public static boolean enable_get_log_file_api = false; diff --git a/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisLexer.g4 b/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisLexer.g4 index 47a45b67aa7b36..8ce8d033108367 100644 --- a/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisLexer.g4 +++ b/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisLexer.g4 @@ -429,6 +429,7 @@ QUANTILE_STATE: 'QUANTILE_STATE'; QUANTILE_UNION: 'QUANTILE_UNION'; QUERY: 'QUERY'; QUOTA: 'QUOTA'; +QUALIFY: 'QUALIFY'; RANDOM: 'RANDOM'; RANGE: 'RANGE'; READ: 'READ'; diff --git a/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 b/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 index 2dc34aae0470ab..acd139c010e968 100644 --- a/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 +++ b/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 @@ -53,12 +53,12 @@ statementBase | supportedJobStatement #supportedJobStatementAlias | constraintStatement #constraintStatementAlias | supportedDropStatement #supportedDropStatementAlias + | supportedSetStatement #supportedSetStatementAlias | unsupportedStatement #unsupported ; unsupportedStatement - : unsupportedSetStatement - | unsupoortedUnsetStatement + : unsupoortedUnsetStatement | unsupportedUseStatement | unsupportedDmlStatement | unsupportedKillStatement @@ -798,7 +798,7 @@ functionArgument | dataType ; -unsupportedSetStatement +supportedSetStatement : SET (optionWithType | optionWithoutType) (COMMA (optionWithType | optionWithoutType))* #setOptions | SET identifier AS DEFAULT STORAGE VAULT #setDefaultStorageVault @@ -811,7 +811,7 @@ unsupportedSetStatement ; optionWithType - : (GLOBAL | LOCAL | SESSION) identifier EQ (expression | DEFAULT) + : (GLOBAL | LOCAL | SESSION) identifier EQ (expression | DEFAULT) #setVariableWithType ; optionWithoutType @@ -820,7 +820,7 @@ optionWithoutType | NAMES (charsetName=identifierOrText | DEFAULT) (COLLATE collateName=identifierOrText | DEFAULT)? #setCollate | PASSWORD (FOR userIdentify)? EQ (STRING_LITERAL - | (PASSWORD LEFT_PAREN STRING_LITERAL RIGHT_PAREN)) #setPassword + | (isPlain=PASSWORD LEFT_PAREN STRING_LITERAL RIGHT_PAREN)) #setPassword | LDAP_ADMIN_PASSWORD EQ (STRING_LITERAL | (PASSWORD LEFT_PAREN STRING_LITERAL RIGHT_PAREN)) #setLdapAdminPassword | variable #setVariableWithoutType @@ -1117,6 +1117,7 @@ querySpecification whereClause? aggClause? havingClause? + qualifyClause? {doris_legacy_SQL_syntax}? queryOrganization #regularQuerySpecification ; @@ -1203,6 +1204,10 @@ havingClause : HAVING booleanExpression ; +qualifyClause + : QUALIFY booleanExpression + ; + selectHint: hintStatements+=hintStatement (COMMA? 
hintStatements+=hintStatement)* HINT_END; hintStatement @@ -2018,6 +2023,7 @@ nonReserved | QUANTILE_UNION | QUERY | QUOTA + | QUALIFY | RANDOM | RECENT | RECOVER diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java index 54e82c7cc092c0..d02e91a379f560 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java @@ -983,7 +983,7 @@ public void processDropMaterializedView(DropMaterializedViewStmt dropMaterialize // Step3: log drop mv operation EditLog editLog = Env.getCurrentEnv().getEditLog(); editLog.logDropRollup( - new DropInfo(db.getId(), olapTable.getId(), olapTable.getName(), mvIndexId, false, 0)); + new DropInfo(db.getId(), olapTable.getId(), olapTable.getName(), mvIndexId, false, false, 0)); deleteIndexList.add(mvIndexId); LOG.info("finished drop materialized view [{}] in table [{}]", mvName, olapTable.getName()); } catch (MetaNotFoundException e) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java index 3d3c0761f5b1c3..c8754aa5d626b0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java @@ -667,14 +667,16 @@ protected void runRunningJob() throws AlterCancelException { } pruneMeta(); - this.jobState = JobState.FINISHED; - this.finishedTimeMs = System.currentTimeMillis(); - Env.getCurrentEnv().getEditLog().logAlterJob(this); LOG.info("schema change job finished: {}", jobId); changeTableState(dbId, tableId, OlapTableState.NORMAL); LOG.info("set table's state to NORMAL, table id: {}, job id: {}", tableId, jobId); + + this.jobState = JobState.FINISHED; + this.finishedTimeMs = System.currentTimeMillis(); + Env.getCurrentEnv().getEditLog().logAlterJob(this); + postProcessOriginIndex(); // Drop table column stats after schema change finished. 
Env.getCurrentEnv().getAnalysisManager().dropStats(tbl, null); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java index e70fbd71117cde..14680f54b1dac6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/NativeInsertStmt.java @@ -167,7 +167,6 @@ public class NativeInsertStmt extends InsertStmt { boolean hasEmptyTargetColumns = false; private boolean allowAutoPartition = true; - private boolean withAutoDetectOverwrite = false; enum InsertType { NATIVE_INSERT("insert_"), @@ -333,11 +332,6 @@ public boolean isTransactionBegin() { return isTransactionBegin; } - public NativeInsertStmt withAutoDetectOverwrite() { - this.withAutoDetectOverwrite = true; - return this; - } - protected void preCheckAnalyze(Analyzer analyzer) throws UserException { super.analyze(analyzer); diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/AbstractJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/AbstractJob.java index 77ec0ae26f4a07..3a00c974f9a2d4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/AbstractJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/AbstractJob.java @@ -18,6 +18,7 @@ package org.apache.doris.backup; import org.apache.doris.catalog.Env; +import org.apache.doris.common.Config; import org.apache.doris.common.FeMetaVersion; import org.apache.doris.common.Pair; import org.apache.doris.common.io.Text; @@ -41,9 +42,10 @@ * 2. isDone() method is used to check whether we can submit the next job. */ public abstract class AbstractJob implements Writable { + public static final String COMPRESSED_JOB_ID = "COMPRESSED"; public enum JobType { - BACKUP, RESTORE + BACKUP, RESTORE, BACKUP_COMPRESSED, RESTORE_COMPRESSED } @SerializedName("t") @@ -174,10 +176,10 @@ public static AbstractJob read(DataInput in) throws IOException { if (Env.getCurrentEnvJournalVersion() < FeMetaVersion.VERSION_136) { AbstractJob job = null; JobType type = JobType.valueOf(Text.readString(in)); - if (type == JobType.BACKUP) { - job = new BackupJob(); - } else if (type == JobType.RESTORE) { - job = new RestoreJob(); + if (type == JobType.BACKUP || type == JobType.BACKUP_COMPRESSED) { + job = new BackupJob(type); + } else if (type == JobType.RESTORE || type == JobType.RESTORE_COMPRESSED) { + job = new RestoreJob(type); } else { throw new IOException("Unknown job type: " + type.name()); } @@ -186,7 +188,12 @@ public static AbstractJob read(DataInput in) throws IOException { job.readFields(in); return job; } else { - return GsonUtils.GSON.fromJson(Text.readString(in), AbstractJob.class); + String json = Text.readString(in); + if (COMPRESSED_JOB_ID.equals(json)) { + return GsonUtils.fromJsonCompressed(in, AbstractJob.class); + } else { + return GsonUtils.GSON.fromJson(json, AbstractJob.class); + } } } @@ -203,7 +210,13 @@ public void write(DataOutput out) throws IOException { count++; } - Text.writeString(out, GsonUtils.GSON.toJson(this)); + if ((type == JobType.BACKUP && Config.backup_job_compressed_serialization) + || (type == JobType.RESTORE && Config.restore_job_compressed_serialization)) { + Text.writeString(out, COMPRESSED_JOB_ID); + GsonUtils.toJsonCompressed(out, this); + } else { + Text.writeString(out, GsonUtils.GSON.toJson(this)); + } } @Deprecated diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java 
b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java index 1ba9fb129b2a1f..dbc7bb08fd4df6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java @@ -63,7 +63,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import java.io.ByteArrayInputStream; import java.io.DataInput; +import java.io.DataInputStream; import java.io.File; import java.io.IOException; import java.nio.file.FileVisitOption; @@ -75,6 +77,7 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import java.util.zip.GZIPInputStream; public class BackupJob extends AbstractJob { @@ -134,6 +137,11 @@ public BackupJob() { super(JobType.BACKUP); } + public BackupJob(JobType jobType) { + super(jobType); + assert jobType == JobType.BACKUP || jobType == JobType.BACKUP_COMPRESSED; + } + public BackupJob(String label, long dbId, String dbName, List tableRefs, long timeoutMs, BackupContent content, Env env, long repoId) { super(JobType.BACKUP, label, dbId, dbName, timeoutMs, env, repoId); @@ -208,11 +216,10 @@ private synchronized boolean tryNewTabletSnapshotTask(SnapshotTask task) { task.getIndexId(), task.getTabletId(), task.getVersion(), task.getSchemaHash(), timeoutMs, false /* not restore task */); - AgentBatchTask batchTask = new AgentBatchTask(); - batchTask.addTask(newTask); unfinishedTaskIds.put(tablet.getId(), replica.getBackendIdWithoutException()); //send task + AgentBatchTask batchTask = new AgentBatchTask(newTask); AgentTaskQueue.addTask(newTask); AgentTaskExecutor.submit(batchTask); @@ -474,7 +481,7 @@ private void prepareAndSendSnapshotTask() { // copy all related schema at this moment List copiedTables = Lists.newArrayList(); List copiedResources = Lists.newArrayList(); - AgentBatchTask batchTask = new AgentBatchTask(); + AgentBatchTask batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); for (TableRef tableRef : tableRefs) { String tblName = tableRef.getName().getTbl(); Table tbl = db.getTableNullable(tblName); @@ -729,7 +736,7 @@ private void uploadSnapshot() { beToSnapshots.put(info.getBeId(), info); } - AgentBatchTask batchTask = new AgentBatchTask(); + AgentBatchTask batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); for (Long beId : beToSnapshots.keySet()) { List infos = beToSnapshots.get(beId); int totalNum = infos.size(); @@ -892,7 +899,7 @@ private void releaseSnapshots() { } // we do not care about the release snapshot tasks' success or failure, // the GC thread on BE will sweep the snapshot, finally. 
- AgentBatchTask batchTask = new AgentBatchTask(); + AgentBatchTask batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); for (SnapshotInfo info : snapshotInfos.values()) { ReleaseSnapshotTask releaseTask = new ReleaseSnapshotTask(null, info.getBeId(), info.getDbId(), info.getTabletId(), info.getPath()); @@ -1051,13 +1058,35 @@ public static BackupJob read(DataInput in) throws IOException { job.readFields(in); return job; } else { - return GsonUtils.GSON.fromJson(Text.readString(in), BackupJob.class); + String json = Text.readString(in); + if (AbstractJob.COMPRESSED_JOB_ID.equals(json)) { + return GsonUtils.fromJsonCompressed(in, BackupJob.class); + } else { + return GsonUtils.GSON.fromJson(json, BackupJob.class); + } } } public void readFields(DataInput in) throws IOException { super.readFields(in); + if (type == JobType.BACKUP_COMPRESSED) { + type = JobType.BACKUP; + + Text text = new Text(); + text.readFields(in); + + ByteArrayInputStream byteStream = new ByteArrayInputStream(text.getBytes()); + try (GZIPInputStream gzipStream = new GZIPInputStream(byteStream)) { + try (DataInputStream stream = new DataInputStream(gzipStream)) { + readOthers(stream); + } + } + } else { + readOthers(in); + } + } + public void readOthers(DataInput in) throws IOException { // table refs int size = in.readInt(); tableRefs = Lists.newArrayList(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java index 57171afe285d18..b60c548976774c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java @@ -104,7 +104,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import java.io.ByteArrayInputStream; import java.io.DataInput; +import java.io.DataInputStream; import java.io.IOException; import java.util.HashMap; import java.util.List; @@ -112,6 +114,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.zip.GZIPInputStream; public class RestoreJob extends AbstractJob implements GsonPostProcessable { private static final String PROP_RESERVE_REPLICA = RestoreStmt.PROP_RESERVE_REPLICA; @@ -216,6 +219,10 @@ public RestoreJob() { super(JobType.RESTORE); } + public RestoreJob(JobType jobType) { + super(jobType); + } + public RestoreJob(String label, String backupTs, long dbId, String dbName, BackupJobInfo jobInfo, boolean allowLoad, ReplicaAllocation replicaAlloc, long timeoutMs, int metaVersion, boolean reserveReplica, boolean reserveDynamicPartitionEnable, boolean isBeingSynced, boolean isCleanTables, @@ -894,7 +901,7 @@ private void checkAndPrepareMeta() { AgentBatchTask batchTask = batchTaskPerTable.get(localTbl.getId()); if (batchTask == null) { - batchTask = new AgentBatchTask(); + batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); batchTaskPerTable.put(localTbl.getId(), batchTask); } createReplicas(db, batchTask, localTbl, restorePart); @@ -910,7 +917,7 @@ private void checkAndPrepareMeta() { for (Partition restorePart : restoreOlapTable.getPartitions()) { AgentBatchTask batchTask = batchTaskPerTable.get(restoreTbl.getId()); if (batchTask == null) { - batchTask = new AgentBatchTask(); + batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); batchTaskPerTable.put(restoreTbl.getId(), batchTask); } createReplicas(db, batchTask, restoreOlapTable, restorePart, 
tabletBases); @@ -1167,7 +1174,7 @@ private void prepareAndSendSnapshotTaskForOlapTable(Database db) { taskProgress.clear(); taskErrMsg.clear(); Multimap bePathsMap = HashMultimap.create(); - AgentBatchTask batchTask = new AgentBatchTask(); + AgentBatchTask batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); db.readLock(); try { for (Map.Entry entry : fileMapping.getMapping().entrySet()) { @@ -1652,7 +1659,7 @@ private void downloadRemoteSnapshots() { unfinishedSignatureToId.clear(); taskProgress.clear(); taskErrMsg.clear(); - AgentBatchTask batchTask = new AgentBatchTask(); + AgentBatchTask batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); for (long dbId : dbToSnapshotInfos.keySet()) { List infos = dbToSnapshotInfos.get(dbId); @@ -1812,7 +1819,7 @@ private void downloadLocalSnapshots() { unfinishedSignatureToId.clear(); taskProgress.clear(); taskErrMsg.clear(); - AgentBatchTask batchTask = new AgentBatchTask(); + AgentBatchTask batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); for (long dbId : dbToSnapshotInfos.keySet()) { List infos = dbToSnapshotInfos.get(dbId); @@ -1992,7 +1999,7 @@ private void commit() { unfinishedSignatureToId.clear(); taskProgress.clear(); taskErrMsg.clear(); - AgentBatchTask batchTask = new AgentBatchTask(); + AgentBatchTask batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); // tablet id->(be id -> download info) for (Cell cell : snapshotInfos.cellSet()) { SnapshotInfo info = cell.getValue(); @@ -2134,13 +2141,15 @@ private Status dropAllNonRestoredTableAndPartitions(Database db) { } else if (isCleanTables) { // otherwise drop the entire table. LOG.info("drop non restored table {}, table id: {}. {}", tableName, tableId, this); + boolean isView = false; boolean isForceDrop = false; // move this table into recyclebin. - env.getInternalCatalog().dropTableWithoutCheck(db, table, isForceDrop); + env.getInternalCatalog().dropTableWithoutCheck(db, table, isView, isForceDrop); } } else if (tableType == TableType.VIEW && isCleanTables && !restoredViews.contains(tableName)) { LOG.info("drop non restored view {}, table id: {}. {}", tableName, tableId, this); + boolean isView = false; boolean isForceDrop = false; // move this view into recyclebin. - env.getInternalCatalog().dropTableWithoutCheck(db, table, isForceDrop); + env.getInternalCatalog().dropTableWithoutCheck(db, table, isView, isForceDrop); } } return Status.OK; @@ -2182,7 +2191,7 @@ private void releaseSnapshots() { } // we do not care about the release snapshot tasks' success or failure, // the GC thread on BE will sweep the snapshot, finally. 
- AgentBatchTask batchTask = new AgentBatchTask(); + AgentBatchTask batchTask = new AgentBatchTask(Config.backup_restore_batch_task_num_per_rpc); for (SnapshotInfo info : snapshotInfos.values()) { ReleaseSnapshotTask releaseTask = new ReleaseSnapshotTask(null, info.getBeId(), info.getDbId(), info.getTabletId(), info.getPath()); @@ -2522,7 +2531,12 @@ public static RestoreJob read(DataInput in) throws IOException { job.readFields(in); return job; } else { - return GsonUtils.GSON.fromJson(Text.readString(in), RestoreJob.class); + String json = Text.readString(in); + if (AbstractJob.COMPRESSED_JOB_ID.equals(json)) { + return GsonUtils.fromJsonCompressed(in, RestoreJob.class); + } else { + return GsonUtils.GSON.fromJson(json, RestoreJob.class); + } } } @@ -2530,7 +2544,27 @@ public static RestoreJob read(DataInput in) throws IOException { @Override public void readFields(DataInput in) throws IOException { super.readFields(in); + if (type == JobType.RESTORE_COMPRESSED) { + type = JobType.RESTORE; + + Text text = new Text(); + text.readFields(in); + if (LOG.isDebugEnabled() || text.getLength() > (100 << 20)) { + LOG.info("read restore job compressed size {}", text.getLength()); + } + + ByteArrayInputStream bytesStream = new ByteArrayInputStream(text.getBytes()); + try (GZIPInputStream gzipStream = new GZIPInputStream(bytesStream)) { + try (DataInputStream stream = new DataInputStream(gzipStream)) { + readOthers(stream); + } + } + } else { + readOthers(in); + } + } + private void readOthers(DataInput in) throws IOException { backupTimestamp = Text.readString(in); jobInfo = BackupJobInfo.read(in); allowLoad = in.readBoolean(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/binlog/DropTableRecord.java b/fe/fe-core/src/main/java/org/apache/doris/binlog/DropTableRecord.java index 4417edeb97372d..c998f2e73fee42 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/binlog/DropTableRecord.java +++ b/fe/fe-core/src/main/java/org/apache/doris/binlog/DropTableRecord.java @@ -31,6 +31,8 @@ public class DropTableRecord { private long tableId; @SerializedName(value = "tableName") private String tableName; + @SerializedName(value = "isView") + private boolean isView = false; @SerializedName(value = "rawSql") private String rawSql; @@ -39,7 +41,10 @@ public DropTableRecord(long commitSeq, DropInfo info) { this.dbId = info.getDbId(); this.tableId = info.getTableId(); this.tableName = info.getTableName(); - this.rawSql = String.format("DROP TABLE IF EXISTS `%s`", this.tableName); + this.isView = info.isView(); + this.rawSql = info.isView() + ? 
String.format("DROP VIEW IF EXISTS `%s`", this.tableName) + : String.format("DROP TABLE IF EXISTS `%s`", this.tableName); } public long getCommitSeq() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinScalarFunctions.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinScalarFunctions.java index 8dea4eeb8d2365..ed3f2895cc86ee 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinScalarFunctions.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/BuiltinScalarFunctions.java @@ -139,6 +139,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.CurrentTime; import org.apache.doris.nereids.trees.expressions.functions.scalar.CurrentUser; import org.apache.doris.nereids.trees.expressions.functions.scalar.CutIpv6; +import org.apache.doris.nereids.trees.expressions.functions.scalar.CutToFirstSignificantSubdomain; import org.apache.doris.nereids.trees.expressions.functions.scalar.Database; import org.apache.doris.nereids.trees.expressions.functions.scalar.Date; import org.apache.doris.nereids.trees.expressions.functions.scalar.DateDiff; @@ -180,6 +181,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.ExtractUrlParameter; import org.apache.doris.nereids.trees.expressions.functions.scalar.Field; import org.apache.doris.nereids.trees.expressions.functions.scalar.FindInSet; +import org.apache.doris.nereids.trees.expressions.functions.scalar.FirstSignificantSubdomain; import org.apache.doris.nereids.trees.expressions.functions.scalar.Floor; import org.apache.doris.nereids.trees.expressions.functions.scalar.Fmod; import org.apache.doris.nereids.trees.expressions.functions.scalar.Fpow; @@ -440,6 +442,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.ToMonday; import org.apache.doris.nereids.trees.expressions.functions.scalar.ToQuantileState; import org.apache.doris.nereids.trees.expressions.functions.scalar.Tokenize; +import org.apache.doris.nereids.trees.expressions.functions.scalar.TopLevelDomain; import org.apache.doris.nereids.trees.expressions.functions.scalar.Translate; import org.apache.doris.nereids.trees.expressions.functions.scalar.Trim; import org.apache.doris.nereids.trees.expressions.functions.scalar.TrimIn; @@ -606,6 +609,7 @@ public class BuiltinScalarFunctions implements FunctionHelper { scalar(CurrentTime.class, "curtime", "current_time"), scalar(CurrentUser.class, "current_user"), scalar(CutIpv6.class, "cut_ipv6"), + scalar(CutToFirstSignificantSubdomain.class, "cut_to_first_significant_subdomain"), scalar(Database.class, "database", "schema"), scalar(Date.class, "date"), scalar(DateDiff.class, "datediff"), @@ -647,6 +651,7 @@ public class BuiltinScalarFunctions implements FunctionHelper { scalar(ExtractUrlParameter.class, "extract_url_parameter"), scalar(Field.class, "field"), scalar(FindInSet.class, "find_in_set"), + scalar(FirstSignificantSubdomain.class, "first_significant_subdomain"), scalar(Floor.class, "floor"), scalar(Fmod.class, "fmod"), scalar(Fpow.class, "fpow"), @@ -926,6 +931,7 @@ public class BuiltinScalarFunctions implements FunctionHelper { scalar(ToIso8601.class, "to_iso8601"), scalar(Tokenize.class, "tokenize"), scalar(ToMonday.class, "to_monday"), + scalar(TopLevelDomain.class, "top_level_domain"), scalar(ToQuantileState.class, "to_quantile_state"), scalar(Translate.class, "translate"), scalar(Trim.class, "trim"), diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java 
index 53499079eb17e0..916a213027f2f6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java @@ -455,6 +455,20 @@ public boolean isSkipBitmapColumn() { && nameEquals(SKIP_BITMAP_COL, true); } + // now we only support BloomFilter on (same behavior with BE): + // smallint/int/bigint/largeint + // string/varchar/char/variant + // date/datetime/datev2/datetimev2 + // decimal/decimal32/decimal64/decimal128I/decimal256 + // ipv4/ipv6 + public boolean isSupportBloomFilter() { + PrimitiveType pType = getDataType(); + return (pType == PrimitiveType.SMALLINT || pType == PrimitiveType.INT + || pType == PrimitiveType.BIGINT || pType == PrimitiveType.LARGEINT) + || pType.isCharFamily() || pType.isDateType() || pType.isVariantType() + || pType.isDecimalV2Type() || pType.isDecimalV3Type() || pType.isIPType(); + } + public PrimitiveType getDataType() { return type.getPrimitiveType(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java index ef23713a5010b4..cd0c0e80d8f27e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java @@ -254,7 +254,6 @@ import org.apache.doris.resource.workloadschedpolicy.WorkloadSchedPolicyMgr; import org.apache.doris.resource.workloadschedpolicy.WorkloadSchedPolicyPublisher; import org.apache.doris.scheduler.manager.TransientTaskManager; -import org.apache.doris.scheduler.registry.ExportTaskRegister; import org.apache.doris.service.ExecuteEnv; import org.apache.doris.service.FrontendOptions; import org.apache.doris.statistics.AnalysisManager; @@ -395,7 +394,6 @@ public class Env { private ExternalMetaIdMgr externalMetaIdMgr; private MetastoreEventsProcessor metastoreEventsProcessor; - private ExportTaskRegister exportTaskRegister; private JobManager, ?> jobManager; private LabelProcessor labelProcessor; private TransientTaskManager transientTaskManager; @@ -709,7 +707,6 @@ public Env(boolean isCheckpointCatalog) { this.jobManager = new JobManager<>(); this.labelProcessor = new LabelProcessor(); this.transientTaskManager = new TransientTaskManager(); - this.exportTaskRegister = new ExportTaskRegister(transientTaskManager); this.replayedJournalId = new AtomicLong(0L); this.stmtIdCounter = new AtomicLong(0L); @@ -4425,11 +4422,6 @@ public SyncJobManager getSyncJobManager() { return this.syncJobManager; } - - public ExportTaskRegister getExportTaskRegister() { - return exportTaskRegister; - } - public JobManager getJobManager() { return jobManager; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java index b7db351f49a4d0..28d58b35297ac3 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcResource.java @@ -108,7 +108,6 @@ public class JdbcResource extends Resource { public static final String CHECK_SUM = "checksum"; public static final String CREATE_TIME = "create_time"; public static final String TEST_CONNECTION = "test_connection"; - public static final String ENABLE_CONNECTION_POOL = "enable_connection_pool"; private static final ImmutableList ALL_PROPERTIES = new ImmutableList.Builder().add( JDBC_URL, @@ -129,8 +128,7 @@ public class JdbcResource extends Resource { CONNECTION_POOL_MAX_WAIT_TIME, CONNECTION_POOL_KEEP_ALIVE, TEST_CONNECTION, - 
ExternalCatalog.USE_META_CACHE, - ENABLE_CONNECTION_POOL + ExternalCatalog.USE_META_CACHE ).build(); // The default value of optional properties @@ -151,7 +149,6 @@ public class JdbcResource extends Resource { OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(TEST_CONNECTION, "true"); OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(ExternalCatalog.USE_META_CACHE, String.valueOf(ExternalCatalog.DEFAULT_USE_META_CACHE)); - OPTIONAL_PROPERTIES_DEFAULT_VALUE.put(ENABLE_CONNECTION_POOL, "false"); } // timeout for both connection and read. 10 seconds is long enough. diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java index 7c3678c8ed8c6c..6dce40a2684fbc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/JdbcTable.java @@ -97,7 +97,6 @@ public class JdbcTable extends Table { @SerializedName("cid") private long catalogId = -1; - private boolean enableConnectionPool; private int connectionPoolMinSize; private int connectionPoolMaxSize; private int connectionPoolMaxWaitTime; @@ -191,11 +190,6 @@ public long getCatalogId() { return catalogId; } - public boolean isEnableConnectionPool() { - return Boolean.parseBoolean(getFromJdbcResourceOrDefault(JdbcResource.ENABLE_CONNECTION_POOL, - String.valueOf(enableConnectionPool))); - } - public int getConnectionPoolMinSize() { return Integer.parseInt(getFromJdbcResourceOrDefault(JdbcResource.CONNECTION_POOL_MIN_SIZE, String.valueOf(connectionPoolMinSize))); @@ -244,7 +238,6 @@ public TTableDescriptor toThrift() { tJdbcTable.setJdbcDriverUrl(getDriverUrl()); tJdbcTable.setJdbcResourceName(resourceName); tJdbcTable.setJdbcDriverChecksum(checkSum); - tJdbcTable.setEnableConnectionPool(isEnableConnectionPool()); tJdbcTable.setConnectionPoolMinSize(getConnectionPoolMinSize()); tJdbcTable.setConnectionPoolMaxSize(getConnectionPoolMaxSize()); tJdbcTable.setConnectionPoolMaxWaitTime(getConnectionPoolMaxWaitTime()); @@ -401,7 +394,6 @@ private void validate(Map properties) throws DdlException { driverClass = jdbcResource.getProperty(DRIVER_CLASS); driverUrl = jdbcResource.getProperty(DRIVER_URL); checkSum = jdbcResource.getProperty(CHECK_SUM); - enableConnectionPool = Boolean.parseBoolean(jdbcResource.getProperty(JdbcResource.ENABLE_CONNECTION_POOL)); connectionPoolMinSize = Integer.parseInt(jdbcResource.getProperty(JdbcResource.CONNECTION_POOL_MIN_SIZE)); connectionPoolMaxSize = Integer.parseInt(jdbcResource.getProperty(JdbcResource.CONNECTION_POOL_MAX_SIZE)); connectionPoolMaxWaitTime = Integer.parseInt( diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java index 11867bcfb960a6..df9310526e4757 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java @@ -229,7 +229,7 @@ public static void setDefaultVaultToShowVaultResult(List> rows, Str } int vaultIdIndex = IntStream.range(0, columns.size()) - .filter(i -> columns.get(i).getName().equals("StorageVaultId")) + .filter(i -> columns.get(i).getName().equals("Id")) .findFirst() .orElse(-1); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java index 71aa71842abeea..762acc7bed28d0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java @@ -176,29 +176,33 @@ public void alterStorageVault(StorageVaultType type, Map propert @VisibleForTesting public void setDefaultStorageVault(SetDefaultStorageVaultStmt stmt) throws DdlException { + setDefaultStorageVault(stmt.getStorageVaultName()); + } + + public void setDefaultStorageVault(String vaultName) throws DdlException { Cloud.AlterObjStoreInfoRequest.Builder builder = Cloud.AlterObjStoreInfoRequest.newBuilder(); Cloud.StorageVaultPB.Builder vaultBuilder = Cloud.StorageVaultPB.newBuilder(); - vaultBuilder.setName(stmt.getStorageVaultName()); + vaultBuilder.setName(vaultName); builder.setVault(vaultBuilder.build()); builder.setOp(Operation.SET_DEFAULT_VAULT); String vaultId; - LOG.info("try to set vault {} as default vault", stmt.getStorageVaultName()); + LOG.info("try to set vault {} as default vault", vaultName); try { Cloud.AlterObjStoreInfoResponse resp = MetaServiceProxy.getInstance().alterStorageVault(builder.build()); if (resp.getStatus().getCode() != Cloud.MetaServiceCode.OK) { LOG.warn("failed to set default storage vault response: {}, vault name {}", - resp, stmt.getStorageVaultName()); + resp, vaultName); throw new DdlException(resp.getStatus().getMsg()); } vaultId = resp.getStorageVaultId(); } catch (RpcException e) { LOG.warn("failed to set default storage vault due to RpcException: {}, vault name {}", - e, stmt.getStorageVaultName()); + e, vaultName); throw new DdlException(e.getMessage()); } - LOG.info("succeed to set {} as default vault, vault id {}", stmt.getStorageVaultName(), vaultId); - setDefaultStorageVault(Pair.of(stmt.getStorageVaultName(), vaultId)); + LOG.info("succeed to set {} as default vault, vault id {}", vaultName, vaultId); + setDefaultStorageVault(Pair.of(vaultName, vaultId)); } public void unsetDefaultStorageVault() throws DdlException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudPartition.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudPartition.java index b2a9751394f2d8..a075680e47643b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudPartition.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudPartition.java @@ -253,19 +253,15 @@ public static List getSnapshotVisibleVersion(List dbIds, List LOG.debug("get version from meta service, partitions: {}, versions: {}", partitionIds, versions); } - if (isEmptyPartitionPruneDisabled()) { - ArrayList news = new ArrayList<>(); - for (Long v : versions) { - news.add(v == -1 ? 1 : v); - } - return news; - } - if (versionUpdateTimesMs != null) { versionUpdateTimesMs.addAll(resp.getVersionUpdateTimeMsList()); } - return versions; + ArrayList news = new ArrayList<>(); + for (Long v : versions) { + news.add(v == -1 ? 
Partition.PARTITION_INIT_VERSION : v); + } + return news; } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudReplica.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudReplica.java index ff786236cbdfc8..5bf73e448b6bce 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudReplica.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudReplica.java @@ -214,7 +214,7 @@ private String getCurrentClusterId() throws ComputeGroupException { ((CloudEnv) Env.getCurrentEnv()).checkCloudClusterPriv(cluster); } catch (Exception e) { LOG.warn("get compute group by session context exception"); - throw new ComputeGroupException(String.format("default compute group %s check auth failed", + throw new ComputeGroupException(String.format("session context compute group %s check auth failed", cluster), ComputeGroupException.FailedTypeEnum.CURRENT_USER_NO_AUTH_TO_USE_DEFAULT_COMPUTE_GROUP); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/qe/ComputeGroupException.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/qe/ComputeGroupException.java index 3260619c16978a..cdc7a1307f7b99 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/qe/ComputeGroupException.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/qe/ComputeGroupException.java @@ -34,6 +34,7 @@ public enum FailedTypeEnum { CONNECT_CONTEXT_NOT_SET_COMPUTE_GROUP, CURRENT_USER_NO_AUTH_TO_USE_ANY_COMPUTE_GROUP, CURRENT_USER_NO_AUTH_TO_USE_DEFAULT_COMPUTE_GROUP, + CURRENT_USER_NO_AUTH_TO_USE_COMPUTE_GROUP, CURRENT_COMPUTE_GROUP_NO_BE, COMPUTE_GROUPS_NO_ALIVE_BE, CURRENT_COMPUTE_GROUP_NOT_EXIST, @@ -59,6 +60,8 @@ public enum FailedTypeEnum { helpInfos.put(FailedTypeEnum.CURRENT_USER_NO_AUTH_TO_USE_ANY_COMPUTE_GROUP, " contact the system administrator " + "and request that they grant you the appropriate compute group permissions, " + "use SQL `GRANT USAGE_PRIV ON COMPUTE GROUP {compute_group_name} TO {user}`"); + helpInfos.put(FailedTypeEnum.CURRENT_USER_NO_AUTH_TO_USE_COMPUTE_GROUP, + "use SQL `GRANT USAGE_PRIV ON COMPUTE GROUP {compute_group_name} TO {user}`"); helpInfos.put(FailedTypeEnum.CURRENT_USER_NO_AUTH_TO_USE_DEFAULT_COMPUTE_GROUP, " contact the system administrator " + "and request that they grant you the default compute group permissions, " diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgr.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgr.java index 8be2f70aa5e922..13d5afe5dbbebe 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/transaction/CloudGlobalTransactionMgr.java @@ -273,7 +273,8 @@ public long beginTransaction(long dbId, List tableIdList, String label, TU throw new DuplicatedRequestException(DebugUtil.printId(requestId), beginTxnResponse.getDupTxnId(), beginTxnResponse.getStatus().getMsg()); case TXN_LABEL_ALREADY_USED: - throw new LabelAlreadyUsedException(beginTxnResponse.getStatus().getMsg(), false); + throw new LabelAlreadyUsedException(beginTxnResponse.getStatus().getMsg(), false, + beginTxnResponse.getTxnStatus()); default: if (MetricRepo.isInit) { MetricRepo.COUNTER_TXN_REJECT.increase(1L); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/LabelAlreadyUsedException.java b/fe/fe-core/src/main/java/org/apache/doris/common/LabelAlreadyUsedException.java index 8c508809d59bf3..f1789881cdcf76 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/common/LabelAlreadyUsedException.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/LabelAlreadyUsedException.java @@ -17,6 +17,7 @@ package org.apache.doris.common; +import org.apache.doris.cloud.proto.Cloud.TxnStatusPB; import org.apache.doris.transaction.TransactionState; import com.google.common.base.Preconditions; @@ -37,6 +38,26 @@ public LabelAlreadyUsedException(String msg, boolean isLabel) { super(isLabel ? "Label [" + msg + "] has already been used." : msg); } + public LabelAlreadyUsedException(String msg, boolean isLabel, TxnStatusPB txnStatus) { + super(isLabel ? "Label [" + msg + "] has already been used." : msg); + switch (txnStatus) { + case TXN_STATUS_UNKNOWN: + case TXN_STATUS_PREPARED: + jobStatus = "RUNNING"; + break; + case TXN_STATUS_PRECOMMITTED: + jobStatus = "PRECOMMITTED"; + break; + case TXN_STATUS_COMMITTED: + case TXN_STATUS_VISIBLE: + jobStatus = "FINISHED"; + break; + default: + Preconditions.checkState(false, txnStatus); + break; + } + } + public LabelAlreadyUsedException(TransactionState txn) { super("Label [" + txn.getLabel() + "] has already been used, relate to txn [" + txn.getTransactionId() + "], status [" + txn.getTransactionStatus() + "]."); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java index 238250ab37a398..b4384f8a7fa77a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java @@ -642,11 +642,8 @@ public static Set analyzeBloomFilterColumns(Map properti if (column.getName().equalsIgnoreCase(bfColumn)) { PrimitiveType type = column.getDataType(); - // tinyint/float/double columns don't support // key columns and none/replace aggregate non-key columns support - if (type == PrimitiveType.TINYINT || type == PrimitiveType.FLOAT - || type == PrimitiveType.DOUBLE || type == PrimitiveType.BOOLEAN - || type.isComplexType()) { + if (!column.isSupportBloomFilter()) { throw new AnalysisException(type + " is not supported in bloom filter index. 
" + "invalid column: " + bfColumn); } else if (keysType != KeysType.AGG_KEYS || column.isKey()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java index 3b8551da144cac..93a4c1b19e63fc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java @@ -695,7 +695,7 @@ public void registerExternalTableFromEvent(String dbName, String tableName, } } - public void unregisterExternalDatabase(String dbName, String catalogName, boolean ignoreIfNotExists) + public void unregisterExternalDatabase(String dbName, String catalogName) throws DdlException { CatalogIf catalog = nameToCatalog.get(catalogName); if (catalog == null) { @@ -704,17 +704,10 @@ public void unregisterExternalDatabase(String dbName, String catalogName, boolea if (!(catalog instanceof ExternalCatalog)) { throw new DdlException("Only support drop ExternalCatalog databases"); } - DatabaseIf db = catalog.getDbNullable(dbName); - if (db == null) { - if (!ignoreIfNotExists) { - throw new DdlException("Database " + dbName + " does not exist in catalog " + catalog.getName()); - } - return; - } ((HMSExternalCatalog) catalog).unregisterDatabase(dbName); } - public void registerExternalDatabaseFromEvent(String dbName, String catalogName, boolean ignoreIfExists) + public void registerExternalDatabaseFromEvent(String dbName, String catalogName) throws DdlException { CatalogIf catalog = nameToCatalog.get(catalogName); if (catalog == null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalRowCountCache.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalRowCountCache.java index 4602c594571f56..075091e682d722 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalRowCountCache.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalRowCountCache.java @@ -113,6 +113,7 @@ public long getCachedRowCount(long catalogId, long dbId, long tableId) { if (f.isDone()) { return f.get().orElse(-1L); } + LOG.info("Row count for table {}.{}.{} is still processing.", catalogId, dbId, tableId); } catch (Exception e) { LOG.warn("Unexpected exception while returning row count", e); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java index 60f52aa1f8f143..f8183028c6acd6 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java @@ -941,7 +941,7 @@ public void dropTable(DropTableStmt stmt) throws DdlException { } } - dropTableInternal(db, table, stmt.isForceDrop(), watch, costTimes); + dropTableInternal(db, table, stmt.isView(), stmt.isForceDrop(), watch, costTimes); } catch (UserException e) { throw new DdlException(e.getMessage(), e.getMysqlErrorCode()); } finally { @@ -949,18 +949,18 @@ public void dropTable(DropTableStmt stmt) throws DdlException { } watch.stop(); costTimes.put("6:total", watch.getTime()); - LOG.info("finished dropping table: {} from db: {}, is force: {} cost: {}", - tableName, dbName, stmt.isForceDrop(), costTimes); + LOG.info("finished dropping table: {} from db: {}, is view: {}, is force: {}, cost: {}", + tableName, dbName, stmt.isView(), stmt.isForceDrop(), costTimes); } // drop table without any check. 
- public void dropTableWithoutCheck(Database db, Table table, boolean forceDrop) throws DdlException { + public void dropTableWithoutCheck(Database db, Table table, boolean isView, boolean forceDrop) throws DdlException { if (!db.writeLockIfExist()) { return; } try { LOG.info("drop table {} without check, force: {}", table.getQualifiedName(), forceDrop); - dropTableInternal(db, table, forceDrop, null, null); + dropTableInternal(db, table, isView, forceDrop, null, null); } catch (Exception e) { LOG.warn("drop table without check", e); throw e; @@ -970,7 +970,7 @@ public void dropTableWithoutCheck(Database db, Table table, boolean forceDrop) t } // Drop a table, the db lock must hold. - private void dropTableInternal(Database db, Table table, boolean forceDrop, + private void dropTableInternal(Database db, Table table, boolean isView, boolean forceDrop, StopWatch watch, Map costTimes) throws DdlException { table.writeLock(); String tableName = table.getName(); @@ -1001,7 +1001,7 @@ private void dropTableInternal(Database db, Table table, boolean forceDrop, Env.getCurrentEnv().getQueryStats().clear(Env.getCurrentEnv().getCurrentCatalog().getId(), db.getId(), table.getId()); - DropInfo info = new DropInfo(db.getId(), table.getId(), tableName, -1L, forceDrop, recycleTime); + DropInfo info = new DropInfo(db.getId(), table.getId(), tableName, -1L, isView, forceDrop, recycleTime); Env.getCurrentEnv().getEditLog().logDropTable(info); Env.getCurrentEnv().getMtmvService().dropTable(table); } @@ -3229,7 +3229,7 @@ private boolean createOlapTable(Database db, CreateTableStmt stmt) throws UserEx try { dropTable(db, tableId, true, false, 0L); if (hadLogEditCreateTable) { - DropInfo info = new DropInfo(db.getId(), tableId, olapTable.getName(), -1L, true, 0L); + DropInfo info = new DropInfo(db.getId(), tableId, olapTable.getName(), -1L, false, true, 0L); Env.getCurrentEnv().getEditLog().logDropTable(info); } } catch (Exception ex) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java index 516448bdfbb9ec..f72421da8a1134 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java @@ -525,7 +525,7 @@ public long fetchRowCount() { long rowCount = getRowCountFromExternalSource(); // Only hive table supports estimate row count by listing file. if (rowCount == -1 && dlaType.equals(DLAType.HIVE)) { - LOG.debug("Will estimate row count from file list."); + LOG.info("Will estimate row count for table {} from file list.", name); rowCount = getRowCountFromFileList(); } return rowCount; @@ -837,14 +837,16 @@ private long getRowCountFromFileList() { return -1; } if (isView()) { + LOG.info("Table {} is view, return 0.", name); return 0; } HiveMetaStoreCache.HivePartitionValues partitionValues = getAllPartitionValues(); // Get files for all partitions. int samplePartitionSize = Config.hive_stats_partition_sample_size; - List filesByPartitions = getFilesForPartitions(partitionValues, - samplePartitionSize); + List filesByPartitions = + getFilesForPartitions(partitionValues, samplePartitionSize); + LOG.info("Number of files selected for hive table {} is {}", name, filesByPartitions.size()); long totalSize = 0; // Calculate the total file size. 
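The row-count estimation in the surrounding HMSExternalTable.getRowCountFromFileList() hunk reduces to a simple extrapolation: sum the file sizes of the sampled partitions, scale that size up to the full partition count, then divide by the estimated per-row size (returning 0 when the estimated row size is 0). A minimal, self-contained sketch of that arithmetic follows; the class, method, and parameter names are illustrative only and are not part of this patch.

```java
// Illustrative sketch of the extrapolation performed by getRowCountFromFileList();
// names here are hypothetical and this code is not part of the diff.
public final class RowCountEstimator {

    static long estimateRowCount(long sampledFileBytes, int sampledPartitions,
                                 int totalPartitions, long estimatedRowSizeBytes) {
        if (estimatedRowSizeBytes == 0) {
            return 0; // the patch logs a warning and returns 0 in this case
        }
        long totalBytes = sampledFileBytes;
        if (sampledPartitions != 0 && sampledPartitions < totalPartitions) {
            // scale the sampled file size up to all partitions
            totalBytes = totalBytes * totalPartitions / sampledPartitions;
        }
        return totalBytes / estimatedRowSizeBytes;
    }

    public static void main(String[] args) {
        // e.g. 1 GiB of files sampled from 30 of 300 partitions, roughly 100 bytes per row
        System.out.println(estimateRowCount(1L << 30, 30, 300, 100));
    }
}
```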
for (HiveMetaStoreCache.FileCacheValue files : filesByPartitions) { @@ -863,14 +865,20 @@ private long getRowCountFromFileList() { estimatedRowSize += column.getDataType().getSlotSize(); } if (estimatedRowSize == 0) { + LOG.warn("Table {} estimated size is 0, return 0.", name); return 0; } int totalPartitionSize = partitionValues == null ? 1 : partitionValues.getIdToPartitionItem().size(); if (samplePartitionSize != 0 && samplePartitionSize < totalPartitionSize) { + LOG.info("Table {} sampled {} of {} partitions, sampled size is {}", + name, samplePartitionSize, totalPartitionSize, totalSize); totalSize = totalSize * totalPartitionSize / samplePartitionSize; } - return totalSize / estimatedRowSize; + long rows = totalSize / estimatedRowSize; + LOG.info("Table {} rows {}, total size is {}, estimatedRowSize is {}", + name, rows, totalSize, estimatedRowSize); + return rows; } // Get all partition values from cache. @@ -888,6 +896,12 @@ private HiveMetaStoreCache.HivePartitionValues getAllPartitionValues() { // no need to worry that this call will invalid or refresh the cache. // because it has enough space to keep partition info of all tables in cache. partitionValues = cache.getPartitionValues(dbName, name, partitionColumnTypes); + if (partitionValues == null || partitionValues.getPartitionNameToIdMap() == null) { + LOG.warn("Partition values for hive table {} is null", name); + } else { + LOG.info("Partition values size for hive table {} is {}", + name, partitionValues.getPartitionNameToIdMap().size()); + } } return partitionValues; } @@ -923,6 +937,7 @@ private List getFilesForPartitions( // get partitions without cache, so that it will not invalid the cache when executing // non query request such as `show table status` hivePartitions = cache.getAllPartitionsWithoutCache(dbName, name, partitionValuesList); + LOG.info("Partition list size for hive partition table {} is {}", name, hivePartitions.size()); } else { hivePartitions.add(new HivePartition(dbName, name, true, getRemoteTable().getSd().getInputFormat(), @@ -930,6 +945,11 @@ private List getFilesForPartitions( } // Get files for all partitions. String bindBrokerName = catalog.bindBrokerName(); + if (LOG.isDebugEnabled()) { + for (HivePartition partition : hivePartitions) { + LOG.debug("Chosen partition for table {}. 
[{}]", name, partition.toString()); + } + } return cache.getFilesByPartitionsWithoutCache(hivePartitions, bindBrokerName); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AddPartitionEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AddPartitionEvent.java index ffc7b95ff59aad..e582c3d2662bfc 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AddPartitionEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AddPartitionEvent.java @@ -59,7 +59,7 @@ private AddPartitionEvent(NotificationEvent event, super(event, catalogName); Preconditions.checkArgument(getEventType().equals(MetastoreEventType.ADD_PARTITION)); Preconditions - .checkNotNull(event.getMessage(), debugString("Event message is null")); + .checkNotNull(event.getMessage(), getMsgWithEventInfo("Event message is null")); try { AddPartitionMessage addPartitionMessage = MetastoreEventsProcessor.getMessageDeserializer(event.getMessageFormat()) @@ -98,18 +98,18 @@ protected static List getEvents(NotificationEvent event, @Override protected void process() throws MetastoreNotificationException { try { - infoLog("catalogName:[{}],dbName:[{}],tableName:[{}],partitionNames:[{}]", catalogName, dbName, tblName, + logInfo("catalogName:[{}],dbName:[{}],tableName:[{}],partitionNames:[{}]", catalogName, dbName, tblName, partitionNames.toString()); // bail out early if there are not partitions to process if (partitionNames.isEmpty()) { - infoLog("Partition list is empty. Ignoring this event."); + logInfo("Partition list is empty. Ignoring this event."); return; } Env.getCurrentEnv().getCatalogMgr() .addExternalPartitions(catalogName, dbName, hmsTbl.getTableName(), partitionNames, eventTime, true); } catch (DdlException e) { throw new MetastoreNotificationException( - debugString("Failed to process event"), e); + getMsgWithEventInfo("Failed to process event"), e); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterDatabaseEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterDatabaseEvent.java index ef828025e7f719..8f2932600585e2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterDatabaseEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterDatabaseEvent.java @@ -67,7 +67,7 @@ private AlterDatabaseEvent(NotificationEvent event, dbNameAfter = dbAfter.getName(); } catch (Exception e) { throw new MetastoreNotificationException( - debugString("Unable to parse the alter database message"), e); + getMsgWithEventInfo("Unable to parse the alter database message"), e); } // this is a rename event if either dbName of before and after object changed isRename = !dbBefore.getName().equalsIgnoreCase(dbAfter.getName()); @@ -82,13 +82,13 @@ private void processRename() throws DdlException { throw new DdlException("Only support ExternalCatalog Databases"); } if (catalog.getDbNullable(dbAfter.getName()) != null) { - infoLog("AlterExternalDatabase canceled, because dbAfter has exist, " + logInfo("AlterExternalDatabase canceled, because dbAfter has exist, " + "catalogName:[{}],dbName:[{}]", catalogName, dbAfter.getName()); return; } - Env.getCurrentEnv().getCatalogMgr().unregisterExternalDatabase(dbBefore.getName(), catalogName, true); - Env.getCurrentEnv().getCatalogMgr().registerExternalDatabaseFromEvent(dbAfter.getName(), catalogName, true); + Env.getCurrentEnv().getCatalogMgr().unregisterExternalDatabase(dbBefore.getName(), 
catalogName); + Env.getCurrentEnv().getCatalogMgr().registerExternalDatabaseFromEvent(dbAfter.getName(), catalogName); } @@ -113,10 +113,10 @@ protected void process() throws MetastoreNotificationException { return; } // only can change properties,we do nothing - infoLog("catalogName:[{}],dbName:[{}]", catalogName, dbName); + logInfo("catalogName:[{}],dbName:[{}]", catalogName, dbName); } catch (Exception e) { throw new MetastoreNotificationException( - debugString("Failed to process event"), e); + getMsgWithEventInfo("Failed to process event"), e); } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterPartitionEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterPartitionEvent.java index 569d9878d7afaa..d9898f68d982f7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterPartitionEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterPartitionEvent.java @@ -65,7 +65,7 @@ private AlterPartitionEvent(NotificationEvent event, super(event, catalogName); Preconditions.checkArgument(getEventType().equals(MetastoreEventType.ALTER_PARTITION)); Preconditions - .checkNotNull(event.getMessage(), debugString("Event message is null")); + .checkNotNull(event.getMessage(), getMsgWithEventInfo("Event message is null")); try { AlterPartitionMessage alterPartitionMessage = MetastoreEventsProcessor.getMessageDeserializer(event.getMessageFormat()) @@ -109,7 +109,7 @@ protected static List getEvents(NotificationEvent event, @Override protected void process() throws MetastoreNotificationException { try { - infoLog("catalogName:[{}],dbName:[{}],tableName:[{}],partitionNameBefore:[{}],partitionNameAfter:[{}]", + logInfo("catalogName:[{}],dbName:[{}],tableName:[{}],partitionNameBefore:[{}],partitionNameAfter:[{}]", catalogName, dbName, tblName, partitionNameBefore, partitionNameAfter); if (isRename) { Env.getCurrentEnv().getCatalogMgr() @@ -125,7 +125,7 @@ protected void process() throws MetastoreNotificationException { } } catch (DdlException e) { throw new MetastoreNotificationException( - debugString("Failed to process event"), e); + getMsgWithEventInfo("Failed to process event"), e); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterTableEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterTableEvent.java index 2283646b64adfa..43999fac8ce392 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterTableEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/AlterTableEvent.java @@ -59,7 +59,7 @@ private AlterTableEvent(NotificationEvent event, String catalogName) { super(event, catalogName); Preconditions.checkArgument(MetastoreEventType.ALTER_TABLE.equals(getEventType())); Preconditions - .checkNotNull(event.getMessage(), debugString("Event message is null")); + .checkNotNull(event.getMessage(), getMsgWithEventInfo("Event message is null")); try { JSONAlterTableMessage alterTableMessage = (JSONAlterTableMessage) MetastoreEventsProcessor.getMessageDeserializer(event.getMessageFormat()) @@ -70,7 +70,7 @@ private AlterTableEvent(NotificationEvent event, String catalogName) { tblNameAfter = tableAfter.getTableName(); } catch (Exception e) { throw new MetastoreNotificationException( - debugString("Unable to parse the alter table message"), e); + getMsgWithEventInfo("Unable to parse the alter table message"), e); } // this is a rename event if either dbName or tblName of before and 
after object changed isRename = !tableBefore.getDbName().equalsIgnoreCase(tableAfter.getDbName()) @@ -111,7 +111,7 @@ private void processRename() throws DdlException { boolean hasExist = Env.getCurrentEnv().getCatalogMgr() .externalTableExistInLocal(tableAfter.getDbName(), tableAfter.getTableName(), catalogName); if (hasExist) { - infoLog("AlterExternalTable canceled,because tableAfter has exist, " + logInfo("AlterExternalTable canceled,because tableAfter has exist, " + "catalogName:[{}],dbName:[{}],tableName:[{}]", catalogName, dbName, tableAfter.getTableName()); return; @@ -143,7 +143,7 @@ public String getTblNameAfter() { @Override protected void process() throws MetastoreNotificationException { try { - infoLog("catalogName:[{}],dbName:[{}],tableBefore:[{}],tableAfter:[{}]", catalogName, dbName, + logInfo("catalogName:[{}],dbName:[{}],tableBefore:[{}],tableAfter:[{}]", catalogName, dbName, tableBefore.getTableName(), tableAfter.getTableName()); if (isRename) { processRename(); @@ -161,7 +161,7 @@ protected void process() throws MetastoreNotificationException { eventTime); } catch (Exception e) { throw new MetastoreNotificationException( - debugString("Failed to process event"), e); + getMsgWithEventInfo("Failed to process event"), e); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/CreateDatabaseEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/CreateDatabaseEvent.java index 2dd4c5671bcee7..2d81377f4b6749 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/CreateDatabaseEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/CreateDatabaseEvent.java @@ -54,11 +54,11 @@ protected static List getEvents(NotificationEvent event, @Override protected void process() throws MetastoreNotificationException { try { - infoLog("catalogName:[{}],dbName:[{}]", catalogName, dbName); - Env.getCurrentEnv().getCatalogMgr().registerExternalDatabaseFromEvent(dbName, catalogName, true); + logInfo("catalogName:[{}],dbName:[{}]", catalogName, dbName); + Env.getCurrentEnv().getCatalogMgr().registerExternalDatabaseFromEvent(dbName, catalogName); } catch (DdlException e) { throw new MetastoreNotificationException( - debugString("Failed to process event"), e); + getMsgWithEventInfo("Failed to process event"), e); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/CreateTableEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/CreateTableEvent.java index e6c3e2e7eae9d0..8a22a479ad7e84 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/CreateTableEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/CreateTableEvent.java @@ -48,7 +48,7 @@ private CreateTableEvent(NotificationEvent event, String catalogName) throws Met super(event, catalogName); Preconditions.checkArgument(MetastoreEventType.CREATE_TABLE.equals(getEventType())); Preconditions - .checkNotNull(event.getMessage(), debugString("Event message is null")); + .checkNotNull(event.getMessage(), getMsgWithEventInfo("Event message is null")); try { CreateTableMessage createTableMessage = MetastoreEventsProcessor.getMessageDeserializer(event.getMessageFormat()) @@ -57,7 +57,7 @@ private CreateTableEvent(NotificationEvent event, String catalogName) throws Met hmsTbl.setTableName(hmsTbl.getTableName().toLowerCase(Locale.ROOT)); } catch (Exception e) { throw new MetastoreNotificationException( - debugString("Unable to deserialize the event message"), 
e); + getMsgWithEventInfo("Unable to deserialize the event message"), e); } } @@ -78,12 +78,12 @@ protected boolean willChangeTableName() { @Override protected void process() throws MetastoreNotificationException { try { - infoLog("catalogName:[{}],dbName:[{}],tableName:[{}]", catalogName, dbName, tblName); + logInfo("catalogName:[{}],dbName:[{}],tableName:[{}]", catalogName, dbName, tblName); Env.getCurrentEnv().getCatalogMgr() .registerExternalTableFromEvent(dbName, hmsTbl.getTableName(), catalogName, eventTime, true); } catch (DdlException e) { throw new MetastoreNotificationException( - debugString("Failed to process event"), e); + getMsgWithEventInfo("Failed to process event"), e); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropDatabaseEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropDatabaseEvent.java index 107ce591a42b03..6ab089232b98c1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropDatabaseEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropDatabaseEvent.java @@ -53,12 +53,12 @@ protected static List getEvents(NotificationEvent event, @Override protected void process() throws MetastoreNotificationException { try { - infoLog("catalogName:[{}],dbName:[{}]", catalogName, dbName); + logInfo("catalogName:[{}],dbName:[{}]", catalogName, dbName); Env.getCurrentEnv().getCatalogMgr() - .unregisterExternalDatabase(dbName, catalogName, true); + .unregisterExternalDatabase(dbName, catalogName); } catch (DdlException e) { throw new MetastoreNotificationException( - debugString("Failed to process event"), e); + getMsgWithEventInfo("Failed to process event"), e); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropPartitionEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropPartitionEvent.java index dd443010289126..737ad8f28b9051 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropPartitionEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropPartitionEvent.java @@ -57,7 +57,7 @@ private DropPartitionEvent(NotificationEvent event, super(event, catalogName); Preconditions.checkArgument(getEventType().equals(MetastoreEventType.DROP_PARTITION)); Preconditions - .checkNotNull(event.getMessage(), debugString("Event message is null")); + .checkNotNull(event.getMessage(), getMsgWithEventInfo("Event message is null")); try { DropPartitionMessage dropPartitionMessage = MetastoreEventsProcessor.getMessageDeserializer(event.getMessageFormat()) @@ -97,11 +97,11 @@ protected static List getEvents(NotificationEvent event, @Override protected void process() throws MetastoreNotificationException { try { - infoLog("catalogName:[{}],dbName:[{}],tableName:[{}],partitionNames:[{}]", catalogName, dbName, tblName, + logInfo("catalogName:[{}],dbName:[{}],tableName:[{}],partitionNames:[{}]", catalogName, dbName, tblName, partitionNames.toString()); // bail out early if there are not partitions to process if (partitionNames.isEmpty()) { - infoLog("Partition list is empty. Ignoring this event."); + logInfo("Partition list is empty. 
Ignoring this event."); return; } Env.getCurrentEnv().getCatalogMgr() @@ -109,7 +109,7 @@ protected void process() throws MetastoreNotificationException { partitionNames, eventTime, true); } catch (DdlException e) { throw new MetastoreNotificationException( - debugString("Failed to process event"), e); + getMsgWithEventInfo("Failed to process event"), e); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropTableEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropTableEvent.java index 6dcb16dedad369..dd67d6605274f4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropTableEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/DropTableEvent.java @@ -48,7 +48,7 @@ private DropTableEvent(NotificationEvent event, super(event, catalogName); Preconditions.checkArgument(MetastoreEventType.DROP_TABLE.equals(getEventType())); Preconditions - .checkNotNull(event.getMessage(), debugString("Event message is null")); + .checkNotNull(event.getMessage(), getMsgWithEventInfo("Event message is null")); try { JSONDropTableMessage dropTableMessage = (JSONDropTableMessage) MetastoreEventsProcessor.getMessageDeserializer(event.getMessageFormat()) @@ -77,11 +77,11 @@ protected boolean willChangeTableName() { @Override protected void process() throws MetastoreNotificationException { try { - infoLog("catalogName:[{}],dbName:[{}],tableName:[{}]", catalogName, dbName, tableName); + logInfo("catalogName:[{}],dbName:[{}],tableName:[{}]", catalogName, dbName, tableName); Env.getCurrentEnv().getCatalogMgr().unregisterExternalTable(dbName, tableName, catalogName, true); } catch (DdlException e) { throw new MetastoreNotificationException( - debugString("Failed to process event"), e); + getMsgWithEventInfo("Failed to process event"), e); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/IgnoredEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/IgnoredEvent.java index e7e6643e647b98..bebfc8c2384f16 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/IgnoredEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/IgnoredEvent.java @@ -38,6 +38,6 @@ protected static List getEvents(NotificationEvent event, @Override public void process() { - infoLog("Ignoring unknown event type " + metastoreNotificationEvent.getEventType()); + logInfo("Ignoring unknown event type " + metastoreNotificationEvent.getEventType()); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/InsertEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/InsertEvent.java index 7b76d4913d51f2..4b4abd0264d53d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/InsertEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/InsertEvent.java @@ -42,7 +42,7 @@ private InsertEvent(NotificationEvent event, String catalogName) { super(event, catalogName); Preconditions.checkArgument(getEventType().equals(MetastoreEventType.INSERT)); Preconditions - .checkNotNull(event.getMessage(), debugString("Event message is null")); + .checkNotNull(event.getMessage(), getMsgWithEventInfo("Event message is null")); } protected static List getEvents(NotificationEvent event, String catalogName) { @@ -62,7 +62,7 @@ protected boolean willChangeTableName() { @Override protected void process() throws MetastoreNotificationException { try { - 
infoLog("catalogName:[{}],dbName:[{}],tableName:[{}]", catalogName, dbName, tblName); + logInfo("catalogName:[{}],dbName:[{}],tableName:[{}]", catalogName, dbName, tblName); /** * Only when we use hive client to execute a `INSERT INTO TBL SELECT * ...` or `INSERT INTO TBL ...` sql * to a non-partitioned table then the hms will generate an insert event, and there is not @@ -75,7 +75,7 @@ protected void process() throws MetastoreNotificationException { eventTime); } catch (DdlException e) { throw new MetastoreNotificationException( - debugString("Failed to process event"), e); + getMsgWithEventInfo("Failed to process event"), e); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEvent.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEvent.java index 695dd57b215072..b0ec23b0661021 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEvent.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEvent.java @@ -18,7 +18,6 @@ package org.apache.doris.datasource.hive.event; import org.apache.doris.datasource.MetaIdMappingsLog; -import org.apache.doris.datasource.hive.HMSCachedClient; import com.google.common.collect.ImmutableList; import org.apache.commons.lang3.StringUtils; @@ -31,43 +30,29 @@ import java.util.Map; /** - * Abstract base class for all MetastoreEvents. A MetastoreEvent is an object used to - * process a NotificationEvent received from metastore. + * The wrapper parent class of the NotificationEvent class */ public abstract class MetastoreEvent { private static final Logger LOG = LogManager.getLogger(MetastoreEvent.class); - // String.format compatible string to prepend event id and type - private static final String STR_FORMAT_EVENT_ID_TYPE = "EventId: %d EventType: %s "; - // logger format compatible string to prepend to a log formatted message - private static final String LOG_FORMAT_EVENT_ID_TYPE = "EventId: {} EventType: {} "; - - // the notification received from metastore which is processed by this - protected final NotificationEvent event; + protected final String catalogName; - // dbName from the event protected final String dbName; - // tblName from the event protected final String tblName; - // eventId of the event. Used instead of calling getter on event everytime protected final long eventId; - // eventTime of the event. Used instead of calling getter on event everytime protected final long eventTime; - // eventType from the NotificationEvent protected final MetastoreEventType eventType; - // Actual notificationEvent object received from Metastore + protected final NotificationEvent event; protected final NotificationEvent metastoreNotificationEvent; - protected final String catalogName; - // for test protected MetastoreEvent(long eventId, String catalogName, String dbName, - String tblName, MetastoreEventType eventType) { + String tblName, MetastoreEventType eventType) { this.eventId = eventId; this.eventTime = -1L; this.catalogName = catalogName; @@ -103,127 +88,46 @@ protected MetastoreEvent(NotificationEvent event, String catalogName) { this.catalogName = catalogName; } - public long getEventId() { - return eventId; - } - - public MetastoreEventType getEventType() { - return eventType; - } - - public String getDbName() { - return dbName; - } - - public String getTblName() { - return tblName; - } - /** - * Checks if the given event can be batched into this event. 
Default behavior is - * to return false which can be overridden by a sub-class. + * Can batch processing be performed to improve processing performance * - * @param event The event under consideration to be batched into this event. - * @return false if event cannot be batched into this event; otherwise true. + * @param event + * @return */ protected boolean canBeBatched(MetastoreEvent event) { return false; } - /** - * Adds the given event into the batch of events represented by this event. Default - * implementation is to return null. Sub-classes must override this method to - * implement batching. - * - * @param event The event which needs to be added to the batch. - * @return The batch event which represents all the events batched into this event - * until now including the given event. - */ - protected MetastoreEvent addToBatchEvents(MetastoreEvent event) { - return null; - } - - /** - * Returns the number of events represented by this event. For most events this is 1. - * In case of batch events this could be more than 1. - */ - protected int getNumberOfEvents() { - return 1; - } - - /** - * Certain events like ALTER_TABLE or ALTER_PARTITION implement logic to ignore - * some events because they do not affect query results. - * - * @return true if this event can be skipped. - */ - protected boolean canBeSkipped() { - return false; - } - - /** - * Process the information available in the NotificationEvent. - * Better not to call (direct/indirect) apis of {@link HMSCachedClient} - * during handling hms events (Reference to https://github.com/apache/doris/pull/19120). - * Try to add some fallback strategies if it is highly necessary. - */ protected abstract void process() throws MetastoreNotificationException; - /** - * Helper method to get debug string with helpful event information prepended to the - * message. This can be used to generate helpful exception messages - * - * @param msgFormatString String value to be used in String.format() for the given message - * @param args args to the String.format() for the given msgFormatString - */ - protected String debugString(String msgFormatString, Object... args) { - String formatString = STR_FORMAT_EVENT_ID_TYPE + msgFormatString; - Object[] formatArgs = getLogFormatArgs(args); - return String.format(formatString, formatArgs); - } - - /** - * Helper method to generate the format args after prepending the event id and type - */ - private Object[] getLogFormatArgs(Object[] args) { - Object[] formatArgs = new Object[args.length + 2]; - formatArgs[0] = eventId; - formatArgs[1] = eventType; - int i = 2; - for (Object arg : args) { - formatArgs[i] = arg; - i++; - } - return formatArgs; + protected String getMsgWithEventInfo(String formatSuffix, Object... args) { + String format = "EventId: %d EventType: %s " + formatSuffix; + Object[] argsWithEventInfo = getArgsWithEventInfo(args); + return String.format(format, argsWithEventInfo); } - /** - * Logs at info level the given log formatted string and its args. The log formatted - * string should have {} pair at the appropriate location in the string for each arg - * value provided. This method prepends the event id and event type before logging the - * message. No-op if the log level is not at INFO - */ - protected void infoLog(String logFormattedStr, Object... args) { + protected void logInfo(String formatSuffix, Object... 
args) { if (!LOG.isInfoEnabled()) { return; } - String formatString = LOG_FORMAT_EVENT_ID_TYPE + logFormattedStr; - Object[] formatArgs = getLogFormatArgs(args); - LOG.info(formatString, formatArgs); + String format = "EventId: {} EventType: {} " + formatSuffix; + Object[] argsWithEventInfo = getArgsWithEventInfo(args); + LOG.info(format, argsWithEventInfo); } /** - * Similar to infoLog excepts logs at debug level + * Add event information to the parameters */ - protected void debugLog(String logFormattedStr, Object... args) { - if (!LOG.isDebugEnabled()) { - return; - } - String formatString = LOG_FORMAT_EVENT_ID_TYPE + logFormattedStr; - Object[] formatArgs = getLogFormatArgs(args); - if (LOG.isDebugEnabled()) { - LOG.debug(formatString, formatArgs); + private Object[] getArgsWithEventInfo(Object[] args) { + Object[] res = new Object[args.length + 2]; + res[0] = eventId; + res[1] = eventType; + int i = 2; + for (Object arg : args) { + res[i] = arg; + i++; } + return res; } protected String getPartitionName(Map part, List partitionColNames) { @@ -253,8 +157,27 @@ protected List transferToMetaIdMappings() { return ImmutableList.of(); } + public String getDbName() { + return dbName; + } + + public String getTblName() { + return tblName; + } + + public long getEventId() { + return eventId; + } + + public MetastoreEventType getEventType() { + return eventType; + } + @Override public String toString() { - return String.format(STR_FORMAT_EVENT_ID_TYPE, eventId, eventType); + return "MetastoreEvent{" + + "eventId=" + eventId + + ", eventType=" + eventType + + '}'; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEventsProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEventsProcessor.java index cbd0bfb5fa6fb5..73054773402bad 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEventsProcessor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/event/MetastoreEventsProcessor.java @@ -165,7 +165,7 @@ private void doExecute(List events, HMSExternalCatalog hmsExtern } catch (HMSClientException hmsClientException) { if (hmsClientException.getCause() != null && hmsClientException.getCause() instanceof NoSuchObjectException) { - LOG.warn(event.debugString("Failed to process event and skip"), hmsClientException); + LOG.warn(event.getMsgWithEventInfo("Failed to process event and skip"), hmsClientException); } else { updateLastSyncedEventId(hmsExternalCatalog, event.getEventId() - 1); throw hmsClientException; diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hudi/HudiUtils.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hudi/HudiUtils.java index 3885f1de3ee95e..d7803b1a516f9e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hudi/HudiUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hudi/HudiUtils.java @@ -24,7 +24,6 @@ import org.apache.doris.catalog.StructType; import org.apache.doris.catalog.Type; -import com.google.common.base.Preconditions; import org.apache.avro.LogicalType; import org.apache.avro.LogicalTypes; import org.apache.avro.Schema; @@ -41,30 +40,58 @@ public class HudiUtils { private static final SimpleDateFormat defaultDateFormat = new SimpleDateFormat("yyyy-MM-dd"); - public static String fromAvroHudiTypeToHiveTypeString(Schema avroSchema) { - Schema.Type columnType = avroSchema.getType(); - LogicalType logicalType = avroSchema.getLogicalType(); - switch (columnType) { + /** + * Convert different 
query instant time format to the commit time format. + * Currently we support three kinds of instant time format for time travel query: + * 1、yyyy-MM-dd HH:mm:ss + * 2、yyyy-MM-dd + * This will convert to 'yyyyMMdd000000'. + * 3、yyyyMMddHHmmss + */ + public static String formatQueryInstant(String queryInstant) throws ParseException { + int instantLength = queryInstant.length(); + if (instantLength == 19 || instantLength == 23) { // for yyyy-MM-dd HH:mm:ss[.SSS] + if (instantLength == 19) { + queryInstant += ".000"; + } + return HoodieInstantTimeGenerator.getInstantForDateString(queryInstant); + } else if (instantLength == HoodieInstantTimeGenerator.SECS_INSTANT_ID_LENGTH + || instantLength == HoodieInstantTimeGenerator.MILLIS_INSTANT_ID_LENGTH) { // for yyyyMMddHHmmss[SSS] + HoodieActiveTimeline.parseDateFromInstantTime(queryInstant); // validate the format + return queryInstant; + } else if (instantLength == 10) { // for yyyy-MM-dd + return HoodieActiveTimeline.formatDate(defaultDateFormat.parse(queryInstant)); + } else { + throw new IllegalArgumentException("Unsupported query instant time format: " + queryInstant + + ", Supported time format are: 'yyyy-MM-dd HH:mm:ss[.SSS]' " + + "or 'yyyy-MM-dd' or 'yyyyMMddHHmmss[SSS]'"); + } + } + + public static String convertAvroToHiveType(Schema schema) { + Schema.Type type = schema.getType(); + LogicalType logicalType = schema.getLogicalType(); + + switch (type) { case BOOLEAN: return "boolean"; case INT: if (logicalType instanceof LogicalTypes.Date) { return "date"; - } else if (logicalType instanceof LogicalTypes.TimeMillis) { - break; - } else { - return "int"; } + if (logicalType instanceof LogicalTypes.TimeMillis) { + return handleUnsupportedType(schema); + } + return "int"; case LONG: + if (logicalType instanceof LogicalTypes.TimestampMillis + || logicalType instanceof LogicalTypes.TimestampMicros) { + return logicalType.getName(); + } if (logicalType instanceof LogicalTypes.TimeMicros) { - break; - } else if (logicalType instanceof LogicalTypes.TimestampMillis) { - return "timestamp(3)"; - } else if (logicalType instanceof LogicalTypes.TimestampMicros) { - return "timestamp(6)"; - } else { - return "bigint"; + return handleUnsupportedType(schema); } + return "bigint"; case FLOAT: return "float"; case DOUBLE: @@ -74,71 +101,57 @@ public static String fromAvroHudiTypeToHiveTypeString(Schema avroSchema) { case FIXED: case BYTES: if (logicalType instanceof LogicalTypes.Decimal) { - int precision = ((LogicalTypes.Decimal) logicalType).getPrecision(); - int scale = ((LogicalTypes.Decimal) logicalType).getScale(); - return String.format("decimal(%s,%s)", precision, scale); - } else { - if (columnType == Schema.Type.BYTES) { - return "binary"; - } - return "string"; + LogicalTypes.Decimal decimalType = (LogicalTypes.Decimal) logicalType; + return String.format("decimal(%d,%d)", decimalType.getPrecision(), decimalType.getScale()); } + return "string"; case ARRAY: - String elementType = fromAvroHudiTypeToHiveTypeString(avroSchema.getElementType()); - return String.format("array<%s>", elementType); + String arrayElementType = convertAvroToHiveType(schema.getElementType()); + return String.format("array<%s>", arrayElementType); case RECORD: - List fields = avroSchema.getFields(); - Preconditions.checkArgument(fields.size() > 0); - String nameToType = fields.stream() - .map(f -> String.format("%s:%s", f.name(), - fromAvroHudiTypeToHiveTypeString(f.schema()))) + List recordFields = schema.getFields(); + if (recordFields.isEmpty()) { + throw new 
IllegalArgumentException("Record must have fields"); + } + String structFields = recordFields.stream() + .map(field -> String.format("%s:%s", field.name(), convertAvroToHiveType(field.schema()))) .collect(Collectors.joining(",")); - return String.format("struct<%s>", nameToType); + return String.format("struct<%s>", structFields); case MAP: - Schema value = avroSchema.getValueType(); - String valueType = fromAvroHudiTypeToHiveTypeString(value); - return String.format("map<%s,%s>", "string", valueType); + Schema mapValueType = schema.getValueType(); + String mapValueTypeString = convertAvroToHiveType(mapValueType); + return String.format("map", mapValueTypeString); case UNION: - List nonNullMembers = avroSchema.getTypes().stream() - .filter(schema -> !Schema.Type.NULL.equals(schema.getType())) + List unionTypes = schema.getTypes().stream() + .filter(s -> s.getType() != Schema.Type.NULL) .collect(Collectors.toList()); - // The nullable column in hudi is the union type with schemas [null, real column type] - if (nonNullMembers.size() == 1) { - return fromAvroHudiTypeToHiveTypeString(nonNullMembers.get(0)); + if (unionTypes.size() == 1) { + return convertAvroToHiveType(unionTypes.get(0)); } break; default: break; } - String errorMsg = String.format("Unsupported hudi %s type of column %s", avroSchema.getType().getName(), - avroSchema.getName()); - throw new IllegalArgumentException(errorMsg); + + throw new IllegalArgumentException( + String.format("Unsupported type: %s for column: %s", type.getName(), schema.getName())); + } + + private static String handleUnsupportedType(Schema schema) { + throw new IllegalArgumentException(String.format("Unsupported logical type: %s", schema.getLogicalType())); } public static Type fromAvroHudiTypeToDorisType(Schema avroSchema) { Schema.Type columnType = avroSchema.getType(); LogicalType logicalType = avroSchema.getLogicalType(); + switch (columnType) { case BOOLEAN: return Type.BOOLEAN; case INT: - if (logicalType instanceof LogicalTypes.Date) { - return ScalarType.createDateV2Type(); - } else if (logicalType instanceof LogicalTypes.TimeMillis) { - return ScalarType.createTimeV2Type(3); - } else { - return Type.INT; - } + return handleIntType(logicalType); case LONG: - if (logicalType instanceof LogicalTypes.TimeMicros) { - return ScalarType.createTimeV2Type(6); - } else if (logicalType instanceof LogicalTypes.TimestampMillis) { - return ScalarType.createDatetimeV2Type(3); - } else if (logicalType instanceof LogicalTypes.TimestampMicros) { - return ScalarType.createDatetimeV2Type(6); - } else { - return Type.BIGINT; - } + return handleLongType(logicalType); case FLOAT: return Type.FLOAT; case DOUBLE: @@ -147,64 +160,75 @@ public static Type fromAvroHudiTypeToDorisType(Schema avroSchema) { return Type.STRING; case FIXED: case BYTES: - if (logicalType instanceof LogicalTypes.Decimal) { - int precision = ((LogicalTypes.Decimal) logicalType).getPrecision(); - int scale = ((LogicalTypes.Decimal) logicalType).getScale(); - return ScalarType.createDecimalV3Type(precision, scale); - } else { - return Type.STRING; - } + return handleFixedOrBytesType(logicalType); case ARRAY: - Type innerType = fromAvroHudiTypeToDorisType(avroSchema.getElementType()); - return ArrayType.create(innerType, true); + return handleArrayType(avroSchema); case RECORD: - ArrayList fields = new ArrayList<>(); - avroSchema.getFields().forEach( - f -> fields.add(new StructField(f.name(), fromAvroHudiTypeToDorisType(f.schema())))); - return new StructType(fields); + return 
handleRecordType(avroSchema); case MAP: - // Hudi map's key must be string - return new MapType(Type.STRING, fromAvroHudiTypeToDorisType(avroSchema.getValueType())); + return handleMapType(avroSchema); case UNION: - List nonNullMembers = avroSchema.getTypes().stream() - .filter(schema -> !Schema.Type.NULL.equals(schema.getType())) - .collect(Collectors.toList()); - // The nullable column in hudi is the union type with schemas [null, real column type] - if (nonNullMembers.size() == 1) { - return fromAvroHudiTypeToDorisType(nonNullMembers.get(0)); - } - break; + return handleUnionType(avroSchema); default: - break; + return Type.UNSUPPORTED; } - return Type.UNSUPPORTED; } - /** - * Convert different query instant time format to the commit time format. - * Currently we support three kinds of instant time format for time travel query: - * 1、yyyy-MM-dd HH:mm:ss - * 2、yyyy-MM-dd - * This will convert to 'yyyyMMdd000000'. - * 3、yyyyMMddHHmmss - */ - public static String formatQueryInstant(String queryInstant) throws ParseException { - int instantLength = queryInstant.length(); - if (instantLength == 19 || instantLength == 23) { // for yyyy-MM-dd HH:mm:ss[.SSS] - if (instantLength == 19) { - queryInstant += ".000"; - } - return HoodieInstantTimeGenerator.getInstantForDateString(queryInstant); - } else if (instantLength == HoodieInstantTimeGenerator.SECS_INSTANT_ID_LENGTH - || instantLength == HoodieInstantTimeGenerator.MILLIS_INSTANT_ID_LENGTH) { // for yyyyMMddHHmmss[SSS] - HoodieActiveTimeline.parseDateFromInstantTime(queryInstant); // validate the format - return queryInstant; - } else if (instantLength == 10) { // for yyyy-MM-dd - return HoodieActiveTimeline.formatDate(defaultDateFormat.parse(queryInstant)); - } else { - throw new IllegalArgumentException("Unsupported query instant time format: " + queryInstant - + ", Supported time format are: 'yyyy-MM-dd HH:mm:ss[.SSS]' " - + "or 'yyyy-MM-dd' or 'yyyyMMddHHmmss[SSS]'"); + private static Type handleIntType(LogicalType logicalType) { + if (logicalType instanceof LogicalTypes.Date) { + return ScalarType.createDateV2Type(); + } + if (logicalType instanceof LogicalTypes.TimeMillis) { + return ScalarType.createTimeV2Type(3); + } + return Type.INT; + } + + private static Type handleLongType(LogicalType logicalType) { + if (logicalType instanceof LogicalTypes.TimeMicros) { + return ScalarType.createTimeV2Type(6); + } + if (logicalType instanceof LogicalTypes.TimestampMillis) { + return ScalarType.createDatetimeV2Type(3); } + if (logicalType instanceof LogicalTypes.TimestampMicros) { + return ScalarType.createDatetimeV2Type(6); + } + return Type.BIGINT; + } + + private static Type handleFixedOrBytesType(LogicalType logicalType) { + if (logicalType instanceof LogicalTypes.Decimal) { + int precision = ((LogicalTypes.Decimal) logicalType).getPrecision(); + int scale = ((LogicalTypes.Decimal) logicalType).getScale(); + return ScalarType.createDecimalV3Type(precision, scale); + } + return Type.STRING; + } + + private static Type handleArrayType(Schema avroSchema) { + Type innerType = fromAvroHudiTypeToDorisType(avroSchema.getElementType()); + return ArrayType.create(innerType, true); + } + + private static Type handleRecordType(Schema avroSchema) { + ArrayList fields = new ArrayList<>(); + avroSchema.getFields().forEach( + f -> fields.add(new StructField(f.name(), fromAvroHudiTypeToDorisType(f.schema())))); + return new StructType(fields); + } + + private static Type handleMapType(Schema avroSchema) { + return new MapType(Type.STRING, 
fromAvroHudiTypeToDorisType(avroSchema.getValueType())); + } + + private static Type handleUnionType(Schema avroSchema) { + List nonNullMembers = avroSchema.getTypes().stream() + .filter(schema -> !Schema.Type.NULL.equals(schema.getType())) + .collect(Collectors.toList()); + if (nonNullMembers.size() == 1) { + return fromAvroHudiTypeToDorisType(nonNullMembers.get(0)); + } + return Type.UNSUPPORTED; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hudi/source/HudiScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hudi/source/HudiScanNode.java index abd5a377f5a9cf..a8f2a362bfde8d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hudi/source/HudiScanNode.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hudi/source/HudiScanNode.java @@ -176,7 +176,7 @@ protected void doInitialize() throws UserException { } for (Schema.Field hudiField : hudiSchema.getFields()) { columnNames.add(hudiField.name().toLowerCase(Locale.ROOT)); - String columnType = HudiUtils.fromAvroHudiTypeToHiveTypeString(hudiField.schema()); + String columnType = HudiUtils.convertAvroToHiveType(hudiField.schema()); columnTypes.add(columnType); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergUtils.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergUtils.java index 893ee7bc93b310..7ae600756f17a5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergUtils.java @@ -604,11 +604,14 @@ public static long getIcebergRowCount(ExternalCatalog catalog, String dbName, St .getIcebergTable(catalog, dbName, tbName); Snapshot snapshot = icebergTable.currentSnapshot(); if (snapshot == null) { + LOG.info("Iceberg table {}.{}.{} is empty, return row count 0.", catalog.getName(), dbName, tbName); // empty table return 0; } Map summary = snapshot.summary(); - return Long.parseLong(summary.get(TOTAL_RECORDS)) - Long.parseLong(summary.get(TOTAL_POSITION_DELETES)); + long rows = Long.parseLong(summary.get(TOTAL_RECORDS)) - Long.parseLong(summary.get(TOTAL_POSITION_DELETES)); + LOG.info("Iceberg table {}.{}.{} row count in summary is {}", catalog.getName(), dbName, tbName, rows); + return rows; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/dlf/client/DLFCachedClientPool.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/dlf/client/DLFCachedClientPool.java index f8e70ebd3f52b9..23b814c13b8085 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/dlf/client/DLFCachedClientPool.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/dlf/client/DLFCachedClientPool.java @@ -41,22 +41,32 @@ public class DLFCachedClientPool implements ClientPool properties) { this.conf = conf; this.endpoint = conf.get("", ""); - this.clientPoolSize = - PropertyUtil.propertyAsInt( + this.clientPoolSize = getClientPoolSize(properties); + this.evictionInterval = getEvictionInterval(properties); + initializeClientPoolCache(); + } + + private int getClientPoolSize(Map properties) { + return PropertyUtil.propertyAsInt( properties, CatalogProperties.CLIENT_POOL_SIZE, - CatalogProperties.CLIENT_POOL_SIZE_DEFAULT); - this.evictionInterval = - PropertyUtil.propertyAsLong( + CatalogProperties.CLIENT_POOL_SIZE_DEFAULT + ); + } + + private long getEvictionInterval(Map properties) { + return PropertyUtil.propertyAsLong( properties, 
CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS, - CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS_DEFAULT); + CatalogProperties.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS_DEFAULT + ); + } + private void initializeClientPoolCache() { if (clientPoolCache == null) { synchronized (clientPoolCacheLock) { if (clientPoolCache == null) { - clientPoolCache = - Caffeine.newBuilder() + clientPoolCache = Caffeine.newBuilder() .expireAfterAccess(evictionInterval, TimeUnit.MILLISECONDS) .removalListener((key, value, cause) -> ((DLFClientPool) value).close()) .build(); @@ -80,3 +90,4 @@ public R run(Action action, boolean retry) return clientPool().run(action, retry); } } + diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/JdbcExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/JdbcExternalCatalog.java index e7e7634cff0207..fb26265d19fe93 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/JdbcExternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/JdbcExternalCatalog.java @@ -93,8 +93,6 @@ public void checkProperties() throws DdlException { JdbcResource.checkBooleanProperty(JdbcResource.CONNECTION_POOL_KEEP_ALIVE, String.valueOf(isConnectionPoolKeepAlive())); JdbcResource.checkBooleanProperty(JdbcResource.TEST_CONNECTION, String.valueOf(isTestConnection())); - JdbcResource.checkBooleanProperty(JdbcResource.ENABLE_CONNECTION_POOL, - String.valueOf(isEnableConnectionPool())); JdbcResource.checkDatabaseListProperties(getOnlySpecifiedDatabase(), getIncludeDatabaseMap(), getExcludeDatabaseMap()); JdbcResource.checkConnectionPoolProperties(getConnectionPoolMinSize(), getConnectionPoolMaxSize(), @@ -114,27 +112,6 @@ public void setDefaultPropsIfMissing(boolean isReplay) { throw new IllegalArgumentException("Jdbc catalog property lower_case_table_names is not supported," + " please use lower_case_meta_names instead."); } - if (catalogProperty.getOrDefault(JdbcResource.ENABLE_CONNECTION_POOL, "").isEmpty()) { - // If not setting enable_connection_pool in replay logic, - // set default value true to be compatible with older version. - catalogProperty.addProperty(JdbcResource.ENABLE_CONNECTION_POOL, - isReplay ? 
"true" : String.valueOf(JdbcResource - .getDefaultPropertyValue(JdbcResource.ENABLE_CONNECTION_POOL))); - } - } - - @Override - public void tryModifyCatalogProps(Map props) { - // It is forbidden to modify the enable_connection_pool attribute and driver_url attribute of jdbc catalog - if (props.containsKey(JdbcResource.ENABLE_CONNECTION_POOL)) { - throw new IllegalArgumentException("Can not modify enable_connection_pool property of jdbc catalog," - + "please re-create the catalog"); - } - if (props.containsKey(JdbcResource.DRIVER_URL)) { - throw new IllegalArgumentException("Can not modify driver_url property of jdbc catalog" - + "please re-create the catalog"); - } - super.tryModifyCatalogProps(props); } @Override @@ -245,11 +222,6 @@ public boolean isTestConnection() { .getDefaultPropertyValue(JdbcResource.TEST_CONNECTION))); } - public boolean isEnableConnectionPool() { - return Boolean.parseBoolean(catalogProperty.getOrDefault(JdbcResource.ENABLE_CONNECTION_POOL, JdbcResource - .getDefaultPropertyValue(JdbcResource.ENABLE_CONNECTION_POOL))); - } - @Override protected void initLocalObjectsImpl() { JdbcClientConfig jdbcClientConfig = new JdbcClientConfig() @@ -268,8 +240,7 @@ protected void initLocalObjectsImpl() { .setConnectionPoolMaxSize(getConnectionPoolMaxSize()) .setConnectionPoolMaxLifeTime(getConnectionPoolMaxLifeTime()) .setConnectionPoolMaxWaitTime(getConnectionPoolMaxWaitTime()) - .setConnectionPoolKeepAlive(isConnectionPoolKeepAlive()) - .setEnableConnectionPool(isEnableConnectionPool()); + .setConnectionPoolKeepAlive(isConnectionPoolKeepAlive()); jdbcClient = JdbcClient.createJdbcClient(jdbcClientConfig); } @@ -349,7 +320,6 @@ public void configureJdbcTable(JdbcTable jdbcTable, String tableName) { jdbcTable.setConnectionPoolMaxLifeTime(this.getConnectionPoolMaxLifeTime()); jdbcTable.setConnectionPoolMaxWaitTime(this.getConnectionPoolMaxWaitTime()); jdbcTable.setConnectionPoolKeepAlive(this.isConnectionPoolKeepAlive()); - jdbcTable.setEnableConnectionPool(this.isEnableConnectionPool()); } private void testJdbcConnection() throws DdlException { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java index e863a42c122a9b..8c4ada01774559 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClient.java @@ -39,7 +39,6 @@ import java.net.URLClassLoader; import java.sql.Connection; import java.sql.DatabaseMetaData; -import java.sql.Driver; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; @@ -49,7 +48,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Properties; import java.util.Set; import java.util.function.Consumer; @@ -62,11 +60,7 @@ public abstract class JdbcClient { private String catalogName; protected String dbType; protected String jdbcUser; - protected String jdbcUrl; - protected String jdbcPassword; - protected String jdbcDriverClass; protected ClassLoader classLoader = null; - protected boolean enableConnectionPool; protected HikariDataSource dataSource = null; protected boolean isOnlySpecifiedDatabase; protected boolean isLowerCaseMetaNames; @@ -109,9 +103,6 @@ protected JdbcClient(JdbcClientConfig jdbcClientConfig) { System.setProperty("com.zaxxer.hikari.useWeakReferences", "true"); this.catalogName = jdbcClientConfig.getCatalog(); this.jdbcUser 
= jdbcClientConfig.getUser(); - this.jdbcPassword = jdbcClientConfig.getPassword(); - this.jdbcUrl = jdbcClientConfig.getJdbcUrl(); - this.jdbcDriverClass = jdbcClientConfig.getDriverClass(); this.isOnlySpecifiedDatabase = Boolean.parseBoolean(jdbcClientConfig.getOnlySpecifiedDatabase()); this.isLowerCaseMetaNames = Boolean.parseBoolean(jdbcClientConfig.getIsLowerCaseMetaNames()); this.metaNamesMapping = jdbcClientConfig.getMetaNamesMapping(); @@ -119,12 +110,10 @@ protected JdbcClient(JdbcClientConfig jdbcClientConfig) { Optional.ofNullable(jdbcClientConfig.getIncludeDatabaseMap()).orElse(Collections.emptyMap()); this.excludeDatabaseMap = Optional.ofNullable(jdbcClientConfig.getExcludeDatabaseMap()).orElse(Collections.emptyMap()); - this.enableConnectionPool = jdbcClientConfig.isEnableConnectionPool(); + String jdbcUrl = jdbcClientConfig.getJdbcUrl(); this.dbType = parseDbType(jdbcUrl); initializeClassLoader(jdbcClientConfig); - if (enableConnectionPool) { - initializeDataSource(jdbcClientConfig); - } + initializeDataSource(jdbcClientConfig); this.jdbcLowerCaseMetaMatching = new JdbcIdentifierMapping(isLowerCaseMetaNames, metaNamesMapping, this); } @@ -179,57 +168,15 @@ public static String parseDbType(String jdbcUrl) { } public void closeClient() { - if (enableConnectionPool && dataSource != null) { - dataSource.close(); - } + dataSource.close(); } public Connection getConnection() throws JdbcClientException { - if (enableConnectionPool) { - return getConnectionWithPool(); - } else { - return getConnectionWithoutPool(); - } - } - - private Connection getConnectionWithoutPool() throws JdbcClientException { - ClassLoader oldClassLoader = Thread.currentThread().getContextClassLoader(); - try { - Thread.currentThread().setContextClassLoader(this.classLoader); - - Class driverClass = Class.forName(jdbcDriverClass, true, this.classLoader); - Driver driverInstance = (Driver) driverClass.getDeclaredConstructor().newInstance(); - - Properties info = new Properties(); - info.put("user", jdbcUser); - info.put("password", jdbcPassword); - - Connection connection = driverInstance.connect(SecurityChecker.getInstance().getSafeJdbcUrl(jdbcUrl), info); - - if (connection == null) { - throw new SQLException("Failed to establish a connection. The JDBC driver returned null. " - + "Please check if the JDBC URL is correct: " - + jdbcUrl - + ". Ensure that the URL format and parameters are valid for the driver: " - + driverInstance.getClass().getName()); - } - - return connection; - } catch (Exception e) { - String errorMessage = String.format("Can not connect to jdbc due to error: %s, Catalog name: %s", - e.getMessage(), this.getCatalogName()); - throw new JdbcClientException(errorMessage, e); - } finally { - Thread.currentThread().setContextClassLoader(oldClassLoader); - } - } - - - private Connection getConnectionWithPool() throws JdbcClientException { ClassLoader oldClassLoader = Thread.currentThread().getContextClassLoader(); + Connection conn; try { Thread.currentThread().setContextClassLoader(this.classLoader); - return dataSource.getConnection(); + conn = dataSource.getConnection(); } catch (Exception e) { String errorMessage = String.format( "Catalog `%s` can not connect to jdbc due to error: %s", @@ -238,6 +185,7 @@ private Connection getConnectionWithPool() throws JdbcClientException { } finally { Thread.currentThread().setContextClassLoader(oldClassLoader); } + return conn; } public void close(AutoCloseable... 
closeables) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClientConfig.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClientConfig.java index f3ab9953e050af..85f3bd8f256d8b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClientConfig.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/jdbc/client/JdbcClientConfig.java @@ -39,7 +39,6 @@ public class JdbcClientConfig implements Cloneable { private int connectionPoolMaxWaitTime; private int connectionPoolMaxLifeTime; private boolean connectionPoolKeepAlive; - private boolean enableConnectionPool; private Map includeDatabaseMap; private Map excludeDatabaseMap; @@ -59,8 +58,6 @@ public JdbcClientConfig() { JdbcResource.getDefaultPropertyValue(JdbcResource.CONNECTION_POOL_MAX_LIFE_TIME)); this.connectionPoolKeepAlive = Boolean.parseBoolean( JdbcResource.getDefaultPropertyValue(JdbcResource.CONNECTION_POOL_KEEP_ALIVE)); - this.enableConnectionPool = Boolean.parseBoolean( - JdbcResource.getDefaultPropertyValue(JdbcResource.ENABLE_CONNECTION_POOL)); this.includeDatabaseMap = Maps.newHashMap(); this.excludeDatabaseMap = Maps.newHashMap(); this.customizedProperties = Maps.newHashMap(); @@ -76,7 +73,6 @@ public JdbcClientConfig clone() { cloned.connectionPoolMaxLifeTime = connectionPoolMaxLifeTime; cloned.connectionPoolMaxWaitTime = connectionPoolMaxWaitTime; cloned.connectionPoolKeepAlive = connectionPoolKeepAlive; - cloned.enableConnectionPool = enableConnectionPool; cloned.includeDatabaseMap = Maps.newHashMap(includeDatabaseMap); cloned.excludeDatabaseMap = Maps.newHashMap(excludeDatabaseMap); cloned.customizedProperties = Maps.newHashMap(customizedProperties); @@ -212,15 +208,6 @@ public JdbcClientConfig setConnectionPoolKeepAlive(boolean connectionPoolKeepAli return this; } - public boolean isEnableConnectionPool() { - return enableConnectionPool; - } - - public JdbcClientConfig setEnableConnectionPool(boolean enableConnectionPool) { - this.enableConnectionPool = enableConnectionPool; - return this; - } - public Map getIncludeDatabaseMap() { return includeDatabaseMap; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/ClusterAction.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/ClusterAction.java index 95934d31dee5c9..5d5b083664ead9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/ClusterAction.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/rest/manager/ClusterAction.java @@ -28,6 +28,9 @@ import org.apache.doris.system.Frontend; import com.google.common.collect.Maps; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RestController; @@ -44,6 +47,7 @@ @RestController @RequestMapping(path = {"/rest/v2/manager/cluster", "/rest/v2/manager/compute_group"}) public class ClusterAction extends RestBaseController { + private static final Logger LOG = LogManager.getLogger(ClusterAction.class); // Returns mysql and http connection information for the cluster. 
// { @@ -83,32 +87,58 @@ public static class BeClusterInfo { public volatile int brpcPort; public volatile long currentFragmentNum = 0; public volatile long lastFragmentUpdateTime = 0; + + @Override + public String toString() { + return "BeClusterInfo{" + + "host='" + host + '\'' + + ", heartbeatPort=" + heartbeatPort + + ", bePort=" + bePort + + ", httpPort=" + httpPort + + ", brpcPort=" + brpcPort + + ", currentFragmentNum=" + currentFragmentNum + + ", lastFragmentUpdateTime=" + lastFragmentUpdateTime + + '}'; + } } @RequestMapping(path = {"/cluster_info/cloud_cluster_status", "/compute_group_info/compute_group_status"}, method = RequestMethod.GET) public Object cloudClusterInfo(HttpServletRequest request, HttpServletResponse response) { - executeCheckPassword(request, response); - checkGlobalAuth(ConnectContext.get().getCurrentUserIdentity(), PrivPredicate.ADMIN); + ResponseEntity ret = null; + try { + if (!Env.getCurrentEnv().isMaster()) { + ret = ResponseEntityBuilder.badRequest("this api just use in cloud master fe"); + } else { + executeCheckPassword(request, response); + checkGlobalAuth(ConnectContext.get().getCurrentUserIdentity(), PrivPredicate.ADMIN); - // Key: cluster_name Value: be status - Map> result = Maps.newHashMap(); + // Key: cluster_name Value: be status + Map> result = Maps.newHashMap(); - ((CloudSystemInfoService) Env.getCurrentSystemInfo()).getCloudClusterIdToBackend() - .forEach((clusterId, backends) -> { - List bis = backends.stream().map(backend -> { - BeClusterInfo bi = new BeClusterInfo(); - bi.host = backend.getHost(); - bi.heartbeatPort = backend.getHeartbeatPort(); - bi.bePort = backend.getBePort(); - bi.httpPort = backend.getHttpPort(); - bi.brpcPort = backend.getBrpcPort(); - bi.currentFragmentNum = backend.getBackendStatus().currentFragmentNum; - bi.lastFragmentUpdateTime = backend.getBackendStatus().lastFragmentUpdateTime; - return bi; }).collect(Collectors.toList()); - result.put(clusterId, bis); - }); + ((CloudSystemInfoService) Env.getCurrentSystemInfo()).getCloudClusterIdToBackend() + .forEach((clusterId, backends) -> { + List bis = backends.stream().map(backend -> { + BeClusterInfo bi = new BeClusterInfo(); + bi.host = backend.getHost(); + bi.heartbeatPort = backend.getHeartbeatPort(); + bi.bePort = backend.getBePort(); + bi.httpPort = backend.getHttpPort(); + bi.brpcPort = backend.getBrpcPort(); + bi.currentFragmentNum = backend.getBackendStatus().currentFragmentNum; + bi.lastFragmentUpdateTime = backend.getBackendStatus().lastFragmentUpdateTime; + return bi; + }).collect(Collectors.toList()); + result.put(clusterId, bis); + }); - return ResponseEntityBuilder.ok(result); + ret = ResponseEntityBuilder.ok(result); + } + } finally { + if (LOG.isDebugEnabled()) { + LOG.debug("request {}, response {}", request.getRequestURI(), ret); + } + } + return ret; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/ExportJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/ExportJob.java index 33418531f2cda8..5fe9c482633590 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/ExportJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/ExportJob.java @@ -98,7 +98,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; @Data @@ -207,9 +206,7 @@ public class ExportJob implements Writable { // backend_address => snapshot path private List> snapshotPaths = Lists.newArrayList(); - private List jobExecutorList; - - private 
ConcurrentHashMap taskIdToExecutor = new ConcurrentHashMap<>(); + private List jobExecutorList = Lists.newArrayList(); private Integer finishedTaskCount = 0; private List> allOutfileInfo = Lists.newArrayList(); @@ -690,11 +687,11 @@ private void cancelExportTask(ExportFailMsg.CancelType type, String msg) throws } // we need cancel all task - taskIdToExecutor.keySet().forEach(id -> { + jobExecutorList.forEach(executor -> { try { - Env.getCurrentEnv().getTransientTaskManager().cancelMemoryTask(id); + Env.getCurrentEnv().getTransientTaskManager().cancelMemoryTask(executor.getId()); } catch (JobException e) { - LOG.warn("cancel export task {} exception: {}", id, e); + LOG.warn("cancel export task {} exception: {}", executor.getId(), e); } }); @@ -705,6 +702,7 @@ private void cancelExportJobUnprotected(ExportFailMsg.CancelType type, String ms setExportJobState(ExportJobState.CANCELLED); finishTimeMs = System.currentTimeMillis(); failMsg = new ExportFailMsg(type, msg); + jobExecutorList.clear(); if (FeConstants.runningUnitTest) { return; } @@ -749,6 +747,8 @@ private void finishExportJobUnprotected() { setExportJobState(ExportJobState.FINISHED); finishTimeMs = System.currentTimeMillis(); outfileInfo = GsonUtils.GSON.toJson(allOutfileInfo); + // Clear the jobExecutorList to release memory. + jobExecutorList.clear(); Env.getCurrentEnv().getEditLog().logExportUpdateState(id, ExportJobState.FINISHED); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/ExportMgr.java b/fe/fe-core/src/main/java/org/apache/doris/load/ExportMgr.java index 7dbe953cf9bdbc..5636f1aaad3e71 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/ExportMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/ExportMgr.java @@ -118,8 +118,7 @@ public void addExportJobAndRegisterTask(ExportJob job) throws Exception { job.getBrokerDesc()); } job.getTaskExecutors().forEach(executor -> { - Long taskId = Env.getCurrentEnv().getTransientTaskManager().addMemoryTask(executor); - job.getTaskIdToExecutor().put(taskId, executor); + Env.getCurrentEnv().getTransientTaskManager().addMemoryTask(executor); }); Env.getCurrentEnv().getEditLog().logExportCreate(job); } finally { diff --git a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java index ecf8d3b7a6f779..1ee4fbee123459 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java @@ -113,7 +113,7 @@ public abstract class RoutineLoadJob public static final long DEFAULT_MAX_ERROR_NUM = 0; public static final double DEFAULT_MAX_FILTER_RATIO = 1.0; - public static final long DEFAULT_MAX_INTERVAL_SECOND = 10; + public static final long DEFAULT_MAX_INTERVAL_SECOND = 60; public static final long DEFAULT_MAX_BATCH_ROWS = 20000000; public static final long DEFAULT_MAX_BATCH_SIZE = 1024 * 1024 * 1024; // 1GB public static final long DEFAULT_EXEC_MEM_LIMIT = 2 * 1024 * 1024 * 1024L; diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java index 6642b28424362b..12db90761cbd03 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java @@ -943,6 +943,11 @@ public void setPassword(SetPassVar stmt) throws DdlException { false /* set by resolver */, false); } + public void 
setPassword(UserIdentity userIdentity, byte[] password) throws DdlException { + setPasswordInternal(userIdentity, password, null, true /* err on non exist */, + false /* set by resolver */, false); + } + public void replaySetPassword(PrivInfo info) { try { setPasswordInternal(info.getUserIdent(), info.getPasswd(), null, true /* err on non exist */, @@ -986,6 +991,12 @@ public void setLdapPassword(SetLdapPassVar stmt) { LOG.info("finished to set ldap password."); } + public void setLdapPassword(String ldapPassword) { + ldapInfo = new LdapInfo(ldapPassword); + Env.getCurrentEnv().getEditLog().logSetLdapPassword(ldapInfo); + LOG.info("finished to set ldap password."); + } + public void replaySetLdapPassword(LdapInfo info) { ldapInfo = info; if (LOG.isDebugEnabled()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java index 176e0f25801043..e334937f2db5b5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java @@ -17,8 +17,11 @@ package org.apache.doris.mysql.privilege; +import org.apache.doris.analysis.ResourceTypeEnum; import org.apache.doris.analysis.SetUserPropertyVar; +import org.apache.doris.analysis.UserIdentity; import org.apache.doris.catalog.Env; +import org.apache.doris.cloud.qe.ComputeGroupException; import org.apache.doris.cluster.ClusterNamespace; import org.apache.doris.common.AnalysisException; import org.apache.doris.common.Config; @@ -257,23 +260,9 @@ public void update(List> properties, boolean isReplay) thro newDefaultLoadCluster = value; } else if (keyArr[0].equalsIgnoreCase(DEFAULT_CLOUD_CLUSTER)) { - // set property "DEFAULT_CLOUD_CLUSTER" = "cluster1" - if (keyArr.length != 1) { - throw new DdlException(DEFAULT_CLOUD_CLUSTER + " format error"); - } - if (value == null) { - value = ""; - } - newDefaultCloudCluster = value; + newDefaultCloudCluster = checkCloudDefaultCluster(keyArr, value, DEFAULT_CLOUD_CLUSTER); } else if (keyArr[0].equalsIgnoreCase(DEFAULT_COMPUTE_GROUP)) { - // set property "DEFAULT_CLOUD_CLUSTER" = "cluster1" - if (keyArr.length != 1) { - throw new DdlException(DEFAULT_COMPUTE_GROUP + " format error"); - } - if (value == null) { - value = ""; - } - newDefaultCloudCluster = value; + newDefaultCloudCluster = checkCloudDefaultCluster(keyArr, value, DEFAULT_COMPUTE_GROUP); } else if (keyArr[0].equalsIgnoreCase(PROP_MAX_QUERY_INSTANCES)) { // set property "max_query_instances" = "1000" if (keyArr.length != 1) { @@ -401,6 +390,26 @@ public void update(List> properties, boolean isReplay) thro defaultCloudCluster = newDefaultCloudCluster; } + private String checkCloudDefaultCluster(String[] keyArr, String value, String defaultComputeGroup) + throws ComputeGroupException, DdlException { + // check cluster auth + if (!Strings.isNullOrEmpty(value) && !Env.getCurrentEnv().getAuth().checkCloudPriv( + new UserIdentity(qualifiedUser, "%"), value, PrivPredicate.USAGE, ResourceTypeEnum.CLUSTER)) { + throw new ComputeGroupException(String.format("set default compute group failed, " + + "user %s has no permission to use compute group '%s', please grant use privilege first ", + qualifiedUser, value), + ComputeGroupException.FailedTypeEnum.CURRENT_USER_NO_AUTH_TO_USE_COMPUTE_GROUP); + } + // set property "DEFAULT_CLOUD_CLUSTER" = "cluster1" + if (keyArr.length != 1) { + throw new DdlException(defaultComputeGroup + " format error"); + } + if 
(value == null) { + value = ""; + } + return value; + } + private long getLongProperty(String key, String value, String[] keyArr, String propName) throws DdlException { // eg: set property "load_mem_limit" = "2147483648"; if (keyArr.length != 1) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java index dd24022d9fdc0d..3078fb36df681a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java @@ -325,9 +325,6 @@ public PlanFragment visitPhysicalDistribute(PhysicalDistribute d .collect(Collectors.toList()); keys.addAll(validOutputIds); validOutputIds = keys; - } else if (child instanceof PhysicalLimit && ((PhysicalLimit) child).getPhase().isGlobal()) { - // because sort already contains Offset, we don't need to handle PhysicalTopN - exchangeNode.setOffset(((PhysicalLimit) child).getOffset()); } if (inputFragment instanceof MultiCastPlanFragment) { // TODO: remove this logic when we split to multi-window in logical window to physical window conversion diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java index 6f6c022117c337..894d4264201533 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Analyzer.java @@ -35,6 +35,7 @@ import org.apache.doris.nereids.rules.analysis.EliminateGroupByConstant; import org.apache.doris.nereids.rules.analysis.EliminateLogicalSelectHint; import org.apache.doris.nereids.rules.analysis.FillUpMissingSlots; +import org.apache.doris.nereids.rules.analysis.FillUpQualifyMissingSlot; import org.apache.doris.nereids.rules.analysis.HavingToFilter; import org.apache.doris.nereids.rules.analysis.LeadingJoin; import org.apache.doris.nereids.rules.analysis.NormalizeAggregate; @@ -43,6 +44,7 @@ import org.apache.doris.nereids.rules.analysis.OneRowRelationExtractAggregate; import org.apache.doris.nereids.rules.analysis.ProjectToGlobalAggregate; import org.apache.doris.nereids.rules.analysis.ProjectWithDistinctToAggregate; +import org.apache.doris.nereids.rules.analysis.QualifyToFilter; import org.apache.doris.nereids.rules.analysis.ReplaceExpressionByChildOutput; import org.apache.doris.nereids.rules.analysis.SubqueryToApply; import org.apache.doris.nereids.rules.analysis.VariableToLiteral; @@ -125,6 +127,7 @@ private static List buildAnalyzerJobs(Optional topDown(new BindSink()), bottomUp(new CheckAfterBind()), bottomUp(new AddInitMaterializationHook()), + topDown(new FillUpQualifyMissingSlot()), bottomUp( new ProjectToGlobalAggregate(), // this rule check's the logicalProject node's isDistinct property @@ -165,6 +168,7 @@ private static List buildAnalyzerJobs(Optional topDown(new SimplifyAggGroupBy()), topDown(new NormalizeAggregate()), topDown(new HavingToFilter()), + topDown(new QualifyToFilter()), bottomUp(new SemiJoinCommute()), bottomUp( new CollectSubQueryAlias(), diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java index 75f2ddc7b703fe..f4ca9a972a6814 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/Rewriter.java @@ -355,26 +355,10 @@ public class Rewriter extends AbstractBatchJobExecutor { bottomUp(new EliminateJoinByFK()), topDown(new EliminateJoinByUnique()) ), - - // this rule should be after topic "Column pruning and infer predicate" - topic("Join pull up", - topDown( - new EliminateFilter(), - new PushDownFilterThroughProject(), - new MergeProjects() - ), - topDown( - new PullUpJoinFromUnionAll() - ), - custom(RuleType.COLUMN_PRUNING, ColumnPruning::new), - bottomUp(RuleSet.PUSH_DOWN_FILTERS), - custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new) - ), - - // this rule should be invoked after topic "Join pull up" topic("eliminate Aggregate according to fd items", topDown(new EliminateGroupByKey()), - topDown(new PushDownAggThroughJoinOnPkFk()) + topDown(new PushDownAggThroughJoinOnPkFk()), + topDown(new PullUpJoinFromUnionAll()) ), topic("Limit optimization", diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java index 86f6999475ca44..b5e6d928d6cbaf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/parser/LogicalPlanBuilder.java @@ -20,6 +20,8 @@ import org.apache.doris.analysis.ArithmeticExpr.Operator; import org.apache.doris.analysis.BrokerDesc; import org.apache.doris.analysis.ColumnNullableType; +import org.apache.doris.analysis.PassVar; +import org.apache.doris.analysis.SetType; import org.apache.doris.analysis.StorageBackend; import org.apache.doris.analysis.TableName; import org.apache.doris.analysis.TableScanParams; @@ -152,6 +154,7 @@ import org.apache.doris.nereids.DorisParser.PropertyKeyContext; import org.apache.doris.nereids.DorisParser.PropertyValueContext; import org.apache.doris.nereids.DorisParser.QualifiedNameContext; +import org.apache.doris.nereids.DorisParser.QualifyClauseContext; import org.apache.doris.nereids.DorisParser.QueryContext; import org.apache.doris.nereids.DorisParser.QueryOrganizationContext; import org.apache.doris.nereids.DorisParser.QueryTermContext; @@ -173,7 +176,19 @@ import org.apache.doris.nereids.DorisParser.SelectClauseContext; import org.apache.doris.nereids.DorisParser.SelectColumnClauseContext; import org.apache.doris.nereids.DorisParser.SelectHintContext; +import org.apache.doris.nereids.DorisParser.SetCharsetContext; +import org.apache.doris.nereids.DorisParser.SetCollateContext; +import org.apache.doris.nereids.DorisParser.SetDefaultStorageVaultContext; +import org.apache.doris.nereids.DorisParser.SetLdapAdminPasswordContext; +import org.apache.doris.nereids.DorisParser.SetNamesContext; import org.apache.doris.nereids.DorisParser.SetOperationContext; +import org.apache.doris.nereids.DorisParser.SetOptionsContext; +import org.apache.doris.nereids.DorisParser.SetPasswordContext; +import org.apache.doris.nereids.DorisParser.SetSystemVariableContext; +import org.apache.doris.nereids.DorisParser.SetTransactionContext; +import org.apache.doris.nereids.DorisParser.SetUserPropertiesContext; +import org.apache.doris.nereids.DorisParser.SetUserVariableContext; +import org.apache.doris.nereids.DorisParser.SetVariableWithTypeContext; import org.apache.doris.nereids.DorisParser.ShowConfigContext; import 
org.apache.doris.nereids.DorisParser.ShowConstraintContext; import org.apache.doris.nereids.DorisParser.ShowCreateMTMVContext; @@ -398,6 +413,10 @@ import org.apache.doris.nereids.trees.plans.commands.RefreshMTMVCommand; import org.apache.doris.nereids.trees.plans.commands.ReplayCommand; import org.apache.doris.nereids.trees.plans.commands.ResumeMTMVCommand; +import org.apache.doris.nereids.trees.plans.commands.SetDefaultStorageVaultCommand; +import org.apache.doris.nereids.trees.plans.commands.SetOptionsCommand; +import org.apache.doris.nereids.trees.plans.commands.SetTransactionCommand; +import org.apache.doris.nereids.trees.plans.commands.SetUserPropertiesCommand; import org.apache.doris.nereids.trees.plans.commands.ShowConfigCommand; import org.apache.doris.nereids.trees.plans.commands.ShowConstraintsCommand; import org.apache.doris.nereids.trees.plans.commands.ShowCreateMTMVCommand; @@ -438,6 +457,14 @@ import org.apache.doris.nereids.trees.plans.commands.info.RefreshMTMVInfo; import org.apache.doris.nereids.trees.plans.commands.info.ResumeMTMVInfo; import org.apache.doris.nereids.trees.plans.commands.info.RollupDefinition; +import org.apache.doris.nereids.trees.plans.commands.info.SetCharsetAndCollateVarOp; +import org.apache.doris.nereids.trees.plans.commands.info.SetLdapPassVarOp; +import org.apache.doris.nereids.trees.plans.commands.info.SetNamesVarOp; +import org.apache.doris.nereids.trees.plans.commands.info.SetPassVarOp; +import org.apache.doris.nereids.trees.plans.commands.info.SetSessionVarOp; +import org.apache.doris.nereids.trees.plans.commands.info.SetUserDefinedVarOp; +import org.apache.doris.nereids.trees.plans.commands.info.SetUserPropertyVarOp; +import org.apache.doris.nereids.trees.plans.commands.info.SetVarOp; import org.apache.doris.nereids.trees.plans.commands.info.ShowCreateMTMVInfo; import org.apache.doris.nereids.trees.plans.commands.info.SimpleColumnDefinition; import org.apache.doris.nereids.trees.plans.commands.info.StepPartition; @@ -458,6 +485,7 @@ import org.apache.doris.nereids.trees.plans.logical.LogicalLimit; import org.apache.doris.nereids.trees.plans.logical.LogicalPlan; import org.apache.doris.nereids.trees.plans.logical.LogicalProject; +import org.apache.doris.nereids.trees.plans.logical.LogicalQualify; import org.apache.doris.nereids.trees.plans.logical.LogicalRepeat; import org.apache.doris.nereids.trees.plans.logical.LogicalSelectHint; import org.apache.doris.nereids.trees.plans.logical.LogicalSink; @@ -615,6 +643,10 @@ public LogicalPlan visitInsertTable(InsertTableContext ctx) { LogicalPlan plan = visitQuery(ctx.query()); // partitionSpec may be NULL. means auto detect partition. 
only available when IOT Pair> partitionSpec = visitPartitionSpec(ctx.partitionSpec()); + // partitionSpec.second : + // null - auto detect + // zero - whole table + // others - specific partitions boolean isAutoDetect = partitionSpec.second == null; LogicalSink sink = UnboundTableSinkCreator.createUnboundTableSinkMaybeOverwrite( tableName.build(), @@ -1429,7 +1461,8 @@ public LogicalPlan visitRegularQuerySpecification(RegularQuerySpecificationConte selectCtx, Optional.ofNullable(ctx.whereClause()), Optional.ofNullable(ctx.aggClause()), - Optional.ofNullable(ctx.havingClause())); + Optional.ofNullable(ctx.havingClause()), + Optional.ofNullable(ctx.qualifyClause())); selectPlan = withQueryOrganization(selectPlan, ctx.queryOrganization()); if ((selectHintMap == null) || selectHintMap.isEmpty()) { return selectPlan; @@ -3132,24 +3165,32 @@ protected LogicalPlan withSelectQuerySpecification( SelectClauseContext selectClause, Optional whereClause, Optional aggClause, - Optional havingClause) { + Optional havingClause, + Optional qualifyClause) { return ParserUtils.withOrigin(ctx, () -> { // from -> where -> group by -> having -> select LogicalPlan filter = withFilter(inputRelation, whereClause); SelectColumnClauseContext selectColumnCtx = selectClause.selectColumnClause(); LogicalPlan aggregate = withAggregate(filter, selectColumnCtx, aggClause); boolean isDistinct = (selectClause.DISTINCT() != null); + LogicalPlan selectPlan; if (!(aggregate instanceof Aggregate) && havingClause.isPresent()) { // create a project node for pattern match of ProjectToGlobalAggregate rule // then ProjectToGlobalAggregate rule can insert agg node as LogicalHaving node's child List projects = getNamedExpressions(selectColumnCtx.namedExpressionSeq()); LogicalPlan project = new LogicalProject<>(projects, isDistinct, aggregate); - return new LogicalHaving<>(ExpressionUtils.extractConjunctionToSet( + selectPlan = new LogicalHaving<>(ExpressionUtils.extractConjunctionToSet( getExpression((havingClause.get().booleanExpression()))), project); } else { LogicalPlan having = withHaving(aggregate, havingClause); - return withProjection(having, selectColumnCtx, aggClause, isDistinct); + selectPlan = withProjection(having, selectColumnCtx, aggClause, isDistinct); + } + // support qualify clause + if (qualifyClause.isPresent()) { + Expression qualifyExpr = getExpression(qualifyClause.get().booleanExpression()); + selectPlan = new LogicalQualify<>(Sets.newHashSet(qualifyExpr), selectPlan); } + return selectPlan; }); } @@ -3357,7 +3398,7 @@ public Object visitCommentRelationHint(CommentRelationHintContext ctx) { } protected LogicalPlan withProjection(LogicalPlan input, SelectColumnClauseContext selectCtx, - Optional aggCtx, boolean isDistinct) { + Optional aggCtx, boolean isDistinct) { return ParserUtils.withOrigin(selectCtx, () -> { if (aggCtx.isPresent()) { if (isDistinct) { @@ -3829,4 +3870,111 @@ public LogicalPlan visitShowConfig(ShowConfigContext ctx) { } return command; } + + @Override + public SetOptionsCommand visitSetOptions(SetOptionsContext ctx) { + List setVarOpList = new ArrayList<>(1); + for (Object child : ctx.children) { + if (child instanceof RuleNode) { + setVarOpList.add(typedVisit((RuleNode) child)); + } + } + return new SetOptionsCommand(setVarOpList); + } + + @Override + public SetVarOp visitSetSystemVariable(SetSystemVariableContext ctx) { + SetType type = SetType.DEFAULT; + if (ctx.GLOBAL() != null) { + type = SetType.GLOBAL; + } else if (ctx.LOCAL() != null || ctx.SESSION() != null) { + type = 
SetType.SESSION; + } + String name = stripQuotes(ctx.identifier().getText()); + Expression expression = ctx.expression() != null ? typedVisit(ctx.expression()) : null; + return new SetSessionVarOp(type, name, expression); + } + + @Override + public SetVarOp visitSetVariableWithType(SetVariableWithTypeContext ctx) { + SetType type = SetType.DEFAULT; + if (ctx.GLOBAL() != null) { + type = SetType.GLOBAL; + } else if (ctx.LOCAL() != null || ctx.SESSION() != null) { + type = SetType.SESSION; + } + String name = stripQuotes(ctx.identifier().getText()); + Expression expression = ctx.expression() != null ? typedVisit(ctx.expression()) : null; + return new SetSessionVarOp(type, name, expression); + } + + @Override + public SetVarOp visitSetPassword(SetPasswordContext ctx) { + String user; + String host; + boolean isDomain; + String passwordText; + UserIdentity userIdentity = null; + if (ctx.userIdentify() != null) { + user = stripQuotes(ctx.userIdentify().user.getText()); + host = ctx.userIdentify().host != null ? stripQuotes(ctx.userIdentify().host.getText()) : "%"; + isDomain = ctx.userIdentify().ATSIGN() != null; + userIdentity = new UserIdentity(user, host, isDomain); + } + passwordText = stripQuotes(ctx.STRING_LITERAL().getText()); + return new SetPassVarOp(userIdentity, new PassVar(passwordText, ctx.isPlain != null)); + } + + @Override + public SetVarOp visitSetNames(SetNamesContext ctx) { + return new SetNamesVarOp(); + } + + @Override + public SetVarOp visitSetCharset(SetCharsetContext ctx) { + String charset = ctx.charsetName != null ? stripQuotes(ctx.charsetName.getText()) : null; + return new SetCharsetAndCollateVarOp(charset); + } + + @Override + public SetVarOp visitSetCollate(SetCollateContext ctx) { + String charset = ctx.charsetName != null ? stripQuotes(ctx.charsetName.getText()) : null; + String collate = ctx.collateName != null ? stripQuotes(ctx.collateName.getText()) : null; + return new SetCharsetAndCollateVarOp(charset, collate); + } + + @Override + public SetVarOp visitSetLdapAdminPassword(SetLdapAdminPasswordContext ctx) { + String passwordText = stripQuotes(ctx.STRING_LITERAL().getText()); + boolean isPlain = ctx.PASSWORD() != null; + return new SetLdapPassVarOp(new PassVar(passwordText, isPlain)); + } + + @Override + public SetVarOp visitSetUserVariable(SetUserVariableContext ctx) { + String name = stripQuotes(ctx.identifier().getText()); + Expression expression = typedVisit(ctx.expression()); + return new SetUserDefinedVarOp(name, expression); + } + + @Override + public SetTransactionCommand visitSetTransaction(SetTransactionContext ctx) { + return new SetTransactionCommand(); + } + + @Override + public SetUserPropertiesCommand visitSetUserProperties(SetUserPropertiesContext ctx) { + String user = ctx.user != null ? 
stripQuotes(ctx.user.getText()) : null; + Map userPropertiesMap = visitPropertyItemList(ctx.propertyItemList()); + List setUserPropertyVarOpList = new ArrayList<>(userPropertiesMap.size()); + for (Map.Entry entry : userPropertiesMap.entrySet()) { + setUserPropertyVarOpList.add(new SetUserPropertyVarOp(user, entry.getKey(), entry.getValue())); + } + return new SetUserPropertiesCommand(user, setUserPropertyVarOpList); + } + + @Override + public SetDefaultStorageVaultCommand visitSetDefaultStorageVault(SetDefaultStorageVaultContext ctx) { + return new SetDefaultStorageVaultCommand(stripQuotes(ctx.identifier().getText())); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/ProjectAggregateExpressionsForCse.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/ProjectAggregateExpressionsForCse.java index 00220a00ffd784..a8038ab30b04ae 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/ProjectAggregateExpressionsForCse.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/processor/post/ProjectAggregateExpressionsForCse.java @@ -20,6 +20,7 @@ import org.apache.doris.nereids.CascadesContext; import org.apache.doris.nereids.properties.DataTrait; import org.apache.doris.nereids.properties.LogicalProperties; +import org.apache.doris.nereids.properties.PhysicalProperties; import org.apache.doris.nereids.trees.expressions.Alias; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; @@ -29,6 +30,7 @@ import org.apache.doris.nereids.trees.expressions.functions.agg.AggregateFunction; import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.algebra.Aggregate; +import org.apache.doris.nereids.trees.plans.physical.AbstractPhysicalPlan; import org.apache.doris.nereids.trees.plans.physical.PhysicalDistribute; import org.apache.doris.nereids.trees.plans.physical.PhysicalHashAggregate; import org.apache.doris.nereids.trees.plans.physical.PhysicalProject; @@ -42,6 +44,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; /** @@ -118,8 +121,14 @@ public Plan visitPhysicalHashAggregate(PhysicalHashAggregate agg () -> projectOutput, () -> DataTrait.EMPTY_TRAIT ); - PhysicalProject project = new PhysicalProject<>(projections, + AbstractPhysicalPlan child = ((AbstractPhysicalPlan) aggregate.child()); + PhysicalProperties projectPhysicalProperties = new PhysicalProperties( + child.getPhysicalProperties().getDistributionSpec(), + child.getPhysicalProperties().getOrderSpec()); + PhysicalProject project = new PhysicalProject<>(projections, Optional.empty(), projectLogicalProperties, + projectPhysicalProperties, + child.getStats(), aggregate.child()); aggregate = (PhysicalHashAggregate) aggregate .withAggOutput(aggOutputReplaced) diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/RuleType.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/RuleType.java index 49bc07d6a31301..dbf96ef2f1f2fb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/RuleType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/RuleType.java @@ -43,6 +43,9 @@ public enum RuleType { BINDING_USING_JOIN_SLOT(RuleTypeClass.REWRITE), BINDING_JOIN_SLOT(RuleTypeClass.REWRITE), BINDING_FILTER_SLOT(RuleTypeClass.REWRITE), + BINDING_QUALIFY_PROJECT_SLOT(RuleTypeClass.REWRITE), + BINDING_QUALIFY_AGGREGATE_SLOT(RuleTypeClass.REWRITE), + 
BINDING_QUALIFY_HAVING_SLOT(RuleTypeClass.REWRITE), BINDING_AGGREGATE_SLOT(RuleTypeClass.REWRITE), BINDING_REPEAT_SLOT(RuleTypeClass.REWRITE), BINDING_HAVING_SLOT(RuleTypeClass.REWRITE), @@ -67,11 +70,16 @@ public enum RuleType { FILL_UP_SORT_HAVING_PROJECT(RuleTypeClass.REWRITE), FILL_UP_SORT_HAVING_AGGREGATE(RuleTypeClass.REWRITE), FILL_UP_SORT_PROJECT(RuleTypeClass.REWRITE), + FILL_UP_QUALIFY_PROJECT(RuleTypeClass.REWRITE), + FILL_UP_QUALIFY_AGGREGATE(RuleTypeClass.REWRITE), + FILL_UP_QUALIFY_HAVING_AGGREGATE(RuleTypeClass.REWRITE), + FILL_UP_QUALIFY_HAVING_PROJECT(RuleTypeClass.REWRITE), RESOLVE_PROJECT_ALIAS(RuleTypeClass.REWRITE), RESOLVE_AGGREGATE_ALIAS(RuleTypeClass.REWRITE), PROJECT_TO_GLOBAL_AGGREGATE(RuleTypeClass.REWRITE), HAVING_TO_FILTER(RuleTypeClass.REWRITE), + QUALIFY_TO_FILTER(RuleTypeClass.REWRITE), ONE_ROW_RELATION_EXTRACT_AGGREGATE(RuleTypeClass.REWRITE), PROJECT_WITH_DISTINCT_TO_AGGREGATE(RuleTypeClass.REWRITE), AVG_DISTINCT_TO_SUM_DIV_COUNT(RuleTypeClass.REWRITE), @@ -304,7 +312,7 @@ public enum RuleType { // split limit SPLIT_LIMIT(RuleTypeClass.REWRITE), - PULL_UP_JOIN_FROM_UNIONALL(RuleTypeClass.REWRITE), + PULL_UP_JOIN_FROM_UNION_ALL(RuleTypeClass.REWRITE), // limit push down PUSH_LIMIT_THROUGH_JOIN(RuleTypeClass.REWRITE), PUSH_LIMIT_THROUGH_PROJECT_JOIN(RuleTypeClass.REWRITE), diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/AnalyzeCTE.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/AnalyzeCTE.java index 129b0860a74ee4..36a0459375b840 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/AnalyzeCTE.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/AnalyzeCTE.java @@ -101,6 +101,8 @@ private Pair>> analyzeCte( CTEId cteId = StatementScopeIdGenerator.newCTEId(); LogicalSubQueryAlias logicalSubQueryAlias = aliasQuery.withChildren(ImmutableList.of(analyzedCtePlan)); + BindExpression.checkSameNameSlot(logicalSubQueryAlias.child(0).getOutput(), + logicalSubQueryAlias.getAlias()); outerCteCtx = new CTEContext(cteId, logicalSubQueryAlias, outerCteCtx); outerCteCtx.setAnalyzedPlan(logicalSubQueryAlias); cteProducerPlans.add(new LogicalCTEProducer<>(cteId, logicalSubQueryAlias)); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindExpression.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindExpression.java index bebe2702cc5b00..cde659b91021ad 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindExpression.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindExpression.java @@ -80,6 +80,7 @@ import org.apache.doris.nereids.trees.plans.logical.LogicalOneRowRelation; import org.apache.doris.nereids.trees.plans.logical.LogicalPlan; import org.apache.doris.nereids.trees.plans.logical.LogicalProject; +import org.apache.doris.nereids.trees.plans.logical.LogicalQualify; import org.apache.doris.nereids.trees.plans.logical.LogicalRepeat; import org.apache.doris.nereids.trees.plans.logical.LogicalResultSink; import org.apache.doris.nereids.trees.plans.logical.LogicalSetOperation; @@ -184,6 +185,15 @@ protected boolean condition(Rule rule, Plan plan) { RuleType.BINDING_HAVING_SLOT.build( logicalHaving(any().whenNot(Aggregate.class::isInstance)).thenApply(this::bindHaving) ), + RuleType.BINDING_QUALIFY_PROJECT_SLOT.build( + logicalQualify(logicalProject()).thenApply(this::bindQualifyProject) + ), + RuleType.BINDING_QUALIFY_AGGREGATE_SLOT.build( + 
logicalQualify(aggregate()).thenApply(this::bindQualifyAggregate) + ), + RuleType.BINDING_QUALIFY_HAVING_SLOT.build( + logicalQualify(logicalHaving()).thenApply(this::bindQualifyHaving) + ), RuleType.BINDING_INLINE_TABLE_SLOT.build( logicalInlineTable().thenApply(this::bindInlineTable) ), @@ -693,6 +703,191 @@ private Plan bindFilter(MatchingContext> ctx) { return new LogicalFilter<>(boundConjuncts.build(), filter.child()); } + /** + * there a dup table sales + * CREATE TABLE sales ( + * year INT, + * country STRING, + * product STRING, + * profit INT + * ) + * DISTRIBUTED BY HASH(`year`) + * PROPERTIES ( + * "replication_num" = "1" + * ); + * 1.qualify -> project + * for example : + * select year + 1 as year from sales qualify row_number() over (order by year, country) = 1; + * We are binding the year field of table sales. Instead of renaming year + * ----------------------------------------------------------------------------------------------------------------- + * 2.qualify -> project(distinct) + * for example: + * select distinct year + 1, country from sales qualify row_number() over (order by year + 1) > 1; + * We are binding the year field of table sales. + * ----------------------------------------------------------------------------------------------------------------- + * 3.qualify -> project(distinct) -> agg + * for example: + * select distinct year + 1 as year from sales group by year qualify row_number() over (order by year) = 1; + * We are binding the year field of group by output. Instead of renaming year + * ----------------------------------------------------------------------------------------------------------------- + * 4.qualify -> project(distinct) -> having -> agg + * for example: + * select distinct year,country from sales group by year,country having year > 2000 + * qualify row_number() over (order by year + 1) > 1; + * We are binding the year field of group output. + *----------------------------------------------------------------------------------------------------------------- + * Note: For the query without agg, we first bind slot from the child of the project. + * If it cannot be bound in the child, then bind slot from the project. + * If query with agg, we bind slot from the group by first. if not then bind slot from the group output + * or not bind slot from the agg child output finally. 
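
To make the binding priority described in the comment above easier to follow, here is a minimal, self-contained sketch of "try scopes in order, take the first hit". The scope and slot types are hypothetical simplifications for illustration only, not the actual Doris analyzer classes:

```java
import java.util.List;
import java.util.Optional;
import java.util.function.Function;

final class QualifyBindingSketch {
    // Returns the first successful binding, trying scopes in priority order,
    // e.g. group-by slots, then the aggregate output, then the aggregate child's output.
    static <S> Optional<S> bindFirstMatch(String slotName,
                                          List<Function<String, Optional<S>>> scopesInPriorityOrder) {
        for (Function<String, Optional<S>> scope : scopesInPriorityOrder) {
            Optional<S> bound = scope.apply(slotName);
            if (bound.isPresent()) {
                return bound; // stop at the first scope that can resolve the slot
            }
        }
        return Optional.empty(); // unresolved; the real rule reports an analysis error instead
    }
}
```
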
+ */ + private Plan bindQualifyProject(MatchingContext>> ctx) { + LogicalQualify> qualify = ctx.root; + CascadesContext cascadesContext = ctx.cascadesContext; + LogicalProject project = qualify.child(); + ImmutableSet.Builder boundConjuncts = ImmutableSet.builderWithExpectedSize( + qualify.getConjuncts().size()); + if (project.child() instanceof Aggregate) { + Aggregate aggregate = (Aggregate) project.child(); + bindQualifyByAggregate(aggregate, cascadesContext, qualify, boundConjuncts); + } else if (project.child() instanceof LogicalHaving) { + LogicalHaving having = (LogicalHaving) project.child(); + if (having.child() instanceof Aggregate) { + Aggregate aggregate = (Aggregate) having.child(); + bindQualifyByAggregate(aggregate, cascadesContext, qualify, boundConjuncts); + } else { + throw new AnalysisException("unknown query structure"); + } + } else { + bindQualifyByProject(project, cascadesContext, qualify, boundConjuncts); + } + return new LogicalQualify<>(boundConjuncts.build(), qualify.child()); + } + + /** + * 1.qualify -> having -> agg + * for example: + * select country, sum(profit) as total, row_number() over (order by country) as rk from sales where year >= 2000 + * group by country having sum(profit) > 100 qualify rk = 1 + * We are binding the country field from group by. + * ----------------------------------------------------------------------------------------------------------------- + * 2.qualify -> having -> project + * for example: + * select year, country, profit, row_number() over (partition by year, country order by profit desc) as rk from + * (select * from sales) a where year >= 2000 having profit > 200 qualify rk = 1 order by profit,country limit 3 + * We are binding year/country/profit from sales + * ----------------------------------------------------------------------------------------------------------------- + * 3.qualify -> having -> project(distinct) + * for example: + * select distinct year + 1 as year from sales qualify row_number() over (order by year) = 1; + * we are binding year from sales. Instead of renaming year + */ + private Plan bindQualifyHaving(MatchingContext>> ctx) { + LogicalQualify> qualify = ctx.root; + CascadesContext cascadesContext = ctx.cascadesContext; + LogicalHaving having = qualify.child(); + ImmutableSet.Builder boundConjuncts = ImmutableSet.builderWithExpectedSize( + qualify.getConjuncts().size()); + if (having.child() instanceof Aggregate) { + bindQualifyByAggregate((Aggregate) having.child(), cascadesContext, qualify, + boundConjuncts); + } else { + bindQualifyByProject((LogicalProject) having.child(), cascadesContext, qualify, + boundConjuncts); + } + return new LogicalQualify<>(boundConjuncts.build(), qualify.child()); + } + + /** + * qualify -> agg + * for example: + * select country, sum(profit) as total, row_number() over (order by country) as rk from sales qualify rk > 1 + * we are binding the country field from group by. 
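
For readers unfamiliar with QUALIFY, the following hedged sketch spells out the semantics the QualifyToFilter rewrite relies on, reusing the example query from the comment above. The wrapping subquery is only an equivalent way to think about it, not the literal plan produced:

```java
// QUALIFY filters rows after window functions are evaluated, so it behaves like
// a wrapping subquery plus a plain WHERE on the window column.
final class QualifySemanticsExample {
    static final String WITH_QUALIFY =
            "select country, sum(profit) as total, row_number() over (order by country) as rk "
          + "from sales group by country qualify rk > 1";

    // Logically equivalent form once the qualify predicate becomes an ordinary filter:
    static final String EQUIVALENT_REWRITE =
            "select * from ( "
          + "  select country, sum(profit) as total, row_number() over (order by country) as rk "
          + "  from sales group by country "
          + ") t where rk > 1";
}
```
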
+ */ + private Plan bindQualifyAggregate(MatchingContext>> ctx) { + LogicalQualify> qualify = ctx.root; + CascadesContext cascadesContext = ctx.cascadesContext; + Aggregate aggregate = qualify.child(); + ImmutableSet.Builder boundConjuncts = ImmutableSet.builderWithExpectedSize( + qualify.getConjuncts().size()); + bindQualifyByAggregate(aggregate, cascadesContext, qualify, boundConjuncts); + return new LogicalQualify<>(boundConjuncts.build(), qualify.child()); + } + + private void bindQualifyByProject(LogicalProject project, CascadesContext cascadesContext, + LogicalQualify qualify, + ImmutableSet.Builder boundConjuncts) { + Supplier defaultScope = Suppliers.memoize(() -> + toScope(cascadesContext, PlanUtils.fastGetChildrenOutputs(project.children())) + ); + Scope backupScope = toScope(cascadesContext, project.getOutput()); + + SimpleExprAnalyzer analyzer = buildCustomSlotBinderAnalyzer( + qualify, cascadesContext, defaultScope.get(), true, true, + (self, unboundSlot) -> { + List slots = self.bindSlotByScope(unboundSlot, defaultScope.get()); + if (!slots.isEmpty()) { + return slots; + } + return self.bindSlotByScope(unboundSlot, backupScope); + }); + + for (Expression conjunct : qualify.getConjuncts()) { + conjunct = analyzer.analyze(conjunct); + conjunct = TypeCoercionUtils.castIfNotSameType(conjunct, BooleanType.INSTANCE); + boundConjuncts.add(conjunct); + } + } + + private void bindQualifyByAggregate(Aggregate aggregate, CascadesContext cascadesContext, + LogicalQualify qualify, + ImmutableSet.Builder boundConjuncts) { + Supplier bindByAggChild = Suppliers.memoize(() -> { + Scope aggChildOutputScope + = toScope(cascadesContext, PlanUtils.fastGetChildrenOutputs(aggregate.children())); + return (analyzer, unboundSlot) -> analyzer.bindSlotByScope(unboundSlot, aggChildOutputScope); + }); + Scope aggOutputScope = toScope(cascadesContext, aggregate.getOutput()); + Supplier bindByGroupByThenAggOutputThenAggChildOutput = Suppliers.memoize(() -> { + List groupByExprs = aggregate.getGroupByExpressions(); + ImmutableList.Builder groupBySlots = ImmutableList.builderWithExpectedSize(groupByExprs.size()); + for (Expression groupBy : groupByExprs) { + if (groupBy instanceof Slot) { + groupBySlots.add((Slot) groupBy); + } + } + Scope groupBySlotsScope = toScope(cascadesContext, groupBySlots.build()); + + return (analyzer, unboundSlot) -> { + List boundInGroupBy = analyzer.bindSlotByScope(unboundSlot, groupBySlotsScope); + if (!boundInGroupBy.isEmpty()) { + return ImmutableList.of(boundInGroupBy.get(0)); + } + List boundInAggOutput = analyzer.bindSlotByScope(unboundSlot, aggOutputScope); + if (!boundInAggOutput.isEmpty()) { + return ImmutableList.of(boundInAggOutput.get(0)); + } + List expressions = bindByAggChild.get().bindSlot(analyzer, unboundSlot); + return expressions.isEmpty() ? 
expressions : ImmutableList.of(expressions.get(0)); + }; + }); + + ExpressionAnalyzer qualifyAnalyzer = new ExpressionAnalyzer(qualify, aggOutputScope, cascadesContext, + true, true) { + @Override + protected List bindSlotByThisScope(UnboundSlot unboundSlot) { + return bindByGroupByThenAggOutputThenAggChildOutput.get().bindSlot(this, unboundSlot); + } + }; + + ExpressionRewriteContext rewriteContext = new ExpressionRewriteContext(cascadesContext); + for (Expression expression : qualify.getConjuncts()) { + Expression boundConjunct = qualifyAnalyzer.analyze(expression, rewriteContext); + boundConjunct = TypeCoercionUtils.castIfNotSameType(boundConjunct, BooleanType.INSTANCE); + boundConjuncts.add(boundConjunct); + } + } + private List exceptStarSlots(Set boundExcepts, BoundStar boundStar) { List slots = boundStar.getSlots(); if (!boundExcepts.isEmpty()) { @@ -947,7 +1142,11 @@ private LogicalTVFRelation bindTableValuedFunction(MatchingContext childOutputs, String subQueryAlias) { + /** + * Check the slot in childOutputs is duplicated or not + * If childOutputs has duplicated column name, would throw analysis exception + */ + public static void checkSameNameSlot(List childOutputs, String subQueryAlias) { Set nameSlots = new HashSet<>(childOutputs.size() * 2); for (Slot s : childOutputs) { if (!nameSlots.add(s.getInternalName())) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlots.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlots.java index c55ed5957ba20c..c392b85317dc28 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlots.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlots.java @@ -28,6 +28,7 @@ import org.apache.doris.nereids.trees.expressions.NamedExpression; import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.expressions.SlotReference; +import org.apache.doris.nereids.trees.expressions.WindowExpression; import org.apache.doris.nereids.trees.expressions.functions.agg.AggregateFunction; import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.algebra.Aggregate; @@ -245,7 +246,7 @@ public List buildRules() { having.withChildren(new LogicalProject<>(projects, project.child()))); } }) - ) + ) ); } @@ -316,6 +317,8 @@ public void resolve(Expression expression) { + expression.toSql() + "."); } generateAliasForNewOutputSlots(expression); + } else if (expression instanceof WindowExpression) { + generateAliasForNewOutputSlots(expression); } else { // Try to resolve the children. 
for (Expression child : expression.children()) { @@ -387,7 +390,7 @@ interface PlanGenerator { Plan apply(Resolver resolver, Aggregate aggregate); } - private Plan createPlan(Resolver resolver, Aggregate aggregate, PlanGenerator planGenerator) { + protected Plan createPlan(Resolver resolver, Aggregate aggregate, PlanGenerator planGenerator) { Aggregate newAggregate; if (resolver.getNewOutputSlots().isEmpty()) { newAggregate = aggregate; diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpQualifyMissingSlot.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpQualifyMissingSlot.java new file mode 100644 index 00000000000000..bb99883254729f --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/FillUpQualifyMissingSlot.java @@ -0,0 +1,272 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.rules.analysis; + +import org.apache.doris.nereids.exceptions.AnalysisException; +import org.apache.doris.nereids.rules.Rule; +import org.apache.doris.nereids.rules.RuleType; +import org.apache.doris.nereids.trees.expressions.Alias; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.NamedExpression; +import org.apache.doris.nereids.trees.expressions.Slot; +import org.apache.doris.nereids.trees.expressions.SlotReference; +import org.apache.doris.nereids.trees.expressions.WindowExpression; +import org.apache.doris.nereids.trees.expressions.visitor.DefaultExpressionRewriter; +import org.apache.doris.nereids.trees.expressions.visitor.DefaultExpressionVisitor; +import org.apache.doris.nereids.trees.plans.Plan; +import org.apache.doris.nereids.trees.plans.algebra.Aggregate; +import org.apache.doris.nereids.trees.plans.logical.LogicalAggregate; +import org.apache.doris.nereids.trees.plans.logical.LogicalHaving; +import org.apache.doris.nereids.trees.plans.logical.LogicalProject; +import org.apache.doris.nereids.trees.plans.logical.LogicalQualify; +import org.apache.doris.nereids.trees.plans.visitor.DefaultPlanVisitor; +import org.apache.doris.nereids.util.ExpressionUtils; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +/** + * We don't fill the missing slots in FillUpMissingSlots. + * Because for distinct queries, + * for example: + * select distinct year,country from sales having year > 2000 qualify row_number() over (order by year + 1) > 1; + * It would be converted into the form of agg. 
+ * before logical plan: + * qualify + * | + * project(distinct) + * | + * scan + * apply ProjectWithDistinctToAggregate rule + * after logical plan: + * qualify + * | + * agg + * | + * scan + * if fill the missing slots in FillUpMissingSlots(after ProjectWithDistinctToAggregate). qualify could hardly be + * pushed under the agg of distinct. + * But apply FillUpQualifyMissingSlot rule before ProjectWithDistinctToAggregate + * logical plan: + * project(distinct) + * | + * qualify + * | + * project + * | + * scan + * and then apply ProjectWithDistinctToAggregate rule + * logical plan: + * agg + * | + * qualify + * | + * project + * | + * scan + * So it is easy to handle. + */ +public class FillUpQualifyMissingSlot extends FillUpMissingSlots { + @Override + public List buildRules() { + return ImmutableList.of( + /* + qualify -> project + qualify -> project(distinct) + qualify -> project(distinct) -> agg + qualify -> project(distinct) -> having -> agg + */ + RuleType.FILL_UP_QUALIFY_PROJECT.build( + logicalQualify(logicalProject()) + .then(qualify -> { + checkWindow(qualify); + LogicalProject project = qualify.child(); + return createPlan(project, qualify.getConjuncts(), (newConjuncts, projects) -> { + LogicalProject bottomProject = new LogicalProject<>(projects, project.child()); + LogicalQualify logicalQualify = new LogicalQualify<>(newConjuncts, bottomProject); + ImmutableList copyOutput = ImmutableList.copyOf(project.getOutput()); + return new LogicalProject<>(copyOutput, project.isDistinct(), logicalQualify); + }); + }) + ), + /* + qualify -> agg + */ + RuleType.FILL_UP_QUALIFY_AGGREGATE.build( + logicalQualify(aggregate()).then(qualify -> { + checkWindow(qualify); + Aggregate agg = qualify.child(); + Resolver resolver = new Resolver(agg); + qualify.getConjuncts().forEach(resolver::resolve); + return createPlan(resolver, agg, (r, a) -> { + Set newConjuncts = ExpressionUtils.replace( + qualify.getConjuncts(), r.getSubstitution()); + boolean notChanged = newConjuncts.equals(qualify.getConjuncts()); + if (notChanged && a.equals(agg)) { + return null; + } + return notChanged ? qualify.withChildren(a) : new LogicalQualify<>(newConjuncts, a); + }); + }) + ), + /* + qualify -> having -> agg + */ + RuleType.FILL_UP_QUALIFY_HAVING_AGGREGATE.build( + logicalQualify(logicalHaving(aggregate())).then(qualify -> { + checkWindow(qualify); + LogicalHaving> having = qualify.child(); + Aggregate agg = qualify.child().child(); + Resolver resolver = new Resolver(agg); + qualify.getConjuncts().forEach(resolver::resolve); + return createPlan(resolver, agg, (r, a) -> { + Set newConjuncts = ExpressionUtils.replace( + qualify.getConjuncts(), r.getSubstitution()); + boolean notChanged = newConjuncts.equals(qualify.getConjuncts()); + if (notChanged && a.equals(agg)) { + return null; + } + return notChanged ? 
qualify.withChildren(having.withChildren(a)) : + new LogicalQualify<>(newConjuncts, having.withChildren(a)); + }); + }) + ), + /* + qualify -> having -> project + qualify -> having -> project(distinct) + */ + RuleType.FILL_UP_QUALIFY_HAVING_PROJECT.build( + logicalQualify(logicalHaving(logicalProject())).then(qualify -> { + checkWindow(qualify); + LogicalHaving> having = qualify.child(); + LogicalProject project = qualify.child().child(); + return createPlan(project, qualify.getConjuncts(), (newConjuncts, projects) -> { + ImmutableList copyOutput = ImmutableList.copyOf(project.getOutput()); + if (project.isDistinct()) { + Set missingSlots = having.getExpressions().stream() + .map(Expression::getInputSlots) + .flatMap(Set::stream) + .filter(s -> !projects.contains(s)) + .collect(Collectors.toSet()); + List output = ImmutableList.builder() + .addAll(projects).addAll(missingSlots).build(); + LogicalQualify> logicalQualify = + new LogicalQualify<>(newConjuncts, new LogicalProject<>(output, project.child())); + return having.withChildren(project.withProjects(copyOutput).withChildren(logicalQualify)); + } else { + return new LogicalProject<>(copyOutput, new LogicalQualify<>(newConjuncts, + having.withChildren(project.withProjects(projects)))); + } + }); + }) + ) + ); + } + + interface PlanGenerator { + Plan apply(Set newConjuncts, List projects); + } + + private Plan createPlan(LogicalProject project, Set conjuncts, PlanGenerator planGenerator) { + Set projectOutputSet = project.getOutputSet(); + List newOutputSlots = Lists.newArrayList(); + Set newConjuncts = new HashSet<>(); + for (Expression conjunct : conjuncts) { + conjunct = conjunct.accept(new DefaultExpressionRewriter>() { + @Override + public Expression visitWindow(WindowExpression window, List context) { + Alias alias = new Alias(window); + context.add(alias); + return alias.toSlot(); + } + }, newOutputSlots); + newConjuncts.add(conjunct); + } + Set notExistedInProject = conjuncts.stream() + .map(Expression::getInputSlots) + .flatMap(Set::stream) + .filter(s -> !projectOutputSet.contains(s)) + .collect(Collectors.toSet()); + + newOutputSlots.addAll(notExistedInProject); + if (newOutputSlots.isEmpty()) { + return null; + } + List projects = ImmutableList.builder() + .addAll(project.getProjects()) + .addAll(newOutputSlots).build(); + + return planGenerator.apply(newConjuncts, projects); + } + + private void checkWindow(LogicalQualify qualify) throws AnalysisException { + Set inputSlots = new HashSet<>(); + AtomicBoolean hasWindow = new AtomicBoolean(false); + for (Expression conjunct : qualify.getConjuncts()) { + conjunct.accept(new DefaultExpressionVisitor>() { + @Override + public Void visitWindow(WindowExpression windowExpression, Set context) { + hasWindow.set(true); + return null; + } + + @Override + public Void visitSlotReference(SlotReference slotReference, Set context) { + context.add(slotReference); + return null; + } + + }, inputSlots); + } + if (hasWindow.get()) { + return; + } + qualify.accept(new DefaultPlanVisitor() { + private void findWindow(List namedExpressions) { + for (NamedExpression slot : namedExpressions) { + if (slot instanceof Alias && slot.child(0) instanceof WindowExpression) { + if (inputSlots.contains(slot.toSlot())) { + hasWindow.set(true); + } + } + } + } + + @Override + public Void visitLogicalProject(LogicalProject project, Void context) { + findWindow(project.getProjects()); + return visit(project, context); + } + + @Override + public Void visitLogicalAggregate(LogicalAggregate aggregate, Void context) 
{ + findWindow(aggregate.getOutputExpressions()); + return visit(aggregate, context); + } + }, null); + if (!hasWindow.get()) { + throw new AnalysisException("qualify only used for window expression"); + } + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/QualifyToFilter.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/QualifyToFilter.java new file mode 100644 index 00000000000000..8bd933010acc84 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/QualifyToFilter.java @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.rules.analysis; + +import org.apache.doris.nereids.rules.Rule; +import org.apache.doris.nereids.rules.RuleType; +import org.apache.doris.nereids.trees.plans.logical.LogicalFilter; + +/** + * qualify to filter. + */ +public class QualifyToFilter extends OneAnalysisRuleFactory { + @Override + public Rule build() { + return logicalQualify() + .then(qualify -> new LogicalFilter<>(qualify.getConjuncts(), qualify.child())) + .toRule(RuleType.QUALIFY_TO_FILTER); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRule.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRule.java index 04acb91d9e2d39..cd10b6a7e37d24 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRule.java @@ -36,7 +36,7 @@ public class FoldConstantRule implements ExpressionPatternRuleFactory { public static final FoldConstantRule INSTANCE = new FoldConstantRule(); private static final ExpressionBottomUpRewriter FULL_FOLD_REWRITER = ExpressionRewrite.bottomUp( - FoldConstantRuleOnFE.VISITOR_INSTANCE, + FoldConstantRuleOnFE.PATTERN_MATCH_INSTANCE, FoldConstantRuleOnBE.INSTANCE ); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java index 1857ddd05773bb..4ba4d97b95ea9d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rules/FoldConstantRuleOnFE.java @@ -536,9 +536,6 @@ public Expression visitCaseWhen(CaseWhen caseWhen, ExpressionRewriteContext cont Expression defaultResult = null; if (caseWhen.getDefaultValue().isPresent()) { defaultResult = caseWhen.getDefaultValue().get(); - if (deepRewrite) { - defaultResult = rewrite(defaultResult, context); - } } if (foundNewDefault) { defaultResult = newDefault; 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpJoinFromUnionAll.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpJoinFromUnionAll.java index 28d70ad558e656..b3df9b92c56a84 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpJoinFromUnionAll.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/PullUpJoinFromUnionAll.java @@ -17,648 +17,599 @@ package org.apache.doris.nereids.rules.rewrite; -import org.apache.doris.catalog.constraint.ForeignKeyConstraint; -import org.apache.doris.catalog.constraint.PrimaryKeyConstraint; -import org.apache.doris.catalog.constraint.UniqueConstraint; -import org.apache.doris.nereids.hint.DistributeHint; -import org.apache.doris.nereids.jobs.JobContext; +import org.apache.doris.catalog.constraint.TableIdentifier; +import org.apache.doris.common.Pair; import org.apache.doris.nereids.rules.Rule; import org.apache.doris.nereids.rules.RuleType; import org.apache.doris.nereids.trees.expressions.Alias; import org.apache.doris.nereids.trees.expressions.EqualTo; -import org.apache.doris.nereids.trees.expressions.ExprId; import org.apache.doris.nereids.trees.expressions.Expression; import org.apache.doris.nereids.trees.expressions.NamedExpression; import org.apache.doris.nereids.trees.expressions.Slot; import org.apache.doris.nereids.trees.expressions.SlotReference; -import org.apache.doris.nereids.trees.expressions.literal.Literal; -import org.apache.doris.nereids.trees.plans.DistributeType; -import org.apache.doris.nereids.trees.plans.JoinType; import org.apache.doris.nereids.trees.plans.Plan; import org.apache.doris.nereids.trees.plans.algebra.SetOperation.Qualifier; -import org.apache.doris.nereids.trees.plans.logical.LogicalAggregate; import org.apache.doris.nereids.trees.plans.logical.LogicalCatalogRelation; import org.apache.doris.nereids.trees.plans.logical.LogicalFilter; import org.apache.doris.nereids.trees.plans.logical.LogicalJoin; -import org.apache.doris.nereids.trees.plans.logical.LogicalPlan; import org.apache.doris.nereids.trees.plans.logical.LogicalProject; import org.apache.doris.nereids.trees.plans.logical.LogicalUnion; -import org.apache.doris.nereids.trees.plans.visitor.CustomRewriter; -import org.apache.doris.nereids.trees.plans.visitor.DefaultPlanRewriter; import org.apache.doris.nereids.util.ExpressionUtils; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import java.util.ArrayList; -import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; -import java.util.stream.Collectors; +import javax.annotation.Nullable; /** - * Pull up join from union all rule. 
+ * Pull up join from union all rules with project: + * Union + * / \ + * project project + * (optional) (optional) + * | | + * Join Join + * / \ / \ + * t1 t2 t1 t3 (t1 is common side; t2,t3 is other side) + * =====> + * project + * | + * Join + * / \ + * Union t1 + * / \ + * project project + * (optional) (optional) + * | | + * t2 t3 */ public class PullUpJoinFromUnionAll extends OneRewriteRuleFactory { - private static final Set> SUPPORTED_PLAN_TYPE = ImmutableSet.of( - LogicalFilter.class, - LogicalJoin.class, - LogicalProject.class, - LogicalCatalogRelation.class - ); - - private static class PullUpContext { - public static final String unifiedOutputAlias = "PULL_UP_UNIFIED_OUTPUT_ALIAS"; - public final Map> pullUpCandidatesMaps = Maps.newHashMap(); - public final Map tableToJoinRootMap = Maps.newHashMap(); - public final Map tableToAggrRootMap = Maps.newHashMap(); - public final Map origChild0ToNewUnionOutputMap = Maps.newHashMap(); - public final List aggrChildList = Lists.newArrayList(); - public final List joinChildList = Lists.newArrayList(); - public final List replaceColumns = Lists.newArrayList(); - public final Map pullUpTableToPkSlotMap = Maps.newHashMap(); - public int replacedColumnIndex = -1; - public LogicalCatalogRelation pullUpTable; - - // the slot will replace the original pk in group by and select list - public SlotReference replaceColumn; - public boolean needAddReplaceColumn = false; - - public PullUpContext() {} - - public void setReplacedColumn(SlotReference slot) { - this.replaceColumn = slot; - } - - public void setPullUpTable(LogicalCatalogRelation table) { - this.pullUpTable = table; - } - - public void setNeedAddReplaceColumn(boolean needAdd) { - this.needAddReplaceColumn = needAdd; - } - } - @Override public Rule build() { return logicalUnion() - .when(union -> union.getQualifier() != Qualifier.DISTINCT) - .then(union -> { - PullUpContext context = new PullUpContext(); - if (!checkUnionPattern(union, context) - || !checkJoinCondition(context) - || !checkGroupByKeys(context)) { - return null; - } - // only support single table pull up currently - if (context.pullUpCandidatesMaps.entrySet().size() != 1) { - return null; - } + .when(union -> union.getQualifier() != Qualifier.DISTINCT + && union.getConstantExprsList().isEmpty()) + .then(union -> { + HashMap, Plan>>> commonChildrenMap = + tryToExtractCommonChild(union); + if (commonChildrenMap == null) { + return null; + } - List pullUpTableList = context.pullUpCandidatesMaps - .entrySet().iterator().next().getValue(); - if (pullUpTableList.size() != union.children().size() - || context.replaceColumns.size() != union.children().size() - || !checkNoFilterOnPullUpTable(pullUpTableList, context)) { - return null; - } - // make new union node - LogicalUnion newUnionNode = makeNewUnionNode(union, pullUpTableList, context); - // make new join node - LogicalJoin newJoin = makeNewJoin(newUnionNode, pullUpTableList.get(0), context); - // add project on pull up table with origin union output - List newProjectOutputs = makeNewProjectOutputs(union, newJoin, context); + // The joinsAndCommonSides size is the same as the number of union children. 
+ List, Plan>> joinsAndCommonSides = null; + for (List, Plan>> childSet : commonChildrenMap.values()) { + if (childSet.size() == union.children().size()) { + joinsAndCommonSides = childSet; + break; + } + } + if (joinsAndCommonSides == null) { + return null; + } - return new LogicalProject(newProjectOutputs, newJoin); - }).toRule(RuleType.PULL_UP_JOIN_FROM_UNIONALL); - } + List> otherOutputsList = new ArrayList<>(); + List> upperProjectExpressionOrIndex = new ArrayList<>(); + // First, check whether the output of the union child meets the requirements. + if (!checkUnionChildrenOutput(union, joinsAndCommonSides, otherOutputsList, + upperProjectExpressionOrIndex)) { + return null; + } - private boolean checkUnionPattern(LogicalUnion union, PullUpContext context) { - int tableListNumber = -1; - for (Plan child : union.children()) { - if (!(child instanceof LogicalProject - && child.child(0) instanceof LogicalAggregate - && child.child(0).child(0) instanceof LogicalProject - && child.child(0).child(0).child(0) instanceof LogicalJoin)) { - return false; - } - LogicalAggregate aggrRoot = (LogicalAggregate) child.child(0); - if (!checkAggrRoot(aggrRoot)) { - return false; - } - context.aggrChildList.add(aggrRoot); - LogicalJoin joinRoot = (LogicalJoin) aggrRoot.child().child(0); - // check join under union is spj - if (!checkJoinRoot(joinRoot)) { - return false; - } - context.joinChildList.add(joinRoot); + List>> commonSlotToOtherSlotMaps = new ArrayList<>(); + Set joinCommonSlots = new LinkedHashSet<>(); + if (!checkJoinCondition(joinsAndCommonSides, commonSlotToOtherSlotMaps, joinCommonSlots)) { + return null; + } - List tableList = getTableListUnderJoin(joinRoot); - // add into table -> joinRoot map - for (LogicalCatalogRelation table : tableList) { - context.tableToJoinRootMap.put(table, joinRoot); - context.tableToAggrRootMap.put(table, aggrRoot); - } - if (tableListNumber == -1) { - tableListNumber = tableList.size(); - } else { - // check all union children have the same number of tables - if (tableListNumber != tableList.size()) { - return false; - } - } + Map> commonSlotToProjectsIndex = new HashMap<>(); + LogicalUnion newUnion = constructNewUnion(joinsAndCommonSides, otherOutputsList, + commonSlotToOtherSlotMaps, joinCommonSlots, commonSlotToProjectsIndex); + LogicalJoin newJoin = constructNewJoin(newUnion, + commonSlotToProjectsIndex, joinsAndCommonSides); + LogicalProject newProject = constructNewProject(union, newJoin, upperProjectExpressionOrIndex); + return newProject; + }).toRule(RuleType.PULL_UP_JOIN_FROM_UNION_ALL); + } - for (LogicalCatalogRelation table : tableList) { - // key: qualified table name - // value: table list in all union children - String qName = makeQualifiedName(table); - if (context.pullUpCandidatesMaps.get(qName) == null) { - List newList = new ArrayList<>(); - newList.add(table); - context.pullUpCandidatesMaps.put(qName, newList); - } else { - context.pullUpCandidatesMaps.get(qName).add(table); - } - } + private LogicalProject constructNewProject(LogicalUnion originUnion, LogicalJoin newJoin, + List> upperProjectExpressionOrIndex) { + List originOutput = originUnion.getOutput(); + List upperProjects = new ArrayList<>(); + List newUnionOutput = newJoin.left().getOutput(); + if (originOutput.size() != upperProjectExpressionOrIndex.size()) { + return null; } - int expectedNumber = union.children().size(); - List toBeRemoved = new ArrayList<>(); - // check the pull up table candidate exists in all union children - for (Map.Entry> e : 
context.pullUpCandidatesMaps.entrySet()) { - if (e.getValue().size() != expectedNumber) { - toBeRemoved.add(e.getKey()); + for (int i = 0; i < upperProjectExpressionOrIndex.size(); ++i) { + Pair pair = upperProjectExpressionOrIndex.get(i); + boolean fromCommon = pair.first; + if (fromCommon) { + upperProjects.add(new Alias(originOutput.get(i).getExprId(), pair.second.exprFromCommonSide, + originOutput.get(i).getName())); + } else { + upperProjects.add(new Alias(originOutput.get(i).getExprId(), + newUnionOutput.get(pair.second.indexOfNewUnionOutput), originOutput.get(i).getName())); } } - for (String key : toBeRemoved) { - context.pullUpCandidatesMaps.remove(key); - } - return !context.pullUpCandidatesMaps.isEmpty(); + return new LogicalProject<>(upperProjects, newJoin); } - private boolean checkJoinCondition(PullUpContext context) { - List toBeRemoved = new ArrayList<>(); - for (Map.Entry> e : context.pullUpCandidatesMaps.entrySet()) { - List tableList = e.getValue(); - boolean allFound = true; - for (LogicalCatalogRelation table : tableList) { - LogicalJoin joinRoot = context.tableToJoinRootMap.get(table); - if (joinRoot == null) { - return false; - } else if (!checkJoinConditionOnPk(joinRoot, table, context)) { - allFound = false; - break; - } - } - if (!allFound) { - toBeRemoved.add(e.getKey()); + private LogicalJoin constructNewJoin(LogicalUnion union, + Map> commonSlotToProjectsIndex, + List, Plan>> commonChild) { + LogicalJoin originalJoin = commonChild.iterator().next().first; + Plan newCommon = commonChild.iterator().next().second; + List newHashExpressions = new ArrayList<>(); + List unionOutputs = union.getOutput(); + for (Map.Entry> entry : commonSlotToProjectsIndex.entrySet()) { + SlotReference commonSlot = entry.getKey(); + for (Integer index : entry.getValue()) { + newHashExpressions.add(new EqualTo(unionOutputs.get(index), commonSlot)); } } - for (String table : toBeRemoved) { - context.pullUpCandidatesMaps.remove(table); - } - - if (context.pullUpCandidatesMaps.isEmpty()) { - return false; - } - return true; + return (LogicalJoin) originalJoin + .withJoinConjuncts(newHashExpressions, ImmutableList.of(), originalJoin.getJoinReorderContext()) + .withChildren(union, newCommon); } - private boolean checkGroupByKeys(PullUpContext context) { - List toBeRemoved = new ArrayList<>(); - for (Map.Entry> e : context.pullUpCandidatesMaps.entrySet()) { - List tableList = e.getValue(); - boolean allFound = true; - for (LogicalCatalogRelation table : tableList) { - LogicalAggregate aggrRoot = context.tableToAggrRootMap.get(table); - if (aggrRoot == null) { - return false; - } else if (!checkAggrKeyOnUkOrPk(aggrRoot, table)) { - allFound = false; - break; - } + // Output parameter: commonSlotToProjectsIndex, key is the common slot of join condition, + // value is the index of the other slot corresponding to this common slot in the union output, + // which is used to construct the join condition of the new join. 
+ private LogicalUnion constructNewUnion(List, Plan>> joinsAndCommonSides, + List> otherOutputsList, + List>> commonSlotToOtherSlotMaps, + Set joinCommonSlots, Map> commonSlotToProjectsIndex) { + List newChildren = new ArrayList<>(); + for (int i = 0; i < joinsAndCommonSides.size(); ++i) { + Pair, Plan> pair = joinsAndCommonSides.get(i); + // find the child that is not the common side + Plan otherSide; + if (pair.second == pair.first.left()) { + otherSide = pair.first.right(); + } else { + otherSide = pair.first.left(); } - if (!allFound) { - toBeRemoved.add(e.getKey()); + List projects = otherOutputsList.get(i); + // In projects, we also need to add the other slot in join condition + // TODO: may eliminate repeated output slots: + // e.g.select t2.a from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + // select t3.a from test_like1 t1 join test_like3 t3 on t1.a=t3.a; + // new union child will output t2.a/t3.a twice. one for output, the other for join condition. + Map> commonSlotToOtherSlotMap = commonSlotToOtherSlotMaps.get(i); + for (SlotReference commonSlot : joinCommonSlots) { + List otherSlots = commonSlotToOtherSlotMap.get(commonSlot); + for (SlotReference otherSlot : otherSlots) { + if (i == 0) { + int index = projects.size(); + commonSlotToProjectsIndex.computeIfAbsent(commonSlot, k -> new ArrayList<>()).add(index); + } + projects.add(otherSlot); + } } - } - for (String table : toBeRemoved) { - context.pullUpCandidatesMaps.remove(table); + LogicalProject logicalProject = new LogicalProject<>(projects, otherSide); + newChildren.add(logicalProject); } - if (context.pullUpCandidatesMaps.isEmpty()) { - return false; - } - return true; + //2. construct new union + LogicalUnion newUnion = new LogicalUnion(Qualifier.ALL, newChildren); + List> childrenOutputs = newChildren.stream() + .map(p -> p.getOutput().stream() + .map(SlotReference.class::cast) + .collect(ImmutableList.toImmutableList())) + .collect(ImmutableList.toImmutableList()); + newUnion = (LogicalUnion) newUnion.withChildrenAndTheirOutputs(newChildren, childrenOutputs); + newUnion = newUnion.withNewOutputs(newUnion.buildNewOutputs()); + return newUnion; } - private boolean checkNoFilterOnPullUpTable(List pullUpTableList, PullUpContext context) { - for (LogicalCatalogRelation table : pullUpTableList) { - LogicalJoin joinRoot = context.tableToJoinRootMap.get(table); - if (joinRoot == null) { - return false; + /** This function is used to check whether the join condition meets the optimization condition + * Check the join condition, requiring that the join condition of each join is equal and the number is the same. + * Generate commonSlotToOtherSlotMaps. In each map of the list, the keySet must be the same, + * and the length of the value list of the same key must be the same. + * These are sql that can not do this transform: + * SQL1: select t2.a+1,2 from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + * select t3.a+1,3 from test_like1 t1 join test_like3 t3 on t1.a=t3.a and t1.b=t3.b; + * SQL2: select t2.a+1,2 from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + * select t3.a+1,3 from test_like1 t1 join test_like3 t3 on t1.b=t3.a; + * SQL3: select t2.a+1,2 from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + * select t3.a+1,3 from test_like1 t1 join test_like3 t3 on t1.a=t3.a and t1.a=t3.b; + * @param commonSlotToOtherSlotMaps Output parameter that records the join conditions for each join operation. 
+ * The key represents the slot on the common side of the join, while the value + * corresponds to the slot on the other side. + * Example: + * For the following SQL: + * SELECT t2.a + 1, 2 FROM test_like1 t1 + * JOIN test_like2 t2 ON t1.a = t2.a AND t1.a = t2.c AND t1.b = t2.b + * UNION ALL + * SELECT t3.a + 1, 3 FROM test_like1 t1 + * JOIN test_like3 t3 ON t1.a = t3.a AND t1.a = t3.d AND t1.b = t3.b; + * commonSlotToOtherSlotMaps would be: + * {{t1.a: t2.a, t2.c; t1.b: t2.b}, {t1.a: t3.a, t3.d; t1.b: t3.b}} + * This parameter is used to verify if the join conditions meet + * optimization requirements and to help generate new join conditions. + * @param joinCommonSlots output parameter, which records join common side slots. + * */ + private boolean checkJoinCondition(List, Plan>> joinsAndCommonSides, + List>> commonSlotToOtherSlotMaps, + Set joinCommonSlots) { + Map> conditionMapFirst = new HashMap<>(); + Map commonJoinSlotMap = buildCommonJoinMap(joinsAndCommonSides); + for (int i = 0; i < joinsAndCommonSides.size(); ++i) { + Pair, Plan> pair = joinsAndCommonSides.get(i); + LogicalJoin join = pair.first; + Plan commonSide = pair.second; + Map> conditionMapSubsequent = new HashMap<>(); + for (Expression condition : join.getHashJoinConjuncts()) { + if (!(condition instanceof EqualTo)) { + return false; + } + EqualTo equalTo = (EqualTo) condition; + if (!(equalTo.left() instanceof SlotReference) || !(equalTo.right() instanceof SlotReference)) { + return false; + } + SlotReference commonSideSlot; + SlotReference otherSideSlot; + if (commonSide.getOutputSet().contains(equalTo.left())) { + commonSideSlot = (SlotReference) equalTo.left(); + otherSideSlot = (SlotReference) equalTo.right(); + } else { + commonSideSlot = (SlotReference) equalTo.right(); + otherSideSlot = (SlotReference) equalTo.left(); + } + if (i == 0) { + conditionMapFirst.computeIfAbsent(commonSideSlot, k -> new ArrayList<>()).add(otherSideSlot); + joinCommonSlots.add(commonSideSlot); + } else { + conditionMapSubsequent.computeIfAbsent( + (SlotReference) ExpressionUtils.replace(commonSideSlot, commonJoinSlotMap), + k -> new ArrayList<>()).add(otherSideSlot); + } + } + if (i == 0) { + commonSlotToOtherSlotMaps.add(conditionMapFirst); } else { - List filterList = new ArrayList<>(); - filterList.addAll((Collection) - joinRoot.collect(LogicalFilter.class::isInstance)); - for (LogicalFilter filter : filterList) { - if (filter.child().equals(context.pullUpTable)) { + // reject SQL1 + if (conditionMapSubsequent.size() != conditionMapFirst.size()) { + return false; + } + // reject SQL2 + if (!conditionMapSubsequent.keySet().equals(conditionMapFirst.keySet())) { + return false; + } + // reject SQL3 + for (Map.Entry> entry : conditionMapFirst.entrySet()) { + SlotReference commonSlot = entry.getKey(); + if (conditionMapSubsequent.get(commonSlot).size() != entry.getValue().size()) { return false; } } + commonSlotToOtherSlotMaps.add(conditionMapSubsequent); } } return true; } - private boolean checkAggrKeyOnUkOrPk(LogicalAggregate aggregate, LogicalCatalogRelation table) { - List groupByKeys = aggregate.getGroupByExpressions(); - boolean isAllSlotReference = groupByKeys.stream().allMatch(e -> e instanceof SlotReference); - if (!isAllSlotReference) { - return false; - } else { - Set ukInfo = getUkInfoFromConstraint(table); - Set pkInfo = getPkInfoFromConstraint(table); - if (ukInfo == null || pkInfo == null || ukInfo.size() != 1 || pkInfo.size() != 1) { - return false; + // Make a map to map the output of all other joins to the output of the 
first join + private Map buildCommonJoinMap(List, Plan>> commonChild) { + Map commonJoinSlotMap = new HashMap<>(); + List firstJoinOutput = new ArrayList<>(); + for (int i = 0; i < commonChild.size(); ++i) { + Pair, Plan> pair = commonChild.get(i); + Plan commonSide = pair.second; + if (i == 0) { + firstJoinOutput.addAll(commonSide.getOutput()); + for (Slot slot : commonSide.getOutput()) { + commonJoinSlotMap.put(slot, slot); + } } else { - String ukName = ukInfo.iterator().next(); - String pkName = pkInfo.iterator().next(); - for (Object expr : aggregate.getGroupByExpressions()) { - SlotReference slot = (SlotReference) expr; - if (table.getOutputExprIds().contains(slot.getExprId()) - && (slot.getName().equals(ukName) || slot.getName().equals(pkName))) { - return true; - } + for (int j = 0; j < commonSide.getOutput().size(); ++j) { + commonJoinSlotMap.put(commonSide.getOutput().get(j), firstJoinOutput.get(j)); } - return false; } } + return commonJoinSlotMap; } - private boolean checkJoinConditionOnPk(LogicalJoin joinRoot, LogicalCatalogRelation table, PullUpContext context) { - Set pkInfos = getPkInfoFromConstraint(table); - if (pkInfos == null || pkInfos.size() != 1) { + private class ExpressionOrIndex { + Expression exprFromCommonSide = null; + int indexOfNewUnionOutput = -1; + + private ExpressionOrIndex(Expression expr) { + exprFromCommonSide = expr; + } + + private ExpressionOrIndex(int index) { + indexOfNewUnionOutput = index; + } + } + + /** In the union child output, the number of outputs from the common side must be the same in each child output, + * and the outputs from the common side must be isomorphic (both a+1) and have the same index in the union output. + * In the union child output, the number of outputs from the non-common side must also be the same, + * but they do not need to be isomorphic. + * These are sql that can not do this transform: + * SQL1: select t2.a+t1.a from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + * select t3.a+1 from test_like1 t1 join test_like3 t3 on t1.a=t3.a; + * SQL2: select t2.a from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + * select t1.a from test_like1 t1 join test_like3 t3 on t1.a=t3.a; + * SQL3: select t1.a+1 from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + * select t1.a+2 from test_like1 t1 join test_like3 t3 on t1.a=t3.a; + * SQL4: select t1.a from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + * select 1 from test_like1 t1 join test_like3 t3 on t1.a=t3.a; + * @param otherOutputsList output parameter that stores the outputs of the other side. + * The length of each element in otherOutputsList must be the same. + * The i-th element represents the output of the other side in the i-th child of the union. + * This parameter is used to create child nodes of a new Union + * in the constructNewUnion function. + * + * @param upperProjectExpressionOrIndex Output parameter used in the constructNewProject function to create + * the top-level project.This parameter records the output column order of + * the original union and determines,based on the new join output, the columns + * or expressions to output in the upper-level project operator. The size of + * upperProjectExpressionOrIndex must match the output size of + * the original union。 + * Each Pair in the List represents an output source: + * - Pair.first (Boolean): Indicates whether the output is from + * the common side (true) or the other side (false). 
+ * - Pair.second (Object): When Pair.first is true, it stores + * the common side's output expression.When false, it saves the output index + * of the other side. Since the new union output is not yet constructed + * at this point, only the index is stored. + * The function’s check ensures that outputs at the same position in + * union children either come from the common side or from the other side. + * When the final join is constructed, the common side uses the first join's + * common side, so only the first child’s outputs need to be processed to + * fill in upperProjectExpressionOrIndex. + */ + private boolean checkUnionChildrenOutput(LogicalUnion union, + List, Plan>> joinsAndCommonSides, + List> otherOutputsList, + List> upperProjectExpressionOrIndex) { + List> regularChildrenOutputs = union.getRegularChildrenOutputs(); + int arity = union.arity(); + if (arity == 0) { return false; } - String pkSlot = pkInfos.iterator().next(); - List joinList = new ArrayList<>(); - joinList.addAll((Collection) joinRoot.collect(LogicalJoin.class::isInstance)); - boolean found = false; - for (LogicalJoin join : joinList) { - List conditions = join.getHashJoinConjuncts(); - List basicTableList = new ArrayList<>(); - basicTableList.addAll((Collection) join - .collect(LogicalCatalogRelation.class::isInstance)); - for (Expression equalTo : conditions) { - if (equalTo instanceof EqualTo - && ((EqualTo) equalTo).left() instanceof SlotReference - && ((EqualTo) equalTo).right() instanceof SlotReference) { - SlotReference leftSlot = (SlotReference) ((EqualTo) equalTo).left(); - SlotReference rightSlot = (SlotReference) ((EqualTo) equalTo).right(); - if (table.getOutputExprIds().contains(leftSlot.getExprId()) - && pkSlot.equals(leftSlot.getName())) { - // pk-fk join condition, check other side's join key is on fk - LogicalCatalogRelation rightTable = findTableFromSlot(rightSlot, basicTableList); - if (rightTable != null && getFkInfoFromConstraint(rightTable) != null) { - ForeignKeyConstraint fkInfo = getFkInfoFromConstraint(rightTable); - if (fkInfo.getReferencedTable().getId() == table.getTable().getId()) { - for (Map.Entry entry : fkInfo.getForeignToReference().entrySet()) { - if (entry.getValue().equals(pkSlot) && entry.getKey().equals(rightSlot.getName())) { - found = true; - context.replaceColumns.add(rightSlot); - context.pullUpTableToPkSlotMap.put(table, leftSlot); - break; - } + // fromCommonSide is used to ensure that the outputs at the same position in the union children + // must all come from the common side or from the other side + boolean[] fromCommonSide = new boolean[regularChildrenOutputs.get(0).size()]; + // checkSameExpr and commonJoinSlotMap are used to ensure that Expr from the common side have the same structure + Expression[] checkSameExpr = new Expression[regularChildrenOutputs.get(0).size()]; + Map commonJoinSlotMap = buildCommonJoinMap(joinsAndCommonSides); + for (int i = 0; i < arity; ++i) { + List regularChildrenOutput = regularChildrenOutputs.get(i); + Plan child = union.child(i); + List otherOutputs = new ArrayList<>(); + for (int j = 0; j < regularChildrenOutput.size(); ++j) { + SlotReference slot = regularChildrenOutput.get(j); + if (child instanceof LogicalProject) { + LogicalProject project = (LogicalProject) child; + int index = project.getOutput().indexOf(slot); + NamedExpression expr = project.getOutputs().get(index); + Slot insideSlot; + Expression insideExpr; + Set inputSlots = expr.getInputSlots(); + // reject SQL1 + if (inputSlots.size() > 1) { + return false; + } else 
if (inputSlots.size() == 1) { + if (expr instanceof Alias) { + insideSlot = inputSlots.iterator().next(); + insideExpr = expr.child(0); + } else if (expr instanceof SlotReference) { + insideSlot = (Slot) expr; + insideExpr = expr; + } else { + return false; + } + + Plan commonSide = joinsAndCommonSides.get(i).second; + if (i == 0) { + if (commonSide.getOutputSet().contains(insideSlot)) { + fromCommonSide[j] = true; + checkSameExpr[j] = insideExpr; + upperProjectExpressionOrIndex.add(Pair.of(true, new ExpressionOrIndex(insideExpr))); + } else { + fromCommonSide[j] = false; + upperProjectExpressionOrIndex.add(Pair.of(false, new ExpressionOrIndex( + otherOutputs.size()))); + otherOutputs.add(expr); + } + } else { + // reject SQL2 + if (commonSide.getOutputSet().contains(insideSlot) != fromCommonSide[j]) { + return false; + } + // reject SQL3 + if (commonSide.getOutputSet().contains(insideSlot)) { + Expression sameExpr = ExpressionUtils.replace(insideExpr, commonJoinSlotMap); + if (!sameExpr.equals(checkSameExpr[j])) { + return false; } + } else { + otherOutputs.add(expr); } } - } else if (table.getOutputExprIds().contains(rightSlot.getExprId()) - && pkSlot.equals(rightSlot.getName())) { - // pk-fk join condition, check other side's join key is on fk - LogicalCatalogRelation leftTable = findTableFromSlot(leftSlot, basicTableList); - if (leftTable != null && getFkInfoFromConstraint(leftTable) != null) { - ForeignKeyConstraint fkInfo = getFkInfoFromConstraint(leftTable); - if (fkInfo.getReferencedTable().getId() == table.getTable().getId()) { - for (Map.Entry entry : fkInfo.getForeignToReference().entrySet()) { - if (entry.getValue().equals(pkSlot) && entry.getKey().equals(leftSlot.getName())) { - found = true; - context.replaceColumns.add(leftSlot); - context.pullUpTableToPkSlotMap.put(table, rightSlot); - break; - } - } + } else if (expr.getInputSlots().isEmpty()) { + // Constants must come from other side + if (i == 0) { + fromCommonSide[j] = false; + upperProjectExpressionOrIndex.add(Pair.of(false, new ExpressionOrIndex( + otherOutputs.size()))); + } else { + // reject SQL4 + if (fromCommonSide[j]) { + return false; } } + otherOutputs.add(expr); } - if (found) { - break; + } else if (child instanceof LogicalJoin) { + Plan commonSide = joinsAndCommonSides.get(i).second; + if (i == 0) { + if (commonSide.getOutputSet().contains(slot)) { + fromCommonSide[j] = true; + checkSameExpr[j] = slot; + upperProjectExpressionOrIndex.add(Pair.of(true, new ExpressionOrIndex(slot))); + } else { + fromCommonSide[j] = false; + upperProjectExpressionOrIndex.add(Pair.of(false, + new ExpressionOrIndex(otherOutputs.size()))); + otherOutputs.add(slot); + } + } else { + // reject SQL2 + if (commonSide.getOutputSet().contains(slot) != fromCommonSide[j]) { + return false; + } + // reject SQL3 + if (commonSide.getOutputSet().contains(slot)) { + Expression sameExpr = ExpressionUtils.replace(slot, commonJoinSlotMap); + if (!sameExpr.equals(checkSameExpr[j])) { + return false; + } + } else { + otherOutputs.add(slot); + } } } } - if (found) { - break; - } + otherOutputsList.add(otherOutputs); } - return found; + return true; } - private LogicalCatalogRelation findTableFromSlot(SlotReference targetSlot, - List tableList) { - for (LogicalCatalogRelation table : tableList) { - if (table.getOutputExprIds().contains(targetSlot.getExprId())) { - return table; + /** + * Attempts to extract common children from a LogicalUnion. 
+ * + * This method iterates through all children of the union, looking for LogicalJoin operations, + * and tries to identify common left or right subtrees. The results are stored in a Map where + * keys are potential common subtrees and values are lists of pairs containing the original + * join and the corresponding subtree. + * + * For example, given the following union: + * Union + * ├─ Join(A, B) + * ├─ Join(A, C) + * └─ Join(D, B) + * + * The returned Map would contain: + * A -> [(Join(A,B), A), (Join(A,C), A)] + * B -> [(Join(A,B), B), (Join(D,B), B)] + * + * This indicates that both A and B are potential common subtrees that could be extracted. + * + * @param union The LogicalUnion to analyze + * @return A Map containing potential common subtrees, or null if extraction is not possible + */ + private @Nullable HashMap, Plan>>> tryToExtractCommonChild(LogicalUnion union) { + HashMap, Plan>>> planCount = new HashMap<>(); + for (Plan child : union.children()) { + LogicalJoin join = tryToGetJoin(child); + if (join == null) { + return null; + } + boolean added = false; + for (Plan plan : planCount.keySet()) { + LogicalPlanComparator comparator = new LogicalPlanComparator(); + if (comparator.isLogicalEqual(join.left(), plan)) { + planCount.get(plan).add(Pair.of(join, join.left())); + added = true; + break; + } else if (comparator.isLogicalEqual(join.right(), plan)) { + planCount.get(plan).add(Pair.of(join, join.right())); + added = true; + break; + } } - } - return null; - } - private ForeignKeyConstraint getFkInfoFromConstraint(LogicalCatalogRelation table) { - Set foreignKeyConstraints = table.getTable().getForeignKeyConstraints(); - if (foreignKeyConstraints.isEmpty()) { - return null; + if (!added) { + planCount.put(join.left(), Lists.newArrayList(Pair.of(join, join.left()))); + planCount.put(join.right(), Lists.newArrayList(Pair.of(join, join.right()))); + } } - return foreignKeyConstraints.stream().iterator().next(); + return planCount; } - private Set getPkInfoFromConstraint(LogicalCatalogRelation table) { - Set primaryKeyConstraints = table.getTable().getPrimaryKeyConstraints(); - if (primaryKeyConstraints.isEmpty()) { - return null; + // we only allow project(join) or join() + private @Nullable LogicalJoin tryToGetJoin(Plan child) { + if (child instanceof LogicalProject) { + child = child.child(0); } - return primaryKeyConstraints.stream().iterator().next().getPrimaryKeyNames(); - } - - private Set getUkInfoFromConstraint(LogicalCatalogRelation table) { - Set uniqueConstraints = table.getTable().getUniqueConstraints(); - if (uniqueConstraints.isEmpty()) { - return null; + if (child instanceof LogicalJoin + && ((LogicalJoin) child).getJoinType().isInnerJoin() + && ((LogicalJoin) child).getOtherJoinConjuncts().isEmpty() + && !((LogicalJoin) child).isMarkJoin()) { + return (LogicalJoin) child; } - return uniqueConstraints.stream().iterator().next().getUniqueColumnNames(); + return null; } - private boolean checkJoinRoot(LogicalJoin joinRoot) { - List joinChildrenPlans = Lists.newArrayList(); - joinChildrenPlans.addAll((Collection) joinRoot - .collect(LogicalPlan.class::isInstance)); - boolean planTypeMatch = joinChildrenPlans.stream() - .allMatch(p -> SUPPORTED_PLAN_TYPE.stream().anyMatch(c -> c.isInstance(p))); - if (!planTypeMatch) { - return false; - } - - List allJoinNodes = Lists.newArrayList(); - allJoinNodes.addAll((Collection) joinRoot.collect(LogicalJoin.class::isInstance)); - boolean joinTypeMatch = allJoinNodes.stream().allMatch(e -> e.getJoinType() == 
JoinType.INNER_JOIN); - boolean joinConditionMatch = allJoinNodes.stream() - .allMatch(e -> !e.getHashJoinConjuncts().isEmpty() && e.getOtherJoinConjuncts().isEmpty()); - if (!joinTypeMatch || !joinConditionMatch) { - return false; - } - - return true; - } + class LogicalPlanComparator { + private HashMap plan1ToPlan2 = new HashMap<>(); - private boolean checkAggrRoot(LogicalAggregate aggrRoot) { - for (Object expr : aggrRoot.getGroupByExpressions()) { - if (!(expr instanceof NamedExpression)) { + public boolean isLogicalEqual(Plan plan1, Plan plan2) { + if (plan1.children().size() != plan2.children().size()) { return false; } - } - return true; - } - - private List getTableListUnderJoin(LogicalJoin joinRoot) { - List tableLists = new ArrayList<>(); - tableLists.addAll((Collection) joinRoot - .collect(LogicalCatalogRelation.class::isInstance)); - return tableLists; - } - - private String makeQualifiedName(LogicalCatalogRelation table) { - String dbName = table.getTable().getDatabase().getFullName(); - String tableName = table.getTable().getName(); - return dbName + ":" + tableName; - } - - private Plan doPullUpJoinFromUnionAll(Plan unionChildPlan, PullUpContext context) { - return PullUpRewriter.INSTANCE.rewrite(unionChildPlan, context); - } - - private List doWrapReplaceColumnForUnionChildren(List unionChildren, PullUpContext context) { - List newUnionChildren = new ArrayList<>(); - for (int i = 0; i < unionChildren.size(); i++) { - // has been checked before - LogicalProject oldProject = (LogicalProject) unionChildren.get(i); - List newNamedExpressionList = new ArrayList<>(); - for (int j = 0; j < oldProject.getProjects().size(); j++) { - Object child = oldProject.getProjects().get(j); - if (context.replaceColumns.contains(child)) { - Alias newExpr = new Alias((Expression) child, context.unifiedOutputAlias); - newNamedExpressionList.add(newExpr); - context.replacedColumnIndex = j; - } else { - newNamedExpressionList.add((NamedExpression) child); - } - } - LogicalProject newProject = new LogicalProject(newNamedExpressionList, (LogicalPlan) oldProject.child()); - newUnionChildren.add(newProject); - } - return newUnionChildren; - } - - private List makeNewProjectOutputs(LogicalUnion origUnion, - LogicalJoin newJoin, PullUpContext context) { - List newProjectOutputs = new ArrayList<>(); - List origUnionSlots = origUnion.getOutput(); - List origUnionChildOutput = ((LogicalProject) origUnion.child(0)).getOutputs(); - for (int i = 0; i < origUnionChildOutput.size(); i++) { - NamedExpression unionOutputExpr = origUnionChildOutput.get(i); - if (unionOutputExpr instanceof Alias) { - if (!(unionOutputExpr.child(0) instanceof Literal)) { - unionOutputExpr = (Slot) unionOutputExpr.child(0); - } - } - boolean found = false; - Slot matchedJoinSlot = null; - for (Slot joinOutput : newJoin.getOutput()) { - Slot slot = joinOutput; - if (context.origChild0ToNewUnionOutputMap.get(slot) != null) { - slot = context.origChild0ToNewUnionOutputMap.get(slot); - } - if (slot.equals(unionOutputExpr) || slot.getExprId() == unionOutputExpr.getExprId()) { - matchedJoinSlot = joinOutput; - found = true; - break; + for (int i = 0; i < plan1.children().size(); i++) { + if (!isLogicalEqual(plan1.child(i), plan2.child(i))) { + return false; } } - if (found) { - ExprId exprId = origUnionSlots.get(i).getExprId(); - Alias aliasExpr = new Alias(exprId, matchedJoinSlot, matchedJoinSlot.toSql()); - newProjectOutputs.add(aliasExpr); + if (isNotSupported(plan1) || isNotSupported(plan2)) { + return false; } + return 
comparePlan(plan1, plan2); } - return newProjectOutputs; - } - - private LogicalJoin makeNewJoin(LogicalUnion newUnionNode, - LogicalCatalogRelation pullUpTable, PullUpContext context) { - List newHashJoinConjuncts = new ArrayList<>(); - Slot unionSideExpr = newUnionNode.getOutput().get(context.replacedColumnIndex); - Slot pullUpSidePkSlot = context.pullUpTableToPkSlotMap.get(pullUpTable); - if (pullUpSidePkSlot == null) { - return null; + boolean isNotSupported(Plan plan) { + return !(plan instanceof LogicalFilter) + && !(plan instanceof LogicalCatalogRelation) + && !(plan instanceof LogicalProject); } - EqualTo pullUpJoinCondition = new EqualTo(unionSideExpr, pullUpSidePkSlot); - newHashJoinConjuncts.add(pullUpJoinCondition); - - // new a join with the newUnion and the pulled up table - return new LogicalJoin<>( - JoinType.INNER_JOIN, - newHashJoinConjuncts, - ExpressionUtils.EMPTY_CONDITION, - new DistributeHint(DistributeType.NONE), - Optional.empty(), - newUnionNode, - pullUpTable, null); - } - - private LogicalUnion makeNewUnionNode(LogicalUnion origUnion, - List pullUpTableList, PullUpContext context) { - List newUnionChildren = new ArrayList<>(); - for (int i = 0; i < origUnion.children().size(); i++) { - Plan unionChild = origUnion.child(i); - context.setPullUpTable(pullUpTableList.get(i)); - context.setReplacedColumn(context.replaceColumns.get(i)); - Plan newChild = doPullUpJoinFromUnionAll(unionChild, context); - newUnionChildren.add(newChild); - } - - // wrap the replaced column with a shared alias which is exposed to outside - List formalizedNewUnionChildren = doWrapReplaceColumnForUnionChildren(newUnionChildren, context); - List> childrenOutputs = formalizedNewUnionChildren.stream() - .map(j -> j.getOutput().stream() - .map(SlotReference.class::cast) - .collect(ImmutableList.toImmutableList())) - .collect(ImmutableList.toImmutableList()); - - LogicalUnion newUnionNode = new LogicalUnion(Qualifier.ALL, formalizedNewUnionChildren); - newUnionNode = (LogicalUnion) newUnionNode.withChildrenAndTheirOutputs( - formalizedNewUnionChildren, childrenOutputs); - List newOutputs = newUnionNode.buildNewOutputs(); - newUnionNode = newUnionNode.withNewOutputs(newOutputs); - - // set up origin child 0 output to new union output mapping - List origChild0Output = childrenOutputs.get(0); - for (int i = 0; i < origChild0Output.size(); i++) { - SlotReference slot = origChild0Output.get(i); - NamedExpression newExpr = newOutputs.get(i); - context.origChild0ToNewUnionOutputMap.put(newExpr, slot); - } - - return newUnionNode; - } - - private static class PullUpRewriter extends DefaultPlanRewriter implements CustomRewriter { - public static final PullUpRewriter INSTANCE = new PullUpRewriter(); - @Override - public Plan rewriteRoot(Plan plan, JobContext context) { - return null; - } - - public Plan rewrite(Plan plan, PullUpContext context) { - return plan.accept(this, context); - } - - @Override - public Plan visitLogicalAggregate(LogicalAggregate agg, PullUpContext context) { - Plan input = agg.child().accept(this, context); - - LogicalCatalogRelation pullTable = context.pullUpTable; - SlotReference replaceColumn = context.replaceColumn; - - // eliminate group by keys - List groupByExprList = new ArrayList<>(); - for (Expression expr : agg.getGroupByExpressions()) { - // expr has been checked before - if (!pullTable.getOutputExprIds().contains(((NamedExpression) expr).getExprId())) { - groupByExprList.add(expr); + boolean comparePlan(Plan plan1, Plan plan2) { + boolean isEqual = true; + if (plan1 
instanceof LogicalCatalogRelation && plan2 instanceof LogicalCatalogRelation) { + isEqual = new TableIdentifier(((LogicalCatalogRelation) plan1).getTable()) + .equals(new TableIdentifier(((LogicalCatalogRelation) plan2).getTable())); + } else if (plan1 instanceof LogicalProject && plan2 instanceof LogicalProject) { + if (plan1.getOutput().size() != plan2.getOutput().size()) { + isEqual = false; } - } - // add replaced group by key - groupByExprList.add(replaceColumn); - - // eliminate outputs keys - List outputExprList = new ArrayList<>(); - for (NamedExpression expr : agg.getOutputExpressions()) { - if (!pullTable.getOutputExprIds().contains(expr.getExprId())) { - outputExprList.add(expr); + for (int i = 0; isEqual && i < plan2.getOutput().size(); i++) { + NamedExpression expr = ((LogicalProject) plan1).getProjects().get(i); + NamedExpression replacedExpr = (NamedExpression) + expr.rewriteUp(e -> plan1ToPlan2.getOrDefault(e, e)); + if (!replacedExpr.equals(((LogicalProject) plan2).getProjects().get(i))) { + isEqual = false; + } } - } - // add replaced group by key - outputExprList.add(replaceColumn); - return new LogicalAggregate<>(groupByExprList, outputExprList, input); - } - - public Plan visitLogicalJoin(LogicalJoin join, PullUpContext context) { - Plan leftChild = join.child(0).accept(this, context); - Plan rightChild = join.child(1).accept(this, context); - LogicalCatalogRelation pullUpTable = context.pullUpTable; - - // no filter on pull up table, which has been checked before - if (leftChild instanceof LogicalCatalogRelation - && leftChild.equals(pullUpTable)) { - context.setNeedAddReplaceColumn(true); - return rightChild; - } else if (rightChild instanceof LogicalCatalogRelation - && rightChild.equals(pullUpTable)) { - context.setNeedAddReplaceColumn(true); - return leftChild; - } else if (leftChild instanceof LogicalProject - && leftChild.child(0) instanceof LogicalCatalogRelation - && leftChild.child(0).equals(pullUpTable)) { - context.setNeedAddReplaceColumn(true); - return rightChild; - } else if (rightChild instanceof LogicalProject - && rightChild.child(0) instanceof LogicalCatalogRelation - && rightChild.child(0).equals(pullUpTable)) { - context.setNeedAddReplaceColumn(true); - return leftChild; + } else if (plan1 instanceof LogicalFilter && plan2 instanceof LogicalFilter) { + Set replacedConjuncts = new HashSet<>(); + for (Expression expr : ((LogicalFilter) plan1).getConjuncts()) { + replacedConjuncts.add(expr.rewriteUp(e -> plan1ToPlan2.getOrDefault(e, e))); + } + isEqual = replacedConjuncts.equals(((LogicalFilter) plan2).getConjuncts()); } else { - return new LogicalJoin(JoinType.INNER_JOIN, - join.getHashJoinConjuncts(), - join.getOtherJoinConjuncts(), - new DistributeHint(DistributeType.NONE), - Optional.empty(), - leftChild, rightChild, null); + isEqual = false; } - } - - @Override - public Plan visitLogicalProject(LogicalProject project, PullUpContext context) { - Plan input = project.child().accept(this, context); - List outputs = input.getOutput().stream() - .map(e -> (NamedExpression) e).collect(Collectors.toList()); - for (NamedExpression expr : project.getProjects()) { - // handle alias - if (expr instanceof Alias && expr.child(0) instanceof Literal) { - outputs.add(expr); - } + if (!isEqual) { + return false; } - return new LogicalProject<>(outputs, input); - } - - @Override - public Plan visitLogicalFilter(LogicalFilter filter, PullUpContext context) { - Plan input = filter.child().accept(this, context); - return new LogicalFilter<>(filter.getConjuncts(), 
input); + for (int i = 0; i < plan1.getOutput().size(); i++) { + plan1ToPlan2.put(plan1.getOutput().get(i), plan2.getOutput().get(i)); + } + return true; } } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/CutToFirstSignificantSubdomain.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/CutToFirstSignificantSubdomain.java new file mode 100644 index 00000000000000..a2e77531e43ca4 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/CutToFirstSignificantSubdomain.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions.functions.scalar; + +import org.apache.doris.catalog.FunctionSignature; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.functions.PropagateNullable; +import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; +import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.types.StringType; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; + +import java.util.List; + +/** + * ScalarFunction 'CutToFirstSignificantSubdomain'. This class is generated by GenerateFunction. + */ +public class CutToFirstSignificantSubdomain extends ScalarFunction + implements UnaryExpression, ExplicitlyCastableSignature, PropagateNullable { + + public static final List SIGNATURES = ImmutableList.of( + FunctionSignature.ret(StringType.INSTANCE).args(StringType.INSTANCE) + ); + + /** + * constructor with 1 argument. + */ + public CutToFirstSignificantSubdomain(Expression arg) { + super("cut_to_first_significant_subdomain", arg); + } + + /** + * withChildren. 
+ */ + @Override + public CutToFirstSignificantSubdomain withChildren(List children) { + Preconditions.checkArgument(children.size() == 1); + return new CutToFirstSignificantSubdomain(children.get(0)); + } + + @Override + public R accept(ExpressionVisitor visitor, C context) { + return visitor.visitCutToFirstSignificantSubdomain(this, context); + } + + @Override + public List getSignatures() { + return SIGNATURES; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/FirstSignificantSubdomain.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/FirstSignificantSubdomain.java new file mode 100644 index 00000000000000..1af4dd96e6deff --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/FirstSignificantSubdomain.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions.functions.scalar; + +import org.apache.doris.catalog.FunctionSignature; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.functions.PropagateNullable; +import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; +import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.types.StringType; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; + +import java.util.List; + +/** + * ScalarFunction 'FirstSignificantSubdomain'. This class is generated by GenerateFunction. + */ +public class FirstSignificantSubdomain extends ScalarFunction + implements UnaryExpression, ExplicitlyCastableSignature, PropagateNullable { + + public static final List SIGNATURES = ImmutableList.of( + FunctionSignature.ret(StringType.INSTANCE).args(StringType.INSTANCE) + ); + + /** + * constructor with 1 argument. + */ + public FirstSignificantSubdomain(Expression arg) { + super("first_significant_subdomain", arg); + } + + /** + * withChildren. 
+ */ + @Override + public FirstSignificantSubdomain withChildren(List children) { + Preconditions.checkArgument(children.size() == 1); + return new FirstSignificantSubdomain(children.get(0)); + } + + @Override + public R accept(ExpressionVisitor visitor, C context) { + return visitor.visitFirstSignificantSubdomain(this, context); + } + + @Override + public List getSignatures() { + return SIGNATURES; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/TopLevelDomain.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/TopLevelDomain.java new file mode 100644 index 00000000000000..05997659a2ea95 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/scalar/TopLevelDomain.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.expressions.functions.scalar; + +import org.apache.doris.catalog.FunctionSignature; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.functions.ExplicitlyCastableSignature; +import org.apache.doris.nereids.trees.expressions.functions.PropagateNullable; +import org.apache.doris.nereids.trees.expressions.shape.UnaryExpression; +import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor; +import org.apache.doris.nereids.types.StringType; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; + +import java.util.List; + +/** + * ScalarFunction 'TopLevelDomain'. This class is generated by GenerateFunction. + */ +public class TopLevelDomain extends ScalarFunction + implements UnaryExpression, ExplicitlyCastableSignature, PropagateNullable { + + public static final List SIGNATURES = ImmutableList.of( + FunctionSignature.ret(StringType.INSTANCE).args(StringType.INSTANCE) + ); + + /** + * constructor with 1 argument. + */ + public TopLevelDomain(Expression arg) { + super("top_level_domain", arg); + } + + /** + * withChildren. 
+ */ + @Override + public TopLevelDomain withChildren(List children) { + Preconditions.checkArgument(children.size() == 1); + return new TopLevelDomain(children.get(0)); + } + + @Override + public R accept(ExpressionVisitor visitor, C context) { + return visitor.visitTopLevelDomain(this, context); + } + + @Override + public List getSignatures() { + return SIGNATURES; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/visitor/ScalarFunctionVisitor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/visitor/ScalarFunctionVisitor.java index c5e9688d3c1733..2619731cfc874b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/visitor/ScalarFunctionVisitor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/visitor/ScalarFunctionVisitor.java @@ -146,6 +146,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.CurrentTime; import org.apache.doris.nereids.trees.expressions.functions.scalar.CurrentUser; import org.apache.doris.nereids.trees.expressions.functions.scalar.CutIpv6; +import org.apache.doris.nereids.trees.expressions.functions.scalar.CutToFirstSignificantSubdomain; import org.apache.doris.nereids.trees.expressions.functions.scalar.Database; import org.apache.doris.nereids.trees.expressions.functions.scalar.Date; import org.apache.doris.nereids.trees.expressions.functions.scalar.DateDiff; @@ -188,6 +189,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.ExtractUrlParameter; import org.apache.doris.nereids.trees.expressions.functions.scalar.Field; import org.apache.doris.nereids.trees.expressions.functions.scalar.FindInSet; +import org.apache.doris.nereids.trees.expressions.functions.scalar.FirstSignificantSubdomain; import org.apache.doris.nereids.trees.expressions.functions.scalar.Floor; import org.apache.doris.nereids.trees.expressions.functions.scalar.Fmod; import org.apache.doris.nereids.trees.expressions.functions.scalar.Fpow; @@ -437,6 +439,7 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.ToMonday; import org.apache.doris.nereids.trees.expressions.functions.scalar.ToQuantileState; import org.apache.doris.nereids.trees.expressions.functions.scalar.Tokenize; +import org.apache.doris.nereids.trees.expressions.functions.scalar.TopLevelDomain; import org.apache.doris.nereids.trees.expressions.functions.scalar.Translate; import org.apache.doris.nereids.trees.expressions.functions.scalar.Trim; import org.apache.doris.nereids.trees.expressions.functions.scalar.TrimIn; @@ -903,6 +906,11 @@ default R visitChar(Char charFunc, C context) { return visitScalarFunction(charFunc, context); } + default R visitCutToFirstSignificantSubdomain(CutToFirstSignificantSubdomain cutToFirstSignificantSubdomain, + C context) { + return visitScalarFunction(cutToFirstSignificantSubdomain, context); + } + default R visitEncodeAsSmallInt(EncodeAsSmallInt encode, C context) { return visitScalarFunction(encode, context); } @@ -1187,6 +1195,10 @@ default R visitFindInSet(FindInSet findInSet, C context) { return visitScalarFunction(findInSet, context); } + default R visitFirstSignificantSubdomain(FirstSignificantSubdomain firstSignificantSubdomain, C context) { + return visitScalarFunction(firstSignificantSubdomain, context); + } + default R visitFloor(Floor floor, C context) { return visitScalarFunction(floor, context); } @@ -2111,6 +2123,10 @@ default R visitTokenize(Tokenize tokenize, C context) { return visitScalarFunction(tokenize, 
context); } + default R visitTopLevelDomain(TopLevelDomain topLevelDomain, C context) { + return visitScalarFunction(topLevelDomain, context); + } + default R visitToQuantileState(ToQuantileState toQuantileState, C context) { return visitScalarFunction(toQuantileState, context); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/PlanType.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/PlanType.java index 73ea61fcface45..b87dfaf08ae497 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/PlanType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/PlanType.java @@ -75,6 +75,7 @@ public enum PlanType { LOGICAL_MULTI_JOIN, LOGICAL_PARTITION_TOP_N, LOGICAL_PROJECT, + LOGICAL_QUALIFY, LOGICAL_REPEAT, LOGICAL_SELECT_HINT, LOGICAL_SUBQUERY_ALIAS, @@ -165,6 +166,10 @@ public enum PlanType { DROP_CATALOG_RECYCLE_BIN_COMMAND, UNSUPPORTED_COMMAND, CREATE_TABLE_LIKE_COMMAND, + SET_OPTIONS_COMMAND, + SET_TRANSACTION_COMMAND, + SET_USER_PROPERTIES_COMMAND, + SET_DEFAULT_STORAGE_VAULT_COMMAND, PREPARED_COMMAND, EXECUTE_COMMAND, diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/Forward.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/Forward.java index 1f8f0a33eb5773..c175ccc820e28b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/Forward.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/Forward.java @@ -18,10 +18,13 @@ package org.apache.doris.nereids.trees.plans.commands; import org.apache.doris.analysis.RedirectStatus; +import org.apache.doris.qe.ConnectContext; /** * forward to master. */ public interface Forward { RedirectStatus toRedirectStatus(); + + default void afterForwardToMaster(ConnectContext ctx) throws Exception {} } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/NeedAuditEncryption.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/NeedAuditEncryption.java new file mode 100644 index 00000000000000..df1c22ffe531fd --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/NeedAuditEncryption.java @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
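The three scalar functions added above, cut_to_first_significant_subdomain, first_significant_subdomain and top_level_domain, share one template: a single STRING -> STRING signature, PropagateNullable, and a dedicated default method in ScalarFunctionVisitor. Judging by the names they mirror the ClickHouse URL functions (so top_level_domain('www.example.com') would yield 'com'), but the diff only shows the FE-side registration, so treat that reading as an assumption. A minimal, self-contained sketch of the visitor wiring, using stand-in names rather than the real Doris classes:

    interface Fn {
        <R, C> R accept(FnVisitor<R, C> visitor, C context);
    }

    interface FnVisitor<R, C> {
        R visitScalarFunction(Fn fn, C context);

        // A new function gets a dedicated hook with a default that falls back to the
        // generic handler, so existing visitor implementations keep compiling unchanged.
        default R visitTopLevelDomain(TopLevelDomainFn fn, C context) {
            return visitScalarFunction(fn, context);
        }
    }

    final class TopLevelDomainFn implements Fn {
        final String urlArg;

        TopLevelDomainFn(String urlArg) {
            this.urlArg = urlArg;
        }

        @Override
        public <R, C> R accept(FnVisitor<R, C> visitor, C context) {
            return visitor.visitTopLevelDomain(this, context);
        }
    }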
+ +package org.apache.doris.nereids.trees.plans.commands; + +/** + * NeedAuditEncryption + */ +public interface NeedAuditEncryption { + + boolean needAuditEncryption(); + + String toSql(); + +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetDefaultStorageVaultCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetDefaultStorageVaultCommand.java new file mode 100644 index 00000000000000..8251f923f34462 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetDefaultStorageVaultCommand.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.plans.commands; + +import org.apache.doris.analysis.StmtType; +import org.apache.doris.catalog.Env; +import org.apache.doris.cloud.catalog.CloudEnv; +import org.apache.doris.common.Config; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.FeConstants; +import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.nereids.exceptions.AnalysisException; +import org.apache.doris.nereids.trees.plans.PlanType; +import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor; +import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.StmtExecutor; + +/** + * SetDefaultStorageVaultCommand + */ +public class SetDefaultStorageVaultCommand extends Command implements ForwardWithSync { + private final String vaultName; + + public SetDefaultStorageVaultCommand(String vaultName) { + super(PlanType.SET_DEFAULT_STORAGE_VAULT_COMMAND); + this.vaultName = vaultName; + } + + @Override + public void run(ConnectContext ctx, StmtExecutor executor) throws Exception { + if (Config.isNotCloudMode()) { + throw new AnalysisException("Storage Vault is only supported for cloud mode"); + } + if (!FeConstants.runningUnitTest) { + // In legacy cloud mode, some s3 back-ended storage does need to use storage vault. 
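NeedAuditEncryption, introduced just above, lets a command signal that its raw text must not reach the audit log: commands that carry credentials (SET PASSWORD, SET LDAP_ADMIN_PASSWORD) return true and provide a masked toSql(). A hedged sketch of how a logging path could consult it; AuditLogHelper, auditText() and rawStmt are illustrative names, not the actual Doris call site:

    final class AuditLogHelper {
        static String auditText(Object command, String rawStmt) {
            if (command instanceof NeedAuditEncryption
                    && ((NeedAuditEncryption) command).needAuditEncryption()) {
                // SetPassVarOp / SetLdapPassVarOp render the secret as '*XXX' in toSql()
                return ((NeedAuditEncryption) command).toSql();
            }
            return rawStmt;
        }
    }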
+ if (!((CloudEnv) Env.getCurrentEnv()).getEnableStorageVault()) { + throw new AnalysisException("Your cloud instance doesn't support storage vault"); + } + } + + // check auth + if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN"); + } + ctx.getEnv().getStorageVaultMgr().setDefaultStorageVault(vaultName); + } + + @Override + public R accept(PlanVisitor visitor, C context) { + return visitor.visitSetDefaultStorageVault(this, context); + } + + @Override + public StmtType stmtType() { + return StmtType.SET; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetOptionsCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetOptionsCommand.java new file mode 100644 index 00000000000000..d3e280d541d210 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetOptionsCommand.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
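SetDefaultStorageVaultCommand (presumably backing the SET ... AS DEFAULT STORAGE VAULT syntax) validates in a fixed order before delegating to StorageVaultMgr.setDefaultStorageVault: the FE must be in cloud mode, the cloud instance must have storage vault enabled (skipped in unit tests), and the caller needs ADMIN. A hedged recap of that guard order as a stand-alone helper with illustrative names:

    final class DefaultVaultGuard {
        static void checkCanSetDefaultVault(boolean cloudMode, boolean vaultEnabled, boolean hasAdminPriv) {
            if (!cloudMode) {
                throw new IllegalStateException("Storage Vault is only supported for cloud mode");
            }
            if (!vaultEnabled) {
                throw new IllegalStateException("Your cloud instance doesn't support storage vault");
            }
            if (!hasAdminPriv) {
                throw new IllegalStateException("ADMIN privilege is required");
            }
        }
    }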
+ +package org.apache.doris.nereids.trees.plans.commands; + +import org.apache.doris.analysis.RedirectStatus; +import org.apache.doris.analysis.SetType; +import org.apache.doris.analysis.StmtType; +import org.apache.doris.nereids.trees.plans.PlanType; +import org.apache.doris.nereids.trees.plans.commands.info.SetLdapPassVarOp; +import org.apache.doris.nereids.trees.plans.commands.info.SetPassVarOp; +import org.apache.doris.nereids.trees.plans.commands.info.SetVarOp; +import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor; +import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.StmtExecutor; + +import java.util.List; + +/** + * SetOptionsCommand + */ +public class SetOptionsCommand extends Command implements Forward, NeedAuditEncryption { + private final List setVarOpList; + + public SetOptionsCommand(List setVarOpList) { + super(PlanType.SET_OPTIONS_COMMAND); + this.setVarOpList = setVarOpList; + } + + @Override + public RedirectStatus toRedirectStatus() { + for (SetVarOp varOp : setVarOpList) { + if (varOp.getType() == SetType.GLOBAL || varOp instanceof SetPassVarOp + || varOp instanceof SetLdapPassVarOp) { + return RedirectStatus.FORWARD_WITH_SYNC; + } + } + return RedirectStatus.NO_FORWARD; + } + + @Override + public void run(ConnectContext ctx, StmtExecutor executor) throws Exception { + for (SetVarOp varOp : setVarOpList) { + varOp.validate(ctx); + varOp.run(ctx); + } + } + + @Override + public R accept(PlanVisitor visitor, C context) { + return visitor.visitSetOptionsCommand(this, context); + } + + @Override + public StmtType stmtType() { + return StmtType.SET; + } + + @Override + public boolean needAuditEncryption() { + for (SetVarOp setVarOp : setVarOpList) { + if (setVarOp.needAuditEncryption()) { + return true; + } + } + return false; + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("SET "); + int idx = 0; + for (SetVarOp variableInfo : setVarOpList) { + if (idx != 0) { + sb.append(", "); + } + sb.append(variableInfo.toSql()); + idx++; + } + return sb.toString(); + } + + @Override + public void afterForwardToMaster(ConnectContext ctx) throws Exception { + for (SetVarOp varOp : setVarOpList) { + varOp.validate(ctx); + varOp.afterForwardToMaster(ctx); + } + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetTransactionCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetTransactionCommand.java new file mode 100644 index 00000000000000..cd1e13fab79461 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetTransactionCommand.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
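SetOptionsCommand above decides per statement whether it must run on the master FE: any GLOBAL-scoped variable, SET PASSWORD or SET LDAP_ADMIN_PASSWORD forces FORWARD_WITH_SYNC, while everything else stays local as NO_FORWARD. After a forward, the new Forward.afterForwardToMaster() default hook re-validates and replays the ops on the originating FE so the local session also observes session-scoped changes. A hedged restatement of the redirect rule with simplified stand-in types:

    final class SetForwardRule {
        enum Redirect { FORWARD_WITH_SYNC, NO_FORWARD }

        interface SetVarOpLike {
            boolean isGlobal();
            boolean isPasswordChange();
        }

        static Redirect redirectFor(java.util.List<SetVarOpLike> ops) {
            for (SetVarOpLike op : ops) {
                if (op.isGlobal() || op.isPasswordChange()) {
                    return Redirect.FORWARD_WITH_SYNC;
                }
            }
            return Redirect.NO_FORWARD;
        }
    }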
+ +package org.apache.doris.nereids.trees.plans.commands; + +import org.apache.doris.analysis.StmtType; +import org.apache.doris.nereids.trees.plans.PlanType; +import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor; +import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.StmtExecutor; + +/** + * SetTransactionCommand + */ +public class SetTransactionCommand extends Command implements ForwardWithSync { + public SetTransactionCommand() { + super(PlanType.SET_TRANSACTION_COMMAND); + } + + @Override + public void run(ConnectContext ctx, StmtExecutor executor) throws Exception { + // do nothing + } + + @Override + public R accept(PlanVisitor visitor, C context) { + return visitor.visitSetTransactionCommand(this, context); + } + + @Override + public StmtType stmtType() { + return StmtType.SET; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetUserPropertiesCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetUserPropertiesCommand.java new file mode 100644 index 00000000000000..f3921decf69fe6 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/SetUserPropertiesCommand.java @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.plans.commands; + +import org.apache.doris.analysis.StmtType; +import org.apache.doris.common.Pair; +import org.apache.doris.nereids.trees.plans.PlanType; +import org.apache.doris.nereids.trees.plans.commands.info.SetUserPropertyVarOp; +import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor; +import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.StmtExecutor; + +import java.util.ArrayList; +import java.util.List; + +/** + * SetUserPropertiesCommand + */ +public class SetUserPropertiesCommand extends Command implements ForwardWithSync { + private final String user; + private final List setUserPropertyVarOpList; + + public SetUserPropertiesCommand(String user, List setUserPropertyVarOpList) { + super(PlanType.SET_OPTIONS_COMMAND); + this.user = user != null ? 
user : ConnectContext.get().getQualifiedUser(); + this.setUserPropertyVarOpList = setUserPropertyVarOpList; + } + + @Override + public void run(ConnectContext ctx, StmtExecutor executor) throws Exception { + List> properties = new ArrayList<>(setUserPropertyVarOpList.size()); + for (SetUserPropertyVarOp op : setUserPropertyVarOpList) { + op.validate(ctx); + properties.add(Pair.of(op.getPropertyKey(), op.getPropertyValue())); + } + ctx.getEnv().getAuth().updateUserPropertyInternal(user, properties, false); + } + + @Override + public R accept(PlanVisitor visitor, C context) { + return visitor.visitSetUserPropertiesCommand(this, context); + } + + @Override + public StmtType stmtType() { + return StmtType.SET; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetCharsetAndCollateVarOp.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetCharsetAndCollateVarOp.java new file mode 100644 index 00000000000000..bdbf9d05232f77 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetCharsetAndCollateVarOp.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.plans.commands.info; + +import org.apache.doris.analysis.SetType; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.UserException; +import org.apache.doris.qe.ConnectContext; + +import com.google.common.base.Strings; + +/** + * SetCharsetAndCollateVarOp + */ +public class SetCharsetAndCollateVarOp extends SetVarOp { + private static final String DEFAULT_NAMES = "utf8"; + private String charset; + private String collate; + + public SetCharsetAndCollateVarOp(String charsetName) { + this(charsetName, null); + } + + public SetCharsetAndCollateVarOp(String charsetName, String collate) { + super(SetType.DEFAULT); + this.charset = charsetName; + this.collate = collate; + } + + @Override + public void run(ConnectContext ctx) throws Exception { + // do nothing + } + + @Override + public void validate(ConnectContext ctx) throws UserException { + if (Strings.isNullOrEmpty(charset)) { + charset = DEFAULT_NAMES; + } else { + charset = charset.toLowerCase(); + } + // utf8-superset transform to utf8 + if (charset.startsWith(DEFAULT_NAMES)) { + charset = DEFAULT_NAMES; + } + + if (!charset.equalsIgnoreCase(DEFAULT_NAMES)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_UNKNOWN_CHARACTER_SET, charset); + } + } + + @Override + public String toSql() { + return "NAMES '" + charset + "' COLLATE " + + (Strings.isNullOrEmpty(collate) ? 
"DEFAULT" : "'" + collate.toLowerCase() + "'"); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetLdapPassVarOp.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetLdapPassVarOp.java new file mode 100644 index 00000000000000..df3b42b97a5f7b --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetLdapPassVarOp.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.plans.commands.info; + +import org.apache.doris.analysis.PassVar; +import org.apache.doris.analysis.SetType; +import org.apache.doris.catalog.Env; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.UserException; +import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.qe.ConnectContext; + +/** + * SetLdapPassVarOp + */ +public class SetLdapPassVarOp extends SetVarOp { + private final PassVar passVar; + + public SetLdapPassVarOp(PassVar passVar) { + super(SetType.DEFAULT); + this.passVar = passVar; + } + + public String getLdapPassword() { + return passVar.getText(); + } + + @Override + public void validate(ConnectContext ctx) throws UserException { + if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ctx, PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, + PrivPredicate.ADMIN.getPrivs().toString()); + } + if (!passVar.isPlain()) { + throw new AnalysisException("Only support set ldap password with plain text"); + } + passVar.analyze(); + } + + @Override + public void run(ConnectContext ctx) throws Exception { + ctx.getEnv().getAuth().setLdapPassword(passVar.getText()); + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder("SET LDAP_ADMIN_PASSWORD"); + sb.append(" = '*XXX'"); + return sb.toString(); + } + + @Override + public boolean needAuditEncryption() { + return true; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/scheduler/registry/ExportTaskRegister.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetNamesVarOp.java similarity index 52% rename from fe/fe-core/src/main/java/org/apache/doris/scheduler/registry/ExportTaskRegister.java rename to fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetNamesVarOp.java index 0241f57fea0026..e247fe5ce27464 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/scheduler/registry/ExportTaskRegister.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetNamesVarOp.java @@ -15,26 +15,21 @@ // specific language governing 
permissions and limitations // under the License. -package org.apache.doris.scheduler.registry; +package org.apache.doris.nereids.trees.plans.commands.info; -import org.apache.doris.scheduler.exception.JobException; -import org.apache.doris.scheduler.executor.TransientTaskExecutor; -import org.apache.doris.scheduler.manager.TransientTaskManager; +import org.apache.doris.analysis.SetType; +import org.apache.doris.qe.ConnectContext; -public class ExportTaskRegister implements TransientTaskRegister { - private final TransientTaskManager transientTaskManager; - - public ExportTaskRegister(TransientTaskManager transientTaskManager) { - this.transientTaskManager = transientTaskManager; - } - - @Override - public Long registerTask(TransientTaskExecutor executor) { - return transientTaskManager.addMemoryTask(executor); +/** + * SetNamesVarOp + */ +public class SetNamesVarOp extends SetVarOp { + public SetNamesVarOp() { + super(SetType.DEFAULT); } @Override - public void cancelTask(Long taskId) throws JobException { - transientTaskManager.cancelMemoryTask(taskId); + public void run(ConnectContext ctx) throws Exception { + // do nothing } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetPassVarOp.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetPassVarOp.java new file mode 100644 index 00000000000000..718c9acd4b096c --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetPassVarOp.java @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.plans.commands.info; + +import org.apache.doris.analysis.PassVar; +import org.apache.doris.analysis.SetType; +import org.apache.doris.analysis.UserIdentity; +import org.apache.doris.catalog.Env; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.UserException; +import org.apache.doris.mysql.privilege.Auth; +import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.qe.ConnectContext; + +/** + * SetPassVarOp + */ +public class SetPassVarOp extends SetVarOp { + private UserIdentity userIdent; + private PassVar passVar; + + // The password in parameter is a hashed password. 
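The charset-related ops above are deliberately thin: SetNamesVarOp.run() is a no-op and SetCharsetAndCollateVarOp only validates, because Doris accepts utf8 exclusively. Empty input falls back to utf8, any utf8-prefixed superset (e.g. utf8mb4) is collapsed to utf8, and anything else is rejected. A stand-alone restatement of that normalization:

    final class CharsetRule {
        static String normalizeCharset(String charset) {
            if (charset == null || charset.isEmpty()) {
                return "utf8";                       // empty falls back to the default
            }
            String lower = charset.toLowerCase();
            if (lower.startsWith("utf8")) {
                return "utf8";                       // utf8 supersets collapse to utf8
            }
            throw new IllegalArgumentException("Unknown character set: " + charset);
        }
    }

SetLdapPassVarOp, by contrast, does real work: it requires ADMIN, accepts only a plain-text password, stores it via Auth.setLdapPassword, and masks the value in toSql() for auditing.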
+ public SetPassVarOp(UserIdentity userIdent, PassVar passVar) { + super(SetType.DEFAULT); + this.userIdent = userIdent; + this.passVar = passVar; + } + + @Override + public void validate(ConnectContext ctx) throws UserException { + boolean isSelf = false; + if (userIdent == null) { + // set userIdent as what current_user() returns + userIdent = ctx.getCurrentUserIdentity(); + isSelf = true; + } else { + userIdent.analyze(); + if (userIdent.equals(ctx.getCurrentUserIdentity())) { + isSelf = true; + } + } + + // Check password + if (passVar != null) { + passVar.analyze(); + } + + // check privs. + // 1. this is user itself + if (isSelf) { + return; + } + + // 2. No user can set password for root expect for root user itself + if (userIdent.getQualifiedUser().equals(Auth.ROOT_USER)) { + throw new AnalysisException("Can not set password for root user, except root itself"); + } + + // 3. user has grant privs + if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ctx, PrivPredicate.GRANT)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); + } + } + + @Override + public void run(ConnectContext ctx) throws Exception { + ctx.getEnv().getAuth().setPassword(userIdent, passVar.getScrambled()); + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder("SET PASSWORD"); + if (userIdent != null) { + sb.append(" FOR ").append(userIdent); + } + sb.append(" = '*XXX'"); + return sb.toString(); + } + + @Override + public boolean needAuditEncryption() { + return true; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetSessionVarOp.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetSessionVarOp.java new file mode 100644 index 00000000000000..5ecb175a67f0f0 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetSessionVarOp.java @@ -0,0 +1,151 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
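SetPassVarOp.validate() enforces a three-step permission rule before the scrambled password is written through Auth.setPassword: changing your own password is always allowed, only root may change root's password, and any other change requires the GRANT privilege; toSql() masks the value as '*XXX'. Restated as a stand-alone predicate with illustrative parameters:

    final class PasswordRule {
        static boolean mayChangePassword(String targetUser, String currentUser, boolean hasGrantPriv) {
            if (targetUser.equals(currentUser)) {
                return true;                 // 1. a user may always change their own password
            }
            if ("root".equals(targetUser)) {
                return false;                // 2. nobody but root may change root's password
            }
            return hasGrantPriv;             // 3. otherwise the GRANT privilege is required
        }
    }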
+ +package org.apache.doris.nereids.trees.plans.commands.info; + +import org.apache.doris.analysis.Expr; +import org.apache.doris.analysis.SetType; +import org.apache.doris.analysis.SetVar; +import org.apache.doris.catalog.Env; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.UserException; +import org.apache.doris.common.util.ParseUtil; +import org.apache.doris.common.util.TimeUtils; +import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.nereids.CascadesContext; +import org.apache.doris.nereids.exceptions.AnalysisException; +import org.apache.doris.nereids.glue.translator.ExpressionTranslator; +import org.apache.doris.nereids.glue.translator.PlanTranslatorContext; +import org.apache.doris.nereids.properties.PhysicalProperties; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.literal.Literal; +import org.apache.doris.nereids.trees.expressions.literal.StringLiteral; +import org.apache.doris.nereids.trees.plans.logical.LogicalEmptyRelation; +import org.apache.doris.nereids.util.ExpressionUtils; +import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.GlobalVariable; +import org.apache.doris.qe.SessionVariable; +import org.apache.doris.qe.VariableMgr; +import org.apache.doris.system.HeartbeatFlags; + +import java.util.ArrayList; + +/** + * SetSessionVarOp + */ +public class SetSessionVarOp extends SetVarOp { + private String name; + private final Expression expression; + private Literal value; + private final boolean isDefault; + + /** constructor*/ + public SetSessionVarOp(SetType type, String name, Expression expression) { + super(type); + this.name = name; + this.expression = expression; + this.isDefault = expression == null; + } + + @Override + public void validate(ConnectContext ctx) throws UserException { + if (isDefault) { + value = new StringLiteral("default"); + return; + } + value = ExpressionUtils.analyzeAndFoldToLiteral(ctx, expression); + + if (getType() == SetType.GLOBAL) { + if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN"); + } + } + + if (name.equalsIgnoreCase(GlobalVariable.DEFAULT_ROWSET_TYPE)) { + if (value != null && !HeartbeatFlags.isValidRowsetType(value.getStringValue())) { + throw new AnalysisException("Invalid rowset type, now we support {alpha, beta}."); + } + } + + if (name.equalsIgnoreCase(SessionVariable.PREFER_JOIN_METHOD)) { + String val = value.getStringValue(); + if (!val.equalsIgnoreCase("broadcast") && !val.equalsIgnoreCase("shuffle")) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_WRONG_VALUE_FOR_VAR, + SessionVariable.PREFER_JOIN_METHOD, val); + } + } + + // Check variable time_zone value is valid + if (name.equalsIgnoreCase(SessionVariable.TIME_ZONE)) { + this.value = new StringLiteral(TimeUtils.checkTimeZoneValidAndStandardize(value.getStringValue())); + } + + if (name.equalsIgnoreCase(SessionVariable.EXEC_MEM_LIMIT) + || name.equalsIgnoreCase(SessionVariable.SCAN_QUEUE_MEM_LIMIT)) { + this.value = new StringLiteral(Long.toString(ParseUtil.analyzeDataVolume(value.getStringValue()))); + } + + if (name.equalsIgnoreCase(SessionVariable.FILE_SPLIT_SIZE)) { + try { + this.value = new StringLiteral( + Long.toString(ParseUtil.analyzeDataVolume(value.getStringValue()))); + } catch (Throwable t) { + // The way of handling file_split_size 
should be same as exec_mem_limit or scan_queue_mem_limit. + // But ParseUtil.analyzeDataVolume() does not accept 0 as a valid value. + // So for compatibility, we set origin value to file_split_size + // when the value is 0 or other invalid value. + this.value = new StringLiteral(value.getStringValue()); + } + } + + if (name.equalsIgnoreCase("is_report_success")) { + name = SessionVariable.ENABLE_PROFILE; + } + } + + public void run(ConnectContext ctx) throws Exception { + VariableMgr.setVar(ctx.getSessionVariable(), translateToLegacyVar(ctx)); + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append(getType().toSql()); + sb.append(" ").append(name).append(" = ").append(value.toSql()); + return sb.toString(); + } + + public void afterForwardToMaster(ConnectContext ctx) throws Exception { + setType(SetType.SESSION); + VariableMgr.setVarForNonMasterFE(ctx.getSessionVariable(), translateToLegacyVar(ctx)); + } + + // TODO delete this method after removing dependence of SetVar in VariableMgr + private SetVar translateToLegacyVar(ConnectContext ctx) { + if (isDefault) { + return new SetVar(getType(), name, null); + } else { + LogicalEmptyRelation plan = new LogicalEmptyRelation( + ConnectContext.get().getStatementContext().getNextRelationId(), new ArrayList<>()); + CascadesContext cascadesContext = CascadesContext.initContext(ctx.getStatementContext(), plan, + PhysicalProperties.ANY); + Expr expr = ExpressionTranslator.translate(value, new PlanTranslatorContext(cascadesContext)); + return new SetVar(getType(), name, expr); + } + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetUserDefinedVarOp.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetUserDefinedVarOp.java new file mode 100644 index 00000000000000..4cb32d6ec18666 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetUserDefinedVarOp.java @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
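SetSessionVarOp folds the assigned expression into a literal during validate() and then applies per-variable fix-ups: GLOBAL scope requires ADMIN, default_rowset_type must be alpha or beta, prefer_join_method must be broadcast or shuffle, time_zone is standardized, exec_mem_limit / scan_queue_mem_limit / file_split_size are parsed from human-readable sizes into byte counts, and the legacy name is_report_success is rewritten to enable_profile. run() still bridges into the old VariableMgr through a translated SetVar. A hedged, simplified stand-in for ParseUtil.analyzeDataVolume showing the size fold (the real parser accepts more suffixes than this sketch):

    final class DataVolume {
        static long parse(String value) {
            String v = value.trim().toUpperCase();
            long unit = 1L;
            if (v.endsWith("G")) {
                unit = 1024L * 1024L * 1024L;
                v = v.substring(0, v.length() - 1);
            } else if (v.endsWith("M")) {
                unit = 1024L * 1024L;
                v = v.substring(0, v.length() - 1);
            } else if (v.endsWith("K")) {
                unit = 1024L;
                v = v.substring(0, v.length() - 1);
            }
            return Long.parseLong(v) * unit;   // e.g. "4G" -> 4294967296
        }
    }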
+ +package org.apache.doris.nereids.trees.plans.commands.info; + +import org.apache.doris.analysis.SetType; +import org.apache.doris.common.UserException; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.literal.Literal; +import org.apache.doris.nereids.util.ExpressionUtils; +import org.apache.doris.qe.ConnectContext; + +/** + * SetUserDefinedVarOp + */ +public class SetUserDefinedVarOp extends SetVarOp { + private final String name; + private final Expression expression; + private Literal value; + + public SetUserDefinedVarOp(String name, Expression expression) { + super(SetType.USER); + this.name = name; + this.expression = expression; + } + + @Override + public void validate(ConnectContext ctx) throws UserException { + value = ExpressionUtils.analyzeAndFoldToLiteral(ctx, expression); + } + + @Override + public void run(ConnectContext ctx) throws Exception { + ctx.setUserVar(name, value.toLegacyLiteral()); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetUserPropertyVarOp.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetUserPropertyVarOp.java new file mode 100644 index 00000000000000..12db9218a1676e --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetUserPropertyVarOp.java @@ -0,0 +1,125 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.plans.commands.info; + +import org.apache.doris.catalog.Env; +import org.apache.doris.cloud.system.CloudSystemInfoService; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.Config; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.UserException; +import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.mysql.privilege.UserProperty; +import org.apache.doris.qe.ConnectContext; + +import com.google.common.base.Strings; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * SetUserPropertyVarOp + */ +public class SetUserPropertyVarOp { + private final String user; + private final String key; + private final String value; + + public SetUserPropertyVarOp(String user, String key, String value) { + this.key = key; + this.value = value; + this.user = user != null ? 
user : ConnectContext.get().getQualifiedUser(); + } + + public String getPropertyKey() { + return key; + } + + public String getPropertyValue() { + return value; + } + + /**validate*/ + public void validate(ConnectContext ctx) throws UserException { + if (Strings.isNullOrEmpty(key)) { + throw new AnalysisException("User property key is null"); + } + + if (value == null) { + throw new AnalysisException("User property value is null"); + } + + for (Pattern advPattern : UserProperty.ADVANCED_PROPERTIES) { + Matcher matcher = advPattern.matcher(key); + if (matcher.find()) { + if (!Env.getCurrentEnv().getAccessManager() + .checkGlobalPriv(ctx, PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN"); + } + return; + } + } + boolean isSelf = user.equals(ctx.getQualifiedUser()); + for (Pattern commPattern : UserProperty.COMMON_PROPERTIES) { + Matcher matcher = commPattern.matcher(key); + if (matcher.find()) { + if (!isSelf && !Env.getCurrentEnv().getAccessManager().checkGlobalPriv(ctx, + PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, + "GRANT"); + } + if (Config.isCloudMode()) { + // check value, clusterName is valid. + if (key.equals(UserProperty.DEFAULT_CLOUD_CLUSTER) + && !Strings.isNullOrEmpty(value) + && !((CloudSystemInfoService) Env.getCurrentSystemInfo()) + .getCloudClusterNames().contains(value)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_CLOUD_CLUSTER_ERROR, value); + } + + if (key.equals(UserProperty.DEFAULT_COMPUTE_GROUP) + && !Strings.isNullOrEmpty(value) + && !((CloudSystemInfoService) Env.getCurrentSystemInfo()) + .getCloudClusterNames().contains(value)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_CLOUD_CLUSTER_ERROR, value); + } + } + return; + } + } + + throw new AnalysisException("Unknown property key: " + key); + } + + /**toSql*/ + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("'"); + sb.append(key); + sb.append("' = "); + if (value != null) { + sb.append("'"); + sb.append(value); + sb.append("'"); + } else { + sb.append("NULL"); + } + return sb.toString(); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetVarOp.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetVarOp.java new file mode 100644 index 00000000000000..4fff13ca786422 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/info/SetVarOp.java @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
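SetUserPropertyVarOp classifies the property key against the pattern lists from UserProperty: advanced properties always require ADMIN, common properties require ADMIN only when set for another user, and in cloud mode the values of default_cloud_cluster / default_compute_group must name an existing cluster; anything unmatched is rejected. A hedged restatement with the pattern lists passed in explicitly instead of read from UserProperty:

    import java.util.List;
    import java.util.regex.Pattern;

    final class PropertyKeyRule {
        static void check(String key, boolean isSelf, boolean isAdmin,
                List<Pattern> advanced, List<Pattern> common) {
            for (Pattern p : advanced) {
                if (p.matcher(key).find()) {
                    if (!isAdmin) {
                        throw new IllegalStateException("ADMIN privilege required for " + key);
                    }
                    return;
                }
            }
            for (Pattern p : common) {
                if (p.matcher(key).find()) {
                    if (!isSelf && !isAdmin) {
                        throw new IllegalStateException("setting " + key + " for another user requires ADMIN");
                    }
                    return;
                }
            }
            throw new IllegalArgumentException("Unknown property key: " + key);
        }
    }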
+ +package org.apache.doris.nereids.trees.plans.commands.info; + +import org.apache.doris.analysis.SetType; +import org.apache.doris.common.UserException; +import org.apache.doris.qe.ConnectContext; + +/** + * SetVarOp + */ +public abstract class SetVarOp { + private SetType type; + + /** constructor*/ + public SetVarOp(SetType type) { + this.type = type; + } + + /**validate*/ + public void validate(ConnectContext ctx) throws UserException {} + + public void run(ConnectContext ctx) throws Exception {} + + public SetType getType() { + return type; + } + + public void setType(SetType type) { + this.type = type; + } + + public String toSql() { + return ""; + } + + public boolean needAuditEncryption() { + return false; + } + + public void afterForwardToMaster(ConnectContext ctx) throws Exception {} +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertOverwriteTableCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertOverwriteTableCommand.java index 064fccaf521029..c89a4fc7be96ee 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertOverwriteTableCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/insert/InsertOverwriteTableCommand.java @@ -142,6 +142,7 @@ public void run(ConnectContext ctx, StmtExecutor executor) throws Exception { PhysicalTableSink physicalTableSink = ((PhysicalTableSink) plan.get()); TableIf targetTable = physicalTableSink.getTargetTable(); List partitionNames; + boolean wholeTable = false; if (physicalTableSink instanceof PhysicalOlapTableSink) { InternalDatabaseUtil .checkDatabase(((OlapTable) targetTable).getQualifiedDbName(), ConnectContext.get()); @@ -156,7 +157,10 @@ public void run(ConnectContext ctx, StmtExecutor executor) throws Exception { } ConnectContext.get().setSkipAuth(true); partitionNames = ((UnboundTableSink) logicalQuery).getPartitions(); + // If not specific partition to overwrite, means it's a command to overwrite the table. + // not we execute as overwrite every partitions. if (CollectionUtils.isEmpty(partitionNames)) { + wholeTable = true; partitionNames = Lists.newArrayList(targetTable.getPartitionNames()); } } else { @@ -174,9 +178,10 @@ public void run(ConnectContext ctx, StmtExecutor executor) throws Exception { // When inserting, BE will call to replace partition by FrontendService. FE will register new temp // partitions and return. for transactional, the replacement will really occur when insert successed, // i.e. `insertInto` finished. then we call taskGroupSuccess to make replacement. 
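The abstract SetVarOp shown above is the seam all of these operations share: a SetType scope plus overridable validate(), run(), toSql(), needAuditEncryption() and afterForwardToMaster() hooks, which SetOptionsCommand simply iterates. A hedged illustration of the extension point; SetDebugVarOp is a made-up operation, not part of this patch, and it assumes the SetVarOp, SetType and ConnectContext types referenced in the diff:

    public class SetDebugVarOp extends SetVarOp {
        private final boolean enable;

        public SetDebugVarOp(boolean enable) {
            super(SetType.SESSION);
            this.enable = enable;
        }

        @Override
        public void run(ConnectContext ctx) throws Exception {
            // apply the change against the current session here
        }

        @Override
        public String toSql() {
            return "SET debug = " + enable;
        }
    }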
- insertInto(ctx, executor, taskId); + insertIntoAutoDetect(ctx, executor, taskId); insertOverwriteManager.taskGroupSuccess(taskId, (OlapTable) targetTable); } else { + // it's overwrite table(as all partitions) or specific partition(s) List tempPartitionNames = InsertOverwriteUtil.generateTempPartitionNames(partitionNames); if (isCancelled.get()) { LOG.info("insert overwrite is cancelled before registerTask, queryId: {}", @@ -198,7 +203,7 @@ public void run(ConnectContext ctx, StmtExecutor executor) throws Exception { insertOverwriteManager.taskFail(taskId); return; } - insertInto(ctx, executor, tempPartitionNames); + insertIntoPartitions(ctx, executor, tempPartitionNames, wholeTable); if (isCancelled.get()) { LOG.info("insert overwrite is cancelled before replacePartition, queryId: {}", ctx.getQueryIdentifier()); @@ -269,13 +274,15 @@ private void runInsertCommand(LogicalPlan logicalQuery, InsertCommandContext ins } /** - * insert into select. for sepecified temp partitions + * insert into select. for sepecified temp partitions or all partitions(table). * - * @param ctx ctx - * @param executor executor + * @param ctx ctx + * @param executor executor * @param tempPartitionNames tempPartitionNames + * @param wholeTable overwrite target is the whole table. not one by one by partitions(...) */ - private void insertInto(ConnectContext ctx, StmtExecutor executor, List tempPartitionNames) + private void insertIntoPartitions(ConnectContext ctx, StmtExecutor executor, List tempPartitionNames, + boolean wholeTable) throws Exception { // copy sink tot replace by tempPartitions UnboundLogicalSink copySink; @@ -291,9 +298,10 @@ private void insertInto(ConnectContext ctx, StmtExecutor executor, List sink.isPartialUpdate(), sink.getDMLCommandType(), (LogicalPlan) (sink.child(0))); - // 1. for overwrite situation, we disable auto create partition. + // 1. when overwrite table, allow auto partition or not is controlled by session variable. // 2. we save and pass overwrite auto detect by insertCtx - insertCtx = new OlapInsertCommandContext(false, true); + boolean allowAutoPartition = wholeTable && ctx.getSessionVariable().isEnableAutoCreateWhenOverwrite(); + insertCtx = new OlapInsertCommandContext(allowAutoPartition, true); } else if (logicalQuery instanceof UnboundHiveTableSink) { UnboundHiveTableSink sink = (UnboundHiveTableSink) logicalQuery; copySink = (UnboundLogicalSink) UnboundTableSinkCreator.createUnboundTableSink( @@ -332,12 +340,13 @@ private void insertInto(ConnectContext ctx, StmtExecutor executor, List * @param ctx ctx * @param executor executor */ - private void insertInto(ConnectContext ctx, StmtExecutor executor, long groupId) throws Exception { - // 1. for overwrite situation, we disable auto create partition. - // 2. we save and pass overwrite auto-detected by insertCtx + private void insertIntoAutoDetect(ConnectContext ctx, StmtExecutor executor, long groupId) throws Exception { InsertCommandContext insertCtx; if (logicalQuery instanceof UnboundTableSink) { - insertCtx = new OlapInsertCommandContext(false, + // 1. when overwrite auto-detect, allow auto partition or not is controlled by session variable. + // 2. 
we save and pass overwrite auto detect by insertCtx + boolean allowAutoPartition = ctx.getSessionVariable().isEnableAutoCreateWhenOverwrite(); + insertCtx = new OlapInsertCommandContext(allowAutoPartition, ((UnboundTableSink) logicalQuery).isAutoDetectPartition(), groupId, true); } else if (logicalQuery instanceof UnboundHiveTableSink) { insertCtx = new HiveInsertCommandContext(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalQualify.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalQualify.java new file mode 100644 index 00000000000000..ced6730dfb54a2 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalQualify.java @@ -0,0 +1,154 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.trees.plans.logical; + +import org.apache.doris.common.Pair; +import org.apache.doris.nereids.memo.GroupExpression; +import org.apache.doris.nereids.properties.DataTrait.Builder; +import org.apache.doris.nereids.properties.LogicalProperties; +import org.apache.doris.nereids.trees.expressions.Expression; +import org.apache.doris.nereids.trees.expressions.Slot; +import org.apache.doris.nereids.trees.plans.Plan; +import org.apache.doris.nereids.trees.plans.PlanType; +import org.apache.doris.nereids.trees.plans.algebra.Filter; +import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor; +import org.apache.doris.nereids.util.ExpressionUtils; +import org.apache.doris.nereids.util.Utils; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; + +/** + * Logical qualify plan. 
+ */ +public class LogicalQualify extends LogicalUnary implements Filter { + + private final Set conjuncts; + + public LogicalQualify(Set conjuncts, CHILD_TYPE child) { + this(conjuncts, Optional.empty(), Optional.empty(), child); + } + + private LogicalQualify(Set conjuncts, Optional groupExpression, + Optional logicalProperties, CHILD_TYPE child) { + super(PlanType.LOGICAL_QUALIFY, groupExpression, logicalProperties, child); + this.conjuncts = ImmutableSet.copyOf(Objects.requireNonNull(conjuncts, "conjuncts can not be null")); + } + + @Override + public Set getConjuncts() { + return conjuncts; + } + + @Override + public List computeOutput() { + return child().getOutput(); + } + + @Override + public Plan withGroupExpression(Optional groupExpression) { + return new LogicalQualify<>(conjuncts, groupExpression, Optional.of(getLogicalProperties()), child()); + } + + @Override + public Plan withGroupExprLogicalPropChildren(Optional groupExpression, + Optional logicalProperties, List children) { + Preconditions.checkArgument(children.size() == 1); + return new LogicalQualify<>(conjuncts, groupExpression, logicalProperties, children.get(0)); + } + + public LogicalQualify withConjuncts(Set conjuncts) { + return new LogicalQualify<>(conjuncts, Optional.empty(), Optional.of(getLogicalProperties()), child()); + } + + @Override + public String toString() { + return Utils.toSqlString("LogicalQualify[" + id.asInt() + "]", + "predicates", getPredicate() + ); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + LogicalQualify that = (LogicalQualify) o; + return conjuncts.equals(that.conjuncts); + } + + @Override + public int hashCode() { + return Objects.hash(conjuncts); + } + + @Override + public R accept(PlanVisitor visitor, C context) { + return visitor.visitLogicalQualify(this, context); + } + + @Override + public List getExpressions() { + return ImmutableList.copyOf(conjuncts); + } + + @Override + public LogicalQualify withChildren(List children) { + Preconditions.checkArgument(children.size() == 1); + return new LogicalQualify<>(conjuncts, children.get(0)); + } + + @Override + public void computeUnique(Builder builder) { + builder.addUniqueSlot(child(0).getLogicalProperties().getTrait()); + } + + @Override + public void computeUniform(Builder builder) { + for (Expression e : getConjuncts()) { + Set uniformSlots = ExpressionUtils.extractUniformSlot(e); + for (Slot slot : uniformSlots) { + builder.addUniformSlot(slot); + } + } + builder.addUniformSlot(child(0).getLogicalProperties().getTrait()); + } + + @Override + public void computeEqualSet(Builder builder) { + builder.addEqualSet(child().getLogicalProperties().getTrait()); + for (Expression expression : getConjuncts()) { + Optional> equalSlot = ExpressionUtils.extractEqualSlot(expression); + equalSlot.ifPresent(slotSlotPair -> builder.addEqualPair(slotSlotPair.first, slotSlotPair.second)); + } + } + + @Override + public void computeFd(Builder builder) { + builder.addFuncDepsDG(child().getLogicalProperties().getTrait()); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/CommandVisitor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/CommandVisitor.java index f35e6f8a6400b3..03e2853ffa0d12 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/CommandVisitor.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/CommandVisitor.java @@ -43,6 +43,10 @@ import org.apache.doris.nereids.trees.plans.commands.RefreshMTMVCommand; import org.apache.doris.nereids.trees.plans.commands.ReplayCommand; import org.apache.doris.nereids.trees.plans.commands.ResumeMTMVCommand; +import org.apache.doris.nereids.trees.plans.commands.SetDefaultStorageVaultCommand; +import org.apache.doris.nereids.trees.plans.commands.SetOptionsCommand; +import org.apache.doris.nereids.trees.plans.commands.SetTransactionCommand; +import org.apache.doris.nereids.trees.plans.commands.SetUserPropertiesCommand; import org.apache.doris.nereids.trees.plans.commands.ShowConfigCommand; import org.apache.doris.nereids.trees.plans.commands.ShowConstraintsCommand; import org.apache.doris.nereids.trees.plans.commands.ShowCreateMTMVCommand; @@ -201,4 +205,20 @@ default R visitCreateTableLikeCommand(CreateTableLikeCommand createTableLikeComm default R visitShowConfigCommand(ShowConfigCommand showConfigCommand, C context) { return visitCommand(showConfigCommand, context); } + + default R visitSetOptionsCommand(SetOptionsCommand setOptionsCommand, C context) { + return visitCommand(setOptionsCommand, context); + } + + default R visitSetTransactionCommand(SetTransactionCommand setTransactionCommand, C context) { + return visitCommand(setTransactionCommand, context); + } + + default R visitSetUserPropertiesCommand(SetUserPropertiesCommand setUserPropertiesCommand, C context) { + return visitCommand(setUserPropertiesCommand, context); + } + + default R visitSetDefaultStorageVault(SetDefaultStorageVaultCommand setDefaultStorageVaultCommand, C context) { + return visitCommand(setDefaultStorageVaultCommand, context); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/PlanVisitor.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/PlanVisitor.java index 8db1407220cf08..396c6e4f26569f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/PlanVisitor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/visitor/PlanVisitor.java @@ -39,6 +39,7 @@ import org.apache.doris.nereids.trees.plans.logical.LogicalLimit; import org.apache.doris.nereids.trees.plans.logical.LogicalPartitionTopN; import org.apache.doris.nereids.trees.plans.logical.LogicalProject; +import org.apache.doris.nereids.trees.plans.logical.LogicalQualify; import org.apache.doris.nereids.trees.plans.logical.LogicalRelation; import org.apache.doris.nereids.trees.plans.logical.LogicalRepeat; import org.apache.doris.nereids.trees.plans.logical.LogicalSelectHint; @@ -169,6 +170,10 @@ public R visitLogicalFilter(LogicalFilter filter, C context) { return visit(filter, context); } + public R visitLogicalQualify(LogicalQualify filter, C context) { + return visit(filter, context); + } + public R visitLogicalGenerate(LogicalGenerate generate, C context) { return visit(generate, context); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/util/ExpressionUtils.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/util/ExpressionUtils.java index 67734f66aa17c1..bf4d6e084795f1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/util/ExpressionUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/util/ExpressionUtils.java @@ -21,10 +21,18 @@ import org.apache.doris.common.MaterializedViewException; import org.apache.doris.common.NereidsException; import org.apache.doris.common.Pair; +import 
org.apache.doris.common.UserException; import org.apache.doris.nereids.CascadesContext; +import org.apache.doris.nereids.analyzer.Scope; +import org.apache.doris.nereids.analyzer.UnboundSlot; +import org.apache.doris.nereids.exceptions.AnalysisException; import org.apache.doris.nereids.properties.PhysicalProperties; +import org.apache.doris.nereids.rules.analysis.ExpressionAnalyzer; +import org.apache.doris.nereids.rules.expression.ExpressionRewrite; import org.apache.doris.nereids.rules.expression.ExpressionRewriteContext; +import org.apache.doris.nereids.rules.expression.ExpressionRuleExecutor; import org.apache.doris.nereids.rules.expression.rules.FoldConstantRule; +import org.apache.doris.nereids.rules.expression.rules.ReplaceVariableByLiteral; import org.apache.doris.nereids.trees.TreeNode; import org.apache.doris.nereids.trees.expressions.Alias; import org.apache.doris.nereids.trees.expressions.And; @@ -50,9 +58,11 @@ import org.apache.doris.nereids.trees.expressions.literal.BooleanLiteral; import org.apache.doris.nereids.trees.expressions.literal.Literal; import org.apache.doris.nereids.trees.expressions.literal.NullLiteral; +import org.apache.doris.nereids.trees.expressions.literal.StringLiteral; import org.apache.doris.nereids.trees.expressions.visitor.DefaultExpressionRewriter; import org.apache.doris.nereids.trees.expressions.visitor.DefaultExpressionVisitor; import org.apache.doris.nereids.trees.plans.Plan; +import org.apache.doris.nereids.trees.plans.logical.LogicalEmptyRelation; import org.apache.doris.nereids.trees.plans.logical.LogicalUnion; import org.apache.doris.nereids.trees.plans.visitor.ExpressionLineageReplacer; import org.apache.doris.nereids.types.BooleanType; @@ -962,4 +972,48 @@ public static boolean unionConstExprsSatisfyConjuncts(LogicalUnion union, Set()); + LogicalEmptyRelation plan = new LogicalEmptyRelation( + ConnectContext.get().getStatementContext().getNextRelationId(), + new ArrayList<>()); + CascadesContext cascadesContext = CascadesContext.initContext(ctx.getStatementContext(), plan, + PhysicalProperties.ANY); + ExpressionAnalyzer analyzer = new ExpressionAnalyzer(null, scope, cascadesContext, false, false); + Expression boundExpr = UnboundSlotRewriter.INSTANCE.rewrite(expression, null); + Expression analyzedExpr; + try { + analyzedExpr = analyzer.analyze(boundExpr, new ExpressionRewriteContext(cascadesContext)); + } catch (AnalysisException e) { + throw new UserException(expression + " must be constant value"); + } + ExpressionRewriteContext context = new ExpressionRewriteContext(cascadesContext); + ExpressionRuleExecutor executor = new ExpressionRuleExecutor(ImmutableList.of( + ExpressionRewrite.bottomUp(ReplaceVariableByLiteral.INSTANCE) + )); + Expression rewrittenExpression = executor.rewrite(analyzedExpr, context); + Expression foldExpression = FoldConstantRule.evaluate(rewrittenExpression, context); + if (foldExpression instanceof Literal) { + return (Literal) foldExpression; + } else { + throw new UserException(expression + " must be constant value"); + } + } + + private static class UnboundSlotRewriter extends DefaultExpressionRewriter { + public static final UnboundSlotRewriter INSTANCE = new UnboundSlotRewriter(); + + public Expression rewrite(Expression e, Void ctx) { + return e.accept(this, ctx); + } + + @Override + public Expression visitUnboundSlot(UnboundSlot unboundSlot, Void ctx) { + // set exec_mem_limit=21G, '21G' will be parsed as unbound slot + // we need to rewrite it to String Literal '21G' + return new 
StringLiteral(unboundSlot.getName()); + } + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/BarrierLog.java b/fe/fe-core/src/main/java/org/apache/doris/persist/BarrierLog.java index ea849d217d740c..2b4245b290c850 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/BarrierLog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/BarrierLog.java @@ -20,6 +20,7 @@ import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.persist.gson.GsonUtils; +import org.apache.doris.thrift.TBinlogType; import com.google.gson.annotations.SerializedName; @@ -37,6 +38,11 @@ public class BarrierLog implements Writable { @SerializedName(value = "tableName") String tableName; + @SerializedName(value = "binlogType") + int binlogType; + @SerializedName(value = "binlog") + String binlog; + public BarrierLog() { } @@ -47,6 +53,28 @@ public BarrierLog(long dbId, String dbName, long tableId, String tableName) { this.tableName = tableName; } + // A trick: Wrap the binlog as part of the BarrierLog so that it can work in + // the old Doris version without breaking the compatibility. + public BarrierLog(long dbId, long tableId, TBinlogType binlogType, String binlog) { + this.dbId = dbId; + this.tableId = tableId; + this.binlogType = binlogType.getValue(); + this.binlog = binlog; + } + + public boolean hasBinlog() { + return binlog != null; + } + + public String getBinlog() { + return binlog; + } + + // null is returned if binlog is not set or binlogType is not recognized. + public TBinlogType getBinlogType() { + return binlog == null ? null : TBinlogType.findByValue(binlogType); + } + public long getDbId() { return dbId; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/DropInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/DropInfo.java index b30522e942592b..461f3ddd67d5a7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/DropInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/DropInfo.java @@ -38,6 +38,8 @@ public class DropInfo implements Writable { private String tableName; // not used in equals and hashCode @SerializedName(value = "indexId") private long indexId; + @SerializedName(value = "isView") + private boolean isView = false; @SerializedName(value = "forceDrop") private boolean forceDrop = false; @SerializedName(value = "recycleTime") @@ -46,11 +48,13 @@ public class DropInfo implements Writable { public DropInfo() { } - public DropInfo(long dbId, long tableId, String tableName, long indexId, boolean forceDrop, long recycleTime) { + public DropInfo(long dbId, long tableId, String tableName, long indexId, boolean isView, boolean forceDrop, + long recycleTime) { this.dbId = dbId; this.tableId = tableId; this.tableName = tableName; this.indexId = indexId; + this.isView = isView; this.forceDrop = forceDrop; this.recycleTime = recycleTime; } @@ -71,12 +75,16 @@ public long getIndexId() { return this.indexId; } + public boolean isView() { + return this.isView; + } + public boolean isForceDrop() { - return forceDrop; + return this.forceDrop; } public Long getRecycleTime() { - return recycleTime; + return this.recycleTime; } @Override @@ -119,7 +127,7 @@ public boolean equals(Object obj) { DropInfo info = (DropInfo) obj; return (dbId == info.dbId) && (tableId == info.tableId) && (indexId == info.indexId) - && (forceDrop == info.forceDrop) && (recycleTime == info.recycleTime); + && (isView == info.isView) && (forceDrop == info.forceDrop) && (recycleTime == info.recycleTime); } 
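A side note on the analyzeAndFoldToLiteral helper added above: in a statement like `set exec_mem_limit = 21G`, the bare token 21G is parsed as an identifier (an unbound slot), so UnboundSlotRewriter re-reads it as a string literal before variable replacement and constant folding, and anything that still is not a literal afterwards is rejected with "must be constant value". The toy types below are simplified stand-ins for the Nereids classes, shown only to make that one step concrete.

// Simplified stand-ins for Nereids Expression / UnboundSlot / StringLiteral; illustration only.
interface Expr {}

final class Ident implements Expr {              // what a bare 21G parses to
    final String name;
    Ident(String name) { this.name = name; }
}

final class Str implements Expr {                // what the rewriter turns that identifier into
    final String value;
    Str(String value) { this.value = value; }
    @Override public String toString() { return "'" + value + "'"; }
}

final class SetRhsRewriteDemo {
    // bare identifiers on the right-hand side of SET are re-read as string literals before folding
    static Expr rewriteIdentifierToString(Expr e) {
        return (e instanceof Ident) ? new Str(((Ident) e).name) : e;
    }

    public static void main(String[] args) {
        System.out.println(rewriteIdentifierToString(new Ident("21G")));   // prints '21G'
    }
}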
public String toJson() { diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java index 982baf966be82d..5f94c95a2495e4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java @@ -323,6 +323,7 @@ public static void loadJournal(Env env, Long logId, JournalEntity journal) { case OperationType.OP_RENAME_COLUMN: { TableRenameColumnInfo info = (TableRenameColumnInfo) journal.getData(); env.replayRenameColumn(info); + env.getBinlogManager().addColumnRename(info, logId); break; } case OperationType.OP_BACKUP_JOB: { @@ -346,7 +347,7 @@ public static void loadJournal(Env env, Long logId, JournalEntity journal) { for (long indexId : batchDropInfo.getIndexIdSet()) { env.getMaterializedViewHandler().replayDropRollup( new DropInfo(batchDropInfo.getDbId(), batchDropInfo.getTableId(), - batchDropInfo.getTableName(), indexId, false, 0), + batchDropInfo.getTableName(), indexId, false, false, 0), env); } break; diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java b/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java index ae2fff4683581d..ee6f2f74eac43d 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/gson/GsonUtils.java @@ -126,6 +126,7 @@ import org.apache.doris.cloud.load.CopyJob; import org.apache.doris.common.Config; import org.apache.doris.common.FeMetaVersion; +import org.apache.doris.common.io.Text; import org.apache.doris.common.util.RangeUtils; import org.apache.doris.datasource.CatalogIf; import org.apache.doris.datasource.ExternalDatabase; @@ -246,8 +247,13 @@ import org.apache.commons.lang3.reflect.TypeUtils; import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInput; import java.io.DataInputStream; +import java.io.DataOutput; import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; import java.lang.reflect.Method; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; @@ -257,6 +263,8 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; /* * Some utilities about Gson. 
@@ -978,4 +986,26 @@ public T read(JsonReader reader) throws IOException { } } + public static void toJsonCompressed(DataOutput out, Object src) throws IOException { + ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); + try (GZIPOutputStream gzipStream = new GZIPOutputStream(byteStream)) { + try (OutputStreamWriter writer = new OutputStreamWriter(gzipStream)) { + GsonUtils.GSON.toJson(src, writer); + } + } + Text text = new Text(byteStream.toByteArray()); + text.write(out); + } + + public static T fromJsonCompressed(DataInput in, Class clazz) throws IOException { + Text text = new Text(); + text.readFields(in); + + ByteArrayInputStream byteStream = new ByteArrayInputStream(text.getBytes()); + try (GZIPInputStream gzipStream = new GZIPInputStream(byteStream)) { + try (InputStreamReader reader = new InputStreamReader(gzipStream)) { + return GsonUtils.GSON.fromJson(reader, clazz); + } + } + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java index c5a6ec55f6393a..0ebd023ed411ee 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java +++ b/fe/fe-core/src/main/java/org/apache/doris/planner/PlanFragment.java @@ -342,7 +342,6 @@ public TPlanFragment toThrift() { // TODO chenhao , calculated by cost result.setMinReservationBytes(0); result.setInitialReservationTotalClaims(0); - result.setUseSerialSource(useSerialSource(ConnectContext.get())); return result; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogHelper.java b/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogHelper.java index 0bbc37cd47b132..2bd3cf9c335245 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogHelper.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/AuditLogHelper.java @@ -36,6 +36,7 @@ import org.apache.doris.nereids.analyzer.UnboundTableSink; import org.apache.doris.nereids.glue.LogicalPlanAdapter; import org.apache.doris.nereids.trees.plans.Plan; +import org.apache.doris.nereids.trees.plans.commands.NeedAuditEncryption; import org.apache.doris.nereids.trees.plans.commands.insert.InsertIntoTableCommand; import org.apache.doris.nereids.trees.plans.logical.LogicalInlineTable; import org.apache.doris.nereids.trees.plans.logical.LogicalPlan; @@ -259,10 +260,22 @@ private static void logAuditLogImpl(ConnectContext ctx, String origStmt, Stateme auditEventBuilder.setFeIp(FrontendOptions.getLocalHostAddress()); // We put origin query stmt at the end of audit log, for parsing the log more convenient. 
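Looking back at the GsonUtils additions above: toJsonCompressed and fromJsonCompressed frame gzip-compressed JSON inside a Text record, so persistence code can keep writing through ordinary DataOutput/DataInput streams. A minimal round-trip sketch under that assumption, using BarrierLog only as a convenient payload.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.doris.persist.BarrierLog;
import org.apache.doris.persist.gson.GsonUtils;

// Usage sketch, not code from this patch: write an object as gzip-compressed JSON, then read it back.
final class CompressedJsonRoundTrip {
    static BarrierLog roundTrip(BarrierLog log) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bos)) {
            GsonUtils.toJsonCompressed(out, log);        // gzip'd JSON wrapped in a Text record
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
            return GsonUtils.fromJsonCompressed(in, BarrierLog.class);
        }
    }
}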
- if (!ctx.getState().isQuery() && (parsedStmt != null && parsedStmt.needAuditEncryption())) { - auditEventBuilder.setStmt(parsedStmt.toSql()); + if (parsedStmt instanceof LogicalPlanAdapter) { + if (!ctx.getState().isQuery() && (parsedStmt != null + && (((LogicalPlanAdapter) parsedStmt).getLogicalPlan() instanceof NeedAuditEncryption) + && ((NeedAuditEncryption) ((LogicalPlanAdapter) parsedStmt).getLogicalPlan()) + .needAuditEncryption())) { + auditEventBuilder + .setStmt(((NeedAuditEncryption) ((LogicalPlanAdapter) parsedStmt).getLogicalPlan()).toSql()); + } else { + auditEventBuilder.setStmt(origStmt); + } } else { - auditEventBuilder.setStmt(origStmt); + if (!ctx.getState().isQuery() && (parsedStmt != null && parsedStmt.needAuditEncryption())) { + auditEventBuilder.setStmt(parsedStmt.toSql()); + } else { + auditEventBuilder.setStmt(origStmt); + } } auditEventBuilder.setStmtType(getStmtType(parsedStmt)); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java index 2493b8e6203476..cbae761596ddb2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java @@ -554,6 +554,10 @@ public void setUserVar(SetVar setVar) { userVars.put(setVar.getVariable().toLowerCase(), setVar.getResult()); } + public void setUserVar(String name, LiteralExpr value) { + userVars.put(name.toLowerCase(), value); + } + public @Nullable Literal getLiteralForUserVar(String varName) { varName = varName.toLowerCase(); if (userVars.containsKey(varName)) { @@ -1272,11 +1276,6 @@ public String getCloudCluster(boolean updateErr) throws ComputeGroupException { if (!Strings.isNullOrEmpty(defaultCluster)) { cluster = defaultCluster; choseWay = "default compute group"; - if (!Env.getCurrentEnv().getAuth().checkCloudPriv(getCurrentUserIdentity(), - cluster, PrivPredicate.USAGE, ResourceTypeEnum.CLUSTER)) { - throw new ComputeGroupException(String.format("default compute group %s check auth failed", cluster), - ComputeGroupException.FailedTypeEnum.CURRENT_USER_NO_AUTH_TO_USE_DEFAULT_COMPUTE_GROUP); - } } else { CloudClusterResult cloudClusterTypeAndName = getCloudClusterByPolicy(); if (cloudClusterTypeAndName != null && !Strings.isNullOrEmpty(cloudClusterTypeAndName.clusterName)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java index cc1f29b76c2b49..52ea334a14200f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java @@ -675,6 +675,12 @@ public class SessionVariable implements Serializable, Writable { "enable_cooldown_replica_affinity"; public static final String SKIP_CHECKING_ACID_VERSION_FILE = "skip_checking_acid_version_file"; + /** + * Inserting overwrite for auto partition table allows creating partition for + * datas which cannot find partition to overwrite. + */ + public static final String ENABLE_AUTO_CREATE_WHEN_OVERWRITE = "enable_auto_create_when_overwrite"; + /** * If set false, user couldn't submit analyze SQL and FE won't allocate any related resources. 
*/ @@ -2170,7 +2176,6 @@ public void setIgnoreShapePlanNodes(String ignoreShapePlanNodes) { }) public boolean enableFallbackOnMissingInvertedIndex = true; - @VariableMgr.VarAttr(name = IN_LIST_VALUE_COUNT_THRESHOLD, description = { "in条件value数量大于这个threshold后将不会走fast_execute", "When the number of values in the IN condition exceeds this threshold," @@ -2210,6 +2215,14 @@ public void setIgnoreShapePlanNodes(String ignoreShapePlanNodes) { @VariableMgr.VarAttr(name = ENABLE_COOLDOWN_REPLICA_AFFINITY, needForward = true) public boolean enableCooldownReplicaAffinity = true; + @VariableMgr.VarAttr(name = ENABLE_AUTO_CREATE_WHEN_OVERWRITE, description = { + "开启后对自动分区表的 insert overwrite 操作会对没有找到分区的插入数据按自动分区规则创建分区,默认关闭", + "The insert overwrite operation on an auto-partitioned table will create partitions for inserted data" + + " for which no partition is found according to the auto-partitioning rules, which is turned off" + + " by default." + }) + public boolean enableAutoCreateWhenOverwrite = false; + @VariableMgr.VarAttr(name = SKIP_CHECKING_ACID_VERSION_FILE, needForward = true, description = { "跳过检查 transactional hive 版本文件 '_orc_acid_version.'", "Skip checking transactional hive version file '_orc_acid_version.'" @@ -3826,6 +3839,7 @@ public TQueryOptions toThrift() { tResult.setAdaptivePipelineTaskSerialReadOnLimit(adaptivePipelineTaskSerialReadOnLimit); tResult.setInListValueCountThreshold(inListValueCountThreshold); tResult.setEnablePhraseQuerySequentialOpt(enablePhraseQuerySequentialOpt); + tResult.setEnableAutoCreateWhenOverwrite(enableAutoCreateWhenOverwrite); return tResult; } @@ -4337,7 +4351,7 @@ public int getCreateTablePartitionMaxNum() { } public boolean isIgnoreStorageDataDistribution() { - return ignoreStorageDataDistribution && enableLocalShuffle; + return ignoreStorageDataDistribution && enableLocalShuffle && enableNereidsPlanner; } public void setIgnoreStorageDataDistribution(boolean ignoreStorageDataDistribution) { @@ -4375,7 +4389,7 @@ public boolean isEnableCountPushDownForExternalTable() { } public boolean isForceToLocalShuffle() { - return enableLocalShuffle && forceToLocalShuffle; + return enableLocalShuffle && forceToLocalShuffle && enableNereidsPlanner; } public void setForceToLocalShuffle(boolean forceToLocalShuffle) { @@ -4390,6 +4404,10 @@ public int getMaxMsgSizeOfResultReceiver() { return this.maxMsgSizeOfResultReceiver; } + public boolean isEnableAutoCreateWhenOverwrite() { + return this.enableAutoCreateWhenOverwrite; + } + public TSerdeDialect getSerdeDialect() { switch (serdeDialect) { case "doris": diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java index 153823c2edc5a3..6c584f7255a5ca 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java @@ -3407,8 +3407,7 @@ private void handleShowStorageVault() throws AnalysisException { UserIdentity user = ctx.getCurrentUserIdentity(); rows = resp.getStorageVaultList().stream() .filter(storageVault -> auth.checkStorageVaultPriv(user, storageVault.getName(), - PrivPredicate.USAGE) - ) + PrivPredicate.USAGE)) .map(StorageVault::convertToShowStorageVaultProperties) .collect(Collectors.toList()); if (resp.hasDefaultStorageVaultId()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java index eb5a6d7359ed39..494232ef38cb34 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java @@ -1200,7 +1200,13 @@ private void forwardToMaster() throws Exception { LOG.debug("need to transfer to Master. stmt: {}", context.getStmtId()); } masterOpExecutor.execute(); - if (parsedStmt instanceof SetStmt) { + if (parsedStmt instanceof LogicalPlanAdapter) { + // for nereids command + if (((LogicalPlanAdapter) parsedStmt).getLogicalPlan() instanceof Forward) { + Forward forward = (Forward) ((LogicalPlanAdapter) parsedStmt).getLogicalPlan(); + forward.afterForwardToMaster(context); + } + } else if (parsedStmt instanceof SetStmt) { SetStmt setStmt = (SetStmt) parsedStmt; setStmt.modifySetVarsForExecute(); for (SetVar var : setStmt.getSetVars()) { @@ -3121,7 +3127,7 @@ private void handleCtasRollback(TableName table) { } } - private void handleIotStmt() { + private void handleIotStmt() throws AnalysisException { ConnectContext.get().setSkipAuth(true); try { InsertOverwriteTableStmt iotStmt = (InsertOverwriteTableStmt) this.parsedStmt; @@ -3163,9 +3169,11 @@ private void handleOverwriteTable(InsertOverwriteTableStmt iotStmt) { return; } // after success create table insert data + // when overwrite table, allow auto partition or not is controlled by session variable. + boolean allowAutoPartition = context.getSessionVariable().isEnableAutoCreateWhenOverwrite(); try { parsedStmt = new NativeInsertStmt(tmpTableName, null, new LabelName(iotStmt.getDb(), iotStmt.getLabel()), - iotStmt.getQueryStmt(), iotStmt.getHints(), iotStmt.getCols(), true); + iotStmt.getQueryStmt(), iotStmt.getHints(), iotStmt.getCols(), allowAutoPartition); parsedStmt.setUserInfo(context.getCurrentUserIdentity()); execute(); if (MysqlStateType.ERR.equals(context.getState().getStateType())) { @@ -3235,6 +3243,7 @@ private void handleOverwritePartition(InsertOverwriteTableStmt iotStmt) { return; } // after success add tmp partitions + // when overwrite partition, auto creating is always disallowed. try { parsedStmt = new NativeInsertStmt(targetTableName, new PartitionNames(true, tempPartitionName), new LabelName(iotStmt.getDb(), iotStmt.getLabel()), iotStmt.getQueryStmt(), @@ -3277,24 +3286,9 @@ private void handleOverwritePartition(InsertOverwriteTableStmt iotStmt) { } } - /* - * TODO: support insert overwrite auto detect partition in legacy planner - */ - private void handleAutoOverwritePartition(InsertOverwriteTableStmt iotStmt) { - // TODO: - TableName targetTableName = new TableName(null, iotStmt.getDb(), iotStmt.getTbl()); - try { - parsedStmt = new NativeInsertStmt(targetTableName, null, new LabelName(iotStmt.getDb(), iotStmt.getLabel()), - iotStmt.getQueryStmt(), iotStmt.getHints(), iotStmt.getCols(), true).withAutoDetectOverwrite(); - parsedStmt.setUserInfo(context.getCurrentUserIdentity()); - execute(); - } catch (Exception e) { - LOG.warn("IOT insert data error, stmt={}", parsedStmt.toSql(), e); - context.getState().setError(ErrorCode.ERR_UNKNOWN_ERROR, "Unexpected exception: " + e.getMessage()); - handleIotRollback(targetTableName); - return; - } - + private void handleAutoOverwritePartition(InsertOverwriteTableStmt iotStmt) throws AnalysisException { + throw new AnalysisException( + "insert overwrite auto detect is not support in legacy planner. 
use nereids instead"); } private void handleIotRollback(TableName table) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/scheduler/disruptor/TaskHandler.java b/fe/fe-core/src/main/java/org/apache/doris/scheduler/disruptor/TaskHandler.java index de889c1b2e49d9..193f8ece9f7a2c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/scheduler/disruptor/TaskHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/scheduler/disruptor/TaskHandler.java @@ -68,6 +68,8 @@ public void onTransientTaskHandle(TaskEvent taskEvent) { taskExecutor.execute(); } catch (JobException e) { log.warn("Memory task execute failed, taskId: {}, msg : {}", taskId, e.getMessage()); + } finally { + transientTaskManager.removeMemoryTask(taskId); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/scheduler/manager/TransientTaskManager.java b/fe/fe-core/src/main/java/org/apache/doris/scheduler/manager/TransientTaskManager.java index 51edd4af318bb0..7461399c8eb0c5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/scheduler/manager/TransientTaskManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/scheduler/manager/TransientTaskManager.java @@ -22,10 +22,13 @@ import org.apache.doris.scheduler.executor.TransientTaskExecutor; import lombok.Setter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.util.concurrent.ConcurrentHashMap; public class TransientTaskManager { + private static final Logger LOG = LogManager.getLogger(TransientTaskManager.class); /** * key: taskId * value: memory task executor of this task @@ -57,10 +60,20 @@ public Long addMemoryTask(TransientTaskExecutor executor) { Long taskId = executor.getId(); taskExecutorMap.put(taskId, executor); disruptor.tryPublishTask(taskId); + LOG.info("add memory task, taskId: {}", taskId); return taskId; } public void cancelMemoryTask(Long taskId) throws JobException { - taskExecutorMap.get(taskId).cancel(); + try { + taskExecutorMap.get(taskId).cancel(); + } finally { + removeMemoryTask(taskId); + } + } + + public void removeMemoryTask(Long taskId) { + taskExecutorMap.remove(taskId); + LOG.info("remove memory task, taskId: {}", taskId); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java index f592a20f84df3c..cd1af82563d64c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java +++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java @@ -3026,7 +3026,7 @@ private TRestoreSnapshotResult restoreSnapshotImpl(TRestoreSnapshotRequest reque RestoreStmt restoreStmt = new RestoreStmt(label, repoName, restoreTableRefClause, properties, request.getMeta(), request.getJobInfo()); restoreStmt.setIsBeingSynced(); - LOG.trace("restore snapshot info, restoreStmt: {}", restoreStmt); + LOG.debug("restore snapshot info, restoreStmt: {}", restoreStmt); try { ConnectContext ctx = new ConnectContext(); ctx.setQualifiedUser(request.getUser()); @@ -3037,13 +3037,13 @@ private TRestoreSnapshotResult restoreSnapshotImpl(TRestoreSnapshotRequest reque restoreStmt.analyze(analyzer); DdlExecutor.execute(Env.getCurrentEnv(), restoreStmt); } catch (UserException e) { - LOG.warn("failed to restore: {}", e.getMessage(), e); + LOG.warn("failed to restore: {}, stmt: {}", e.getMessage(), restoreStmt, e); status.setStatusCode(TStatusCode.ANALYSIS_ERROR); - status.addToErrorMsgs(e.getMessage()); + status.addToErrorMsgs(e.getMessage() + ", stmt: " + 
restoreStmt.toString()); } catch (Throwable e) { - LOG.warn("catch unknown result.", e); + LOG.warn("catch unknown result. stmt: {}", restoreStmt, e); status.setStatusCode(TStatusCode.INTERNAL_ERROR); - status.addToErrorMsgs(Strings.nullToEmpty(e.getMessage())); + status.addToErrorMsgs(Strings.nullToEmpty(e.getMessage()) + ", stmt: " + restoreStmt.toString()); } finally { ConnectContext.remove(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisJob.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisJob.java index acfd457d8a2108..0ab1fe004f6914 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/AnalysisJob.java @@ -154,7 +154,7 @@ protected void executeWithExceptionOnFail(StmtExecutor stmtExecutor) throws Exce + queryState.getErrorMessage()); } } finally { - AuditLogHelper.logAuditLog(stmtExecutor.getContext(), stmtExecutor.getOriginStmt().toString(), + AuditLogHelper.logAuditLog(stmtExecutor.getContext(), stmtExecutor.getOriginStmt().originStmt, stmtExecutor.getParsedStmt(), stmtExecutor.getQueryStatisticsForAuditLog(), true); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java b/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java index d51281eb0e667c..b0fc3b9c1cfab1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java +++ b/fe/fe-core/src/main/java/org/apache/doris/statistics/util/StatisticsUtil.java @@ -626,6 +626,7 @@ public static long getHiveRowCount(HMSExternalTable table) { long rows = Long.parseLong(parameters.get(NUM_ROWS)); // Sometimes, the NUM_ROWS in hms is 0 but actually is not. Need to check TOTAL_SIZE if NUM_ROWS is 0. if (rows != 0) { + LOG.info("Get row count {} for hive table {} in table parameters.", rows, table.getName()); return rows; } } @@ -639,9 +640,13 @@ public static long getHiveRowCount(HMSExternalTable table) { estimatedRowSize += column.getDataType().getSlotSize(); } if (estimatedRowSize == 0) { + LOG.warn("Hive table {} estimated row size is invalid {}", table.getName(), estimatedRowSize); return -1; } - return totalSize / estimatedRowSize; + long rows = totalSize / estimatedRowSize; + LOG.info("Get row count {} for hive table {} by total size {} and row size {}", + rows, table.getName(), totalSize, estimatedRowSize); + return rows; } /** diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java index b0304648b73471..974c0e0cae13a7 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/Backend.java @@ -47,6 +47,7 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; +import java.security.SecureRandom; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -153,6 +154,8 @@ public class Backend implements Writable { // send some queries to this BE, it is not an important problem. 
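Returning to the getHiveRowCount change above: when NUM_ROWS in HMS is 0, the fallback estimate is simply TOTAL_SIZE divided by the sum of the columns' slot sizes, which is exactly what the new log line reports. A toy calculation with made-up sizes:

// Hypothetical numbers, only to illustrate the totalSize / estimatedRowSize fallback.
final class HiveRowCountEstimateDemo {
    public static void main(String[] args) {
        long totalSize = 1_073_741_824L;        // TOTAL_SIZE from HMS parameters: 1 GiB
        long estimatedRowSize = 4 + 8 + 16;     // assumed slot sizes of three columns
        long rows = estimatedRowSize == 0 ? -1 : totalSize / estimatedRowSize;   // -1 mirrors the invalid-size guard
        System.out.println("estimated rows = " + rows);   // ~38.3 million
    }
}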
private AtomicBoolean isShutDown = new AtomicBoolean(false); + private long nextForceEditlogHeartbeatTime = System.currentTimeMillis() + (new SecureRandom()).nextInt(60 * 1000); + public Backend() { this.host = ""; this.version = ""; @@ -797,13 +800,8 @@ public boolean equals(Object obj) { public String toString() { return "Backend [id=" + id + ", host=" + host + ", heartbeatPort=" + heartbeatPort + ", alive=" + isAlive.get() + ", lastStartTime=" + TimeUtils.longToTimeString(lastStartTime) + ", process epoch=" + lastStartTime - + ", isDecommissioned=" + isDecommissioned + ", tags: " + tagMap + "]"; - } - - public String getHealthyStatus() { - return "Backend [id=" + id + ", isDecommission: " + isDecommissioned - + ", backendStatus: " + backendStatus + ", isAlive: " + isAlive.get() + ", lastUpdateTime: " - + TimeUtils.longToTimeString(lastUpdateMs); + + ", isDecommissioned=" + isDecommissioned + ", tags: " + tagMap + "]" + + ", backendStatus: " + backendStatus; } /** @@ -881,7 +879,18 @@ public boolean handleHbResponse(BackendHbResponse hbResponse, boolean isReplay) heartbeatErrMsg = ""; this.heartbeatFailureCounter = 0; + + // even if no change, write an editlog to make lastUpdateMs in image update + if (System.currentTimeMillis() >= this.nextForceEditlogHeartbeatTime) { + isChanged = true; + int delaySecond = Config.editlog_healthy_heartbeat_seconds + (new SecureRandom()).nextInt(60); + this.nextForceEditlogHeartbeatTime = System.currentTimeMillis() + delaySecond * 1000L; + } } else { + // for a bad BackendHbResponse, its hbTime is last succ hbTime, not this hbTime + if (hbResponse.getHbTime() > 0) { + this.lastUpdateMs = hbResponse.getHbTime(); + } // Only set backend to dead if the heartbeat failure counter exceed threshold. // And if it is a replay process, must set backend to dead. 
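The forced-editlog heartbeat above behaves like a jittered timer: even when nothing about the backend changed, a healthy heartbeat is persisted once the randomized deadline passes, and the next deadline is pushed out by editlog_healthy_heartbeat_seconds plus up to a minute of jitter so that backends do not all log at once. A condensed sketch of that scheduling, with the config value represented as a plain constructor argument rather than Config:

import java.security.SecureRandom;

// Condensed illustration of the "force an editlog for a healthy heartbeat" timer; not the exact Backend code.
final class HeartbeatEditlogTimer {
    private final SecureRandom rand = new SecureRandom();
    private final long healthyIntervalSec;     // stands in for Config.editlog_healthy_heartbeat_seconds
    private long nextForceTime = System.currentTimeMillis() + new SecureRandom().nextInt(60 * 1000);

    HeartbeatEditlogTimer(long healthyIntervalSec) {
        this.healthyIntervalSec = healthyIntervalSec;
    }

    // returns true when this healthy heartbeat should be written to the editlog even without changes
    boolean shouldForceEditlog() {
        long now = System.currentTimeMillis();
        if (now < nextForceTime) {
            return false;
        }
        nextForceTime = now + (healthyIntervalSec + rand.nextInt(60)) * 1000L;
        return true;
    }
}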
if (isReplay || ++this.heartbeatFailureCounter >= Config.max_backend_heartbeat_failure_tolerance_count) { @@ -945,7 +954,9 @@ public class BackendStatus { public String toString() { return "[" + "lastSuccessReportTabletsTime='" + lastSuccessReportTabletsTime + '\'' + ", lastStreamLoadTime=" + lastStreamLoadTime + ", isQueryDisabled=" + isQueryDisabled - + ", isLoadDisabled=" + isLoadDisabled + "]"; + + ", isLoadDisabled=" + isLoadDisabled + + ", currentFragmentNum=" + currentFragmentNum + + ", lastFragmentUpdateTime=" + lastFragmentUpdateTime + "]"; } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/BackendHbResponse.java b/fe/fe-core/src/main/java/org/apache/doris/system/BackendHbResponse.java index a0311a9b737847..479966d2ff3c8c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/BackendHbResponse.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/BackendHbResponse.java @@ -98,18 +98,12 @@ public BackendHbResponse(long beId, int bePort, int httpPort, int brpcPort, long this.beMemory = beMemory; } - public BackendHbResponse(long beId, String errMsg) { - super(HeartbeatResponse.Type.BACKEND); - this.status = HbStatus.BAD; - this.beId = beId; - this.msg = errMsg; - } - - public BackendHbResponse(long beId, String host, String errMsg) { + public BackendHbResponse(long beId, String host, long lastHbTime, String errMsg) { super(HeartbeatResponse.Type.BACKEND); this.status = HbStatus.BAD; this.beId = beId; this.host = host; + this.hbTime = lastHbTime; this.msg = errMsg; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java b/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java index 3fc09b31f2d312..89f55239f7fb56 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatMgr.java @@ -315,13 +315,13 @@ public HeartbeatResponse call() { System.currentTimeMillis(), beStartTime, version, nodeRole, fragmentNum, lastFragmentUpdateTime, isShutDown, arrowFlightSqlPort, beMemory); } else { - return new BackendHbResponse(backendId, backend.getHost(), + return new BackendHbResponse(backendId, backend.getHost(), backend.getLastUpdateMs(), result.getStatus().getErrorMsgs().isEmpty() ? "Unknown error" : result.getStatus().getErrorMsgs().get(0)); } } catch (Exception e) { LOG.warn("backend heartbeat got exception", e); - return new BackendHbResponse(backendId, backend.getHost(), + return new BackendHbResponse(backendId, backend.getHost(), backend.getLastUpdateMs(), Strings.isNullOrEmpty(e.getMessage()) ? 
"got exception" : e.getMessage()); } finally { if (client != null) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatResponse.java b/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatResponse.java index 447ffad81899aa..3fffd1214503d5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatResponse.java +++ b/fe/fe-core/src/main/java/org/apache/doris/system/HeartbeatResponse.java @@ -51,10 +51,12 @@ public enum HbStatus { protected boolean isTypeRead = false; /** - * msg and hbTime are no need to be synchronized to other Frontends, + * msg no need to be synchronized to other Frontends, * and only Master Frontend has these info */ protected String msg; + + @SerializedName(value = "hbTime") protected long hbTime; public HeartbeatResponse(Type type) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/task/AgentBatchTask.java b/fe/fe-core/src/main/java/org/apache/doris/task/AgentBatchTask.java index bf73f9b83fefa8..be698776cac61a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/task/AgentBatchTask.java +++ b/fe/fe-core/src/main/java/org/apache/doris/task/AgentBatchTask.java @@ -55,6 +55,7 @@ import com.google.common.collect.Lists; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.thrift.TException; import java.util.HashMap; import java.util.LinkedList; @@ -67,6 +68,8 @@ public class AgentBatchTask implements Runnable { private static final Logger LOG = LogManager.getLogger(AgentBatchTask.class); + private int batchSize = Integer.MAX_VALUE; + // backendId -> AgentTask List private Map> backendIdToTasks; @@ -74,6 +77,12 @@ public AgentBatchTask() { this.backendIdToTasks = new HashMap>(); } + public AgentBatchTask(int batchSize) { + this.backendIdToTasks = new HashMap>(); + this.batchSize = batchSize; + assert batchSize > 0; + } + public AgentBatchTask(AgentTask singleTask) { this(); addTask(singleTask); @@ -172,14 +181,12 @@ public void run() { List agentTaskRequests = new LinkedList(); for (AgentTask task : tasks) { agentTaskRequests.add(toAgentTaskRequest(task)); - } - client.submitTasks(agentTaskRequests); - if (LOG.isDebugEnabled()) { - for (AgentTask task : tasks) { - LOG.debug("send task: type[{}], backend[{}], signature[{}]", - task.getTaskType(), backendId, task.getSignature()); + if (agentTaskRequests.size() >= batchSize) { + submitTasks(backendId, client, agentTaskRequests); + agentTaskRequests.clear(); } } + submitTasks(backendId, client, agentTaskRequests); ok = true; } catch (Exception e) { LOG.warn("task exec error. 
backend[{}]", backendId, e); @@ -198,6 +205,19 @@ public void run() { } // end for backend } + private static void submitTasks(long backendId, + BackendService.Client client, List agentTaskRequests) throws TException { + if (!agentTaskRequests.isEmpty()) { + client.submitTasks(agentTaskRequests); + } + if (LOG.isDebugEnabled()) { + for (TAgentTaskRequest req : agentTaskRequests) { + LOG.debug("send task: type[{}], backend[{}], signature[{}]", + req.getTaskType(), backendId, req.getSignature()); + } + } + } + private TAgentTaskRequest toAgentTaskRequest(AgentTask task) { TAgentTaskRequest tAgentTaskRequest = new TAgentTaskRequest(); tAgentTaskRequest.setProtocolVersion(TAgentServiceVersion.V1); diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/CancelExportStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/CancelExportStmtTest.java index 2d188230d8b5a8..4ff15653fa0842 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/analysis/CancelExportStmtTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/CancelExportStmtTest.java @@ -234,7 +234,6 @@ public void testExportMgrCancelJob() throws UserException { exportMgr.unprotectAddJob(job3); exportMgr.unprotectAddJob(job4); - // cancel export job where state = "PENDING" Assert.assertTrue(job1.getState() == ExportJobState.PENDING); SlotRef stateSlotRef = new SlotRef(null, "state"); diff --git a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java index 6f3dd2eaaa851a..2d9efd895cb8f5 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/load/routineload/RoutineLoadJobTest.java @@ -362,7 +362,7 @@ public void testGetShowCreateInfo() throws UserException { + "\"desired_concurrent_number\" = \"0\",\n" + "\"max_error_number\" = \"10\",\n" + "\"max_filter_ratio\" = \"1.0\",\n" - + "\"max_batch_interval\" = \"10\",\n" + + "\"max_batch_interval\" = \"60\",\n" + "\"max_batch_rows\" = \"10\",\n" + "\"max_batch_size\" = \"1073741824\",\n" + "\"format\" = \"csv\",\n" diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/parser/NereidsParserTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/parser/NereidsParserTest.java index c8a5364b710828..ff9e81f2bf3cfb 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/parser/NereidsParserTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/parser/NereidsParserTest.java @@ -47,6 +47,7 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import java.util.ArrayList; import java.util.List; import java.util.Optional; import java.util.Set; @@ -660,4 +661,47 @@ public void testCreateRole() { String sql = "create role a comment 'create user'"; nereidsParser.parseSingle(sql); } + + @Test + public void testQualify() { + NereidsParser nereidsParser = new NereidsParser(); + + List sqls = new ArrayList<>(); + sqls.add("select year, country, profit, row_number() over (order by year) as rk from (select * from sales) a where year >= 2000 qualify rk > 1"); + sqls.add("select year, country, profit from (select * from sales) a where year >= 2000 qualify row_number() over (order by year) > 1"); + sqls.add("select year, country, profit from (select * from sales) a where year >= 2000 qualify rank() over (order by year) > 1"); + sqls.add("select year, country, profit from (select * from sales) a where year >= 2000 qualify 
dense_rank() over (order by year) > 1"); + + sqls.add("select country, sum(profit) as total, row_number() over (order by country) as rk from sales where year >= 2000 group by country having sum(profit) > 100 qualify rk = 1"); + sqls.add("select country, sum(profit) as total from sales where year >= 2000 group by country having sum(profit) > 100 qualify row_number() over (order by country) = 1"); + sqls.add("select country, sum(profit) as total from sales where year >= 2000 group by country having sum(profit) > 100 qualify rank() over (order by country) = 1"); + sqls.add("select country, sum(profit) as total from sales where year >= 2000 group by country having sum(profit) > 100 qualify dense_rank() over (order by country) = 1"); + + sqls.add("select country, sum(profit) as total, row_number() over (order by country) as rk from sales where year >= 2000 group by country qualify rk = 1"); + sqls.add("select country, sum(profit) as total from sales where year >= 2000 group by country qualify row_number() over (order by country) = 1"); + sqls.add("select country, sum(profit) as total from sales where year >= 2000 group by country qualify rank() over (order by country) = 1"); + sqls.add("select country, sum(profit) as total from sales where year >= 2000 group by country qualify dense_rank() over (order by country) = 1"); + + sqls.add("select year, country, product, profit, row_number() over (partition by year, country order by profit desc) as rk from sales where year >= 2000 qualify rk = 1 order by profit"); + sqls.add("select year, country, product, profit from sales where year >= 2000 qualify row_number() over (partition by year, country order by profit desc) = 1 order by profit"); + sqls.add("select year, country, product, profit from sales where year >= 2000 qualify rank() over (partition by year, country order by profit desc) = 1 order by profit"); + sqls.add("select year, country, product, profit from sales where year >= 2000 qualify dense_rank() over (partition by year, country order by profit desc) = 1 order by profit"); + + sqls.add("select year, country, profit, row_number() over (partition by year, country order by profit desc) as rk from (select * from sales) a where year >= 2000 having profit > 200 qualify rk = 1"); + sqls.add("select year, country, profit from (select * from sales) a where year >= 2000 having profit > 200 qualify row_number() over (partition by year, country order by profit desc) = 1"); + sqls.add("select year, country, profit from (select * from sales) a where year >= 2000 having profit > 200 qualify rank() over (partition by year, country order by profit desc) = 1"); + sqls.add("select year, country, profit from (select * from sales) a where year >= 2000 having profit > 200 qualify dense_rank() over (partition by year, country order by profit desc) = 1"); + + sqls.add("select distinct year, row_number() over (order by year) as rk from sales group by year qualify rk = 1"); + sqls.add("select distinct year from sales group by year qualify row_number() over (order by year) = 1"); + sqls.add("select distinct year from sales group by year qualify rank() over (order by year) = 1"); + sqls.add("select distinct year from sales group by year qualify dense_rank() over (order by year) = 1"); + + sqls.add("select year, country, profit from (select year, country, profit from (select year, country, profit, row_number() over (partition by year, country order by profit desc) as rk from (select * from sales) a where year >= 2000 having profit > 200) t where rk = 1) a where year 
>= 2000 qualify row_number() over (order by profit) = 1"); + sqls.add("select year, country, profit from (select year, country, profit from (select * from sales) a where year >= 2000 having profit > 200 qualify row_number() over (partition by year, country order by profit desc) = 1) a qualify row_number() over (order by profit) = 1"); + + for (String sql : sqls) { + nereidsParser.parseSingle(sql); + } + } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlotsTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlotsTest.java index 02f3caffa80163..31b135e7fbb970 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlotsTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/analysis/FillUpMissingSlotsTest.java @@ -72,6 +72,16 @@ public void runBeforeAll() throws Exception { + "DISTRIBUTED BY HASH (pk)\n" + "PROPERTIES(\n" + " 'replication_num' = '1'\n" + + ");", + "CREATE TABLE sales (\n" + + " year INT,\n" + + " country STRING,\n" + + " product STRING,\n" + + " profit INT\n" + + ") \n" + + "DISTRIBUTED BY HASH(`year`)\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + ");" ); } @@ -597,4 +607,116 @@ void testSortHaving() { PlanChecker.from(connectContext).analyze(sql) .applyBottomUp(new CheckAfterRewrite()); } + + @Test + void testQualify() { + connectContext.getSessionVariable().setDisableNereidsRules("ELIMINATE_AGG_ON_EMPTYRELATION"); + String sql = "select year + 1, country from sales where year >= 2000 qualify row_number() over (order by profit) > 1"; + PlanChecker.from(connectContext).analyze(sql).rewrite().matches( + logicalResultSink( + logicalProject( + logicalFilter( + logicalWindow( + logicalEmptyRelation()) + ).when(filter -> filter.toString().contains("predicates=(row_number() OVER(ORDER BY profit asc null first)#5 > 1)")) + ) + ) + ); + + sql = "select year + 1, country, row_number() over (order by year) as rk from sales where year >= 2000 qualify rk > profit"; + PlanChecker.from(connectContext).analyze(sql).rewrite().matches( + logicalResultSink( + logicalProject( + logicalFilter( + logicalWindow( + logicalEmptyRelation()) + ).when(filter -> filter.toString().contains("predicates=(rk#5 > cast(profit#3 as BIGINT))")) + ) + ) + ); + + sql = "select year + 1, country from sales where year >= 2000 group by year,country qualify rank() over (order by year) > 1"; + PlanChecker.from(connectContext).analyze(sql).rewrite().matches( + logicalResultSink( + logicalProject( + logicalFilter( + logicalWindow( + logicalProject( + logicalAggregate(logicalEmptyRelation()))) + ).when(filter -> filter.toString().contains("predicates=(rank() OVER(ORDER BY year asc null first)#5 > 1)")) + ) + ) + ); + + sql = "select year + 1, country, sum(profit) as total from sales where year >= 2000 group by year,country having sum(profit) > 100 qualify row_number() over (order by year) > 1"; + PlanChecker.from(connectContext).analyze(sql).rewrite().matches( + logicalResultSink( + logicalProject( + logicalFilter( + logicalWindow( + logicalProject( + logicalFilter( + logicalAggregate(logicalEmptyRelation()) + ).when(filter -> filter.toString().contains("predicates=(total#5 > 100)")) + ) + ) + ).when(filter -> filter.toString().contains("predicates=(row_number() OVER(ORDER BY year asc null first)#6 > 1)")) + ) + ) + ); + + sql = "select distinct year + 1,country from sales qualify row_number() over (order by profit + 1) > 1"; + 
PlanChecker.from(connectContext).analyze(sql).rewrite().matches( + logicalResultSink( + logicalAggregate( + logicalProject( + logicalFilter( + logicalWindow( + logicalEmptyRelation()) + ).when(filter -> filter.toString().contains("predicates=(row_number() OVER(ORDER BY (profit + 1) asc null first)#5 > 1)")) + ) + ) + ) + ); + + sql = "select distinct year + 1 as year,country from sales group by year, country qualify row_number() over (order by year) > 1"; + PlanChecker.from(connectContext).analyze(sql).rewrite().matches( + logicalResultSink( + logicalAggregate( + logicalProject( + logicalFilter( + logicalWindow( + logicalProject(logicalAggregate(logicalEmptyRelation()))) + ).when(filter -> filter.toString().contains("predicates=(row_number() OVER(ORDER BY year asc null first)#5 > 1)")) + ) + ) + ) + ); + + sql = "select distinct year,country,rank() over (order by year) from sales having sum(profit) > 100 qualify row_number() over (order by year) > 1"; + PlanChecker.from(connectContext).analyze(sql).rewrite().matches( + logicalResultSink( + logicalProject( + logicalFilter( + logicalAggregate( + logicalProject( + logicalFilter( + logicalWindow( + logicalEmptyRelation()) + ).when(filter -> filter.toString().contains("predicates=(row_number() OVER(ORDER BY year asc null first)#5 > 1)")) + ) + ) + ).when(filter -> filter.toString().contains("predicates=(sum(profit)#6 > 100)")) + ) + ) + ); + + ExceptionChecker.expectThrowsWithMsg( + AnalysisException.class, + "qualify only used for window expression", + () -> PlanChecker.from(connectContext).analyze( + "select year + 1, country from sales where year >= 2000 qualify year > 1" + ) + ); + } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/rules/NullSafeEqualToEqualTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/rules/NullSafeEqualToEqualTest.java index 8da25e92e7eec7..d4adc821880b60 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/rules/NullSafeEqualToEqualTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/expression/rules/NullSafeEqualToEqualTest.java @@ -54,7 +54,7 @@ void testNullSafeEqualToFalse() { assertRewrite(new NullSafeEqual(new IntegerLiteral(0), NullLiteral.INSTANCE), BooleanLiteral.FALSE); } - // "NULL <=> Null" to false + // "NULL <=> Null" to true @Test void testNullSafeEqualToTrue() { executor = new ExpressionRuleExecutor(ImmutableList.of( diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PullUpJoinFromUnionTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PullUpJoinFromUnionTest.java new file mode 100644 index 00000000000000..3c533216b1acb0 --- /dev/null +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/PullUpJoinFromUnionTest.java @@ -0,0 +1,171 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.nereids.rules.rewrite; + +import org.apache.doris.nereids.util.MemoPatternMatchSupported; +import org.apache.doris.nereids.util.PlanChecker; +import org.apache.doris.utframe.TestWithFeService; + +import org.junit.jupiter.api.Test; + +class PullUpJoinFromUnionTest extends TestWithFeService implements MemoPatternMatchSupported { + @Override + protected void runBeforeAll() throws Exception { + createDatabase("test"); + connectContext.setDatabase("default_cluster:test"); + createTables( + "CREATE TABLE IF NOT EXISTS t1 (\n" + + " id int not null,\n" + + " name char\n" + + ")\n" + + "DUPLICATE KEY(id)\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\"replication_num\" = \"1\")\n", + "CREATE TABLE IF NOT EXISTS t2 (\n" + + " id int not null,\n" + + " name char\n" + + ")\n" + + "DUPLICATE KEY(id)\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\"replication_num\" = \"1\")\n", + "CREATE TABLE IF NOT EXISTS t3 (\n" + + " id int,\n" + + " name char\n" + + ")\n" + + "DUPLICATE KEY(id)\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\"replication_num\" = \"1\")\n" + ); + connectContext.getSessionVariable().setDisableNereidsRules("PRUNE_EMPTY_PARTITION"); + } + + @Test + void testSimple() { + String sql = "select * from t1 join t2 on t1.id = t2.id " + + "union all " + + "select * from t1 join t3 on t1.id = t3.id;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .matches(logicalJoin(logicalProject(logicalUnion()), any())); + } + + @Test + void testProject() { + String sql = "select t2.id from t1 join t2 on t1.id = t2.id " + + "union all " + + "select t3.id from t1 join t3 on t1.id = t3.id;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .matches(logicalJoin(logicalProject(logicalUnion()), any())); + + sql = "select t2.id, t1.name from t1 join t2 on t1.id = t2.id " + + "union all " + + "select t3.id, t1.name from t1 join t3 on t1.id = t3.id;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .matches(logicalJoin(logicalProject(logicalUnion()), any())); + } + + @Test + void testConstant() { + String sql = "select t2.id, t1.name, 1 as id1 from t1 join t2 on t1.id = t2.id " + + "union all " + + "select t3.id, t1.name, 2 as id2 from t1 join t3 on t1.id = t3.id;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .matches(logicalJoin(logicalProject(logicalUnion()), any())); + } + + @Test + void testComplexProject() { + String sql = "select t2.id + 1, t1.name + 1, 1 as id1 from t1 join t2 on t1.id = t2.id " + + "union all " + + "select t3.id + 1, t1.name + 1, 2 as id2 from t1 join t3 on t1.id = t3.id;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .matches(logicalJoin(logicalUnion(), any())); + } + + @Test + void testMissJoinSlot() { + String sql = "select t1.name + 1, 1 as id1 from t1 join t2 on t1.id = t2.id " + + "union all " + + "select t1.name + 1, 2 as id2 from t1 join t3 on t1.id = t3.id;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .matches(logicalJoin(logicalUnion(), any())); + } + + @Test + void testFilter() { + String sql = "select * from t1 join t2 on t1.id = t2.id where t1.name = '' " + + "union all " + + "select * from t1 join t3 on t1.id = t3.id where t1.name = '' ;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .matches(logicalJoin(logicalProject(logicalUnion()), any())); + + sql = 
"select t2.id from t1 join t2 on t1.id = t2.id where t1.name = '' " + + "union all " + + "select t3.id from t1 join t3 on t1.id = t3.id where t1.name = '' ;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .matches(logicalJoin(logicalProject(logicalUnion()), any())); + } + + @Test + void testMultipleJoinConditions() { + String sql = "select * from t1 join t2 on t1.id = t2.id and t1.name = t2.name " + + "union all " + + "select * from t1 join t3 on t1.id = t3.id and t1.name = t3.name;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .matches(logicalJoin(logicalProject(logicalUnion()), any())); + } + + @Test + void testNonEqualityJoinConditions() { + String sql = "select * from t1 join t2 on t1.id < t2.id " + + "union all " + + "select * from t1 join t3 on t1.id < t3.id;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .nonMatch(logicalJoin(logicalProject(logicalUnion()), any())); + } + + @Test + void testSubqueries() { + String sql = "select * from t1 join (select * from t2 where t2.id > 10) s2 on t1.id = s2.id " + + "union all " + + "select * from t1 join (select * from t3 where t3.id > 10) s3 on t1.id = s3.id;"; + PlanChecker.from(connectContext) + .analyze(sql) + .rewrite() + .matches(logicalJoin(logicalProject(logicalUnion()), any())); + } +} diff --git a/fe/fe-core/src/test/java/org/apache/doris/persist/DropAndRecoverInfoTest.java b/fe/fe-core/src/test/java/org/apache/doris/persist/DropAndRecoverInfoTest.java index bdaab002c53180..88aa22ded22e5e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/persist/DropAndRecoverInfoTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/persist/DropAndRecoverInfoTest.java @@ -44,7 +44,7 @@ public void testDropInfoSerialization() throws Exception { DropInfo info1 = new DropInfo(); info1.write(dos); - DropInfo info2 = new DropInfo(1, 2, "t2", -1, true, 0); + DropInfo info2 = new DropInfo(1, 2, "t2", -1, false, true, 0); info2.write(dos); dos.flush(); @@ -65,10 +65,10 @@ public void testDropInfoSerialization() throws Exception { Assert.assertEquals(rInfo2, rInfo2); Assert.assertNotEquals(rInfo2, this); - Assert.assertNotEquals(info2, new DropInfo(0, 2, "t2", -1L, true, 0)); - Assert.assertNotEquals(info2, new DropInfo(1, 0, "t0", -1L, true, 0)); - Assert.assertNotEquals(info2, new DropInfo(1, 2, "t2", -1L, false, 0)); - Assert.assertEquals(info2, new DropInfo(1, 2, "t2", -1L, true, 0)); + Assert.assertNotEquals(info2, new DropInfo(0, 2, "t2", -1L, false, true, 0)); + Assert.assertNotEquals(info2, new DropInfo(1, 0, "t0", -1L, false, true, 0)); + Assert.assertNotEquals(info2, new DropInfo(1, 2, "t2", -1L, false, false, 0)); + Assert.assertEquals(info2, new DropInfo(1, 2, "t2", -1L, false, true, 0)); // 3. 
delete files dis.close(); diff --git a/gensrc/proto/cloud.proto b/gensrc/proto/cloud.proto index 93420bddbf6382..8ae48851601b99 100644 --- a/gensrc/proto/cloud.proto +++ b/gensrc/proto/cloud.proto @@ -642,6 +642,7 @@ message BeginTxnResponse { optional MetaServiceResponseStatus status = 1; optional int64 txn_id = 2; optional int64 dup_txn_id = 3; + optional TxnStatusPB txn_status = 4; // TODO: There may be more fields TBD } diff --git a/gensrc/script/doris_builtins_functions.py b/gensrc/script/doris_builtins_functions.py index 73e68badcdae16..31b02f9b97980e 100644 --- a/gensrc/script/doris_builtins_functions.py +++ b/gensrc/script/doris_builtins_functions.py @@ -2077,7 +2077,10 @@ "Url": [ [['domain'], 'STRING', ['STRING'], ''], [['domain_without_www'], 'STRING', ['STRING'], ''], - [['protocol'], 'STRING', ['STRING'], ''] + [['protocol'], 'STRING', ['STRING'], ''], + [['top_level_domain'], 'STRING', ['STRING'], ''], + [['cut_to_first_significant_subdomain'], 'STRING', ['STRING'], ''], + [['first_significant_subdomain'], 'STRING', ['STRING'], ''] ], # search functions diff --git a/gensrc/thrift/Descriptors.thrift b/gensrc/thrift/Descriptors.thrift index 7d74c1773fec88..dd6ebf2248e083 100644 --- a/gensrc/thrift/Descriptors.thrift +++ b/gensrc/thrift/Descriptors.thrift @@ -352,7 +352,6 @@ struct TJdbcTable { 12: optional i32 connection_pool_max_life_time 13: optional bool connection_pool_keep_alive 14: optional i64 catalog_id - 15: optional bool enable_connection_pool } struct TMCTable { diff --git a/gensrc/thrift/PaloInternalService.thrift b/gensrc/thrift/PaloInternalService.thrift index 62a45260f80c9c..f531db3028224a 100644 --- a/gensrc/thrift/PaloInternalService.thrift +++ b/gensrc/thrift/PaloInternalService.thrift @@ -351,6 +351,7 @@ struct TQueryOptions { 136: optional bool enable_phrase_query_sequential_opt = true; + 137: optional bool enable_auto_create_when_overwrite = false; // For cloud, to control if the content would be written into file cache // In write path, to control if the content would be written into file cache. // In read path, read from file cache or remote storage when execute query. diff --git a/gensrc/thrift/Planner.thrift b/gensrc/thrift/Planner.thrift index ffcc33638db52c..866d8d45320243 100644 --- a/gensrc/thrift/Planner.thrift +++ b/gensrc/thrift/Planner.thrift @@ -64,10 +64,6 @@ struct TPlanFragment { 8: optional i64 initial_reservation_total_claims 9: optional QueryCache.TQueryCacheParam query_cache_param - - // Using serial source means a serial source operator will be used in this fragment (e.g. 
data will be shuffled to - // only 1 exchange operator) and then splitted by followed local exchanger - 10: optional bool use_serial_source } // location information for a single scan range diff --git a/gensrc/thrift/Types.thrift b/gensrc/thrift/Types.thrift index 7fabe64e26e292..235c1cb28378e2 100644 --- a/gensrc/thrift/Types.thrift +++ b/gensrc/thrift/Types.thrift @@ -454,7 +454,6 @@ struct TJdbcExecutorCtorParams { 14: optional i32 connection_pool_cache_clear_time 15: optional bool connection_pool_keep_alive 16: optional i64 catalog_id - 17: optional bool enable_connection_pool } struct TJavaUdfExecutorCtorParams { diff --git a/regression-test/data/auth_call/multi_load_data_1.csv b/regression-test/data/auth_call/multi_load_data_1.csv new file mode 100644 index 00000000000000..8e6b6f5ab16547 --- /dev/null +++ b/regression-test/data/auth_call/multi_load_data_1.csv @@ -0,0 +1,3 @@ +1,111 +2,222 +3,333 diff --git a/regression-test/data/auth_call/multi_load_data_2.csv b/regression-test/data/auth_call/multi_load_data_2.csv new file mode 100644 index 00000000000000..502600b75544df --- /dev/null +++ b/regression-test/data/auth_call/multi_load_data_2.csv @@ -0,0 +1,2 @@ +4,444 +5,555 diff --git a/regression-test/data/auth_call/routine_load_data.csv b/regression-test/data/auth_call/routine_load_data.csv new file mode 100644 index 00000000000000..33a3e989bb3833 --- /dev/null +++ b/regression-test/data/auth_call/routine_load_data.csv @@ -0,0 +1,2 @@ +1,2 +3,5 diff --git a/regression-test/data/auth_call/stream_load_data.csv b/regression-test/data/auth_call/stream_load_data.csv new file mode 100644 index 00000000000000..8e6b6f5ab16547 --- /dev/null +++ b/regression-test/data/auth_call/stream_load_data.csv @@ -0,0 +1,3 @@ +1,111 +2,222 +3,333 diff --git a/regression-test/data/external_table_p0/hive/ddl/test_hive_ddl.out b/regression-test/data/external_table_p0/hive/ddl/test_hive_ddl.out index eab813cf5f316c..96d17545c9be0b 100644 --- a/regression-test/data/external_table_p0/hive/ddl/test_hive_ddl.out +++ b/regression-test/data/external_table_p0/hive/ddl/test_hive_ddl.out @@ -58,3 +58,4 @@ false 1 1000 \N true 1 1000 2.3 true 1 1000 2.3 true 1 1000 2.3 + diff --git a/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_clickhouse_jdbc_catalog_pool_test.out b/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_clickhouse_jdbc_catalog_pool_test.out deleted file mode 100644 index 247a342cb7eedf..00000000000000 Binary files a/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_clickhouse_jdbc_catalog_pool_test.out and /dev/null differ diff --git a/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_mysql_jdbc_catalog_pool_test.out b/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_mysql_jdbc_catalog_pool_test.out deleted file mode 100644 index cb9ca3b5231248..00000000000000 --- a/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_mysql_jdbc_catalog_pool_test.out +++ /dev/null @@ -1,85 +0,0 @@ --- This file is automatically generated. 
You should know what you did if you want to edit this --- !mysql_all_types -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 
\N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types_refresh -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", 
"name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types_refresh -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types_refresh -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types_refresh -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 
-502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types_refresh -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, 
"city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !mysql_all_types -- -\N 302 \N 502 602 4.14159 \N 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 \N -7.1400 row2 \N 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09.5678 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09.5678 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09.5678 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - diff --git a/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_oracle_jdbc_catalog_pool_test.out b/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_oracle_jdbc_catalog_pool_test.out deleted file mode 100644 index fe8440d8a659da..00000000000000 --- 
a/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_oracle_jdbc_catalog_pool_test.out +++ /dev/null @@ -1,109 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !select_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- 
!select_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 
2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !query_ojdbc6_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457000 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !query_ojdbc6_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457000 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 
\N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !query_ojdbc6_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457000 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !query_ojdbc6_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457000 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !query_ojdbc6_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457000 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !query_ojdbc6_all_types_refresh -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457000 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !query_ojdbc6_all_types -- -1 111 123 7456123.89 573 34 673.43 34.1264 60.0 23.231 99 9999 999999999 999999999999999999 999 99999 9999999999 9999999999999999999 1 china beijing alice abcdefghrjkmnopq 123.45 12300 0.0012345 2022-01-21T05:23:01 2019-11-12T20:33:57.999 2019-11-12T20:33:57.999998 2019-11-12T20:33:57.999996 2019-11-12T20:33:57.999997 223-9 12 10:23:1.123457000 -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - diff --git a/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_pg_jdbc_catalog_pool_test.out b/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_pg_jdbc_catalog_pool_test.out deleted file mode 100644 index b063d2e7919e4c..00000000000000 --- a/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_pg_jdbc_catalog_pool_test.out +++ /dev/null @@ -1,69 +0,0 @@ --- This file is automatically generated. 
You should know what you did if you want to edit this --- !select_all_types -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N 
\N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - --- !select_all_types_refresh -- -1 abc def 2022-10-11 1 2 3 2022-10-22T10:59:59 34.123 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} (1.0,1.0) {1.0,1.0,1.0} [(1.0,1.0),(2.0,2.0)] (2.0,2.0),(1.0,1.0) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) ((1.0,1.0),(2.0,2.0),(2.0,1.0)) <(0.0,0.0),1.0> -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N 2 \N \N \N \N \N \N \N \N - diff --git a/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_sqlserver_jdbc_catalog_pool_test.out b/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_sqlserver_jdbc_catalog_pool_test.out deleted file mode 100644 index 87fe3a17dd7813..00000000000000 --- 
a/regression-test/data/external_table_p0/jdbc/connection_pool_test/test_sqlserver_jdbc_catalog_pool_test.out +++ /dev/null @@ -1,81 +0,0 @@ --- This file is automatically generated. You should know what you did if you want to edit this --- !all_type -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type_refresh -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! 
Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type_refresh -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type_refresh -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type_refresh -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type_refresh -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 
922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type_refresh -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type_refresh -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 
2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type_refresh -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type_refresh -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - --- !all_type_refresh -- -1 doris 18 0 1 1 123.123 123.123 123.123 12345678901234567890123456789012345678 12345678901234567890123456789012345678 1234567890123456789012345678.0123456789 1234567890123456789012345678.0123456789 Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! Make Doris Great! 2023-01-17 16:49:05.123 2023-01-17T16:49:05 2023-01-17T16:49:05.123456 2023-01-17T16:49 2023-01-17 16:49:05 +08:00 Make Doris Great! Make Doris Great! 
922337203685477.5807 214748.3647 false -2 \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N \N - diff --git a/regression-test/data/external_table_p0/jdbc/test_mysql_jdbc_catalog.out b/regression-test/data/external_table_p0/jdbc/test_mysql_jdbc_catalog.out index 4e250143aba327..1caeec713e269c 100644 --- a/regression-test/data/external_table_p0/jdbc/test_mysql_jdbc_catalog.out +++ b/regression-test/data/external_table_p0/jdbc/test_mysql_jdbc_catalog.out @@ -243,22 +243,13 @@ information_schema mysql -- !specified_database_3 -- -DORIS -Doris -doris -information_schema -init_db -mysql -show_test_do_not_modify -- !specified_database_4 -- -information_schema -mysql -- !ex_tb1 -- {"k1":"v1", "k2":"v2"} --- !mysql_all_types_count-- +-- !mysql_all_types_count -- 4 -- !mysql_all_types -- @@ -445,3 +436,437 @@ t2 text Yes false \N NONE varchar varchar(65533) Yes true \N int_u bigint Yes false \N NONE +-- !sql -- +internal + +-- !sql -- +mysql_jdbc_catalog + +-- !ex_tb0 -- +111 abc +112 abd +113 abe +114 abf +115 abg + +-- !in_tb -- +111 abc +112 abd +113 abe +114 abf +115 abg + +-- !ex_tb1 -- +{"k1":"v1", "k2":"v2"} + +-- !ex_tb2 -- +123 10 +123 15 +123 20 + +-- !ex_tb3 -- +mus plat_code 1001169339 1590381433914 1590420872639 11 1006061 beijing +mus plat_code 1001169339 1590402594411 1590420872639 11 1006061 beijing +mus plat_code 1001169339 1590406790026 1590420872639 11 1006061 beijing +mus plat_code 1001169339 1590420482288 1590420872639 11 1006061 beijing +mus plat_code 1001169339 1590420872639 1590420872639 11 1006061 beijing + +-- !ex_tb4 -- +1 111 2021-09-01T07:01:01 2021-09-01T08:01:01 1 +2 112 2021-09-02T07:01:01 2021-09-02T08:01:01 1 +3 113 0001-01-01T00:00 2021-12-01T08:01:01 2 +5 115 2021-09-01T07:02:01 2021-09-01T08:01:04 4 +6 116 2021-10-01T07:03:01 2022-09-01T08:02:05 5 + +-- !ex_tb5 -- +1 test_apply_id 123321 zhangsan zhangsan ready ok 2 2022-01-01T02:03:04 + +-- !ex_tb6 -- +639215401565159424 1143681147589283841 test +639237839376089088 1143681147589283841 test123 + +-- !ex_tb7 -- +2 sim 1.000 +2 sim 1.001 +2 sim 1.002 + +-- !ex_tb8 -- +2022-07-15 2222 1 \N +2022-07-15 ddddd 2 0.5 + +-- !ex_tb9 -- +\N +2022-01-01 + +-- !ex_tb10 -- +a 1 2 +b 1 2 +c 1 2 +d 3 2 + +-- !ex_tb11 -- +a 1 +b 1 +c 1 + +-- !ex_tb12 -- +a 1 +b 1 +c 1 + +-- !ex_tb13 -- +张三0 11 1234567 123 321312 1999-02-13T00:00 中国 男 0 +张三1 11 12345678 123 321312 1999-02-13T00:00 中国 男 0 +张三2 11 12345671 123 321312 1999-02-13T00:00 中国 男 0 +张三3 11 12345673 123 321312 1999-02-13T00:00 中国 男 0 +张三4 11 123456711 123 321312 1999-02-13T00:00 中国 男 0 +张三5 11 1232134567 123 321312 1999-02-13T00:00 中国 男 0 +张三6 11 124314567 123 321312 1999-02-13T00:00 中国 男 0 +张三7 11 123445167 123 321312 1998-02-13T00:00 中国 男 0 + +-- !ex_tb14 -- +123 2022-11-02 2022-11-02 8011 oppo +abc 2022-11-02 2022-11-02 8011 agdtb +bca 2022-11-02 2022-11-02 8012 vivo + +-- !ex_tb15 -- +2022-11-04 2022-10-31 2022-11-04 62 5.4103451446E9 7.211386993606482E10 21 10 16 - - 2022-11-04T17:40:19 + +-- !ex_tb16 -- +1 a 0 4 3 6 8 +1 b 0 4 4 8 8 +1 c 0 9 9 5 4 +1 d 0 7 6 1 7 +1 e 0 7 5 6 3 +2 a 0 3 4 1 6 +2 b 0 1 5 4 5 +2 c 0 5 7 9 1 +2 d 0 4 4 8 4 +2 e 0 6 4 7 8 +3 a 0 7 9 4 8 +3 b 0 4 9 8 1 +3 d 0 2 7 1 5 +3 e 0 2 4 3 4 +4 a 0 5 7 4 1 +4 b 0 3 4 2 7 +4 c 0 3 9 3 7 +4 d 0 1 5 6 4 +5 a 0 1 2 2 1 +5 b 0 6 6 2 9 +5 c 0 8 5 7 6 +5 d 0 6 2 7 7 +5 e 0 5 7 9 2 +6 a 0 1 1 8 8 +6 b 0 3 9 1 6 +6 c 0 3 1 3 8 +6 d 0 1 2 4 7 +6 e 0 1 9 7 6 +7 a 0 1 1 3 8 +7 b 0 3 2 8 1 +7 c 0 3 7 7 1 +7 d 0 6 1 5 6 +7 e 0 6 1 3 7 +8 a 0 3 2 8 2 +8 b 0 4 9 4 9 +8 c 0 1 7 1 5 
+8 e 0 4 4 5 4 +9 a 0 8 3 9 1 +9 b 0 2 1 4 2 +9 c 0 8 3 9 8 +9 d 0 6 6 5 3 +9 e 0 9 1 9 7 + +-- !ex_tb17 -- +1 6 1 1 2099.18 3 8 1554296.82 68781940.49 d 8 5 0 d a 7 9 +2 8 9 8 2900.42 1 6 97486621.73 59634489.39 c 3 2 0 a e 7 4 +3 5 7 3 6276.86 8 9 32758730.38 10260499.72 c 8 1 0 d c 9 2 +4 3 7 5 2449.00 6 3 91359059.28 64743145.92 e 7 8 0 b d 8 4 +5 6 4 5 9137.82 2 7 26526675.70 90098303.36 a 6 7 0 d e 4 1 +6 3 6 8 7601.25 4 9 49117098.47 46499188.80 c 3 3 0 c d 4 8 +7 3 2 8 5297.81 9 3 23753694.20 96930000.64 c 7 2 0 b e 1 5 +8 3 6 7 3683.85 5 7 26056250.91 1127755.43 b 7 6 0 d b 4 7 +9 3 9 1 4785.38 1 5 95199488.12 94869703.42 a 4 4 0 c d 2 4 + +-- !ex_tb18 -- +-128 255 -32768 65535 -8388608 16777215 -9223372036854775808 -2147483648 2147483647 4294967295 33.14 422113.141 2342.23 aa asdawdasdaasdasd aaa bbbbbbbb xaqwdqwdqwdqd asdas +1 1 1 1 1 1 1 1 1 1 3.14 13.141 2342.23 aa asdawdasdaasdasd aaa bbbbbbbb xaqwdqwdqwdqdwqwdqwdqd asdadwqdqwddqwdsadqwdas +127 255 32767 65535 8388607 16777215 9223372036854775807 -2147483648 2147483647 4294967295 33.14 422113.141 2342.23 aa asdawdasdaasdasd aaa bbbbbbbb xaqwdqwdqwdqd asdadwqdqwdsadqwdas + +-- !ex_tb19 -- +2022-11-27 07:09:51 2022 2022-11-27T07:09:51 2022-11-27T07:09:51 + +-- !ex_tb20 -- +1.12345 1.12345 1.12345 1.12345 1.12345 1.12345 +123456789012345678901234567890123.12345 12345678901234567890123456789012.12345 1234567890123456789012345678901234.12345 123456789012345678901234567890123.12345 123456789012345678901234567890123456789012345678901234567890.12345 123456789012345678901234567890123456789012345678901234567890.12345 + +-- !ex_tb21_1 -- +2 2 + +-- !ex_tb21_2 -- +2 2 + +-- !ex_tb21_3 -- +1 1 +2 2 + +-- !ex_tb21_4 -- +2 2 + +-- !ex_tb21_5 -- +1 1 +2 2 + +-- !ex_tb21_6 -- +1 1 + +-- !ex_tb21_7 -- +2 1 + +-- !ex_tb21_8 -- +2 2 + +-- !information_schema -- +processlist + +-- !dt -- +2023-06-17T10:00 2023-06-17T10:00:01 2023-06-17T10:00:02 2023-06-17T10:00:03 2023-06-17T10:00:04 2023-06-17T10:00:05 2023-06-17T10:00:06 + +-- !dt_null -- +\N +0001-01-01T00:00 +2023-06-17T10:00 + +-- !test_dz -- +1 \N +2 2022-01-01 +3 0001-01-01 + +-- !test_filter_not -- +张三1 11 12345678 123 321312 1999-02-13T00:00 中国 男 0 +张三2 11 12345671 123 321312 1999-02-13T00:00 中国 男 0 +张三3 11 12345673 123 321312 1999-02-13T00:00 中国 男 0 +张三4 11 123456711 123 321312 1999-02-13T00:00 中国 男 0 +张三5 11 1232134567 123 321312 1999-02-13T00:00 中国 男 0 +张三6 11 124314567 123 321312 1999-02-13T00:00 中国 男 0 +张三7 11 123445167 123 321312 1998-02-13T00:00 中国 男 0 + +-- !test_insert1 -- +doris1 18 + +-- !test_insert2 -- +doris2 19 +doris3 20 + +-- !test_insert3 -- +doris2 19 +doris2 19 +doris3 20 +doris3 20 + +-- !test_insert4 -- +1 abcHa1.12345 1.123450xkalowadawd 2022-10-01 3.14159 1 2 0 100000 1.2345678 24.000 07:09:51 2022 2022-11-27T07:09:51 2022-11-27T07:09:51 + +-- !specified_database_1 -- +doris_test +information_schema +mysql + +-- !specified_database_2 -- +doris_test +information_schema +mysql + +-- !specified_database_3 -- + +-- !specified_database_4 -- + +-- !ex_tb1 -- +{"k1":"v1", "k2":"v2"} + +-- !mysql_all_types_count -- +4 + +-- !mysql_all_types -- +\N 302 0 502 602 4.14159 0.0 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39 2013-10-26T08:09:18 -5.14145 0.0 -7.1400 row2 \N 09:11:09 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 +201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09 text1 0x48656C6C6F20576F726C64 {"age": 
30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 +202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 +203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 + +-- !select_insert_all_types -- +\N 302 0 502 602 4.14159 0.0 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 0.0 -7.1400 row2 \N 09:11:09 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 +201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09 text1 0x48656C6C6F20576F726C64 {"age":30,"city":"London","name":"Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 +202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09 text2 0xE86F6C6C6F20576F726C67 {"age":18,"city":"ChongQing","name":"Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 +203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09 text3 0xE86F6C6C6F20576F726C67 {"age":24,"city":"ChongQing","name":"ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 + +-- !ctas -- +\N 302 0 502 602 4.14159 0.0 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39 2013-10-26T08:09:18 -5.14145 0.0 -7.1400 row2 \N 09:11:09 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 +201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 +202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 +203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 + +-- !ctas_desc -- +bigint bigint Yes false \N NONE +bigint_u largeint Yes false \N NONE +binary text Yes false \N NONE +bit text Yes false \N NONE +blob text Yes false \N NONE +boolean tinyint Yes false \N NONE +char text Yes false \N NONE +date date Yes false \N NONE +datetime datetime Yes false \N NONE +decimal decimal(12,4) Yes false \N NONE +decimal_u decimal(19,5) Yes false \N NONE +double double Yes false 
\N NONE +double_u double Yes false \N NONE +enum text Yes false \N NONE +float float Yes false \N NONE +float_u float Yes false \N NONE +int int Yes false \N NONE +int_u bigint Yes false \N NONE +json text Yes false \N NONE +mediumint int Yes false \N NONE +mediumint_u int Yes true \N +set text Yes false \N NONE +smallint smallint Yes false \N NONE +smallint_u int Yes true \N +text text Yes false \N NONE +time text Yes false \N NONE +timestamp datetime Yes false \N NONE +tinyint tinyint Yes false \N NONE +tinyint_u smallint Yes true \N +varbinary text Yes false \N NONE +varchar text Yes false \N NONE +year smallint Yes false \N NONE + +-- !mysql_view -- +10086 4294967295 201 + +-- !filter1 -- +1 6 1 1 2099.18 3 8 1554296.82 68781940.49 d 8 5 0 d a 7 9 + +-- !filter2 -- +1 6 1 1 2099.18 3 8 1554296.82 68781940.49 d 8 5 0 d a 7 9 +2 8 9 8 2900.42 1 6 97486621.73 59634489.39 c 3 2 0 a e 7 4 +3 5 7 3 6276.86 8 9 32758730.38 10260499.72 c 8 1 0 d c 9 2 +4 3 7 5 2449.00 6 3 91359059.28 64743145.92 e 7 8 0 b d 8 4 +5 6 4 5 9137.82 2 7 26526675.70 90098303.36 a 6 7 0 d e 4 1 +6 3 6 8 7601.25 4 9 49117098.47 46499188.80 c 3 3 0 c d 4 8 +7 3 2 8 5297.81 9 3 23753694.20 96930000.64 c 7 2 0 b e 1 5 +8 3 6 7 3683.85 5 7 26056250.91 1127755.43 b 7 6 0 d b 4 7 +9 3 9 1 4785.38 1 5 95199488.12 94869703.42 a 4 4 0 c d 2 4 + +-- !filter3 -- +1 6 1 1 2099.18 3 8 1554296.82 68781940.49 d 8 5 0 d a 7 9 + +-- !date_trunc -- +2023-06-17T10:00 + +-- !money_format -- +1 + +-- !date_add_year -- +2 2022-01-01 + +-- !date_add_month -- +2 2022-01-01 + +-- !date_add_week -- +2 2022-01-01 + +-- !date_add_day -- +2 2022-01-01 + +-- !date_add_hour -- +2 2022-01-01 + +-- !date_add_min -- +2 2022-01-01 + +-- !date_add_sec -- +2 2022-01-01 + +-- !date_sub_year -- +2 2022-01-01 + +-- !date_sub_month -- +2 2022-01-01 + +-- !date_sub_week -- +2 2022-01-01 + +-- !date_sub_day -- +2 2022-01-01 + +-- !date_sub_hour -- +2 2022-01-01 + +-- !date_sub_min -- +2 2022-01-01 + +-- !date_sub_sec -- +2 2022-01-01 + +-- !auto_default_t1 -- +0 + +-- !auto_default_t2 -- +0 + +-- !sql -- +doris_1 +doris_2 +doris_3 +doris_test +information_schema +init_db +mysql +show_test_do_not_modify + +-- !sql -- +doris_1 +doris_2 +doris_3 + +-- !sql -- +DORIS + +-- !sql -- +Doris + +-- !sql -- +doris + +-- !sql -- +1 + +-- !sql -- +1 + +-- !sql -- +10 0 7744 +11 0 -94 +12 16970 95 +13 0 7023 +14 0 1 +15 3679 -11 +16 0 -1079 +17 -22 0 +18 30995 0 +19 0 -79 +5 0 -127 +6 14680 -26424 +7 -22270 12722 +8 0 0 +9 0 0 + +-- !sql -- + +-- !sql -- +int_u bigint Yes true \N +text varchar(65533) Yes true \N +t2 text Yes false \N NONE + +-- !sql -- +varchar varchar(65533) Yes true \N +int_u bigint Yes false \N NONE + diff --git a/regression-test/data/external_table_p0/jdbc/test_mysql_jdbc_driver5_catalog.out b/regression-test/data/external_table_p0/jdbc/test_mysql_jdbc_driver5_catalog.out deleted file mode 100644 index 736f2b57b4a768..00000000000000 --- a/regression-test/data/external_table_p0/jdbc/test_mysql_jdbc_driver5_catalog.out +++ /dev/null @@ -1,420 +0,0 @@ --- This file is automatically generated. 
You should know what you did if you want to edit this --- !sql -- -internal - --- !show_db -- -DORIS -Doris -doris -doris_test -information_schema -init_db -mysql -show_test_do_not_modify - --- !sql -- -internal - --- !sql -- -mysql_jdbc5_catalog - --- !ex_tb0 -- -111 abc -112 abd -113 abe -114 abf -115 abg - --- !in_tb -- -111 abc -112 abd -113 abe -114 abf -115 abg - --- !ex_tb1 -- -{"k1":"v1", "k2":"v2"} - --- !ex_tb2 -- -123 10 -123 15 -123 20 - --- !ex_tb3 -- -mus plat_code 1001169339 1590381433914 1590420872639 11 1006061 beijing -mus plat_code 1001169339 1590402594411 1590420872639 11 1006061 beijing -mus plat_code 1001169339 1590406790026 1590420872639 11 1006061 beijing -mus plat_code 1001169339 1590420482288 1590420872639 11 1006061 beijing -mus plat_code 1001169339 1590420872639 1590420872639 11 1006061 beijing - --- !ex_tb4 -- -1 111 2021-09-01T07:01:01 2021-09-01T08:01:01 1 -2 112 2021-09-02T07:01:01 2021-09-02T08:01:01 1 -3 113 0001-01-01T00:00 2021-12-01T08:01:01 2 -5 115 2021-09-01T07:02:01 2021-09-01T08:01:04 4 -6 116 2021-10-01T07:03:01 2022-09-01T08:02:05 5 - --- !ex_tb5 -- -1 test_apply_id 123321 zhangsan zhangsan ready ok 2 2022-01-01T02:03:04 - --- !ex_tb6 -- -639215401565159424 1143681147589283841 test -639237839376089088 1143681147589283841 test123 - --- !ex_tb7 -- -2 sim 1.000 -2 sim 1.001 -2 sim 1.002 - --- !ex_tb8 -- -2022-07-15 2222 1 \N -2022-07-15 ddddd 2 0.5 - --- !ex_tb9 -- -\N -2022-01-01 - --- !ex_tb10 -- -a 1 2 -b 1 2 -c 1 2 -d 3 2 - --- !ex_tb11 -- -a 1 -b 1 -c 1 - --- !ex_tb12 -- -a 1 -b 1 -c 1 - --- !ex_tb13 -- -张三0 11 1234567 123 321312 1999-02-13T00:00 中国 男 0 -张三1 11 12345678 123 321312 1999-02-13T00:00 中国 男 0 -张三2 11 12345671 123 321312 1999-02-13T00:00 中国 男 0 -张三3 11 12345673 123 321312 1999-02-13T00:00 中国 男 0 -张三4 11 123456711 123 321312 1999-02-13T00:00 中国 男 0 -张三5 11 1232134567 123 321312 1999-02-13T00:00 中国 男 0 -张三6 11 124314567 123 321312 1999-02-13T00:00 中国 男 0 -张三7 11 123445167 123 321312 1998-02-13T00:00 中国 男 0 - --- !ex_tb14 -- -123 2022-11-02 2022-11-02 8011 oppo -abc 2022-11-02 2022-11-02 8011 agdtb -bca 2022-11-02 2022-11-02 8012 vivo - --- !ex_tb15 -- -2022-11-04 2022-10-31 2022-11-04 62 5.4103451446E9 7.211386993606482E10 21 10 16 - - 2022-11-04T17:40:19 - --- !ex_tb16 -- -1 a 0 4 3 6 8 -1 b 0 4 4 8 8 -1 c 0 9 9 5 4 -1 d 0 7 6 1 7 -1 e 0 7 5 6 3 -2 a 0 3 4 1 6 -2 b 0 1 5 4 5 -2 c 0 5 7 9 1 -2 d 0 4 4 8 4 -2 e 0 6 4 7 8 -3 a 0 7 9 4 8 -3 b 0 4 9 8 1 -3 d 0 2 7 1 5 -3 e 0 2 4 3 4 -4 a 0 5 7 4 1 -4 b 0 3 4 2 7 -4 c 0 3 9 3 7 -4 d 0 1 5 6 4 -5 a 0 1 2 2 1 -5 b 0 6 6 2 9 -5 c 0 8 5 7 6 -5 d 0 6 2 7 7 -5 e 0 5 7 9 2 -6 a 0 1 1 8 8 -6 b 0 3 9 1 6 -6 c 0 3 1 3 8 -6 d 0 1 2 4 7 -6 e 0 1 9 7 6 -7 a 0 1 1 3 8 -7 b 0 3 2 8 1 -7 c 0 3 7 7 1 -7 d 0 6 1 5 6 -7 e 0 6 1 3 7 -8 a 0 3 2 8 2 -8 b 0 4 9 4 9 -8 c 0 1 7 1 5 -8 e 0 4 4 5 4 -9 a 0 8 3 9 1 -9 b 0 2 1 4 2 -9 c 0 8 3 9 8 -9 d 0 6 6 5 3 -9 e 0 9 1 9 7 - --- !ex_tb17 -- -1 6 1 1 2099.18 3 8 1554296.82 68781940.49 d 8 5 0 d a 7 9 -2 8 9 8 2900.42 1 6 97486621.73 59634489.39 c 3 2 0 a e 7 4 -3 5 7 3 6276.86 8 9 32758730.38 10260499.72 c 8 1 0 d c 9 2 -4 3 7 5 2449.00 6 3 91359059.28 64743145.92 e 7 8 0 b d 8 4 -5 6 4 5 9137.82 2 7 26526675.70 90098303.36 a 6 7 0 d e 4 1 -6 3 6 8 7601.25 4 9 49117098.47 46499188.80 c 3 3 0 c d 4 8 -7 3 2 8 5297.81 9 3 23753694.20 96930000.64 c 7 2 0 b e 1 5 -8 3 6 7 3683.85 5 7 26056250.91 1127755.43 b 7 6 0 d b 4 7 -9 3 9 1 4785.38 1 5 95199488.12 94869703.42 a 4 4 0 c d 2 4 - --- !ex_tb18 -- --128 255 -32768 65535 -8388608 16777215 -9223372036854775808 -2147483648 
2147483647 4294967295 33.14 422113.141 2342.23 aa asdawdasdaasdasd aaa bbbbbbbb xaqwdqwdqwdqd asdas -1 1 1 1 1 1 1 1 1 1 3.14 13.141 2342.23 aa asdawdasdaasdasd aaa bbbbbbbb xaqwdqwdqwdqdwqwdqwdqd asdadwqdqwddqwdsadqwdas -127 255 32767 65535 8388607 16777215 9223372036854775807 -2147483648 2147483647 4294967295 33.14 422113.141 2342.23 aa asdawdasdaasdasd aaa bbbbbbbb xaqwdqwdqwdqd asdadwqdqwdsadqwdas - --- !ex_tb19 -- -2022-11-27 07:09:51 2022 2022-11-27T07:09:51 2022-11-27T07:09:51 - --- !ex_tb20 -- -1.12345 1.12345 1.12345 1.12345 1.12345 1.12345 -123456789012345678901234567890123.12345 12345678901234567890123456789012.12345 1234567890123456789012345678901234.12345 123456789012345678901234567890123.12345 123456789012345678901234567890123456789012345678901234567890.12345 123456789012345678901234567890123456789012345678901234567890.12345 - --- !ex_tb21_1 -- -2 2 - --- !ex_tb21_2 -- -2 2 - --- !ex_tb21_3 -- -1 1 -2 2 - --- !ex_tb21_4 -- -2 2 - --- !ex_tb21_5 -- -1 1 -2 2 - --- !ex_tb21_6 -- -1 1 - --- !ex_tb21_7 -- -2 1 - --- !ex_tb21_8 -- -2 2 - --- !information_schema -- -processlist - --- !dt -- -2023-06-17T10:00 2023-06-17T10:00:01 2023-06-17T10:00:02 2023-06-17T10:00:03 2023-06-17T10:00:04 2023-06-17T10:00:05 2023-06-17T10:00:06 - --- !dt_null -- -\N -0001-01-01T00:00 -2023-06-17T10:00 - --- !test_dz -- -1 \N -2 2022-01-01 -3 0001-01-01 - --- !test_filter_not -- -张三1 11 12345678 123 321312 1999-02-13T00:00 中国 男 0 -张三2 11 12345671 123 321312 1999-02-13T00:00 中国 男 0 -张三3 11 12345673 123 321312 1999-02-13T00:00 中国 男 0 -张三4 11 123456711 123 321312 1999-02-13T00:00 中国 男 0 -张三5 11 1232134567 123 321312 1999-02-13T00:00 中国 男 0 -张三6 11 124314567 123 321312 1999-02-13T00:00 中国 男 0 -张三7 11 123445167 123 321312 1998-02-13T00:00 中国 男 0 - --- !test_insert1 -- -doris1 18 - --- !test_insert2 -- -doris2 19 -doris3 20 - --- !test_insert3 -- -doris2 19 -doris2 19 -doris3 20 -doris3 20 - --- !test_insert4 -- -1 abcHa1.12345 1.123450xkalowadawd 2022-10-01 3.14159 1 2 0 100000 1.2345678 24.000 07:09:51 2022 2022-11-27T07:09:51 2022-11-27T07:09:51 - --- !specified_database_1 -- -doris_test -information_schema -mysql - --- !specified_database_2 -- -doris_test -information_schema -mysql - --- !specified_database_3 -- -DORIS -Doris -doris -information_schema -init_db -mysql -show_test_do_not_modify - --- !specified_database_4 -- -information_schema -mysql - --- !ex_tb1 -- -{"k1":"v1", "k2":"v2"} - --- !mysql_all_types -- -\N 302 0 502 602 4.14159 0.0 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39 2013-10-26T08:09:18 -5.14145 0.0 -7.1400 row2 \N 09:11:09 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", 
"name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !select_insert_all_types -- -\N 302 0 502 602 4.14159 0.0 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 0.0 -7.1400 row2 \N 09:11:09 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36.345700 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09 text1 0x48656C6C6F20576F726C64 {"age":30,"city":"London","name":"Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39.345700 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09 text2 0xE86F6C6C6F20576F726C67 {"age":18,"city":"ChongQing","name":"Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09 text3 0xE86F6C6C6F20576F726C67 {"age":24,"city":"ChongQing","name":"ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !ctas -- -\N 302 0 502 602 4.14159 0.0 6.14159 \N -124 -302 2013 -402 -502 -602 \N 2012-10-26T02:08:39 2013-10-26T08:09:18 -5.14145 0.0 -7.1400 row2 \N 09:11:09 text2 0xE86F6C6C6F20576F726C67 \N \N 0x2F \N 0x88656C6C9F Value3 -201 301 401 501 601 3.14159 4.1415926 5.14159 1 -123 -301 2012 -401 -501 -601 2012-10-30 2012-10-25T12:05:36 2012-10-25T08:08:08 -4.14145 -5.1400000001 -6.1400 row1 line1 09:09:09 text1 0x48656C6C6F20576F726C64 {"age": 30, "city": "London", "name": "Alice"} Option1,Option3 0x2A 0x48656C6C6F00000000000000 0x48656C6C6F Value2 -202 302 402 502 602 4.14159 5.1415926 6.14159 0 -124 -302 2013 -402 -502 -602 2012-11-01 2012-10-26T02:08:39 2013-10-26T08:09:18 -5.14145 -6.1400000001 -7.1400 row2 line2 09:11:09 text2 0xE86F6C6C6F20576F726C67 {"age": 18, "city": "ChongQing", "name": "Gaoxin"} Option1,Option2 0x2F 0x58676C6C6F00000000000000 0x88656C6C9F Value3 -203 303 403 503 603 7.14159 8.1415926 9.14159 0 \N -402 2017 -602 -902 -1102 2012-11-02 \N 2013-10-27T08:11:18 -5.14145 -6.1400000000001 -7.1400 row3 line3 09:11:09 text3 0xE86F6C6C6F20576F726C67 {"age": 24, "city": "ChongQing", "name": "ChenQi"} Option2 0x2F 0x58676C6C6F00000000000000 \N Value1 - --- !ctas_desc -- -bigint bigint Yes false \N NONE -bigint_u largeint Yes false \N NONE -binary text Yes false \N NONE -bit text Yes false \N NONE -blob text Yes false \N NONE -boolean tinyint Yes false \N NONE -char text Yes false \N NONE -date date Yes false \N NONE -datetime datetime Yes false \N NONE -decimal decimal(12,4) Yes false \N NONE -decimal_u decimal(19,5) Yes false \N NONE -double double Yes false \N NONE -double_u double Yes false \N NONE -enum text Yes false \N NONE -float float Yes false \N NONE -float_u float Yes false \N NONE -int int Yes false \N NONE -int_u bigint Yes false \N NONE -json text Yes false \N NONE -mediumint int Yes false \N NONE -mediumint_u int Yes true \N -set text Yes false \N NONE -smallint smallint Yes false \N NONE -smallint_u int Yes true \N -text text Yes false \N NONE -time text Yes false \N NONE -timestamp datetime Yes false \N NONE -tinyint tinyint Yes false \N NONE -tinyint_u smallint Yes true \N -varbinary text Yes false \N NONE -varchar text Yes false \N NONE -year smallint Yes false \N NONE - --- !mysql_view -- -10086 
4294967295 201 - --- !filter1 -- -1 6 1 1 2099.18 3 8 1554296.82 68781940.49 d 8 5 0 d a 7 9 - --- !filter2 -- -1 6 1 1 2099.18 3 8 1554296.82 68781940.49 d 8 5 0 d a 7 9 -2 8 9 8 2900.42 1 6 97486621.73 59634489.39 c 3 2 0 a e 7 4 -3 5 7 3 6276.86 8 9 32758730.38 10260499.72 c 8 1 0 d c 9 2 -4 3 7 5 2449.00 6 3 91359059.28 64743145.92 e 7 8 0 b d 8 4 -5 6 4 5 9137.82 2 7 26526675.70 90098303.36 a 6 7 0 d e 4 1 -6 3 6 8 7601.25 4 9 49117098.47 46499188.80 c 3 3 0 c d 4 8 -7 3 2 8 5297.81 9 3 23753694.20 96930000.64 c 7 2 0 b e 1 5 -8 3 6 7 3683.85 5 7 26056250.91 1127755.43 b 7 6 0 d b 4 7 -9 3 9 1 4785.38 1 5 95199488.12 94869703.42 a 4 4 0 c d 2 4 - --- !filter3 -- -1 6 1 1 2099.18 3 8 1554296.82 68781940.49 d 8 5 0 d a 7 9 - --- !date_trunc -- -2023-06-17T10:00 - --- !money_format -- -1 - --- !date_add_year -- -2 2022-01-01 - --- !date_add_month -- -2 2022-01-01 - --- !date_add_week -- -2 2022-01-01 - --- !date_add_day -- -2 2022-01-01 - --- !date_add_hour -- -2 2022-01-01 - --- !date_add_min -- -2 2022-01-01 - --- !date_add_sec -- -2 2022-01-01 - --- !date_sub_year -- -2 2022-01-01 - --- !date_sub_month -- -2 2022-01-01 - --- !date_sub_week -- -2 2022-01-01 - --- !date_sub_day -- -2 2022-01-01 - --- !date_sub_hour -- -2 2022-01-01 - --- !date_sub_min -- -2 2022-01-01 - --- !date_sub_sec -- -2 2022-01-01 - --- !auto_default_t1 -- -0 - --- !auto_default_t2 -- -0 - --- !sql -- -doris_1 -doris_2 -doris_3 -doris_test -information_schema -init_db -mysql -show_test_do_not_modify - --- !sql -- -doris_1 -doris_2 -doris_3 - --- !sql -- -DORIS - --- !sql -- -Doris - --- !sql -- -doris - diff --git a/regression-test/data/external_table_p0/jdbc/test_pg_jdbc_catalog.out b/regression-test/data/external_table_p0/jdbc/test_pg_jdbc_catalog.out index 892d6a8e38284b..1950235745749e 100644 --- a/regression-test/data/external_table_p0/jdbc/test_pg_jdbc_catalog.out +++ b/regression-test/data/external_table_p0/jdbc/test_pg_jdbc_catalog.out @@ -2130,9 +2130,6 @@ true abc def 2022-10-11 1.234 1 2 99 2022-10-22T10:59:59 34.123 -- !filter4 -- 234 bcd --- !filter4_old -- -234 bcd - -- !test12 -- 1 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:aa 1010101010 01010 1 {"id": 1} 2 false 12.123456 10.16.10.14/32 10.16.10.14 ff:ff:ff:ff:ff:ff 0000001010 0000001010 2 {"id": 1} diff --git a/regression-test/data/fault_injection_p0/test_fix_tablet_stat_fault_injection.out b/regression-test/data/fault_injection_p0/test_fix_tablet_stat_fault_injection.out new file mode 100644 index 00000000000000..a9db9fa716ed0d --- /dev/null +++ b/regression-test/data/fault_injection_p0/test_fix_tablet_stat_fault_injection.out @@ -0,0 +1,13 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !select_1 -- +test_fix_tablet_stat_fault_injection test_fix_tablet_stat_fault_injection 518.911 KB 1000 500 0.000 + Total 518.911 KB 1000 0.000 + +-- !select_2 -- +test_fix_tablet_stat_fault_injection test_fix_tablet_stat_fault_injection 9.314 GB 1000 100 0.000 + Total 9.314 GB 1000 0.000 + +-- !select_3 -- +test_fix_tablet_stat_fault_injection test_fix_tablet_stat_fault_injection 114.974 KB 1000 100 0.000 + Total 114.974 KB 1000 0.000 + diff --git a/regression-test/data/insert_overwrite_p0/test_iot_overwrite_and_create.out b/regression-test/data/insert_overwrite_p0/test_iot_overwrite_and_create.out new file mode 100644 index 00000000000000..594c0cfabde723 --- /dev/null +++ b/regression-test/data/insert_overwrite_p0/test_iot_overwrite_and_create.out @@ -0,0 +1,24 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !origin -- +1234567 +Beijing +Shanghai +list +xxx + +-- !0 -- +SHANGHAI +zzz + +-- !1 -- +zzz2 + +-- !2 -- +1234567 +BEIJING +Shanghai +abcd +list +xxx +zzz2 + diff --git a/regression-test/data/insert_overwrite_p0/test_iot_overwrite_and_create_many.out b/regression-test/data/insert_overwrite_p0/test_iot_overwrite_and_create_many.out new file mode 100644 index 00000000000000..b52a4ecbc1ae9e --- /dev/null +++ b/regression-test/data/insert_overwrite_p0/test_iot_overwrite_and_create_many.out @@ -0,0 +1,15 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !sql1 -- +1234567 1 +Beijing 20000 +Shanghai 20000 +list 1 +xxx 1 +zzz 20000 + +-- !sql2 -- +Beijing 20000 +Shanghai 20000 +yyy 20000 +zzz 20000 + diff --git a/regression-test/data/inverted_index_p0/array_contains/arr_null_test_data.csv b/regression-test/data/inverted_index_p0/array_contains/arr_null_test_data.csv new file mode 100644 index 00000000000000..2258751c9315d7 --- /dev/null +++ b/regression-test/data/inverted_index_p0/array_contains/arr_null_test_data.csv @@ -0,0 +1,200 @@ +0|NULL|NULL +1|[]|NULL +2|NULL|[] +3|[null, 'None', null, null, 'None', 'text5', null]|[] +4|[null, 'None', 'None', null, 'text4', null, 'None']|NULL +5|NULL|NULL +6|NULL|[] +7|NULL|NULL +8|[null, 'None', 'None', null, 'text4', 'None', null, 'text7', null]|[] +9|NULL|[] +10|['None', 'text1', 'None', 'text3', 'text4', null, 'text6', null]|['text0', 'None', null, 'text3', 'None'] +11|[null, 'None', null, 'None', null, 'text5', null, null, 'text8']|[null, null, null, 'text3', null, 'text5', null] +12|[]|['text0', 'text1', null, 'text3', 'text4', 'None', 'text6', null] +13|[]|[] +14|NULL|[null, null, null, 'None', 'None'] +15|NULL|NULL +16|NULL|NULL +17|NULL|['text0', null, null, null, 'text4', 'text5', null, null] +18|[null, 'None', 'None', 'None', 'None', null, 'text6', 'text7', null, null]|NULL +19|[]|[] +20|NULL|[] +21|NULL|NULL +22|NULL|NULL +23|NULL|NULL +24|NULL|[] +25|NULL|[] +26|NULL|['text0', 'text1', 'text2', 'text3', null, null, null] +27|[]|NULL +28|NULL|NULL +29|NULL|[] +30|[]|[null, null, null, 'text3', 'None', 'None', null, null, null, null] +31|NULL|NULL +32|NULL|NULL +33|[]|[] +34|['None', null, null, 'text3', 'None', null, null, null, 'text8']|[null, 'None', 'None', 'None', 'None', 'None', null] +35|[]|NULL +36|NULL|[null, 'None', null, 'text3', 'text4', null, 'text6', null, 'None', 'text9'] +37|['text0', null, 'text2', null, 'text4', 'text5']|NULL +38|[]|[null, null, null, 'None', 'text4'] +39|[]|['text0', 'text1', null, 'None', 'None'] +40|NULL|['None', null, 'text2', 'text3', 
'text4', 'None', 'None', null, null, null] +41|[null, 'text1', 'text2', 'None', 'None', 'None', null, null, null]|[null, 'None', null, null, 'text4', 'text5', null, 'text7', null, 'None'] +42|NULL|NULL +43|NULL|NULL +44|NULL|[] +45|['None', null, null, 'None', null, 'None', 'None', 'None']|[] +46|[null, null, 'text2', null, null, null, 'text6', null, 'text8']|NULL +47|NULL|NULL +48|NULL|[] +49|NULL|NULL +50|['text0', null, 'None', null, null, null, null, 'None']|[] +51|NULL|NULL +52|NULL|['text0', 'None', 'None', 'None', 'text4'] +53|[]|NULL +54|['text0', 'None', 'text2', null, 'None', 'None', null, null]|NULL +55|['None', null, null, null, null, 'None', null]|[] +56|NULL|[] +57|['None', 'text1', null, null, null, null, 'None', null]|[null, null, 'None', 'text3', 'None', null, 'None', 'None'] +58|NULL|[] +59|NULL|NULL +60|[]|NULL +61|[null, 'text1', null, 'None', 'text4', null]|[] +62|NULL|NULL +63|[null, null, 'text2', null, 'None', null, null, null, null]|[null, 'text1', 'None', 'text3', null, null, 'text6'] +64|['text0', null, null, null, null, 'text5']|NULL +65|NULL|['text0', 'None', 'None', 'None', null, 'None', null, 'None', 'text8', 'None'] +66|NULL|NULL +67|[]|NULL +68|['None', null, null, 'text3', 'None', null, 'text6', 'None']|NULL +69|NULL|['text0', null, null, 'None', 'text4', 'text5', null] +70|['text0', null, null, null, 'text4']|[] +71|NULL|[null, 'text1', null, null, 'text4', 'text5'] +72|NULL|[] +73|NULL|[] +74|[null, 'None', null, 'None', null, null]|[null, 'text1', 'None', null, 'text4', null] +75|NULL|NULL +76|[]|[] +77|[]|[] +78|[null, null, 'None', 'None', null, null]|['None', 'text1', null, 'None', null, null, null, 'None', null] +79|NULL|[] +80|[]|[] +81|[]|NULL +82|NULL|NULL +83|['None', 'None', null, 'None', 'None', 'text5', null, null, null, 'text9']|['None', null, 'None', null, 'text4', 'None', 'None', 'text7', 'None'] +84|NULL|['None', 'None', 'text2', null, 'None', null, null, 'None'] +85|NULL|[null, null, 'text2', 'text3', null, 'text5'] +86|[]|['text0', null, null, 'text3', null, 'text5'] +87|NULL|['None', null, null, null, 'None'] +88|NULL|[] +89|[null, 'None', 'None', null, 'text4', null, 'text6']|[] +90|NULL|['None', null, 'None', null, 'None', 'text5', 'text6', 'text7', null] +91|NULL|[null, 'None', null, 'text3', null, null, 'text6', 'text7', 'None'] +92|NULL|['None', 'None', 'None', null, null, 'text5', null, null] +93|[]|[] +94|[null, null, 'None', 'text3', 'text4', null]|[] +95|NULL|[null, 'None', 'None', null, 'None', null, null, 'None'] +96|NULL|NULL +97|[]|NULL +98|[]|NULL +99|['text0', 'text1', null, 'text3', 'text4', null, null, null, null, 'None']|['None', 'text1', 'text2', 'text3', null, null, 'None', null] +100|NULL|NULL +101|NULL|[null, null, null, null, 'text4', null, 'None', 'None'] +102|[null, 'None', 'None', 'text3', 'None', null, null, null, null]|['None', null, null, 'text3', null] +103|NULL|NULL +104|NULL|[null, 'None', 'text2', null, 'None', null] +105|[null, null, 'text2', 'text3', null]|NULL +106|NULL|[] +107|NULL|['text0', null, null, null, 'None'] +108|NULL|NULL +109|NULL|[] +110|[null, 'text1', 'None', null, 'None', null, 'text6', 'text7']|[] +111|[null, null, 'None', 'text3', null, 'None']|[null, 'text1', null, null, 'None', null] +112|NULL|[null, 'None', null, null, 'text4', 'text5', 'text6', 'None'] +113|NULL|[] +114|NULL|['None', null, null, null, null, 'text5'] +115|[null, null, 'None', 'text3', null]|['text0', 'text1', null, null, null] +116|NULL|NULL +117|NULL|[] +118|['text0', null, null, null, 'None', 'text5', 'text6', 
'None', 'text8']|NULL +119|[null, 'text1', 'text2', 'None', null, null, 'text6', 'None']|NULL +120|[null, 'None', null, 'None', null, null, null, null, null]|['text0', 'text1', 'None', 'None', null, 'None', null, null] +121|NULL|[null, null, 'text2', null, 'None'] +122|NULL|[] +123|['None', 'text1', 'None', 'None', null, 'text5']|[null, 'None', null, null, 'None', 'None', null] +124|[]|[null, 'None', null, null, null, null] +125|NULL|NULL +126|NULL|[] +127|NULL|NULL +128|NULL|[] +129|['text0', 'None', 'None', 'text3', null, 'text5']|['None', 'text1', 'text2', 'None', null, null, 'None', null, 'None'] +130|NULL|NULL +131|['text0', 'text1', 'text2', 'None', null, null, 'None']|NULL +132|NULL|NULL +133|NULL|NULL +134|[]|[] +135|NULL|NULL +136|NULL|[null, null, null, null, null, 'None', 'text6', null] +137|[]|NULL +138|NULL|[] +139|NULL|['text0', null, 'text2', null, 'text4', null, null, 'None', 'text8', null] +140|[]|[] +141|[]|[] +142|NULL|NULL +143|NULL|NULL +144|[]|['None', 'text1', null, null, 'text4', null, 'text6', null, null, 'None'] +145|[]|NULL +146|[]|['text0', 'None', null, null, null, 'None', null] +147|NULL|NULL +148|['None', 'None', null, 'None', 'text4', 'None']|['None', null, null, null, 'None'] +149|NULL|['None', null, null, 'None', 'None', null, null, 'text7'] +150|['text0', 'text1', 'None', 'text3', 'text4', 'None']|[] +151|[]|[] +152|NULL|NULL +153|NULL|NULL +154|[null, 'text1', null, null, null, 'None', 'None', 'text7']|[null, null, null, null, 'text4', 'text5', 'text6'] +155|NULL|['None', 'text1', 'text2', 'text3', 'None', null, 'None', 'text7', 'text8', 'None'] +156|[null, 'None', null, 'text3', null, 'text5', 'text6', null]|NULL +157|NULL|['None', 'None', 'text2', null, null] +158|[]|[] +159|NULL|[] +160|NULL|NULL +161|[]|[] +162|[null, 'None', null, 'None', 'text4']|NULL +163|NULL|NULL +164|['None', 'None', 'None', null, null, null, 'text6', null, null]|NULL +165|[]|NULL +166|NULL|[] +167|NULL|NULL +168|[]|[] +169|[]|['text0', 'text1', null, null, 'text4', null] +170|NULL|[null, null, 'None', 'None', 'None', 'text5', 'None', null, 'None', 'None'] +171|NULL|NULL +172|[]|NULL +173|NULL|[] +174|['None', 'text1', null, 'None', 'text4', null, null, 'None', 'text8']|[] +175|NULL|NULL +176|[]|NULL +177|NULL|[] +178|[]|[] +179|NULL|NULL +180|[null, null, null, null, 'text4', null, null, null, 'text8']|NULL +181|[]|[null, 'None', null, null, 'text4', null, 'text6', 'None'] +182|NULL|['None', 'text1', null, 'None', 'None', null, 'None', 'text7'] +183|NULL|[null, null, 'None', 'text3', null, 'None', null, 'None', null, null] +184|[null, 'None', null, 'None', null, 'text5', 'None', 'text7']|NULL +185|[]|NULL +186|NULL|[null, null, null, 'text3', 'text4'] +187|[null, null, 'None', 'text3', 'text4']|NULL +188|[]|[] +189|[null, null, 'text2', null, 'None', null, null, 'None', 'text8']|NULL +190|['None', null, 'None', null, 'None', 'text5', 'None', null, 'text8', null]|[] +191|NULL|NULL +192|NULL|NULL +193|['None', null, 'text2', 'None', null]|NULL +194|NULL|[] +195|[]|NULL +196|NULL|NULL +197|[null, 'text1', 'text2', 'None', 'text4', null, null]|[null, 'None', 'None', 'None', null, null, null, 'text7', null, null] +198|NULL|NULL +199|[]|[null, 'None', 'None', null, null, null, null, null, null, 'text9'] diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query11.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query11.out index 16316c630a5d62..d8cf8455c334a3 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query11.out +++ 
b/regression-test/data/nereids_hint_tpcds_p0/shape/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query14.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query14.out index 61fba77c876be0..0d4af14e34d2cc 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query14.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2001) and 
(date_dim.d_year >= 1999)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -97,16 +87,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[ss_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[ss_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF12 i_item_sk->[ss_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF11 ss_item_sk->[ss_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF10 RF11 RF12 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -120,16 +110,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[cs_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[cs_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF14 
ss_item_sk->[cs_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[cs_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF13 RF14 RF15 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF15 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -143,16 +133,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF20 i_item_sk->[ss_item_sk,ws_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF19 ss_item_sk->[ws_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF18 i_item_sk->[ss_item_sk,ws_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF17 ss_item_sk->[ws_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 RF18 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF20 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) 
apply RFs: RF18 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query4.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query4.out index 57c89d80df9807..ee0be517b75e68 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query4.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query71.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query71.out index 0df4b3fff81a7f..fc0e6dc12ce55e 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query71.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query71.out @@ -9,34 +9,25 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecHash] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 
d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) +--------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalProject --------------------------filter((item.i_manager_id = 1)) ----------------------------PhysicalOlapScan[item] diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query74.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query74.out index 8b171914ebd371..30e95b3fd06a84 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query74.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_hint_tpcds_p0/shape/query76.out b/regression-test/data/nereids_hint_tpcds_p0/shape/query76.out index b612ba67e96ec1..473b9fded85715 100644 --- a/regression-test/data/nereids_hint_tpcds_p0/shape/query76.out +++ b/regression-test/data/nereids_hint_tpcds_p0/shape/query76.out @@ -7,40 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] 
hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[ss_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] ---------------------------PhysicalProject -----------------------------filter(ss_customer_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[ws_sold_date_sk] ---------------------PhysicalProject -----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 ws_item_sk->[i_item_sk] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] ------------------------PhysicalProject ---------------------------PhysicalOlapScan[item] apply RFs: RF2 +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] +----------------------------PhysicalProject +------------------------------filter(ss_customer_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] ------------------------PhysicalProject ---------------------------filter(ws_promo_sk IS NULL) -----------------------------PhysicalOlapScan[web_sales] apply RFs: RF3 ---------------------PhysicalProject -----------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 ws_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF1 +----------------------------PhysicalProject +------------------------------filter(ws_promo_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] apply RFs: RF3 +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 i_item_sk->[cs_item_sk] +----------------------------PhysicalProject +------------------------------filter(cs_bill_customer_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF2 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] 
hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF4 i_item_sk->[cs_item_sk] ---------------------------PhysicalProject -----------------------------filter(cs_bill_customer_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF4 RF5 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] +--------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_p0/dist_expr_list/dist_expr_list.out b/regression-test/data/nereids_p0/dist_expr_list/dist_expr_list.out new file mode 100644 index 00000000000000..b0ee877195aef8 --- /dev/null +++ b/regression-test/data/nereids_p0/dist_expr_list/dist_expr_list.out @@ -0,0 +1,16 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !shuffle -- +2 2 2 2 1 2 2 2.00000000 4.00000000 109.20 109.20 109.20 1 +2 2 2 2 1 2 2 6.00000000 4.00000000 109.20 109.20 109.20 1 +2 2 2 2 3 2 2 2.00000000 4.00000000 109.20 109.20 109.20 1 +2 2 2 2 3 2 2 6.00000000 4.00000000 109.20 109.20 109.20 1 +3 3 3 3 1 3 3 3.00000000 9.00000000 99.50 99.50 99.50 1 +3 3 3 3 1 3 3 6.00000000 9.00000000 99.50 99.50 99.50 1 +3 3 3 3 1 3 3 9.00000000 9.00000000 99.50 99.50 99.50 1 +3 3 3 3 2 3 3 3.00000000 9.00000000 99.50 99.50 99.50 1 +3 3 3 3 2 3 3 6.00000000 9.00000000 99.50 99.50 99.50 1 +3 3 3 3 2 3 3 9.00000000 9.00000000 99.50 99.50 99.50 1 +3 3 3 3 3 3 3 3.00000000 9.00000000 99.50 99.50 99.50 1 +3 3 3 3 3 3 3 6.00000000 9.00000000 99.50 99.50 99.50 1 +3 3 3 3 3 3 3 9.00000000 9.00000000 99.50 99.50 99.50 1 + diff --git a/regression-test/data/nereids_rules_p0/pull_up_join_from_union/pull_up_join_from_union.out b/regression-test/data/nereids_rules_p0/pull_up_join_from_union/pull_up_join_from_union.out new file mode 100644 index 00000000000000..10820e9ee48414 --- /dev/null +++ b/regression-test/data/nereids_rules_p0/pull_up_join_from_union/pull_up_join_from_union.out @@ -0,0 +1,306 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !basic_join_union -- +PhysicalResultSink +--hashJoin[INNER_JOIN] hashCondition=((id = id)) otherCondition=() +----PhysicalUnion +------PhysicalOlapScan[table_b] +------PhysicalOlapScan[table_c] +----PhysicalOlapScan[table_a] + +-- !three_way_union -- +PhysicalResultSink +--hashJoin[INNER_JOIN] hashCondition=((id = id)) otherCondition=() +----PhysicalUnion +------PhysicalOlapScan[table_b] +------PhysicalOlapScan[table_c] +------PhysicalOlapScan[table_d] +----PhysicalOlapScan[table_a] + +-- !union_with_projections -- +PhysicalResultSink +--hashJoin[INNER_JOIN] hashCondition=((id = id)) otherCondition=() +----PhysicalUnion +------PhysicalOlapScan[table_b] +------PhysicalOlapScan[table_c] +----PhysicalOlapScan[table_a] + +-- !union_with_constants -- +PhysicalResultSink +--hashJoin[INNER_JOIN] hashCondition=((id = id)) otherCondition=() +----PhysicalUnion +------PhysicalOlapScan[table_b] +------PhysicalOlapScan[table_c] +----PhysicalOlapScan[table_a] + +-- !union_with_loss_slots -- +PhysicalResultSink +--hashJoin[INNER_JOIN] hashCondition=((id = id)) otherCondition=() +----PhysicalUnion +------PhysicalOlapScan[table_b] +------PhysicalOlapScan[table_c] +----PhysicalOlapScan[table_a] + +-- !different_join_conditions -- +PhysicalResultSink +--PhysicalUnion +----hashJoin[INNER_JOIN] hashCondition=((a.id = b.id)) otherCondition=() +------PhysicalOlapScan[table_a] +------PhysicalOlapScan[table_b] +----hashJoin[INNER_JOIN] hashCondition=((a.name = c.name)) otherCondition=() +------PhysicalOlapScan[table_a] +------PhysicalOlapScan[table_c] + +-- !multi_column_join -- +PhysicalResultSink +--hashJoin[INNER_JOIN] hashCondition=((id = id) and (name = name)) otherCondition=() +----PhysicalUnion +------PhysicalOlapScan[table_b] +------PhysicalOlapScan[table_c] +----PhysicalOlapScan[table_a] + +-- !left_joins -- +PhysicalResultSink +--PhysicalUnion +----hashJoin[LEFT_OUTER_JOIN] hashCondition=((a.id = b.id)) otherCondition=() +------PhysicalOlapScan[table_a] +------PhysicalOlapScan[table_b] +----hashJoin[LEFT_OUTER_JOIN] hashCondition=((a.id = c.id)) otherCondition=() +------PhysicalOlapScan[table_a] +------PhysicalOlapScan[table_c] + +-- !subquery_join -- +PhysicalResultSink +--hashJoin[INNER_JOIN] hashCondition=((id = id)) otherCondition=() +----PhysicalUnion +------hashAgg[LOCAL] +--------PhysicalOlapScan[table_b] +------hashAgg[LOCAL] +--------PhysicalOlapScan[table_c] +----PhysicalOlapScan[table_a] + +-- !complex_join_condition1 -- +PhysicalResultSink +--PhysicalUnion +----hashJoin[INNER_JOIN] hashCondition=((expr_cast(id as BIGINT) = expr_(cast(id as BIGINT) - 1))) otherCondition=() +------PhysicalOlapScan[table_a] +------PhysicalOlapScan[table_b] +----hashJoin[INNER_JOIN] hashCondition=((expr_cast(id as BIGINT) = expr_(cast(id as BIGINT) - 1))) otherCondition=() +------PhysicalOlapScan[table_a] +------PhysicalOlapScan[table_c] + +-- !complex_join_condition2 -- +PhysicalResultSink +--PhysicalUnion +----hashJoin[INNER_JOIN] hashCondition=((expr_cast(id as BIGINT) = expr_(cast(id as BIGINT) - 1))) otherCondition=() +------PhysicalOlapScan[table_a] +------PhysicalOlapScan[table_b] +----hashJoin[INNER_JOIN] hashCondition=((expr_cast(id as DOUBLE) = expr_(cast(id as DOUBLE) - 1.0))) otherCondition=() +------PhysicalOlapScan[table_a] +------PhysicalOlapScan[table_c] + +-- !union_filter1 -- +PhysicalResultSink +--hashJoin[INNER_JOIN] hashCondition=((id = id)) otherCondition=() +----PhysicalUnion +------filter((b.id = 1)) 
+--------PhysicalOlapScan[table_b] +------filter((c.id = 1)) +--------PhysicalOlapScan[table_c] +----filter((a.id = 1)) +------PhysicalOlapScan[table_a] + +-- !union_filter2 -- +PhysicalResultSink +--hashJoin[INNER_JOIN] hashCondition=((id = id)) otherCondition=() +----PhysicalUnion +------PhysicalOlapScan[table_b] +------PhysicalOlapScan[table_c] +----filter((cast(value as DOUBLE) = 1.0)) +------PhysicalOlapScan[table_a] + +-- !basic_join_union_res -- +1 Alice Value_B1 +1 Alice Value_C1 +2 Bob Value_B2 +3 Charlie Value_C3 + +-- !three_way_union_res -- +1 Alice Value_B1 +1 Alice Value_C1 +1 Alice Value_D1 +2 Bob Value_B2 +2 Bob Value_D2 +3 Charlie Value_C3 +3 Charlie Value_D3 + +-- !union_with_projections_res -- +1 Alice VALUE_B1 +1 Alice value_c1 +2 Bob VALUE_B2 +3 Charlie value_c3 + +-- !union_with_constants_res -- +1 Alice Value_B1 B +1 Alice Value_C1 C +2 Bob Value_B2 B +3 Charlie Value_C3 C + +-- !union_with_loss_slots_res -- +1 +1 +2 +3 + +-- !different_join_conditions_res -- +1 Alice Value_B1 +1 Alice Value_C1 +2 Bob Value_B2 +3 Charlie Value_C3 + +-- !multi_column_join_res -- +1 Alice Value_B1 +1 Alice Value_C1 +2 Bob Value_B2 +3 Charlie Value_C3 + +-- !left_joins_res -- +1 Alice Value_B1 +1 Alice Value_C1 +2 Bob \N +2 Bob Value_B2 +3 Charlie \N +3 Charlie Value_C3 +5 Eva \N +5 Eva \N + +-- !subquery_join_res -- +1 Alice Value_B1 +1 Alice Value_C1 +2 Bob Value_B2 +3 Charlie Value_C3 + +-- !complex_join_condition1_res -- +1 Alice Value_B2 +2 Bob Value_C3 +3 Charlie Value_B4 +3 Charlie Value_C4 +5 Eva Value_B6 + +-- !complex_join_condition2_res -- +1 Alice Value_B2 +2 Bob Value_C3 +3 Charlie Value_B4 +3 Charlie Value_C4 +5 Eva Value_B6 + +-- !union_filter1_res -- +1 Alice Value_B1 B +1 Alice Value_C1 C + +-- !union_filter2_res -- + +-- !expr -- +1 4 +34 25 +34 35 +34 35 +34 35 +79 25 +101 5 + +-- !const -- +1 2 +34 2 +34 3 +34 3 +34 3 +79 2 +101 2 + +-- !multi_condition -- +\N 2 +\N 2 +\N 2 +1 2 +2 3 +11 2 + +-- !multi_condition2 -- +1 2 +2 3 +11 2 + +-- !multi_differenct_count_condition -- + +-- !no_common_side_project -- +1 4 +34 25 +34 35 +34 35 +34 35 +79 25 +101 5 + +-- !common_slot_differnt -- +34 5 +34 25 +34 101 + +-- !other_expr_differnt -- +1 4 +34 25 +34 103 +34 123 +34 199 +79 25 +101 5 + +-- !2_same_tables -- +0 0 +0 1 +33 1 +33 33 +78 1 +78 78 +100 1 +100 100 + +-- !simple_column -- +0 0 +0 0 +33 33 +33 33 +78 78 +78 78 +100 100 +100 100 + +-- !func_column -- +1 2 +34 2 +34 3 +34 3 +34 3 +79 2 +101 2 + +-- !other_join_slot_differnt -- +34 +34 +34 + +-- !join_common_slot_has_expr -- + +-- !can_not_transform -- +0 2 +2 0 +3 100 +23 33 +23 78 +33 23 +78 23 +100 3 + +-- !other_side_condition_slot_has_expr_do_transform -- +13 + diff --git a/regression-test/data/nereids_syntax_p0/test_limit.out b/regression-test/data/nereids_syntax_p0/test_limit.out deleted file mode 100644 index 3e83144c18b7bd..00000000000000 --- a/regression-test/data/nereids_syntax_p0/test_limit.out +++ /dev/null @@ -1,10 +0,0 @@ --- This file is automatically generated. 
You should know what you did if you want to edit this --- !limit1 -- -2 7844 TURNER SALESMAN 7698 1981-09-08 1500.0 0.0 30 - --- !lmit2 -- -3 7934 MILLER CLERK 7782 1982-01-23 1300.0 0.0 10 - --- !lmit3 -- -3 7934 MILLER CLERK 7782 1982-01-23 1300.0 0.0 10 - diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query11.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query11.out index 5593df9194937d..35504b7f44d24e 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query11.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query14.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query14.out index 813924b9b99e18..61f29b11211346 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] 
+------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -97,16 +87,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[ss_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[ss_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF12 i_item_sk->[ss_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF11 ss_item_sk->[ss_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF10 RF11 RF12 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -120,16 +110,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 
i_item_sk->[cs_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[cs_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF14 ss_item_sk->[cs_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[cs_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF13 RF14 RF15 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF15 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -143,16 +133,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF20 i_item_sk->[ss_item_sk,ws_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF19 ss_item_sk->[ws_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF18 i_item_sk->[ss_item_sk,ws_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF17 ss_item_sk->[ws_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] ----------------------------------------------PhysicalProject 
-------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 RF18 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF20 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF18 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query4.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query4.out index d99f0294700040..709da33d851bff 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query4.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query71.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query71.out index 068d0b83b15427..0d26e1f81ccb94 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query71.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query71.out @@ -9,37 +9,28 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecHash] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) -------------------------------------PhysicalOlapScan[date_dim] 
---------------------------PhysicalDistribute[DistributionSpecHash] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((item.i_manager_id = 1)) +--------------------------------PhysicalOlapScan[item] ------------------------PhysicalProject ---------------------------filter((item.i_manager_id = 1)) -----------------------------PhysicalOlapScan[item] +--------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) +----------------------------PhysicalOlapScan[date_dim] --------------------PhysicalProject ----------------------filter(t_meal_time IN ('breakfast', 'dinner')) ------------------------PhysicalOlapScan[time_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query74.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query74.out index 8b171914ebd371..30e95b3fd06a84 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query74.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = 
customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query76.out b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query76.out index b612ba67e96ec1..473b9fded85715 100644 --- a/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query76.out +++ b/regression-test/data/nereids_tpcds_shape_sf1000_p0/shape/query76.out @@ -7,40 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[ss_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] ---------------------------PhysicalProject -----------------------------filter(ss_customer_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalProject -------------------hashJoin[INNER_JOIN shuffle] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[ws_sold_date_sk] ---------------------PhysicalProject -----------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 ws_item_sk->[i_item_sk] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] ------------------------PhysicalProject ---------------------------PhysicalOlapScan[item] apply RFs: RF2 +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] +----------------------------PhysicalProject +------------------------------filter(ss_customer_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] ------------------------PhysicalProject ---------------------------filter(ws_promo_sk IS NULL) -----------------------------PhysicalOlapScan[web_sales] apply RFs: RF3 ---------------------PhysicalProject -----------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = 
item.i_item_sk)) otherCondition=() build RFs:RF1 ws_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF1 +----------------------------PhysicalProject +------------------------------filter(ws_promo_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] apply RFs: RF3 +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 i_item_sk->[cs_item_sk] +----------------------------PhysicalProject +------------------------------filter(cs_bill_customer_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF2 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF4 i_item_sk->[cs_item_sk] ---------------------------PhysicalProject -----------------------------filter(cs_bill_customer_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF4 RF5 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] +--------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query11.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query11.out index a30f38dbe4c49a..8cab83d94f65ac 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query11.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query14.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query14.out index d08a6aedb094e3..d817c6f0053791 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = 
date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -97,12 +87,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[ss_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() --------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF14 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 ----------------------------------------------PhysicalCteConsumer ( 
cteId=CTEId#0 ) --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] @@ -120,12 +110,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[cs_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() --------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF17 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 ----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] @@ -143,12 +133,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF20 d_date_sk->[ws_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() --------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF20 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 ----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query4.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query4.out index 19e9098ee4555e..a083e5a72ef86a 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query4.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject 
-------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query71.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query71.out index 4af8cfcf0e3ea9..7ae1c5b71ddaf1 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query71.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query71.out @@ -9,34 +9,25 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecHash] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 
+--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) +--------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalProject --------------------------filter((item.i_manager_id = 1)) ----------------------------PhysicalOlapScan[item] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query74.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query74.out index d8a82ca998ac09..6915274e1a1301 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query74.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query76.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query76.out index 1bbcdaee7491b7..2f21640b079929 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query76.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/noStatsRfPrune/query76.out @@ -7,41 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() ---------------------------PhysicalProject -----------------------------filter(ss_hdemo_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject 
-------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() +----------------------------PhysicalProject +------------------------------filter(ss_hdemo_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() +----------------------------PhysicalProject +------------------------------filter(ws_bill_addr_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() +----------------------------PhysicalProject +------------------------------filter(cs_warehouse_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() ---------------------------PhysicalProject -----------------------------filter(ws_bill_addr_sk IS NULL) -------------------------------PhysicalOlapScan[web_sales] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() ---------------------------PhysicalProject -----------------------------filter(cs_warehouse_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] +--------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query11.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query11.out index eaf1cde7b0a304..e963fc6a8caa7a 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query11.out +++ 
b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query14.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query14.out index 04bf133065b269..e73d45b0732736 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 
+----------------PhysicalProject +------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -97,13 +87,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[ss_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[ss_item_sk,ss_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF12 ss_item_sk->[ss_item_sk] +------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF11 i_item_sk->[ss_item_sk,ss_item_sk] +--------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF10 ss_item_sk->[ss_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF13 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF10 RF11 RF12 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF11 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject @@ -120,13 +110,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[cs_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF16 i_item_sk->[cs_item_sk,ss_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF15 ss_item_sk->[cs_item_sk] 
+------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[cs_item_sk,ss_item_sk] +--------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[cs_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF16 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF13 RF14 RF15 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject @@ -143,13 +133,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF20 d_date_sk->[ws_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF19 i_item_sk->[ss_item_sk,ws_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF18 ss_item_sk->[ws_item_sk] +------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[ss_item_sk,ws_item_sk] +--------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[ws_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF19 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 RF18 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query4.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query4.out index 546119842b58bd..9a590246f64a4a 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query4.out +++ 
b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query71.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query71.out index 4af8cfcf0e3ea9..7ae1c5b71ddaf1 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query71.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query71.out @@ -9,34 +9,25 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecHash] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) 
-------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) +--------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalProject --------------------------filter((item.i_manager_id = 1)) ----------------------------PhysicalOlapScan[item] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query74.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query74.out index 64a56e4e850db7..c32a9187e34e92 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query74.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query76.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query76.out index 768d8a9e8dcf7f..8f739a1d12b35a 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query76.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/no_stats_shape/query76.out @@ -7,41 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ------------------PhysicalProject 
---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[ss_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] ---------------------------PhysicalProject -----------------------------filter(ss_hdemo_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] +----------------------------PhysicalProject +------------------------------filter(ss_hdemo_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 i_item_sk->[ws_item_sk] +----------------------------PhysicalProject +------------------------------filter(ws_bill_addr_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] apply RFs: RF1 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 i_item_sk->[cs_item_sk] +----------------------------PhysicalProject +------------------------------filter(cs_warehouse_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF2 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[ws_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 i_item_sk->[ws_item_sk] ---------------------------PhysicalProject -----------------------------filter(ws_bill_addr_sk IS NULL) -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF2 RF3 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] 
hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF4 i_item_sk->[cs_item_sk] ---------------------------PhysicalProject -----------------------------filter(cs_warehouse_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF4 RF5 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] +--------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query11.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query11.out index b699aa67e934a0..e7ae73f8e00980 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query11.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query14.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query14.out index 9d7cfc860ad6a4..48ac240d961d98 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and 
(date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -100,9 +90,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() ----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF10 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] @@ -123,9 +113,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() ----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[cs_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF13 
----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] @@ -146,9 +136,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() ----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query4.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query4.out index 43138dc7c62651..980ceef87cedc2 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query4.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query71.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query71.out index b78dcd1d31e61d..3010f0b574e03b 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query71.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query71.out @@ -9,37 +9,28 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion 
---------------------------PhysicalDistribute[DistributionSpecExecutionAny] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((item.i_manager_id = 1)) +--------------------------------PhysicalOlapScan[item] ------------------------PhysicalProject ---------------------------filter((item.i_manager_id = 1)) -----------------------------PhysicalOlapScan[item] +--------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) +----------------------------PhysicalOlapScan[date_dim] 
--------------------PhysicalProject ----------------------filter(t_meal_time IN ('breakfast', 'dinner')) ------------------------PhysicalOlapScan[time_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query74.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query74.out index f300a896a4d563..c3687dadd21872 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query74.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query76.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query76.out index 0e72c30a539c8c..668c3625c56841 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query76.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/rf_prune/query76.out @@ -7,41 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 ss_sold_date_sk->[d_date_sk] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 ss_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF1 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 ss_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF0 ---------------------------PhysicalProject -----------------------------filter(ss_hdemo_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------PhysicalOlapScan[date_dim] apply RFs: RF3 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 ws_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF3 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 ws_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF2 ---------------------------PhysicalProject -----------------------------filter(ws_bill_addr_sk IS NULL) -------------------------------PhysicalOlapScan[web_sales] -----------------PhysicalDistribute[DistributionSpecExecutionAny] 
-------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 cs_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF5 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF4 ---------------------------PhysicalProject -----------------------------filter(cs_warehouse_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 ss_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF0 +----------------------------PhysicalProject +------------------------------filter(ss_hdemo_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 ws_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF1 +----------------------------PhysicalProject +------------------------------filter(ws_bill_addr_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 cs_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF2 +----------------------------PhysicalProject +------------------------------filter(cs_warehouse_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query11.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query11.out index bcfe7ba3d74e79..8abb7de87e97f9 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query11.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query14.out 
b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query14.out index 8a1467be7a4a58..5aad6142d951f9 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -97,16 +87,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[ss_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] 
hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[ss_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF12 i_item_sk->[ss_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF11 ss_item_sk->[ss_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF10 RF11 RF12 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -120,16 +110,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[cs_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[cs_item_sk] +--------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF14 ss_item_sk->[cs_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[cs_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 
+------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF13 RF14 RF15 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF15 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -143,16 +133,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF20 i_item_sk->[ss_item_sk,ws_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF19 ss_item_sk->[ws_item_sk] +--------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF18 i_item_sk->[ss_item_sk,ws_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF17 ss_item_sk->[ws_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 RF18 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF20 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF18 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query4.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query4.out index d99f0294700040..709da33d851bff 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query4.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN 
shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query71.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query71.out index b78dcd1d31e61d..3010f0b574e03b 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query71.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query71.out @@ -9,37 +9,28 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] 
+----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((item.i_manager_id = 1)) +--------------------------------PhysicalOlapScan[item] ------------------------PhysicalProject ---------------------------filter((item.i_manager_id = 1)) -----------------------------PhysicalOlapScan[item] +--------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) +----------------------------PhysicalOlapScan[date_dim] --------------------PhysicalProject ----------------------filter(t_meal_time IN ('breakfast', 'dinner')) ------------------------PhysicalOlapScan[time_dim] diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query74.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query74.out index 0f159a647c03de..421b74396da876 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query74.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query76.out b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query76.out index 0e72c30a539c8c..668c3625c56841 100644 --- a/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query76.out +++ b/regression-test/data/nereids_tpcds_shape_sf100_p0/shape/query76.out @@ -7,41 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 ss_sold_date_sk->[d_date_sk] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = 
date_dim.d_date_sk)) otherCondition=() build RFs:RF1 ss_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF1 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 ss_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF0 ---------------------------PhysicalProject -----------------------------filter(ss_hdemo_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------PhysicalOlapScan[date_dim] apply RFs: RF3 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 ws_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF3 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 ws_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF2 ---------------------------PhysicalProject -----------------------------filter(ws_bill_addr_sk IS NULL) -------------------------------PhysicalOlapScan[web_sales] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 cs_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF5 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF4 ---------------------------PhysicalProject -----------------------------filter(cs_warehouse_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 ss_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF0 +----------------------------PhysicalProject +------------------------------filter(ss_hdemo_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 ws_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF1 +----------------------------PhysicalProject 
+------------------------------filter(ws_bill_addr_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 cs_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF2 +----------------------------PhysicalProject +------------------------------filter(cs_warehouse_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] diff --git a/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query14.out b/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query14.out index 299549d2b2ce02..9d5c47615cb77c 100644 --- a/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query14.out +++ b/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply 
RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -97,13 +87,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[ss_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[ss_item_sk,ss_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF12 ss_item_sk->[ss_item_sk] +------------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF11 i_item_sk->[ss_item_sk,ss_item_sk] +--------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF10 ss_item_sk->[ss_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF13 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF10 RF11 RF12 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF11 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject @@ -120,13 +110,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[cs_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF16 i_item_sk->[cs_item_sk,ss_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF15 ss_item_sk->[cs_item_sk] 
+------------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[cs_item_sk,ss_item_sk] +--------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[cs_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF16 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF13 RF14 RF15 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject @@ -143,13 +133,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF20 d_date_sk->[ws_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF19 i_item_sk->[ss_item_sk,ws_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF18 ss_item_sk->[ws_item_sk] +------------------------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[ss_item_sk,ws_item_sk] +--------------------------------------------hashJoin[LEFT_SEMI_JOIN shuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[ws_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF19 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 RF18 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject diff --git a/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query71.out b/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query71.out index b2717237b73c8b..b76b7d6566e595 100644 --- a/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query71.out +++ 
b/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query71.out @@ -9,34 +9,25 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecHash] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2000)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2000)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) 
otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2000)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2000)) +--------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalProject --------------------------filter((item.i_manager_id = 1)) ----------------------------PhysicalOlapScan[item] diff --git a/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query76.out b/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query76.out index d929aca4ca8137..47d1baebaf8c15 100644 --- a/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query76.out +++ b/regression-test/data/nereids_tpcds_shape_sf10t_orc/shape/query76.out @@ -7,41 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[ss_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] ---------------------------PhysicalProject -----------------------------filter(ss_customer_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] +----------------------------PhysicalProject +------------------------------filter(ss_customer_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 i_item_sk->[ws_item_sk] +----------------------------PhysicalProject +------------------------------filter(ws_ship_addr_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] apply RFs: RF1 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] 
+----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 i_item_sk->[cs_item_sk] +----------------------------PhysicalProject +------------------------------filter(cs_ship_mode_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF2 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[ws_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 i_item_sk->[ws_item_sk] ---------------------------PhysicalProject -----------------------------filter(ws_ship_addr_sk IS NULL) -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF2 RF3 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF4 i_item_sk->[cs_item_sk] ---------------------------PhysicalProject -----------------------------filter(cs_ship_mode_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF4 RF5 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] +--------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query11.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query11.out index a30f38dbe4c49a..8cab83d94f65ac 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query11.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query14.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query14.out index 1c17f5a78a9ca3..10192bf86cb782 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query14.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] 
----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -97,12 +87,12 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[ss_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() 
--------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF14 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 ----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] @@ -120,13 +110,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[cs_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() --------------------------------------------PhysicalProject ----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF17 +--------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 ------------------------------------------------PhysicalProject --------------------------------------------------PhysicalOlapScan[item] --------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) @@ -144,13 +134,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF20 d_date_sk->[ws_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ----------------------------------------PhysicalProject ------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() --------------------------------------------PhysicalProject ----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF20 +--------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 
------------------------------------------------PhysicalProject --------------------------------------------------PhysicalOlapScan[item] --------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query4.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query4.out index 19e9098ee4555e..a083e5a72ef86a 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query4.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query71.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query71.out index 4af8cfcf0e3ea9..7ae1c5b71ddaf1 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query71.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query71.out @@ -9,34 +9,25 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecHash] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] 
---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) +--------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalProject --------------------------filter((item.i_manager_id = 1)) ----------------------------PhysicalOlapScan[item] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query74.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query74.out index d8a82ca998ac09..6915274e1a1301 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query74.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query76.out b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query76.out index 1bbcdaee7491b7..2f21640b079929 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query76.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/noStatsRfPrune/query76.out @@ -7,41 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() 
------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() ---------------------------PhysicalProject -----------------------------filter(ss_hdemo_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() +----------------------------PhysicalProject +------------------------------filter(ss_hdemo_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() +----------------------------PhysicalProject +------------------------------filter(ws_bill_addr_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() +----------------------------PhysicalProject +------------------------------filter(cs_warehouse_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() ---------------------------PhysicalProject -----------------------------filter(ws_bill_addr_sk IS NULL) -------------------------------PhysicalOlapScan[web_sales] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() ---------------------------PhysicalProject -----------------------------filter(cs_warehouse_sk IS NULL) 
-------------------------------PhysicalOlapScan[catalog_sales] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] +--------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query11.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query11.out index eaf1cde7b0a304..e963fc6a8caa7a 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query11.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query14.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query14.out index e4f277daf67cac..966f8701126465 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query14.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) 
otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -97,13 +87,13 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF14 d_date_sk->[ss_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[ss_item_sk,ss_item_sk] ---------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF12 ss_item_sk->[ss_item_sk] +------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF11 i_item_sk->[ss_item_sk,ss_item_sk] +--------------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF10 ss_item_sk->[ss_item_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 -----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF13 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF10 RF11 RF12 +----------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF11 --------------------------------------------PhysicalProject ----------------------------------------------PhysicalOlapScan[item] ----------------------------------------PhysicalProject @@ -120,15 +110,15 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF17 d_date_sk->[cs_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) 
otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[cs_item_sk,i_item_sk] +------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF14 ss_item_sk->[cs_item_sk,i_item_sk] --------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk] +----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF13 i_item_sk->[cs_item_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 +--------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF13 RF14 RF15 ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[item] apply RFs: RF16 +--------------------------------------------------PhysicalOlapScan[item] apply RFs: RF14 --------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------------------------PhysicalProject ------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) @@ -144,15 +134,15 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF20 d_date_sk->[ws_sold_date_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF19 ss_item_sk->[i_item_sk,ws_item_sk] +------------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF17 ss_item_sk->[i_item_sk,ws_item_sk] --------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF18 i_item_sk->[ws_item_sk] +----------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF16 i_item_sk->[ws_item_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 
+--------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 RF18 ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[item] apply RFs: RF19 +--------------------------------------------------PhysicalOlapScan[item] apply RFs: RF17 --------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------------------------PhysicalProject ------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query4.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query4.out index 546119842b58bd..9a590246f64a4a 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query4.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query71.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query71.out index 4af8cfcf0e3ea9..7ae1c5b71ddaf1 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query71.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query71.out @@ -9,34 +9,25 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecHash] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() 
build RFs:RF1 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) +--------------------------------PhysicalOlapScan[date_dim] ------------------------PhysicalProject --------------------------filter((item.i_manager_id = 1)) ----------------------------PhysicalOlapScan[item] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query74.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query74.out index 64a56e4e850db7..c32a9187e34e92 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query74.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN broadcast] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git 
a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query76.out b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query76.out index 768d8a9e8dcf7f..8f739a1d12b35a 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query76.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/no_stats_shape/query76.out @@ -7,41 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[ss_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] ---------------------------PhysicalProject -----------------------------filter(ss_hdemo_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] +----------------------------PhysicalProject +------------------------------filter(ss_hdemo_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 i_item_sk->[ws_item_sk] +----------------------------PhysicalProject +------------------------------filter(ws_bill_addr_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] apply RFs: RF1 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 i_item_sk->[cs_item_sk] +----------------------------PhysicalProject +------------------------------filter(cs_warehouse_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF2 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = 
date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[ws_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 i_item_sk->[ws_item_sk] ---------------------------PhysicalProject -----------------------------filter(ws_bill_addr_sk IS NULL) -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF2 RF3 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF4 i_item_sk->[cs_item_sk] ---------------------------PhysicalProject -----------------------------filter(cs_warehouse_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF4 RF5 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] +--------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query11.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query11.out index b699aa67e934a0..e7ae73f8e00980 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query11.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query14.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query14.out index 361e56fa135dab..2a29746e37ef07 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query14.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject 
------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -100,9 +90,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() ----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF10 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] @@ -124,9 +114,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() 
--------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[cs_sold_date_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 +--------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF13 ------------------------------------------------PhysicalProject --------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) ----------------------------------------------------PhysicalOlapScan[date_dim] @@ -148,9 +138,9 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) ----------------------------------------PhysicalProject ------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() --------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] +----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 +--------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 ------------------------------------------------PhysicalProject --------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) ----------------------------------------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query4.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query4.out index 43138dc7c62651..980ceef87cedc2 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query4.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query71.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query71.out index b78dcd1d31e61d..3010f0b574e03b 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query71.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query71.out @@ -9,37 +9,28 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] 
hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject 
-----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((item.i_manager_id = 1)) +--------------------------------PhysicalOlapScan[item] ------------------------PhysicalProject ---------------------------filter((item.i_manager_id = 1)) -----------------------------PhysicalOlapScan[item] +--------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) +----------------------------PhysicalOlapScan[date_dim] --------------------PhysicalProject ----------------------filter(t_meal_time IN ('breakfast', 'dinner')) ------------------------PhysicalOlapScan[time_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query74.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query74.out index f300a896a4d563..c3687dadd21872 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query74.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query76.out b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query76.out index 0e72c30a539c8c..668c3625c56841 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query76.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/rf_prune/query76.out @@ -7,41 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 ss_sold_date_sk->[d_date_sk] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 ss_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF1 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 ss_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF0 ---------------------------PhysicalProject -----------------------------filter(ss_hdemo_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------PhysicalOlapScan[date_dim] apply RFs: RF3 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 ws_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF3 
-----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 ws_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF2 ---------------------------PhysicalProject -----------------------------filter(ws_bill_addr_sk IS NULL) -------------------------------PhysicalOlapScan[web_sales] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 cs_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF5 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF4 ---------------------------PhysicalProject -----------------------------filter(cs_warehouse_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 ss_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF0 +----------------------------PhysicalProject +------------------------------filter(ss_hdemo_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 ws_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF1 +----------------------------PhysicalProject +------------------------------filter(ws_bill_addr_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 cs_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF2 +----------------------------PhysicalProject +------------------------------filter(cs_warehouse_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query11.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query11.out index bcfe7ba3d74e79..8abb7de87e97f9 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query11.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) 
----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query14.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query14.out index d54d10fc56004f..196a98b5a2f51d 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query14.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject +------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2002) and (date_dim.d_year >= 2000)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] 
--------PhysicalDistribute[DistributionSpecGather] @@ -97,16 +87,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[ss_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[ss_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF12 i_item_sk->[ss_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF11 ss_item_sk->[ss_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF10 RF11 RF12 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -120,18 +110,18 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF17 ss_item_sk->[cs_item_sk,i_item_sk] +--------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF15 ss_item_sk->[cs_item_sk,i_item_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF16 i_item_sk->[cs_item_sk] +------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[cs_item_sk] 
--------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[cs_sold_date_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 +--------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF13 RF14 RF15 ------------------------------------------------PhysicalProject --------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) ----------------------------------------------------PhysicalOlapScan[date_dim] --------------------------------------------PhysicalProject -----------------------------------------------PhysicalOlapScan[item] apply RFs: RF17 +----------------------------------------------PhysicalOlapScan[item] apply RFs: RF15 ----------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------------PhysicalProject ------------------------------PhysicalAssertNumRows @@ -144,18 +134,18 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF20 ss_item_sk->[i_item_sk,ws_item_sk] +--------------------------------------hashJoin[LEFT_SEMI_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF18 ss_item_sk->[i_item_sk,ws_item_sk] ----------------------------------------PhysicalProject -------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF19 i_item_sk->[ws_item_sk] +------------------------------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[ws_item_sk] --------------------------------------------PhysicalProject -----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] +----------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] ------------------------------------------------PhysicalProject ---------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 +--------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 RF18 ------------------------------------------------PhysicalProject --------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2002)) ----------------------------------------------------PhysicalOlapScan[date_dim] 
--------------------------------------------PhysicalProject -----------------------------------------------PhysicalOlapScan[item] apply RFs: RF20 +----------------------------------------------PhysicalOlapScan[item] apply RFs: RF18 ----------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) ----------------------------PhysicalProject ------------------------------PhysicalAssertNumRows diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query4.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query4.out index d99f0294700040..709da33d851bff 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query4.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query71.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query71.out index b78dcd1d31e61d..3010f0b574e03b 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query71.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query71.out @@ -9,37 +9,28 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject -------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) 
otherCondition=() build RFs:RF0 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((item.i_manager_id = 1)) +--------------------------------PhysicalOlapScan[item] ------------------------PhysicalProject ---------------------------filter((item.i_manager_id = 1)) -----------------------------PhysicalOlapScan[item] +--------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 1998)) +----------------------------PhysicalOlapScan[date_dim] --------------------PhysicalProject ----------------------filter(t_meal_time IN ('breakfast', 'dinner')) ------------------------PhysicalOlapScan[time_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query74.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query74.out index 0f159a647c03de..421b74396da876 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query74.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git 
a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query76.out b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query76.out index 0e72c30a539c8c..668c3625c56841 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query76.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf100/shape/query76.out @@ -7,41 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 ss_sold_date_sk->[d_date_sk] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 ss_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF1 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 ss_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF0 ---------------------------PhysicalProject -----------------------------filter(ss_hdemo_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------PhysicalOlapScan[date_dim] apply RFs: RF3 ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 ws_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF3 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 ws_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF2 ---------------------------PhysicalProject -----------------------------filter(ws_bill_addr_sk IS NULL) -------------------------------PhysicalOlapScan[web_sales] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 cs_sold_date_sk->[d_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] apply RFs: RF5 -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF4 cs_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF4 ---------------------------PhysicalProject -----------------------------filter(cs_warehouse_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject 
+--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 ss_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF0 +----------------------------PhysicalProject +------------------------------filter(ss_hdemo_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 ws_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF1 +----------------------------PhysicalProject +------------------------------filter(ws_bill_addr_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 cs_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF2 +----------------------------PhysicalProject +------------------------------filter(cs_warehouse_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query11.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query11.out index 5593df9194937d..35504b7f44d24e 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query11.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query11.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query14.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query14.out index 813924b9b99e18..61f29b11211346 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query14.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query14.out @@ -55,31 +55,21 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------PhysicalDistribute[DistributionSpecGather] ----------hashAgg[LOCAL] ------------PhysicalProject ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[ss_sold_date_sk] +--------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF9 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +----------------PhysicalProject 
+------------------PhysicalUnion +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject ------------------------PhysicalOlapScan[store_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF10 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) ---------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF11 d_date_sk->[ws_sold_date_sk] +------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF9 +--------------------PhysicalDistribute[DistributionSpecExecutionAny] ----------------------PhysicalProject -------------------------PhysicalOlapScan[web_sales] apply RFs: RF11 -----------------------PhysicalProject -------------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) ---------------------------PhysicalOlapScan[date_dim] +------------------------PhysicalOlapScan[web_sales] apply RFs: RF9 +----------------PhysicalProject +------------------filter((date_dim.d_year <= 2001) and (date_dim.d_year >= 1999)) +--------------------PhysicalOlapScan[date_dim] ----PhysicalResultSink ------PhysicalTopN[MERGE_SORT] --------PhysicalDistribute[DistributionSpecGather] @@ -97,16 +87,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF14 i_item_sk->[ss_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF13 ss_item_sk->[ss_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF12 i_item_sk->[ss_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF11 ss_item_sk->[ss_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF12 d_date_sk->[ss_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF10 
d_date_sk->[ss_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF12 RF13 RF14 +------------------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF10 RF11 RF12 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF14 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF12 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -120,16 +110,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject ---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF17 i_item_sk->[cs_item_sk,ss_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF16 ss_item_sk->[cs_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF15 i_item_sk->[cs_item_sk,ss_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF14 ss_item_sk->[cs_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF15 d_date_sk->[cs_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF13 d_date_sk->[cs_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF15 RF16 RF17 +------------------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF13 RF14 RF15 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF17 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF15 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject @@ -143,16 +133,16 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --------------------------------PhysicalDistribute[DistributionSpecHash] ----------------------------------hashAgg[LOCAL] ------------------------------------PhysicalProject 
---------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF20 i_item_sk->[ss_item_sk,ws_item_sk] -----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF19 ss_item_sk->[ws_item_sk] +--------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF18 i_item_sk->[ss_item_sk,ws_item_sk] +----------------------------------------hashJoin[LEFT_SEMI_JOIN broadcast] hashCondition=((web_sales.ws_item_sk = cross_items.ss_item_sk)) otherCondition=() build RFs:RF17 ss_item_sk->[ws_item_sk] ------------------------------------------PhysicalProject ---------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF18 d_date_sk->[ws_sold_date_sk] +--------------------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF16 d_date_sk->[ws_sold_date_sk] ----------------------------------------------PhysicalProject -------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF18 RF19 RF20 +------------------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF16 RF17 RF18 ----------------------------------------------PhysicalProject ------------------------------------------------filter((date_dim.d_moy = 11) and (date_dim.d_year = 2001)) --------------------------------------------------PhysicalOlapScan[date_dim] -------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF20 +------------------------------------------PhysicalCteConsumer ( cteId=CTEId#0 ) apply RFs: RF18 ----------------------------------------PhysicalProject ------------------------------------------PhysicalOlapScan[item] ----------------------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query4.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query4.out index d99f0294700040..709da33d851bff 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query4.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query4.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF3 c_customer_sk->[cs_bill_customer_sk,ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query71.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query71.out index 068d0b83b15427..0d26e1f81ccb94 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query71.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query71.out @@ -9,37 +9,28 @@ PhysicalResultSink ------------PhysicalDistribute[DistributionSpecHash] --------------hashAgg[LOCAL] ----------------PhysicalProject 
-------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF4 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] +------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.time_sk = time_dim.t_time_sk)) otherCondition=() build RFs:RF2 t_time_sk->[cs_sold_time_sk,ss_sold_time_sk,ws_sold_time_sk] --------------------PhysicalProject -----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF3 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] -------------------------PhysicalUnion ---------------------------PhysicalDistribute[DistributionSpecHash] -----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = web_sales.ws_sold_date_sk)) otherCondition=() build RFs:RF0 d_date_sk->[ws_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +----------------------hashJoin[INNER_JOIN broadcast] hashCondition=((ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((tmp.sold_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[cs_item_sk,ss_item_sk,ws_item_sk] ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = catalog_sales.cs_sold_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[cs_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF1 RF3 RF4 ---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) -------------------------------------PhysicalOlapScan[date_dim] ---------------------------PhysicalDistribute[DistributionSpecHash] +------------------------------PhysicalUnion +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[web_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF0 RF1 RF2 +--------------------------------PhysicalDistribute[DistributionSpecExecutionAny] +----------------------------------PhysicalProject +------------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 RF2 ----------------------------PhysicalProject -------------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((date_dim.d_date_sk = store_sales.ss_sold_date_sk)) otherCondition=() build RFs:RF2 d_date_sk->[ss_sold_date_sk] ---------------------------------PhysicalProject -----------------------------------PhysicalOlapScan[store_sales] apply RFs: RF2 RF3 RF4 
---------------------------------PhysicalProject -----------------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) -------------------------------------PhysicalOlapScan[date_dim] +------------------------------filter((item.i_manager_id = 1)) +--------------------------------PhysicalOlapScan[item] ------------------------PhysicalProject ---------------------------filter((item.i_manager_id = 1)) -----------------------------PhysicalOlapScan[item] +--------------------------filter((date_dim.d_moy = 12) and (date_dim.d_year = 2002)) +----------------------------PhysicalOlapScan[date_dim] --------------------PhysicalProject ----------------------filter(t_meal_time IN ('breakfast', 'dinner')) ------------------------PhysicalOlapScan[time_dim] diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query74.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query74.out index 8b171914ebd371..30e95b3fd06a84 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query74.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query74.out @@ -3,7 +3,7 @@ PhysicalCteAnchor ( cteId=CTEId#0 ) --PhysicalCteProducer ( cteId=CTEId#0 ) ----PhysicalProject -------hashJoin[INNER_JOIN shuffle] hashCondition=((PULL_UP_UNIFIED_OUTPUT_ALIAS = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] +------hashJoin[INNER_JOIN shuffle] hashCondition=((ss_customer_sk = customer.c_customer_sk)) otherCondition=() build RFs:RF2 c_customer_sk->[ss_customer_sk,ws_bill_customer_sk] --------PhysicalProject ----------PhysicalUnion ------------PhysicalProject diff --git a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query76.out b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query76.out index c81d5d95feb264..473b9fded85715 100644 --- a/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query76.out +++ b/regression-test/data/new_shapes_p0/tpcds_sf1000/shape/query76.out @@ -7,41 +7,34 @@ PhysicalResultSink --------hashAgg[GLOBAL] ----------PhysicalDistribute[DistributionSpecHash] ------------hashAgg[LOCAL] ---------------PhysicalUnion -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------PhysicalProject +----------------hashJoin[INNER_JOIN broadcast] hashCondition=((ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[cs_sold_date_sk,ss_sold_date_sk,ws_sold_date_sk] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF1 d_date_sk->[ss_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] ---------------------------PhysicalProject -----------------------------filter(ss_customer_sk IS NULL) -------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF1 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] +--------------------PhysicalUnion +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] 
hashCondition=((store_sales.ss_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF0 i_item_sk->[ss_item_sk] +----------------------------PhysicalProject +------------------------------filter(ss_customer_sk IS NULL) +--------------------------------PhysicalOlapScan[store_sales] apply RFs: RF0 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF1 ws_item_sk->[i_item_sk] +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] apply RFs: RF1 +----------------------------PhysicalProject +------------------------------filter(ws_promo_sk IS NULL) +--------------------------------PhysicalOlapScan[web_sales] apply RFs: RF3 +----------------------PhysicalDistribute[DistributionSpecExecutionAny] +------------------------PhysicalProject +--------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 i_item_sk->[cs_item_sk] +----------------------------PhysicalProject +------------------------------filter(cs_bill_customer_sk IS NULL) +--------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF2 RF3 +----------------------------PhysicalProject +------------------------------PhysicalOlapScan[item] ------------------PhysicalProject ---------------------hashJoin[INNER_JOIN shuffleBucket] hashCondition=((web_sales.ws_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF3 d_date_sk->[ws_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN bucketShuffle] hashCondition=((web_sales.ws_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF2 ws_item_sk->[i_item_sk] ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] apply RFs: RF2 ---------------------------PhysicalProject -----------------------------filter(ws_promo_sk IS NULL) -------------------------------PhysicalOlapScan[web_sales] apply RFs: RF3 -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] -----------------PhysicalDistribute[DistributionSpecExecutionAny] -------------------PhysicalProject ---------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_sold_date_sk = date_dim.d_date_sk)) otherCondition=() build RFs:RF5 d_date_sk->[cs_sold_date_sk] -----------------------PhysicalProject -------------------------hashJoin[INNER_JOIN broadcast] hashCondition=((catalog_sales.cs_item_sk = item.i_item_sk)) otherCondition=() build RFs:RF4 i_item_sk->[cs_item_sk] ---------------------------PhysicalProject -----------------------------filter(cs_bill_customer_sk IS NULL) -------------------------------PhysicalOlapScan[catalog_sales] apply RFs: RF4 RF5 ---------------------------PhysicalProject -----------------------------PhysicalOlapScan[item] -----------------------PhysicalProject -------------------------PhysicalOlapScan[date_dim] +--------------------PhysicalOlapScan[date_dim] diff --git a/regression-test/data/query_p0/sql_functions/string_functions/test_url_functions.out b/regression-test/data/query_p0/sql_functions/string_functions/test_url_functions.out new file mode 100644 index 00000000000000..ce1ef7179752d5 --- /dev/null +++ 
b/regression-test/data/query_p0/sql_functions/string_functions/test_url_functions.out @@ -0,0 +1,121 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !empty_nullable1 -- + +-- !empty_nullable2 -- + +-- !empty_nullable3 -- + +-- !empty_not_nullable1 -- + +-- !empty_not_nullable2 -- + +-- !empty_not_nullable3 -- + +-- !empty_null1 -- +\N + +-- !empty_null2 -- +\N + +-- !empty_null3 -- +\N + +-- !empty_const1 -- +com + +-- !empty_const2 -- +baidu + +-- !empty_const3 -- +baidu.com + +-- !empty_const4 -- +cn + +-- !empty_const5 -- +google + +-- !empty_const6 -- +google.com.cn + +-- !empty_const7 -- + + +-- !empty_const8 -- + + +-- !empty_const9 -- + + +-- !nullable1 -- +1 www.baidu.com com +10 https://news.clickhouse.com.tr/ tr +2 www.google.com.cn cn +3 invalid url +4 +5 +6 \N \N +7 xxxxxxxx +8 http://www.example.com/a/b/c?a=b com +9 https://news.clickhouse.com/ com + +-- !nullable2 -- +1 www.baidu.com baidu +10 https://news.clickhouse.com.tr/ clickhouse +2 www.google.com.cn google +3 invalid url +4 +5 +6 \N \N +7 xxxxxxxx +8 http://www.example.com/a/b/c?a=b example +9 https://news.clickhouse.com/ clickhouse + +-- !nullable3 -- +1 www.baidu.com baidu.com +10 https://news.clickhouse.com.tr/ clickhouse.com.tr +2 www.google.com.cn google.com.cn +3 invalid url +4 +5 +6 \N \N +7 xxxxxxxx +8 http://www.example.com/a/b/c?a=b example.com +9 https://news.clickhouse.com/ clickhouse.com + +-- !not_nullable1 -- +1 www.baidu.com com +10 https://news.clickhouse.com.tr/ tr +2 www.google.com.cn cn +3 invalid url +4 +5 +6 +7 xxxxxxxx +8 http://www.example.com/a/b/c?a=b com +9 https://news.clickhouse.com/ com + +-- !not_nullable2 -- +1 www.baidu.com baidu +10 https://news.clickhouse.com.tr/ clickhouse +2 www.google.com.cn google +3 invalid url +4 +5 +6 +7 xxxxxxxx +8 http://www.example.com/a/b/c?a=b example +9 https://news.clickhouse.com/ clickhouse + +-- !not_nullable3 -- +1 www.baidu.com baidu.com +10 https://news.clickhouse.com.tr/ clickhouse.com.tr +2 www.google.com.cn google.com.cn +3 invalid url +4 +5 +6 +7 xxxxxxxx +8 http://www.example.com/a/b/c?a=b example.com +9 https://news.clickhouse.com/ clickhouse.com + diff --git a/regression-test/data/query_p0/sql_functions/window_functions/test_qualify_query.out b/regression-test/data/query_p0/sql_functions/window_functions/test_qualify_query.out new file mode 100644 index 00000000000000..29dac1331067d4 --- /dev/null +++ b/regression-test/data/query_p0/sql_functions/window_functions/test_qualify_query.out @@ -0,0 +1,122 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !select_1 -- +2001 Finland +2001 Finland +2001 India +2001 India +2001 India +2001 USA +2002 Finland +2002 USA +2002 USA +2002 USA +2002 USA +2002 USA + +-- !select_4 -- +2000 USA 1502 1 + +-- !select_5 -- +2000 Finland Computer 1501 1 +2000 India Computer 1201 1 +2000 USA Computer 1502 1 + +-- !select_6 -- +2000 Finland 1501 1 +2000 India 1201 1 +2000 USA 1502 1 +2001 USA 1503 1 + +-- !select_7 -- +2000 India 1201 + +-- !select_8 -- +2000 India 1201 + +-- !select_9 -- +Finland Phone 11 1 + +-- !select_10 -- +Finland Phone 11 + +-- !select_12 -- +2001 India 1201 1 +2001 Finland 1501 1 +2001 usa 1502 1 +2002 usa 1503 1 + +-- !select_13 -- +2001 India 1201 +2001 Finland 1501 +2001 usa 1502 +2002 usa 1503 + +-- !select_14 -- +2000 USA Computer 1502 +2001 USA Computer 1503 + +-- !select_15 -- +2000 USA Computer 1502 1 +2001 USA Computer 1503 1 + +-- !select_16 -- + +-- !select_17 -- + +-- !select_18 -- + +-- !select_19 -- + +-- !select_20 -- + +-- !select_21 -- +2001 Finland 10 1 +2001 USA 50 2 +2000 India 75 3 + +-- !select_22 -- +2001 Finland 10 1 + +-- !select_23 -- + +-- !select_24 -- +2001 Finland 1601 + +-- !select_25 -- +2000 Finland 1501 +2000 India 1201 +2000 USA 1502 +2001 Finland 10 +2001 USA 1503 + +-- !select_26 -- +2002 Finland + +-- !select_27 -- +2002 Finland 2 + +-- !select_28 -- +2002 USA 3006 + +-- !select_29 -- +2002 Finland + +-- !select_30 -- +2001 Finland 1 + +-- !select_31 -- +2001 Finland + +-- !select_32 -- + +-- !select_33 -- +2001 USA 3 + +-- !select_34 -- +2000 India 1 + +-- !select_35 -- +2001 Finland + +-- !select_36 -- +2001 Finland 6 diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy index 4aab3f774d76fe..6dda050fbf34d6 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy @@ -17,16 +17,20 @@ package org.apache.doris.regression.suite -import org.awaitility.Awaitility import static java.util.concurrent.TimeUnit.SECONDS -import groovy.json.JsonOutput + +import com.google.common.base.Strings +import com.google.common.collect.ImmutableList import com.google.common.collect.Maps import com.google.common.util.concurrent.Futures import com.google.common.util.concurrent.ListenableFuture import com.google.common.util.concurrent.MoreExecutors import com.google.gson.Gson +import groovy.json.JsonOutput import groovy.json.JsonSlurper -import com.google.common.collect.ImmutableList +import groovy.util.logging.Slf4j + +import org.awaitility.Awaitility import org.apache.commons.lang3.ObjectUtils import org.apache.doris.regression.Config import org.apache.doris.regression.RegressionTest @@ -53,7 +57,6 @@ import org.jetbrains.annotations.NotNull import org.junit.jupiter.api.Assertions import org.slf4j.Logger import org.slf4j.LoggerFactory -import groovy.util.logging.Slf4j import java.sql.Connection import java.io.File @@ -968,6 +971,31 @@ class Suite implements GroovyInterceptable { Assert.assertEquals(0, code) } + String cmd(String cmd, int timeoutSecond = 0) { + var processBuilder = new ProcessBuilder() + processBuilder.command("/bin/bash", "-c", cmd) + var process = processBuilder.start() + def outBuf = new StringBuilder() + def errBuf = new StringBuilder() + process.consumeProcessOutput(outBuf, errBuf) + var reader = new BufferedReader(new 
InputStreamReader(process.getInputStream())); + String line + while ((line = reader.readLine()) != null) { + System.out.println(line) + } + // wait until cmd finish + if (timeoutSecond > 0) { + process.waitForOrKill(timeoutSecond * 1000) + } else { + process.waitFor() + } + if (process.exitValue() != 0) { + println outBuf + throw new RuntimeException(errBuf.toString()) + } + return outBuf.toString() + } + void sshExec(String username, String host, String cmd, boolean alert=true) { String command = "ssh ${username}@${host} '${cmd}'" def cmds = ["/bin/bash", "-c", command] @@ -1376,6 +1404,28 @@ class Suite implements GroovyInterceptable { sql "analyze table ${result.last().get(6)}.${mvName} with sync;" } + void waitingMVTaskFinishedByMvName(String dbName, String tableName) { + Thread.sleep(2000) + String showTasks = "SHOW ALTER TABLE MATERIALIZED VIEW from ${dbName} where TableName='${tableName}' ORDER BY CreateTime ASC" + String status = "NULL" + List> result + long startTime = System.currentTimeMillis() + long timeoutTimestamp = startTime + 5 * 60 * 1000 // 5 min + do { + result = sql(showTasks) + logger.info("result: " + result.toString()) + if (!result.isEmpty()) { + status = result.last().get(8) + } + logger.info("The state of ${showTasks} is ${status}") + Thread.sleep(1000); + } while (timeoutTimestamp > System.currentTimeMillis() && (status != 'FINISHED')) + if (status != "FINISHED") { + logger.info("status is not success") + } + Assert.assertEquals("FINISHED", status) + } + void waitingPartitionIsExpected(String tableName, String partitionName, boolean expectedStatus) { Thread.sleep(2000); String showPartitions = "show partitions from ${tableName}" @@ -1512,12 +1562,13 @@ class Suite implements GroovyInterceptable { } boolean enableStoragevault() { - boolean ret = false; - if (context.config.metaServiceHttpAddress == null || context.config.metaServiceHttpAddress.isEmpty() || - context.config.instanceId == null || context.config.instanceId.isEmpty() || - context.config.metaServiceToken == null || context.config.metaServiceToken.isEmpty()) { - return ret; + if (Strings.isNullOrEmpty(context.config.metaServiceHttpAddress) + || Strings.isNullOrEmpty(context.config.instanceId) + || Strings.isNullOrEmpty(context.config.metaServiceToken)) { + return false; } + + boolean ret = false; def getInstanceInfo = { check_func -> httpTest { endpoint context.config.metaServiceHttpAddress @@ -2387,6 +2438,27 @@ class Suite implements GroovyInterceptable { } } + def fix_tablet_stats = { table_id -> + def jsonOutput = new JsonOutput() + def map = [] + def js = jsonOutput.toJson(map) + log.info("fix tablet stat req: /MetaService/http/fix_tablet_stats?token=${token}&cloud_unique_id=${instance_id}&table_id=${table_id} ".toString()) + + def fix_tablet_stats_api = { request_body, check_func -> + httpTest { + endpoint context.config.metaServiceHttpAddress + uri "/MetaService/http/fix_tablet_stats?token=${token}&cloud_unique_id=${instance_id}&table_id=${table_id}" + body request_body + check check_func + } + } + + fix_tablet_stats_api.call(js) { + respCode, body -> + log.info("fix tablet stats resp: ${body} ${respCode}".toString()) + } + } + public void resetConnection() { context.resetConnection() } diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy index 159e622f454722..856b0e76956395 100644 --- 
a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy @@ -615,6 +615,11 @@ class SuiteCluster { } } + void addRWPermToAllFiles() { + def cmd = 'add-rw-perm ' + name + runCmd(cmd) + } + private void waitHbChanged() { // heart beat interval is 5s Thread.sleep(7000) diff --git a/regression-test/pipeline/cloud_p0/conf/be_custom.conf b/regression-test/pipeline/cloud_p0/conf/be_custom.conf index 377a02536c6d29..5fbf89fd75cc77 100644 --- a/regression-test/pipeline/cloud_p0/conf/be_custom.conf +++ b/regression-test/pipeline/cloud_p0/conf/be_custom.conf @@ -37,3 +37,4 @@ enable_new_tablet_do_compaction = true arrow_flight_sql_port = 8181 pipeline_task_leakage_detect_period_sec=1 crash_in_memory_tracker_inaccurate = true +enable_table_size_correctness_check=true diff --git a/regression-test/pipeline/cloud_p1/conf/be_custom.conf b/regression-test/pipeline/cloud_p1/conf/be_custom.conf index 4310441a0ed66b..0d3ae0c526d942 100644 --- a/regression-test/pipeline/cloud_p1/conf/be_custom.conf +++ b/regression-test/pipeline/cloud_p1/conf/be_custom.conf @@ -33,3 +33,4 @@ arrow_flight_sql_port = 8181 pipeline_task_leakage_detect_period_sec=1 crash_in_memory_tracker_inaccurate = true enable_new_tablet_do_compaction = true +enable_table_size_correctness_check=true diff --git a/regression-test/pipeline/external/conf/be.conf b/regression-test/pipeline/external/conf/be.conf index 51b04b353ef238..2f5f207ce7b006 100644 --- a/regression-test/pipeline/external/conf/be.conf +++ b/regression-test/pipeline/external/conf/be.conf @@ -30,7 +30,7 @@ JAVA_OPTS_FOR_JDK_17="-Xmx2048m -DlogPath=$DORIS_HOME/log/jni.log -Xlog:gc*:$DOR # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile # https://jemalloc.net/jemalloc.3.html -JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:false,lg_prof_interval:-1" +JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:true,prof_active:false,lg_prof_interval:-1" JEMALLOC_PROF_PRFIX="jemalloc_heap_profile_" # INFO, WARNING, ERROR, FATAL diff --git a/regression-test/pipeline/p0/conf/be.conf b/regression-test/pipeline/p0/conf/be.conf index c5c8104ecf1279..760f813ffebcdc 100644 --- a/regression-test/pipeline/p0/conf/be.conf +++ b/regression-test/pipeline/p0/conf/be.conf @@ -30,7 +30,7 @@ JAVA_OPTS_FOR_JDK_17="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xlog:gc*:$DOR # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile # https://jemalloc.net/jemalloc.3.html -JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:false,lg_prof_interval:-1" +JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:true,prof_active:false,lg_prof_interval:-1" JEMALLOC_PROF_PRFIX="jemalloc_heap_profile_" # INFO, WARNING, ERROR, FATAL @@ -71,3 +71,4 @@ be_proc_monitor_interval_ms = 30000 webserver_num_workers = 128 pipeline_task_leakage_detect_period_sec=1 crash_in_memory_tracker_inaccurate = true +enable_table_size_correctness_check=true diff --git a/regression-test/pipeline/p1/conf/be.conf 
b/regression-test/pipeline/p1/conf/be.conf index 01510e6422b975..1512bce7ac2388 100644 --- a/regression-test/pipeline/p1/conf/be.conf +++ b/regression-test/pipeline/p1/conf/be.conf @@ -30,7 +30,7 @@ JAVA_OPTS_FOR_JDK_17="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xlog:gc*:$DOR # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile # https://jemalloc.net/jemalloc.3.html -JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:false,lg_prof_interval:-1" +JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:5000,dirty_decay_ms:5000,oversize_threshold:0,prof:true,prof_active:false,lg_prof_interval:-1" JEMALLOC_PROF_PRFIX="jemalloc_heap_profile_" # INFO, WARNING, ERROR, FATAL @@ -63,3 +63,4 @@ enable_missing_rows_correctness_check=true enable_jvm_monitor = true pipeline_task_leakage_detect_period_sec=1 crash_in_memory_tracker_inaccurate = true +enable_table_size_correctness_check=true diff --git a/regression-test/suites/auth_call/data/multi_table_csv.csv b/regression-test/suites/auth_call/data/multi_table_csv.csv new file mode 100644 index 00000000000000..bf47fc91d70a42 --- /dev/null +++ b/regression-test/suites/auth_call/data/multi_table_csv.csv @@ -0,0 +1,2 @@ +test_dml_multi_routine_load_auth_tb1|49|2023-08-08|FALSE|\N|16275|-2144851675|-2303421957908954634|-46526938720058765|-13141.142578|-686632233.230200|229942298.0|-152553823.0|2022-09-01 00:16:01|2023-03-25|2022-09-07 14:59:03|s||yvuILR2iNxfe8RRml|{"student": true, "name": "Alice", "grade": 9, "subjects": ["math", "science", "history"]}|true|1|2|3|4|5|6.0|7.0|888888888|999999999|2023-08-24|2023-08-24 12:00:00|2023-08-24|2023-08-24 12:00:00|我能吞下玻璃而不伤身体|我能吞下玻璃而不伤身体|我能吞下玻璃而不伤身体|{} +test_dml_multi_routine_load_auth_tb2|49|2023-08-08|FALSE|\N|16275|-2144851675|-2303421957908954634|-46526938720058765|-13141.142578|-686632233.230200|229942298.0|-152553823.0|2022-09-01 00:16:01|2023-03-25|2022-09-07 14:59:03|s||yvuILR2iNxfe8RRml|{"student": true, "name": "Alice", "grade": 9, "subjects": ["math", "science", "history"]}|true|1|2|3|4|5|6.0|7.0|888888888|999999999|2023-08-24|2023-08-24 12:00:00|2023-08-24|2023-08-24 12:00:00|我能吞下玻璃而不伤身体|我能吞下玻璃而不伤身体|我能吞下玻璃而不伤身体|{} diff --git a/regression-test/suites/auth_call/ddl/test_dml_multi_routine_load_auth_tb1.sql b/regression-test/suites/auth_call/ddl/test_dml_multi_routine_load_auth_tb1.sql new file mode 100644 index 00000000000000..91721ea0ccc525 --- /dev/null +++ b/regression-test/suites/auth_call/ddl/test_dml_multi_routine_load_auth_tb1.sql @@ -0,0 +1,46 @@ +CREATE TABLE test_dml_multi_routine_load_auth_tb1 +( + k00 INT NOT NULL, + k01 DATE NOT NULL, + k02 BOOLEAN NULL, + k03 TINYINT NULL, + k04 SMALLINT NULL, + k05 INT NULL, + k06 BIGINT NULL, + k07 LARGEINT NULL, + k08 FLOAT NULL, + k09 DOUBLE NULL, + k10 DECIMAL(9,1) NULL, + k11 DECIMALV3(9,1) NULL, + k12 DATETIME NULL, + k13 DATEV2 NULL, + k14 DATETIMEV2 NULL, + k15 CHAR NULL, + k16 VARCHAR NULL, + k17 STRING NULL, + k18 JSON NULL, + + INDEX idx_inverted_k104 (`k05`) USING INVERTED, + INDEX idx_inverted_k110 (`k11`) USING INVERTED, + INDEX idx_inverted_k113 (`k13`) USING INVERTED, + INDEX idx_inverted_k114 (`k14`) USING INVERTED, + INDEX idx_inverted_k117 (`k17`) USING INVERTED PROPERTIES("parser" = "english"), + INDEX idx_ngrambf_k115 (`k15`) USING NGRAM_BF PROPERTIES("gram_size"="3", "bf_size"="256"), + INDEX idx_ngrambf_k116 (`k16`) USING NGRAM_BF 
PROPERTIES("gram_size"="3", "bf_size"="256"), + INDEX idx_ngrambf_k117 (`k17`) USING NGRAM_BF PROPERTIES("gram_size"="3", "bf_size"="256"), + + INDEX idx_bitmap_k104 (`k02`) USING BITMAP + +) +DUPLICATE KEY(k00) +PARTITION BY RANGE(k01) +( + PARTITION p1 VALUES [('2023-08-01'), ('2023-08-11')), + PARTITION p2 VALUES [('2023-08-11'), ('2023-08-21')), + PARTITION p3 VALUES [('2023-08-21'), ('2023-09-01')) +) +DISTRIBUTED BY HASH(k00) BUCKETS 32 +PROPERTIES ( + "bloom_filter_columns"="k05", + "replication_num" = "1" +); diff --git a/regression-test/suites/auth_call/ddl/test_dml_multi_routine_load_auth_tb2.sql b/regression-test/suites/auth_call/ddl/test_dml_multi_routine_load_auth_tb2.sql new file mode 100644 index 00000000000000..e6230c2b4a29bf --- /dev/null +++ b/regression-test/suites/auth_call/ddl/test_dml_multi_routine_load_auth_tb2.sql @@ -0,0 +1,46 @@ +CREATE TABLE test_dml_multi_routine_load_auth_tb2 +( + k00 INT NOT NULL, + k01 DATE NOT NULL, + k02 BOOLEAN NULL, + k03 TINYINT NULL, + k04 SMALLINT NULL, + k05 INT NULL, + k06 BIGINT NULL, + k07 LARGEINT NULL, + k08 FLOAT NULL, + k09 DOUBLE NULL, + k10 DECIMAL(9,1) NULL, + k11 DECIMALV3(9,1) NULL, + k12 DATETIME NULL, + k13 DATEV2 NULL, + k14 DATETIMEV2 NULL, + k15 CHAR NULL, + k16 VARCHAR NULL, + k17 STRING NULL, + k18 JSON NULL, + + INDEX idx_inverted_k104 (`k05`) USING INVERTED, + INDEX idx_inverted_k110 (`k11`) USING INVERTED, + INDEX idx_inverted_k113 (`k13`) USING INVERTED, + INDEX idx_inverted_k114 (`k14`) USING INVERTED, + INDEX idx_inverted_k117 (`k17`) USING INVERTED PROPERTIES("parser" = "english"), + INDEX idx_ngrambf_k115 (`k15`) USING NGRAM_BF PROPERTIES("gram_size"="3", "bf_size"="256"), + INDEX idx_ngrambf_k116 (`k16`) USING NGRAM_BF PROPERTIES("gram_size"="3", "bf_size"="256"), + INDEX idx_ngrambf_k117 (`k17`) USING NGRAM_BF PROPERTIES("gram_size"="3", "bf_size"="256"), + + INDEX idx_bitmap_k104 (`k02`) USING BITMAP + +) +DUPLICATE KEY(k00) +PARTITION BY RANGE(k01) +( + PARTITION p1 VALUES [('2023-08-01'), ('2023-08-11')), + PARTITION p2 VALUES [('2023-08-11'), ('2023-08-21')), + PARTITION p3 VALUES [('2023-08-21'), ('2023-09-01')) +) +DISTRIBUTED BY HASH(k00) BUCKETS 32 +PROPERTIES ( + "bloom_filter_columns"="k05", + "replication_num" = "1" +); diff --git a/regression-test/suites/auth_call/test_account_management_grant_auth.groovy b/regression-test/suites/auth_call/test_account_management_grant_auth.groovy new file mode 100644 index 00000000000000..28cb71cdeacadb --- /dev/null +++ b/regression-test/suites/auth_call/test_account_management_grant_auth.groovy @@ -0,0 +1,100 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_account_management_grant_auth","p0,auth_call") { + + String user = 'test_account_management_grant_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_account_management_grant_auth_db' + String user_derive = 'test_account_management_grant_user_derive_role' + String role_derive = 'test_account_management_grant_role_derive_role' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql("DROP USER ${user_derive}") + try_sql """drop database if exists ${dbName}""" + + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE USER ${user_derive} IDENTIFIED BY '${pwd}';""" + exception "denied" + } + test { + sql """ALTER USER ${user_derive} IDENTIFIED BY "${pwd}";""" + exception "denied" + } + test { + sql """SET PASSWORD FOR '${user_derive}' = PASSWORD('${pwd}')""" + exception "denied" + } + test { + sql """SET PROPERTY FOR '${user_derive}' 'max_user_connections' = '1000';""" + exception "denied" + } + test { + sql """DROP user ${user_derive}""" + exception "denied" + } + test { + sql """SET LDAP_ADMIN_PASSWORD = PASSWORD('${pwd}')""" + exception "denied" + } + } + sql """grant grant_priv on *.*.* to '${user}'""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE USER ${user_derive} IDENTIFIED BY '${pwd}';""" + sql """ALTER USER ${user_derive} IDENTIFIED BY "${pwd}";""" + sql """SET PASSWORD FOR '${user_derive}' = PASSWORD('${pwd}')""" + test { + sql """SET PROPERTY FOR '${user_derive}' 'max_user_connections' = '1000';""" + exception "denied" + } + sql """DROP user ${user_derive}""" + test { + sql """SET LDAP_ADMIN_PASSWORD = PASSWORD('${pwd}')""" + exception "denied" + } + } + sql """revoke grant_priv on *.*.* from '${user}'""" + sql """grant admin_priv on *.*.* to '${user}'""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE USER ${user_derive} IDENTIFIED BY '${pwd}';""" + sql """ALTER USER ${user_derive} IDENTIFIED BY "${pwd}";""" + sql """SET PASSWORD FOR '${user_derive}' = PASSWORD('${pwd}')""" + sql """SET PROPERTY FOR '${user_derive}' 'max_user_connections' = '1000';""" + sql """DROP user ${user_derive}""" + sql """SET LDAP_ADMIN_PASSWORD = PASSWORD('${pwd}')""" + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") + try_sql("DROP role ${user_derive}") +} diff --git a/regression-test/suites/auth_call/test_account_management_role_auth.groovy b/regression-test/suites/auth_call/test_account_management_role_auth.groovy new file mode 100644 index 00000000000000..2a90eeedf67bc6 --- /dev/null +++ b/regression-test/suites/auth_call/test_account_management_role_auth.groovy @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_account_management_role_auth","p0,auth_call") { + + String user = 'test_account_management_role_auth_user' + String role = 'test_account_management_role_auth_role' + String pwd = 'C123_567p' + String dbName = 'test_account_management_role_auth_db' + + String role_derive = 'test_account_management_role_auth_derive_role' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql("DROP role ${role}") + try_sql("DROP role ${role_derive}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE ROLE ${role}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """GRANT '${role}' TO ${user};""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE ROLE ${role_derive}""" + exception "denied" + } + test { + sql """ALTER ROLE ${role_derive} COMMENT "this is my first role";""" + exception "denied" + } + test { + sql """DROP ROLE ${role_derive}""" + exception "denied" + } + } + sql """grant grant_priv on *.*.* to ROLE '${role}'""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE ROLE ${role_derive}""" + sql """ALTER ROLE ${role_derive} COMMENT "this is my first role";""" + sql """DROP ROLE ${role_derive}""" + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") + try_sql("DROP role ${role}") + try_sql("DROP role ${role_derive}") +} diff --git a/regression-test/suites/auth_call/test_account_management_user_auth.groovy b/regression-test/suites/auth_call/test_account_management_user_auth.groovy new file mode 100644 index 00000000000000..e6b0c203dd367e --- /dev/null +++ b/regression-test/suites/auth_call/test_account_management_user_auth.groovy @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_account_management_user_auth","p0,auth_call") { + + String user = 'test_account_management_user_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_account_management_user_auth_db' + String user_derive = 'test_account_management_user_derive_role' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql("DROP USER ${user_derive}") + try_sql """drop database if exists ${dbName}""" + + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE USER ${user_derive} IDENTIFIED BY '${pwd}';""" + exception "denied" + } + test { + sql """ALTER USER ${user_derive} IDENTIFIED BY "${pwd}";""" + exception "denied" + } + test { + sql """SET PASSWORD FOR '${user_derive}' = PASSWORD('${pwd}')""" + exception "denied" + } + test { + sql """SET PROPERTY FOR '${user_derive}' 'max_user_connections' = '1000';""" + exception "denied" + } + test { + sql """DROP user ${user_derive}""" + exception "denied" + } + test { + sql """SET LDAP_ADMIN_PASSWORD = PASSWORD('${pwd}')""" + exception "denied" + } + } + sql """grant grant_priv on *.*.* to '${user}'""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE USER ${user_derive} IDENTIFIED BY '${pwd}';""" + sql """ALTER USER ${user_derive} IDENTIFIED BY "${pwd}";""" + sql """SET PASSWORD FOR '${user_derive}' = PASSWORD('${pwd}')""" + test { + sql """SET PROPERTY FOR '${user_derive}' 'max_user_connections' = '1000';""" + exception "denied" + } + sql """DROP user ${user_derive}""" + test { + sql """SET LDAP_ADMIN_PASSWORD = PASSWORD('${pwd}')""" + exception "denied" + } + } + sql """revoke grant_priv on *.*.* from '${user}'""" + sql """grant admin_priv on *.*.* to '${user}'""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE USER ${user_derive} IDENTIFIED BY '${pwd}';""" + sql """ALTER USER ${user_derive} IDENTIFIED BY "${pwd}";""" + sql """SET PASSWORD FOR '${user_derive}' = PASSWORD('${pwd}')""" + sql """SET PROPERTY FOR '${user_derive}' 'max_user_connections' = '1000';""" + sql """DROP user ${user_derive}""" + sql """SET LDAP_ADMIN_PASSWORD = PASSWORD('${pwd}')""" + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") + try_sql("DROP role ${user_derive}") +} diff --git a/regression-test/suites/auth_call/test_assistant_command_auth.groovy b/regression-test/suites/auth_call/test_assistant_command_auth.groovy new file mode 100644 index 00000000000000..339bd86a6c1645 --- /dev/null +++ b/regression-test/suites/auth_call/test_assistant_command_auth.groovy @@ -0,0 +1,96 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_assistant_command_auth","p0,auth_call") { + + String user = 'test_assistant_command_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_assistant_command_auth_db' + String tableName = 'test_assistant_command_auth_tb' + String catalogName = 'test_assistant_command_auth_catalog' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + PARTITION BY RANGE(id) () + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + sql """alter table ${dbName}.${tableName} add partition p1 VALUES [("1"), ("2"));""" + def insert_res = sql """insert into ${dbName}.${tableName} values (1, "111");""" + logger.info("insert_res: " + insert_res) + + sql """create catalog if not exists ${catalogName} properties ( + 'type'='hms' + );""" + + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """use ${dbName}""" + exception "denied" + } + + test { + sql """DESC ${dbName}.${tableName} ALL;""" + exception "denied" + } + + sql """switch internal;""" + test { + sql """REFRESH CATALOG ${catalogName};""" + exception "denied" + } + + sql """SYNC;""" + } + + sql """grant select_PRIV on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """DESC ${dbName}.${tableName} ALL;""" + } + + sql """grant select_PRIV on ${catalogName}.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """REFRESH CATALOG ${catalogName};""" + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_cluster_management_auth.groovy b/regression-test/suites/auth_call/test_cluster_management_auth.groovy new file mode 100644 index 00000000000000..396bcdc0f9a544 --- /dev/null +++ b/regression-test/suites/auth_call/test_cluster_management_auth.groovy @@ -0,0 +1,138 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite ("test_cluster_management_auth","nonConcurrent,p0,auth_call") { + + def follower_ip = "" + def follower_host = "" + def observer_ip = "" + def observer_host = "" + def backend_ip = "" + def backend_host = "" + def backend_id = "" + + def is_exists_follower = { + def res = sql """show frontends;""" + for (int i = 0; i < res.size(); i++) { + if (res[i][7] == "FOLLOWER" && res[i][8] == "false" && res[i][11] == "true") { + follower_ip = res[i][1] + follower_host = res[i][2] + return true + } + } + return false; + } + def is_exists_observer = { + def res = sql """show frontends;""" + for (int i = 0; i < res.size(); i++) { + if (res[i][7] == "OBSERVER" && res[i][8] == "false" && res[i][11] == "true") { + observer_ip = res[i][1] + observer_host = res[i][2] + return true; + } + } + return false; + } + def is_exists_backends = { + def res = sql """show backends;""" + assertTrue(res.size() > 0) + backend_ip = res[0][1] + backend_host = res[0][2] + backend_id = res[0][0] + return true + } + + String user = 'test_cluster_management_auth_user' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + // pipeline can't support delete node, it can affect other case + if (is_exists_follower()) { + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """show frontends""" + exception "denied" + } + test { + sql """ALTER SYSTEM add FOLLOWER '${follower_ip}:${follower_host}'""" + exception "denied" + } + test { + sql """ALTER SYSTEM DROP FOLLOWER '${follower_ip}:${follower_host}'""" + exception "denied" + } + } + } + + if (is_exists_observer()) { + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """show frontends""" + exception "denied" + } + test { + sql """ALTER SYSTEM add OBSERVER '${observer_ip}:${observer_host}'""" + exception "denied" + } + test { + sql """ALTER SYSTEM DROP OBSERVER '${observer_ip}:${observer_host}'""" + exception "denied" + } + } + } + + if (is_exists_backends()) { + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """show backends""" + exception "denied" + } + test { + sql """ALTER SYSTEM add backend '${backend_ip}:${backend_host}'""" + exception "denied" + } + test { + sql """ALTER SYSTEM MODIFY BACKEND "${backend_id}" SET ("tag.location" = "default");""" + exception "denied" + } + test { + sql """ALTER SYSTEM DECOMMISSION BACKEND '${backend_id}'""" + exception "denied" + } + test { + sql """ALTER SYSTEM DROP backend '${backend_ip}:${backend_host}'""" + exception "denied" + } + } + } + + try_sql("DROP USER ${user}") + +} diff --git a/regression-test/suites/auth_call/test_database_management_auth.groovy b/regression-test/suites/auth_call/test_database_management_auth.groovy new file mode 100644 
index 00000000000000..33a26959619e5f --- /dev/null +++ b/regression-test/suites/auth_call/test_database_management_auth.groovy @@ -0,0 +1,143 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_database_management_auth","p0,auth_call") { + + String user = 'test_database_management_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_database_management_auth_db' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """SHOW FRONTEND CONFIG""" + exception "denied" + } + test { + sql """ADMIN SET FRONTEND CONFIG ("disable_balance" = "true");""" + exception "denied" + } + test { + sql """SET global time_zone = "Asia/Shanghai";""" + exception "denied" + } + test { + sql """INSTALL PLUGIN FROM "";""" + exception "denied" + } + test { + sql """UNINSTALL PLUGIN demo;""" + exception "denied" + } + test { + sql """ADMIN SET REPLICA STATUS PROPERTIES("tablet_id" = "000", "backend_id" = "000", "status" = "ok");""" + exception "denied" + } + test { + sql """ADMIN SET REPLICA VERSION PROPERTIES("tablet_id" = "0", "backend_id" = "0", "version" = "0");""" + exception "denied" + } + test { + sql """ADMIN SET TABLE tb PARTITION VERSION PROPERTIES("partition_id" = "0", "visible_version" = "0");""" + exception "denied" + } + test { + sql """admin set table tbl status properties("state" = "NORMAL");""" + exception "denied" + } + test { + sql """SHOW REPLICA DISTRIBUTION FROM tbl;""" + exception "denied" + } + test { + sql """SHOW REPLICA STATUS FROM db1.tbl1;""" + exception "denied" + } + test { + sql """ADMIN REPAIR TABLE tbl;""" + exception "denied" + } + test { + sql """ADMIN CANCEL REPAIR TABLE tbl PARTITION(p1);""" + exception "denied" + } + test { + sql """ADMIN CHECK TABLET (10000, 10001) PROPERTIES("type" = "consistency");""" + exception "denied" + } + test { + sql """SHOW TABLET DIAGNOSIS 0;""" + exception "denied" + } + test { + sql """ADMIN COPY TABLET 10010 PROPERTIES("backend_id" = "10001");""" + exception "denied" + } + test { + sql """show tablet storage format verbose;""" + exception "denied" + } + test { + sql """ADMIN CLEAN TRASH;""" + exception "denied" + } + test { + sql """RECOVER DATABASE db_name;""" + exception "denied" + } + test { + sql """ADMIN 
REBALANCE DISK;""" + exception "denied" + } + test { + sql """ADMIN CANCEL REBALANCE DISK;""" + exception "denied" + } + test { + sql """UNSET GLOBAL VARIABLE ALL;""" + exception "denied" + } + test { + sql """clean all query stats;""" + exception "denied" + } + test { + sql """REFRESH LDAP ALL;""" + exception "denied" + } + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_backup_auth.groovy b/regression-test/suites/auth_call/test_ddl_backup_auth.groovy new file mode 100644 index 00000000000000..3e2d8d005d32ce --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_backup_auth.groovy @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import java.util.UUID + +suite("test_ddl_backup_auth","p0,auth_call") { + UUID uuid = UUID.randomUUID() + String randomValue = uuid.toString() + int hashCode = randomValue.hashCode() + hashCode = hashCode > 0 ? hashCode : hashCode * (-1) + + String user = 'test_ddl_backup_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_backup_auth_db' + String tableName = 'test_ddl_backup_auth_tb' + String repositoryName = 'test_ddl_backup_auth_rps' + String backupLabelName = 'test_ddl_backup_auth_backup_label' + hashCode.toString() + + String ak = getS3AK() + String sk = getS3SK() + String endpoint = getS3Endpoint() + String region = getS3Region() + String bucket = context.config.otherConfigs.get("s3BucketName") + + //cloud-mode + if (isCloudMode()) { + return + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql("""DROP REPOSITORY `${repositoryName}`;""") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """ + insert into ${dbName}.`${tableName}` values + (1, "111"), + (2, "222"), + (3, "333"); + """ + + sql """CREATE REPOSITORY `${repositoryName}` + WITH S3 + ON LOCATION "s3://${bucket}/${repositoryName}" + PROPERTIES + ( + "s3.endpoint" = "http://${endpoint}", + "s3.region" = "${region}", + "s3.access_key" = "${ak}", + "s3.secret_key" = "${sk}" + )""" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """BACKUP SNAPSHOT ${dbName}.${backupLabelName} + TO ${repositoryName} + ON (${tableName}) + PROPERTIES ("type" = "full");""" + exception "denied" + } + test { + sql """CANCEL BACKUP FROM ${dbName};""" + exception "denied" + } + + test { + sql """SHOW BACKUP FROM ${dbName};""" + exception 
"denied" + } + + test { + sql """SHOW SNAPSHOT ON ${repositoryName};""" + exception "denied" + } + } + sql """grant LOAD_PRIV on ${dbName}.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """BACKUP SNAPSHOT ${dbName}.${backupLabelName} + TO ${repositoryName} + ON (${tableName}) + PROPERTIES ("type" = "full");""" + def res = sql """SHOW BACKUP FROM ${dbName};""" + logger.info("res: " + res) + assertTrue(res.size() == 1) + + sql """CANCEL BACKUP FROM ${dbName};""" + res = sql """SHOW BACKUP FROM ${dbName};""" + logger.info("res: " + res) + assertTrue(res[0][3] == "CANCELLED") + + test { + sql """SHOW SNAPSHOT ON ${repositoryName};""" + exception "denied" + } + } + + try_sql("""DROP REPOSITORY `${repositoryName}`;""") + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy b/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy new file mode 100644 index 00000000000000..87c256c770e3b3 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy @@ -0,0 +1,113 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_ddl_catalog_auth","p0,auth_call") { + String user = 'test_ddl_catalog_auth_user' + String pwd = 'C123_567p' + String catalogName = 'test_ddl_catalog_auth_catalog' + String catalogNameNew = 'test_ddl_catalog_auth_catalog_new' + String catalogNameOther = 'test_ddl_catalog_auth_catalog_other' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + sql """create catalog if not exists ${catalogNameOther} properties ( + 'type'='hms' + );""" + + try_sql("DROP USER ${user}") + try_sql """drop catalog if exists ${catalogName}""" + try_sql """drop catalog if exists ${catalogNameNew}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + // ddl create + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create catalog if not exists ${catalogName} properties ( + 'type'='hms' + );""" + exception "denied" + } + def ctl_res = sql """show catalogs;""" + assertTrue(ctl_res.size() == 1) + } + sql """create catalog if not exists ${catalogName} properties ( + 'type'='hms' + );""" + sql """grant Create_priv on ${catalogName}.*.* to ${user}""" + sql """drop catalog ${catalogName}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """create catalog if not exists ${catalogName} properties ( + 'type'='hms' + );""" + sql """show create catalog ${catalogName}""" + def ctl_res = sql """show catalogs;""" + assertTrue(ctl_res.size() == 2) + } + + // ddl alter + // user alter + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ALTER CATALOG ${catalogName} RENAME ${catalogNameNew};""" + exception "denied" + } + } + sql """grant ALTER_PRIV on ${catalogName}.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """ALTER CATALOG ${catalogName} RENAME ${catalogNameNew};""" + test { + sql """show create catalog ${catalogNameNew}""" + exception "denied" + } + def ctl_res = sql """show catalogs;""" + assertTrue(ctl_res.size() == 1) + } + // root alter + sql """ALTER CATALOG ${catalogNameNew} RENAME ${catalogName};""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """show create catalog ${catalogName}""" + def ctl_res = sql """show catalogs;""" + assertTrue(ctl_res.size() == 2) + } + + // ddl drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """drop CATALOG ${catalogName};""" + exception "denied" + } + } + sql """grant DROP_PRIV on ${catalogName}.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """drop CATALOG ${catalogName};""" + def ctl_res = sql """show catalogs;""" + assertTrue(ctl_res.size() == 1) + } + + sql """drop catalog if exists ${catalogName}""" + sql """drop catalog if exists ${catalogNameOther}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_colocate_group_auth.groovy b/regression-test/suites/auth_call/test_ddl_colocate_group_auth.groovy new file mode 100644 index 00000000000000..3c243facdb4fbb --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_colocate_group_auth.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_colocate_group_auth","p0,auth_call") { + String user = 'test_ddl_colocate_group_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_colocate_group_auth_db' + String tableName = 'test_ddl_colocate_group_auth_tb' + String colocateGroupName = 'test_ddl_colocate_group_auth_cg' + + //cloud-mode + if (isCloudMode()) { + return + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1", + "colocate_with" = "${colocateGroupName}" + );""" + + // ddl create + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ALTER COLOCATE GROUP ${dbName}.${colocateGroupName} + SET ( + "replication_num"="1" + );""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """ALTER COLOCATE GROUP ${dbName}.${colocateGroupName} + SET ( + "replication_num"="1" + );""" + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_database_auth.groovy b/regression-test/suites/auth_call/test_ddl_database_auth.groovy new file mode 100644 index 00000000000000..80e6b1b6e76695 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_database_auth.groovy @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
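+// Auth coverage for database DDL: CREATE DATABASE, ALTER DATABASE RENAME and DROP DATABASE are denied for a plain user until Create_priv/Alter_priv/Drop_priv are granted on the database; SHOW DATABASES reflects the visible set.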
+ +import org.junit.Assert; + +suite("test_ddl_database_auth","p0,auth_call") { + String user = 'test_ddl_database_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_database_auth_db' + String dbNameNew = 'test_ddl_database_auth_db_new' + String tableName = 'test_ddl_database_auth_tb' + String tableNameNew = 'test_ddl_database_auth_tb_new' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql """drop database if exists ${dbNameNew}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + // ddl create + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create database ${dbName};""" + exception "denied" + } + def db_res = sql """show databases;""" + assertTrue(db_res.size() == 3 || db_res.size() == 1) + } + sql """create database ${dbName};""" + sql """grant Create_priv on ${dbName}.* to ${user}""" + sql """drop database ${dbName};""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """create database ${dbName};""" + sql """show create database ${dbName}""" + def db_res = sql """show databases;""" + assertTrue(db_res.size() == 4 || db_res.size() == 2) + } + + // ddl alter + // user alter + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ALTER database ${dbName} RENAME ${dbNameNew};""" + exception "denied" + } + } + sql """grant ALTER_PRIV on ${dbName}.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """ALTER database ${dbName} RENAME ${dbNameNew};""" + test { + sql """show create database ${dbNameNew}""" + exception "denied" + } + def db_res = sql """show databases;""" + assertTrue(db_res.size() == 3 || db_res.size() == 1) + } + // root alter + sql """ALTER database ${dbNameNew} RENAME ${dbName};""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """show create database ${dbName}""" + def db_res = sql """show databases;""" + assertTrue(db_res.size() == 4 || db_res.size() == 2) + } + + // ddl drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """drop database ${dbName};""" + exception "denied" + } + } + sql """grant DROP_PRIV on ${dbName}.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """drop database ${dbName};""" + def ctl_res = sql """show databases;""" + assertTrue(ctl_res.size() == 3 || ctl_res.size() == 1) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_encryptkey_auth.groovy b/regression-test/suites/auth_call/test_ddl_encryptkey_auth.groovy new file mode 100644 index 00000000000000..e24c88d55864b5 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_encryptkey_auth.groovy @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_encryptkey_auth","p0,auth_call") { + String user = 'test_ddl_encryptkey_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_encryptkey_auth_db' + String encryptkeyName = 'test_ddl_encryptkey_auth_ecyk' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE ENCRYPTKEY ${encryptkeyName} AS "ABCD123456789";""" + exception "denied" + } + test { + sql """SHOW ENCRYPTKEYS FROM ${dbName}""" + exception "denied" + } + test { + sql """DROP ENCRYPTKEY ${encryptkeyName};""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """CREATE ENCRYPTKEY ${encryptkeyName} AS "ABCD123456789";""" + def res = sql """SHOW ENCRYPTKEYS FROM ${dbName}""" + assertTrue(res.size() == 1) + def cur_secret_data = sql """SELECT HEX(AES_ENCRYPT("Doris is Great", KEY ${encryptkeyName}));""" + def cur_decrypt_data = sql """SELECT AES_DECRYPT(UNHEX('${cur_secret_data[0][0]}'), KEY ${encryptkeyName});""" + logger.info("cur_decrypt_data: " + cur_decrypt_data) + assertTrue(cur_decrypt_data[0][0] == "Doris is Great") + sql """DROP ENCRYPTKEY ${encryptkeyName};""" + res = sql """SHOW ENCRYPTKEYS FROM ${dbName}""" + assertTrue(res.size() == 0) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_file_auth.groovy b/regression-test/suites/auth_call/test_ddl_file_auth.groovy new file mode 100644 index 00000000000000..77ca5e6703f977 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_file_auth.groovy @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
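+// Auth coverage for file management: CREATE FILE, SHOW FILE and DROP FILE are all denied for a plain user; SHOW FILE becomes available with select_priv on the database, while CREATE/DROP FILE require admin_priv.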
+ +import org.junit.Assert; + +suite("test_ddl_file_auth","p0,auth_call") { + String user = 'test_ddl_file_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_file_auth_db' + String fileName = 'test_ddl_file_auth_file' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + String s3_endpoint = getS3Endpoint() + String bucket = context.config.otherConfigs.get("s3BucketName"); + def dataFilePath = "https://"+"${bucket}"+"."+"${s3_endpoint}"+"/regression/auth_test.key" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE FILE "${fileName}" IN ${dbName} + PROPERTIES + ( + "url" = "${dataFilePath}", + "catalog" = "internal" + );""" + exception "denied" + } + test { + sql """SHOW FILE FROM ${dbName};""" + exception "denied" + } + test { + sql """DROP FILE "${fileName}" from ${dbName} properties("catalog" = "internal");""" + exception "denied" + } + } + sql """grant select_priv on ${dbName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """SHOW FILE FROM ${dbName};""" + } + sql """revoke select_priv on ${dbName} from ${user}""" + + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE FILE "${fileName}" IN ${dbName} + PROPERTIES + ( + "url" = "${dataFilePath}", + "catalog" = "internal" + );""" + sql """use ${dbName}""" + def res = sql """SHOW FILE FROM ${dbName};""" + assertTrue(res.size() == 1) + + sql """DROP FILE "${fileName}" from ${dbName} properties("catalog" = "internal");""" + res = sql """SHOW FILE FROM ${dbName};""" + assertTrue(res.size() == 0) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_function_auth.groovy b/regression-test/suites/auth_call/test_ddl_function_auth.groovy new file mode 100644 index 00000000000000..f8fd51cf1cf641 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_function_auth.groovy @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
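+// Auth coverage for function DDL: with only select_priv on the database the user can run SHOW FUNCTIONS and SHOW CREATE FUNCTION but is denied CREATE/DROP ALIAS FUNCTION, which succeed once admin_priv is granted.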
+ +import org.junit.Assert; + +suite("test_ddl_function_auth","p0,auth_call") { + String user = 'test_ddl_function_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_function_auth_db' + String functionName = 'test_ddl_function_auth_fct' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql("""DROP FUNCTION ${dbName}.${functionName}(INT)""") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """grant select_priv on ${dbName}.* to ${user}""" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE ALIAS FUNCTION ${dbName}.${functionName}(INT) WITH PARAMETER(id) AS CONCAT(LEFT(id, 3), '****', RIGHT(id, 4));""" + exception "denied" + } + + sql """use ${dbName}""" + def res = sql """show functions""" + assertTrue(res.size() == 0) + + test { + sql """DROP FUNCTION ${dbName}.${functionName}(INT)""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE ALIAS FUNCTION ${dbName}.${functionName}(INT) WITH PARAMETER(id) AS CONCAT(LEFT(id, 3), '****', RIGHT(id, 4));""" + sql """use ${dbName}""" + def res = sql """show functions""" + assertTrue(res.size() == 1) + + sql """select ${functionName}(1)""" + sql """DROP FUNCTION ${dbName}.${functionName}(INT)""" + res = sql """show functions""" + assertTrue(res.size() == 0) + } + sql """revoke admin_priv on *.*.* from ${user}""" + + // show + sql """CREATE ALIAS FUNCTION ${dbName}.${functionName}(INT) WITH PARAMETER(id) AS CONCAT(LEFT(id, 3), '****', RIGHT(id, 4));""" + sql """use ${dbName}""" + def func_res = sql """show functions""" + assertTrue(func_res.size() == 1) + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + def res = sql """SHOW CREATE FUNCTION ${dbName}.${functionName}(INT)""" + logger.info("res: " + res) + assertTrue(res.size() == 1) + } + + try_sql("""DROP FUNCTION ${dbName}.${functionName}(INT)""") + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_index_auth.groovy b/regression-test/suites/auth_call/test_ddl_index_auth.groovy new file mode 100644 index 00000000000000..baa8d79f8c79c8 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_index_auth.groovy @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_index_auth","p0,auth_call") { + String user = 'test_ddl_index_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_index_auth_db' + String tableName = 'test_ddl_index_auth_tb' + String indexName = 'test_ddl_index_auth_index' + + def waitingColumnTaskFinished = { def cur_db_name, def cur_table_name -> + Thread.sleep(2000) + String showTasks = "SHOW ALTER TABLE COLUMN from ${cur_db_name} where TableName='${cur_table_name}' ORDER BY CreateTime ASC" + String status = "NULL" + List<List<Object>> result + long startTime = System.currentTimeMillis() + long timeoutTimestamp = startTime + 5 * 60 * 1000 // 5 min + do { + result = sql(showTasks) + logger.info("result: " + result.toString()) + if (!result.isEmpty()) { + status = result.last().get(9) + } + logger.info("The state of ${showTasks} is ${status}") + Thread.sleep(1000) + } while (timeoutTimestamp > System.currentTimeMillis() && (status != 'FINISHED')) + if (status != "FINISHED") { + logger.info("status is not success") + return false + } + Assert.assertEquals("FINISHED", status) + return true + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + logger.info("cluster:" + clusters) + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + sql """create database ${dbName}""" + sql """ + CREATE TABLE IF NOT EXISTS ${dbName}.${tableName} ( + id BIGINT, + username1 VARCHAR(30), + username2 VARCHAR(30), + username3 VARCHAR(30) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + ); + """ + + def sc_index_res_tmp = sql """SHOW ALTER TABLE COLUMN from ${dbName} where TableName='${tableName}' ORDER BY CreateTime ASC;""" + assertTrue(sc_index_res_tmp.size() == 0) + def index_res_tmp = sql """SHOW INDEX FROM ${dbName}.${tableName};""" + assertTrue(index_res_tmp.size() == 0) + + // ddl create + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE INDEX IF NOT EXISTS ${indexName} ON ${dbName}.${tableName} (username3) USING INVERTED COMMENT 'balabala';""" + exception "denied" + } + + test { + sql """DROP INDEX IF EXISTS ${indexName} ON ${dbName}.${tableName};""" + exception "denied" + } + + test { + sql """show index FROM ${dbName}.${tableName};""" + exception "denied" + } + } + sql """grant ALTER_PRIV on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def ss = sql """show grants""" + logger.info("ss:" + ss) + sql """use ${dbName}""" + sql """CREATE INDEX IF NOT EXISTS ${indexName} ON ${dbName}.${tableName} (username3) USING INVERTED COMMENT 'balabala';""" + sql """show create table ${tableName}""" + + def sc_index_res = sql """SHOW ALTER TABLE COLUMN from ${dbName} where TableName='${tableName}' ORDER BY CreateTime ASC;""" + assertTrue(sc_index_res.size() == 1) + waitingColumnTaskFinished(dbName, tableName) + def index_res = sql """SHOW INDEX FROM ${dbName}.${tableName};""" + assertTrue(index_res.size() == 1) + + sql """DROP INDEX IF EXISTS ${indexName} ON ${dbName}.${tableName};""" + sc_index_res = sql """SHOW ALTER TABLE COLUMN from ${dbName} where 
TableName='${tableName}' ORDER BY CreateTime ASC;""" + assertTrue(sc_index_res.size() == 2) + waitingColumnTaskFinished(dbName, tableName) + index_res = sql """SHOW INDEX FROM ${dbName}.${tableName};""" + assertTrue(index_res.size() == 0) + + def show_index_res = sql """show index FROM ${dbName}.${tableName};""" + logger.info("show_index_res: " + show_index_res) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_job_auth.groovy b/regression-test/suites/auth_call/test_ddl_job_auth.groovy new file mode 100644 index 00000000000000..376cf8efbe90a6 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_job_auth.groovy @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_job_auth","p0,auth_call") { + String user = 'test_ddl_job_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_job_auth_db' + String tableName = 'test_ddl_job_auth_tb' + String tableNameDst = 'test_ddl_job_auth_tb_dst' + String jobName = 'test_ddl_job_auth_job' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql("""DROP JOB where jobName='${jobName}';""") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """create table ${dbName}.${tableNameDst} like ${dbName}.${tableName}""" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE JOB ${jobName} ON SCHEDULE AT '2020-01-01 00:00:00' DO INSERT INTO ${dbName}.${tableNameDst} SELECT * FROM ${dbName}.${tableName};""" + exception "denied" + } + test { + sql """PAUSE JOB where jobname='${jobName}';""" + exception "denied" + } + test { + sql """RESUME JOB where jobName= '${jobName}';""" + exception "denied" + } + + test { + sql """DROP JOB where jobName='${jobName}';""" + exception "denied" + } + + test { + sql """select * from jobs("type"="insert") where Name="${jobName}";""" + exception "ADMIN priv" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE JOB ${jobName} ON SCHEDULE AT '2100-01-01 00:00:00' DO INSERT INTO ${dbName}.${tableNameDst} SELECT * FROM 
${dbName}.${tableName};""" + def res = sql """select * from jobs("type"="insert") where Name="${jobName}";""" + assertTrue(res.size() == 1) + + sql """PAUSE JOB where jobname='${jobName}';""" + res = sql """select * from jobs("type"="insert") where Name="${jobName}";""" + assertTrue(res[0][5] == "PAUSED") + + sql """RESUME JOB where jobName= '${jobName}';""" + res = sql """select * from jobs("type"="insert") where Name="${jobName}";""" + assertTrue(res[0][5] == "RUNNING") + + sql """DROP JOB where jobName='${jobName}';""" + res = sql """select * from jobs("type"="insert") where Name="${jobName}";""" + assertTrue(res.size() == 0) + } + + try_sql("""DROP JOB where jobName='${jobName}';""") + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_mask_view_auth.groovy b/regression-test/suites/auth_call/test_ddl_mask_view_auth.groovy new file mode 100644 index 00000000000000..319eec2963ee4d --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_mask_view_auth.groovy @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
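+// Auth coverage for querying a view built on mask(): SELECT on the view is denied until select_priv is granted on the view itself, after which the masked values are verified.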
+ +import org.junit.Assert; + +suite("test_ddl_mask_view_auth","p0,auth_call") { + String user = 'test_ddl_mask_view_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_mask_view_auth_db' + String tableName = 'test_ddl_mask_view_auth_tb' + String viewName = 'test_ddl_mask_view_auth_view' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """ + INSERT INTO ${dbName}.${tableName} (id, username) + VALUES (1, "111aaaAAA"), + (2, "222bbbBBB"), + (3, "333cccCCC") + """ + sql """CREATE VIEW ${dbName}.${viewName} (k1, v1) + AS + SELECT mask(id) as k1, mask(username) as v1 FROM ${dbName}.${tableName} GROUP BY k1, v1;""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """select * from ${dbName}.${viewName};""" + exception "denied" + } + } + sql """grant select_PRIV on ${dbName}.${viewName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """select * from ${dbName}.${viewName};""" + assertTrue(res[0][0] == "n") + assertTrue(res[0][1] == "nnnxxxXXX") + } + + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_mtmv_auth.groovy b/regression-test/suites/auth_call/test_ddl_mtmv_auth.groovy new file mode 100644 index 00000000000000..3aa146f58890e5 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_mtmv_auth.groovy @@ -0,0 +1,189 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
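+// Auth coverage for async materialized views (MTMV): creation needs Create_priv on the MTMV plus select_priv on the base table; ALTER RENAME, SELECT on its columns and DROP each need the matching privilege on the MTMV; refresh, task cancel, and job pause/resume are also exercised.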
+ +import org.junit.Assert; + +suite("test_ddl_mtmv_auth","p0,auth_call") { + String user = 'test_ddl_mtmv_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_mtmv_auth_db' + String tableName = 'test_ddl_mtmv_auth_tb' + String mtmvName = 'test_ddl_mtmv_auth_mtmv' + String mtmvNameNew = 'test_ddl_mtmv_auth_mtmv_new' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + // ddl create + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE MATERIALIZED VIEW ${dbName}.${mtmvName} + BUILD IMMEDIATE REFRESH AUTO ON MANUAL + DISTRIBUTED BY RANDOM BUCKETS 1 + PROPERTIES ('replication_num' = '1') + AS select username, sum(id) as sum_id from ${dbName}.${tableName} group by username""" + exception "denied" + } + } + sql """CREATE MATERIALIZED VIEW ${dbName}.${mtmvName} + BUILD IMMEDIATE REFRESH AUTO ON MANUAL + DISTRIBUTED BY RANDOM BUCKETS 1 + PROPERTIES ('replication_num' = '1') + AS select username, sum(id) as sum_id from ${dbName}.${tableName} group by username""" + sql """grant Create_priv on ${dbName}.${mtmvName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE MATERIALIZED VIEW ${dbName}.${mtmvName} + BUILD IMMEDIATE REFRESH AUTO ON MANUAL + DISTRIBUTED BY RANDOM BUCKETS 1 + PROPERTIES ('replication_num' = '1') + AS select username, sum(id) as sum_id from ${dbName}.${tableName} group by username""" + exception "denied" + } + } + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + sql """drop MATERIALIZED VIEW ${dbName}.${mtmvName};""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE MATERIALIZED VIEW ${dbName}.${mtmvName} + BUILD IMMEDIATE REFRESH AUTO ON MANUAL + DISTRIBUTED BY RANDOM BUCKETS 1 + PROPERTIES ('replication_num' = '1') + AS select username, sum(id) as sum_id from ${dbName}.${tableName} group by username""" + sql """show create materialized view ${dbName}.${mtmvName}""" + sql """use ${dbName}""" + def tb_res = sql """show tables;""" + assertTrue(tb_res.size() == 2) + } + sql """revoke select_priv on ${dbName}.${tableName} from ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """refresh MATERIALIZED VIEW ${mtmvName} auto;""" + + // insert and refresh mtmv + def job_name = getJobName(dbName, mtmvName) + + // cancel + def mv_tasks_res = sql """select * from tasks("type"="mv") where MvName="${mtmvName}";""" + assertTrue(mv_tasks_res.size() > 0) + try { + sql """CANCEL MATERIALIZED VIEW TASK ${mv_tasks_res[0][0]} on ${mtmvName};""" + } catch (Exception e) { + log.info(e.getMessage()) + assertTrue(e.getMessage().contains("no running task")) + } + + // pause + def job_status = sql """select * from jobs("type"="mv") where Name="${job_name}";""" + assertTrue(job_status[0][8] == "RUNNING") + sql """PAUSE MATERIALIZED VIEW JOB ON ${mtmvName};""" + job_status = sql """select * from 
jobs("type"="mv") where Name="${job_name}";""" + assertTrue(job_status[0][8] == "PAUSED") + + // resume + sql """RESUME MATERIALIZED VIEW JOB ON ${mtmvName};""" + job_status = sql """select * from jobs("type"="mv") where Name="${job_name}";""" + assertTrue(job_status[0][8] == "RUNNING") + } + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + + // ddl alter + // user alter + sql """revoke Create_priv on ${dbName}.${mtmvName} from ${user};""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ALTER MATERIALIZED VIEW ${mtmvName} rename ${mtmvNameNew};""" + exception "denied" + } + test { + sql """show create materialized view ${dbName}.${mtmvName}""" + exception "denied" + } + } + sql """grant ALTER_PRIV on ${dbName}.${mtmvName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """show create materialized view ${mtmvName}""" + sql """ALTER MATERIALIZED VIEW ${mtmvName} rename ${mtmvNameNew};""" + test { + sql """show create materialized view ${mtmvNameNew}""" + exception "denied" + } + def tb_res = sql """show tables;""" + assertTrue(tb_res.size() == 1) + } + + // root alter + sql """use ${dbName}""" + sql """ALTER MATERIALIZED VIEW ${mtmvNameNew} RENAME ${mtmvName};""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """show create materialized view ${mtmvName}""" + def db_res = sql """show tables;""" + assertTrue(db_res.size() == 2) + } + + // dml select + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """select username from ${dbName}.${mtmvName}""" + exception "denied" + } + } + sql """grant select_priv(username) on ${dbName}.${mtmvName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """select username from ${dbName}.${mtmvName}""" + } + sql """revoke select_priv(username) on ${dbName}.${mtmvName} from ${user}""" + + // ddl drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """use ${dbName}""" + sql """drop materialized view ${mtmvName};""" + exception "denied" + } + } + sql """grant DROP_PRIV on ${dbName}.${mtmvName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """drop materialized view ${mtmvName};""" + def ctl_res = sql """show tables;""" + assertTrue(ctl_res.size() == 1) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_mv_auth.groovy b/regression-test/suites/auth_call/test_ddl_mv_auth.groovy new file mode 100644 index 00000000000000..485343df67392c --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_mv_auth.groovy @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_mv_auth","p0,auth_call") { + String user = 'test_ddl_mv_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_mv_auth_db' + String tableName = 'test_ddl_mv_auth_tb' + String mvName = 'test_ddl_mv_auth_mv' + String rollupName = 'test_ddl_mv_auth_rollup' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """ + insert into ${dbName}.`${tableName}` values + (1, "111"), + (2, "222"), + (3, "333"); + """ + + // ddl create + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create materialized view ${mvName} as select username from ${dbName}.${tableName};""" + exception "denied" + } + test { + sql """alter table ${dbName}.${tableName} add rollup ${rollupName}(username)""" + exception "denied" + } + } + sql """grant select_priv(username) on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + test { + sql """create materialized view ${mvName} as select username from ${dbName}.${tableName};""" + exception "denied" + } + test { + sql """alter table ${dbName}.${tableName} add rollup ${rollupName}(username)""" + exception "denied" + } + } + sql """grant alter_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """create materialized view ${mvName} as select username from ${dbName}.${tableName};""" + waitingMVTaskFinishedByMvName(dbName, tableName) + sql """alter table ${dbName}.${tableName} add rollup ${rollupName}(username)""" + waitingMVTaskFinishedByMvName(dbName, tableName) + + def mv_res = sql """desc ${dbName}.${tableName} all;""" + logger.info("mv_res: " + mv_res) + assertTrue(mv_res.size() == 6) + } + sql """revoke alter_priv on ${dbName}.${tableName} from ${user}""" + sql """revoke select_priv(username) on ${dbName}.${tableName} from ${user}""" + + // ddl drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """DROP MATERIALIZED VIEW IF EXISTS ${mvName} ON ${dbName}.${tableName};""" + exception "denied" + } + test { + sql """ALTER TABLE ${dbName}.${tableName} DROP ROLLUP ${rollupName};""" + exception "denied" + } + } + sql """grant alter_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """DROP MATERIALIZED VIEW IF EXISTS ${mvName} ON ${tableName};""" + sql """ALTER TABLE ${dbName}.${tableName} DROP ROLLUP ${rollupName};""" + def mv_res = sql """desc ${dbName}.${tableName} all;""" + logger.info("mv_res: " + mv_res) + assertTrue(mv_res.size() == 2) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_part_table_auth.groovy 
b/regression-test/suites/auth_call/test_ddl_part_table_auth.groovy new file mode 100644 index 00000000000000..0d1bfb8551fb11 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_part_table_auth.groovy @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_part_table_auth","p0,auth_call") { + String user = 'test_ddl_part_table_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_part_table_auth_db' + String tableName = 'test_ddl_part_table_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + PARTITION BY RANGE(id) () + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + sql """alter table ${dbName}.${tableName} add partition p1 VALUES [("1"), ("2"));""" + def insert_res = sql """insert into ${dbName}.${tableName} values (1, "111");""" + logger.info("insert_res: " + insert_res) + + def partition_info = sql """show partitions from ${dbName}.${tableName}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """show partitions from ${dbName}.${tableName}""" + exception "denied" + } + test { + sql """show partition ${partition_info[0][0]}""" + exception "denied" + } + } + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """show partitions from ${dbName}.${tableName}""" + sql """show query stats""" + } + sql """revoke select_priv on ${dbName}.${tableName} from ${user}""" + + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """show partition ${partition_info[0][0]}""" + assertTrue(res.size() == 1) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_policy_storage_auth.groovy b/regression-test/suites/auth_call/test_ddl_policy_storage_auth.groovy new file mode 100644 index 00000000000000..0b79c6d7f08d3a --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_policy_storage_auth.groovy @@ -0,0 +1,96 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_policy_storage_auth","p0,auth_call") { + String user = 'test_ddl_policy_storage_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_policy_storage_auth_db' + String storagePolicyName = 'test_ddl_policy_storage_auth_policy' + String resourceName = 'test_ddl_policy_storage_auth_rs' + + //cloud-mode + if (isCloudMode()) { + return + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql """DROP STORAGE POLICY if exists ${storagePolicyName}""" + try_sql("""DROP RESOURCE '${resourceName}'""") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """CREATE RESOURCE IF NOT EXISTS "${resourceName}" + PROPERTIES( + "type" = "s3", + "AWS_ENDPOINT" = "bj.s3.comaaaa", + "AWS_REGION" = "bj", + "AWS_ROOT_PATH" = "path/to/rootaaaa", + "AWS_ACCESS_KEY" = "bbba", + "AWS_SECRET_KEY" = "aaaa", + "AWS_MAX_CONNECTIONS" = "50", + "AWS_REQUEST_TIMEOUT_MS" = "3000", + "AWS_CONNECTION_TIMEOUT_MS" = "1000", + "AWS_BUCKET" = "test-bucket", + "s3_validity_check" = "false" + );""" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use regression_test;""" + test { + sql """CREATE STORAGE POLICY ${storagePolicyName} + PROPERTIES( + "storage_resource" = "${resourceName}", + "cooldown_datetime" = "2022-06-08 00:00:00" + );""" + exception "denied" + } + test { + sql """ALTER STORAGE POLICY ${storagePolicyName} PROPERTIES("cooldown_datetime" = "2023-06-09 00:00:00");""" + exception "denied" + } + test { + sql """DROP STORAGE POLICY ${storagePolicyName}""" + exception "denied" + } + test { + sql """SHOW STORAGE POLICY;""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE STORAGE POLICY ${storagePolicyName} + PROPERTIES( + "storage_resource" = "${resourceName}", + "cooldown_datetime" = "2022-06-08 00:00:00" + );""" + def res = sql """SHOW STORAGE POLICY;""" + assertTrue(res.size() >= 1) + sql """ALTER STORAGE POLICY ${storagePolicyName} PROPERTIES("cooldown_datetime" = "2023-06-09 00:00:00");""" + sql """DROP STORAGE POLICY ${storagePolicyName}""" + sql """SHOW STORAGE POLICY;""" + } + + try_sql("""DROP RESOURCE '${resourceName}'""") + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_repository_auth.groovy b/regression-test/suites/auth_call/test_ddl_repository_auth.groovy new file mode 100644 index 00000000000000..8e127778f5680e --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_repository_auth.groovy @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) 
under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_repository_auth","p0,auth_call") { + String user = 'test_ddl_repository_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_repository_auth_db' + String repositoryName = 'test_ddl_repository_auth_rps' + + String ak = getS3AK() + String sk = getS3SK() + String endpoint = getS3Endpoint() + String region = getS3Region() + String bucket = context.config.otherConfigs.get("s3BucketName"); + + //cloud-mode + if (isCloudMode()) { + return + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql("""DROP REPOSITORY `${repositoryName}`;""") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE REPOSITORY `${repositoryName}` + WITH S3 + ON LOCATION "s3://${bucket}/${repositoryName}" + PROPERTIES + ( + "s3.endpoint" = "http://${endpoint}", + "s3.region" = "${region}", + "s3.access_key" = "${ak}", + "s3.secret_key" = "${sk}" + )""" + exception "denied" + } + test { + sql """SHOW CREATE REPOSITORY for ${repositoryName};""" + exception "denied" + } + + test { + sql """DROP REPOSITORY `${repositoryName}`;""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE REPOSITORY `${repositoryName}` + WITH S3 + ON LOCATION "s3://${bucket}/${repositoryName}" + PROPERTIES + ( + "s3.endpoint" = "http://${endpoint}", + "s3.region" = "${region}", + "s3.access_key" = "${ak}", + "s3.secret_key" = "${sk}" + )""" + def res = sql """SHOW CREATE REPOSITORY for ${repositoryName};""" + assertTrue(res.size() > 0) + + sql """DROP REPOSITORY `${repositoryName}`;""" + test { + sql """SHOW CREATE REPOSITORY for ${repositoryName};""" + exception "repository not exist" + } + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_resource_auth.groovy b/regression-test/suites/auth_call/test_ddl_resource_auth.groovy new file mode 100644 index 00000000000000..a9d64ed901735f --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_resource_auth.groovy @@ -0,0 +1,97 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_resource_auth","p0,auth_call") { + String user = 'test_ddl_resource_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_resource_auth_db' + String resourceName = 'test_ddl_resource_auth_rs' + + //cloud-mode + if (isCloudMode()) { + return + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql("""DROP RESOURCE '${resourceName}'""") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE RESOURCE IF NOT EXISTS "${resourceName}" + PROPERTIES( + "type" = "s3", + "AWS_ENDPOINT" = "bj.s3.comaaaa", + "AWS_REGION" = "bj", + "AWS_ROOT_PATH" = "path/to/rootaaaa", + "AWS_ACCESS_KEY" = "bbba", + "AWS_SECRET_KEY" = "aaaa", + "AWS_MAX_CONNECTIONS" = "50", + "AWS_REQUEST_TIMEOUT_MS" = "3000", + "AWS_CONNECTION_TIMEOUT_MS" = "1000", + "AWS_BUCKET" = "test-bucket", + "s3_validity_check" = "false" + );""" + exception "denied" + } + test { + sql """ALTER RESOURCE '${resourceName}' PROPERTIES ("s3.connection.maximum" = "100");""" + exception "denied" + } + + def res = sql """SHOW RESOURCES WHERE NAME = '${resourceName}'""" + assertTrue(res.size() == 0) + + test { + sql """DROP RESOURCE '${resourceName}'""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE RESOURCE IF NOT EXISTS "${resourceName}" + PROPERTIES( + "type" = "s3", + "AWS_ENDPOINT" = "bj.s3.comaaaa", + "AWS_REGION" = "bj", + "AWS_ROOT_PATH" = "path/to/rootaaaa", + "AWS_ACCESS_KEY" = "bbba", + "AWS_SECRET_KEY" = "aaaa", + "AWS_MAX_CONNECTIONS" = "50", + "AWS_REQUEST_TIMEOUT_MS" = "3000", + "AWS_CONNECTION_TIMEOUT_MS" = "1000", + "AWS_BUCKET" = "test-bucket", + "s3_validity_check" = "false" + );""" + def res = sql """SHOW RESOURCES WHERE NAME = '${resourceName}'""" + assertTrue(res.size() > 0) + sql """ALTER RESOURCE '${resourceName}' PROPERTIES ("s3.connection.maximum" = "100");""" + sql """DROP RESOURCE '${resourceName}'""" + res = sql """SHOW RESOURCES WHERE NAME = '${resourceName}'""" + assertTrue(res.size() == 0) + } + + try_sql("""DROP RESOURCE '${resourceName}'""") + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_restore_auth.groovy b/regression-test/suites/auth_call/test_ddl_restore_auth.groovy new file mode 100644 index 00000000000000..91b7c1378fd9a0 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_restore_auth.groovy @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import java.util.UUID + +suite("test_ddl_restore_auth","p0,auth_call") { + UUID uuid = UUID.randomUUID() + String randomValue = uuid.toString() + int hashCode = randomValue.hashCode() + hashCode = hashCode > 0 ? hashCode : hashCode * (-1) + + def waitingBackupTaskFinished = { def curDbName -> + Thread.sleep(2000) + String showTasks = "SHOW BACKUP FROM ${curDbName};" + String status = "NULL" + List<List<Object>> result + long startTime = System.currentTimeMillis() + long timeoutTimestamp = startTime + 5 * 60 * 1000 // 5 min + do { + result = sql(showTasks) + logger.info("result: " + result.toString()) + assertTrue(result.size() == 1) + if (!result.isEmpty()) { + status = result.last().get(3) + } + logger.info("The state of ${showTasks} is ${status}") + Thread.sleep(1000); + } while (timeoutTimestamp > System.currentTimeMillis() && (status != 'FINISHED')) + if (status != "FINISHED") { + logger.info("status is not success") + } + assertTrue(status == "FINISHED") + return result[0][1] + } + + String user = 'test_ddl_restore_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_restore_auth_db' + String tableName = 'test_ddl_restore_auth_tb' + String repositoryName = 'test_ddl_restore_auth_rps' + String restoreLabelName = 'test_ddl_restore_auth_restore_label' + hashCode.toString() + + String ak = getS3AK() + String sk = getS3SK() + String endpoint = getS3Endpoint() + String region = getS3Region() + String bucket = context.config.otherConfigs.get("s3BucketName"); + + //cloud-mode + if (isCloudMode()) { + return + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql("""DROP REPOSITORY `${repositoryName}`;""") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """ + insert into ${dbName}.`${tableName}` values + (1, "111"), + (2, "222"), + (3, "333"); + """ + + sql """CREATE REPOSITORY `${repositoryName}` + WITH S3 + ON LOCATION "s3://${bucket}/${repositoryName}" + PROPERTIES + ( + "s3.endpoint" = "http://${endpoint}", + "s3.region" = "${region}", + "s3.access_key" = "${ak}", + "s3.secret_key" = "${sk}", + "delete_if_exists" = "true" + )""" + sql """BACKUP SNAPSHOT ${dbName}.${restoreLabelName} + TO ${repositoryName} + ON (${tableName}) + PROPERTIES ("type" = "full");""" + def real_label = waitingBackupTaskFinished(dbName) + def backup_timestamp = sql """SHOW SNAPSHOT ON ${repositoryName};""" + logger.info("backup_timestamp: " + backup_timestamp) + def real_timestamp + for (int i = 0; i < backup_timestamp.size(); i++) { + if (backup_timestamp[i][0] == real_label) { + real_timestamp = backup_timestamp[i][1] + break + } + } + + sql """truncate table 
${dbName}.`${tableName}`""" + + sql """grant admin_PRIV on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def show_snapshot_res = sql """SHOW SNAPSHOT ON ${repositoryName};""" + logger.info("show_snapshot_res: " + show_snapshot_res) + } + sql """revoke admin_PRIV on *.*.* from ${user}""" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """SHOW SNAPSHOT ON ${repositoryName};""" + exception "denied" + } + test { + sql """RESTORE SNAPSHOT ${dbName}.`${restoreLabelName}` + FROM `${repositoryName}` + ON ( `${tableName}` ) + PROPERTIES + ( + "backup_timestamp"="${real_timestamp}", + "replication_num" = "1" + );""" + exception "denied" + } + test { + sql """CANCEL RESTORE FROM ${dbName};""" + exception "denied" + } + test { + sql """SHOW RESTORE FROM ${dbName};""" + exception "denied" + } + test { + sql """SHOW SYNC JOB FROM `${dbName}`;""" + exception "denied" + } + } + sql """grant LOAD_PRIV on ${dbName}.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """RESTORE SNAPSHOT ${dbName}.`${restoreLabelName}` + FROM `${repositoryName}` + ON ( `${tableName}` ) + PROPERTIES + ( + "backup_timestamp"="${real_timestamp}", + "replication_num" = "1" + );""" + def res = sql """SHOW RESTORE FROM ${dbName};""" + logger.info("res: " + res) + assertTrue(res.size() == 1) + + sql """CANCEL RESTORE FROM ${dbName};""" + res = sql """SHOW RESTORE FROM ${dbName};""" + logger.info("res: " + res) + assertTrue(res[0][4] == "CANCELLED") + + sql """SHOW SYNC JOB FROM `${dbName}`;""" + } + + try_sql("""DROP REPOSITORY `${repositoryName}`;""") + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_row_policy_auth.groovy b/regression-test/suites/auth_call/test_ddl_row_policy_auth.groovy new file mode 100644 index 00000000000000..106e22d20e9024 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_row_policy_auth.groovy @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
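+// Covers CREATE / SHOW / DROP ROW POLICY: all three are denied for a plain user; with grant_priv
+// the user can create and drop the policy but SHOW ROW POLICY is still denied; admin_priv allows all three.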
+ +import org.junit.Assert; + +suite("test_ddl_row_policy_auth","p0,auth_call") { + String user = 'test_ddl_row_policy_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_row_policy_auth_db' + String tableName = 'test_ddl_row_policy_auth_tb' + String rowPolicyName = 'test_ddl_row_policy_auth_rp' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql """DROP ROW POLICY ${rowPolicyName} on ${dbName}.${tableName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + // ddl create + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE ROW POLICY ${rowPolicyName} ON ${dbName}.${tableName} AS RESTRICTIVE TO ${user} USING (id = 1);""" + exception "denied" + } + test { + sql """SHOW ROW POLICY FOR ${user}""" + exception "denied" + } + test { + sql """DROP ROW POLICY ${rowPolicyName} on ${dbName}.${tableName} for ${user}""" + exception "denied" + } + + } + sql """grant grant_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE ROW POLICY ${rowPolicyName} ON ${dbName}.${tableName} AS RESTRICTIVE TO ${user} USING (id = 1);""" + + test { + sql """SHOW ROW POLICY FOR ${user}""" + exception "denied" + } + + sql """DROP ROW POLICY ${rowPolicyName} on ${dbName}.${tableName} for ${user}""" + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE ROW POLICY ${rowPolicyName} ON ${dbName}.${tableName} AS RESTRICTIVE TO ${user} USING (id = 1);""" + def res = sql """SHOW ROW POLICY FOR ${user}""" + assertTrue(res.size() == 1) + + sql """DROP ROW POLICY ${rowPolicyName} on ${dbName}.${tableName} for ${user}""" + res = sql """SHOW ROW POLICY FOR ${user}""" + assertTrue(res.size() == 0) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_sql_block_rule_auth.groovy b/regression-test/suites/auth_call/test_ddl_sql_block_rule_auth.groovy new file mode 100644 index 00000000000000..f5693686fada17 --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_sql_block_rule_auth.groovy @@ -0,0 +1,102 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
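+// Covers SQL_BLOCK_RULE DDL: CREATE / ALTER / SHOW / DROP are denied for a plain user and succeed
+// with admin_priv; a rule bound to the user blocks its SELECT until the rule is dropped.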
+ +import org.junit.Assert; + +suite("test_ddl_sql_block_rule_auth","p0,auth_call") { + String user = 'test_ddl_sbr_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_sbr_auth_db' + String tableName = 'test_ddl_sbr_auth_tb' + String sqlBlockRuleName = 'test_ddl_sbr_auth_sbr' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql("""DROP SQL_BLOCK_RULE ${sqlBlockRuleName};""") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE SQL_BLOCK_RULE ${sqlBlockRuleName} + PROPERTIES( + "sql"="select \\* from ${dbName}.${tableName}", + "partition_num" = "30", + "global"="false", + "enable"="true" + );""" + exception "denied" + } + test { + sql """ALTER SQL_BLOCK_RULE ${sqlBlockRuleName} PROPERTIES("partition_num" = "10")""" + exception "denied" + } + + test { + sql """SHOW SQL_BLOCK_RULE FOR ${sqlBlockRuleName};""" + exception "denied" + } + + test { + sql """DROP SQL_BLOCK_RULE ${sqlBlockRuleName};""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE SQL_BLOCK_RULE ${sqlBlockRuleName} + PROPERTIES( + "sql"="select \\\\* from ${dbName}\\\\.${tableName}", + "global"="false", + "enable"="true" + );""" + def res = sql """SHOW SQL_BLOCK_RULE FOR ${sqlBlockRuleName};""" + assertTrue(res.size() > 0) + sql """SET PROPERTY FOR '${user}' 'sql_block_rules' = '${sqlBlockRuleName}';""" + test { + sql """select * from ${dbName}.${tableName}""" + exception "sql block rule" + } + sql """ALTER SQL_BLOCK_RULE ${sqlBlockRuleName} PROPERTIES("enable"="true")""" + sql """DROP SQL_BLOCK_RULE ${sqlBlockRuleName};""" + res = sql """SHOW SQL_BLOCK_RULE FOR ${sqlBlockRuleName};""" + assertTrue(res.size() == 0) + sql """select * from ${dbName}.${tableName}""" + } + + try_sql("""DROP SQL_BLOCK_RULE ${sqlBlockRuleName};""") + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_table_auth.groovy b/regression-test/suites/auth_call/test_ddl_table_auth.groovy new file mode 100644 index 00000000000000..4c74699f55fcbf --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_table_auth.groovy @@ -0,0 +1,267 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_table_auth","p0,auth_call") { + String user = 'test_ddl_table_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_table_auth_db' + String tableName = 'test_ddl_table_auth_tb' + String tableNameNew = 'test_ddl_table_auth_tb_new' + String cteLikeDstDb = 'test_ddl_table_cte_like_dst_db' + String cteLikeDstTb = 'test_ddl_table_cte_like_dst_tb' + String cteSelectDstDb = 'test_ddl_table_cte_select_dst_db' + String cteSelectDstTb = 'test_ddl_table_cte_select_dst_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql """drop database if exists ${cteLikeDstDb}""" + try_sql """drop database if exists ${cteSelectDstDb}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + def waitingChangeTaskFinished = { def curDb -> + Thread.sleep(2000) + sql """use ${curDb}""" + String showTasks = "SHOW ALTER TABLE COLUMN order by CreateTime;" + String status = "NULL" + List<List<Object>> result + long startTime = System.currentTimeMillis() + long timeoutTimestamp = startTime + 5 * 60 * 1000 // 5 min + do { + result = sql(showTasks) + logger.info("result: " + result.toString()) + if (!result.isEmpty()) { + status = result.last().get(9) + } + logger.info("The state of ${showTasks} is ${status}") + Thread.sleep(1000); + } while (timeoutTimestamp > System.currentTimeMillis() && (status != 'FINISHED')) + if (status != "FINISHED") { + logger.info("status is not success") + } + Assert.assertEquals("FINISHED", status) + } + + // ddl create + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + exception "denied" + } + def res = sql """show query stats;""" + logger.info("res:" + res) + + test { + sql """SHOW FULL COLUMNS FROM ${dbName}.${tableName};""" + exception "denied" + } + } + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """grant Create_priv on ${dbName}.${tableName} to ${user}""" + sql """drop table ${dbName}.${tableName};""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """use ${dbName}""" + sql """show create table ${tableName}""" + def db_res = sql """show tables;""" + assertTrue(db_res.size() == 1) + + def col_res = sql """SHOW FULL COLUMNS FROM ${dbName}.${tableName};""" + logger.info("col_res: " + col_res) + assertTrue(col_res.size() == 2) + } + + // ddl alter 
+ // user alter + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ALTER table ${tableName} RENAME ${tableNameNew};""" + exception "denied" + } + test { + sql """ALTER TABLE ${dbName}.${tableName} ADD COLUMN new_col INT KEY DEFAULT "0";""" + exception "denied" + } + def res = sql """SHOW ALTER TABLE COLUMN;""" + assertTrue(res.size() == 0) + } + sql """grant ALTER_PRIV on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """ALTER table ${tableName} RENAME ${tableNameNew};""" + + test { + sql """show create table ${tableNameNew}""" + exception "denied" + } + def tb_res = sql """show tables;""" + assertTrue(tb_res.size() == 0) + } + // root alter + sql """use ${dbName}""" + sql """ALTER table ${tableNameNew} RENAME ${tableName};""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """show create table ${tableName}""" + def db_res = sql """show tables;""" + assertTrue(db_res.size() == 1) + } + + // show + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """ALTER TABLE ${tableName} ADD COLUMN new_col INT KEY DEFAULT "0";""" + def res = sql """SHOW ALTER TABLE COLUMN;""" + assertTrue(res.size() == 1) + } + + // dml select + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """select id from ${dbName}.${tableName}""" + exception "denied" + } + } + sql """grant select_priv(id) on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """select id from ${dbName}.${tableName}""" + } + sql """revoke select_priv(id) on ${dbName}.${tableName} from ${user}""" + + // ddl create table like + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create table ${cteLikeDstDb}.${cteLikeDstTb} like ${dbName}.${tableName};""" + exception "denied" + } + } + sql """create database ${cteLikeDstDb}""" + sql """create table ${cteLikeDstDb}.${cteLikeDstTb} like ${dbName}.${tableName};""" + sql """grant Create_priv on ${cteLikeDstDb}.${cteLikeDstTb} to ${user}""" + sql """drop table ${cteLikeDstDb}.${cteLikeDstTb};""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create table ${cteLikeDstDb}.${cteLikeDstTb} like ${dbName}.${tableName};""" + exception "denied" + } + } + sql """grant SELECT_PRIV on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """create table ${cteLikeDstDb}.${cteLikeDstTb} like ${dbName}.${tableName};""" + } + sql """revoke SELECT_PRIV on ${dbName}.${tableName} from ${user}""" + + // ddl create table select + sql """create database ${cteSelectDstDb}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create table ${cteSelectDstDb}.${cteSelectDstTb}(username) PROPERTIES("replication_num" = "1") as select username from ${dbName}.${tableName};""" + exception "denied" + } + } + sql """create table ${cteSelectDstDb}.${cteSelectDstTb}(username) PROPERTIES("replication_num" = "1") as select username from ${dbName}.${tableName};""" + sql """grant Create_priv on ${cteSelectDstDb}.${cteSelectDstTb} to ${user}""" + sql """drop table ${cteSelectDstDb}.${cteSelectDstTb}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create table ${cteSelectDstDb}.${cteSelectDstTb}(username) 
PROPERTIES("replication_num" = "1") as select username from ${dbName}.${tableName};""" + exception "denied" + } + } + sql """create table ${cteSelectDstDb}.${cteSelectDstTb}(username) PROPERTIES("replication_num" = "1") as select username from ${dbName}.${tableName};""" + sql """grant LOAD_PRIV on ${cteSelectDstDb}.${cteSelectDstTb} to ${user}""" + sql """drop table ${cteSelectDstDb}.${cteSelectDstTb}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create table ${cteSelectDstDb}.${cteSelectDstTb}(username) PROPERTIES("replication_num" = "1") as select username from ${dbName}.${tableName};""" + exception "denied" + } + } + sql """grant select_priv(username) on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """create table ${cteSelectDstDb}.${cteSelectDstTb}(username) PROPERTIES("replication_num" = "1") as select username from ${dbName}.${tableName};""" + } + + waitingChangeTaskFinished(dbName) + // ddl truncate + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """use ${dbName}""" + sql """truncate table ${tableName};""" + exception "denied" + } + } + sql """grant LOAD_PRIV on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """truncate table ${tableName};""" + } + + // ddl drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """use ${dbName}""" + sql """drop table ${tableName};""" + exception "denied" + } + } + sql """grant DROP_PRIV on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """drop table ${tableName};""" + def ctl_res = sql """show tables;""" + assertTrue(ctl_res.size() == 0) + } + + + sql """drop database if exists ${dbName}""" + sql """drop database if exists ${cteLikeDstDb}""" + sql """drop database if exists ${cteSelectDstDb}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_view_auth.groovy b/regression-test/suites/auth_call/test_ddl_view_auth.groovy new file mode 100644 index 00000000000000..bf967e54fcb02c --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_view_auth.groovy @@ -0,0 +1,154 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
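+// Covers view DDL/DML: CREATE VIEW needs select_priv on the referenced base-table columns plus
+// create_priv on the view; SHOW VIEW, ALTER VIEW, SELECT and DROP are each gated by the matching privilege on the view.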
+ +import org.junit.Assert; + +suite("test_ddl_view_auth","p0,auth_call") { + String user = 'test_ddl_view_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_view_auth_db' + String tableName = 'test_ddl_view_auth_tb' + String viewName = 'test_ddl_view_auth_view' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """ + INSERT INTO ${dbName}.${tableName} (id, username) + VALUES (1, "111"), + (2, "222"), + (3, "333") + """ + + // ddl create + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE VIEW ${dbName}.${viewName} (k1, v1) + AS + SELECT id as k1, SUM(id) FROM ${dbName}.${tableName} + WHERE id = 1 GROUP BY k1;""" + exception "denied" + } + test { + sql """SHOW VIEW from ${tableName} from ${dbName}""" + exception 'denied' + } + } + sql """grant select_priv(id) on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE VIEW ${dbName}.${viewName} (k1, v1) + AS + SELECT id as k1, SUM(id) FROM ${dbName}.${tableName} + WHERE id = 1 GROUP BY k1;""" + exception 'denied' + } + def res = sql """SHOW VIEW from ${tableName} from ${dbName}""" + assertTrue(res.size() == 0) + } + sql """CREATE VIEW ${dbName}.${viewName} (k1, v1) + AS + SELECT id as k1, SUM(id) FROM ${dbName}.${tableName} + WHERE id = 1 GROUP BY k1;""" + sql """grant Create_priv on ${dbName}.${viewName} to ${user}""" + sql """drop view ${dbName}.${viewName}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE VIEW ${dbName}.${viewName} (k1, v1) + AS + SELECT id as k1, SUM(id) FROM ${dbName}.${tableName} + WHERE id = 1 GROUP BY k1;""" + + def res = sql """SHOW VIEW from ${tableName} from ${dbName}""" + assertTrue(res.size() == 1) + } + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE VIEW ${dbName}.${viewName} (k1, v1) + AS + SELECT username as k1, SUM(id) FROM ${dbName}.${tableName} + WHERE id = 1 GROUP BY k1;""" + exception 'denied' + } + } + + // ddl alter + // user alter + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """alter VIEW ${dbName}.${viewName} (k1, v1) + AS + SELECT id as k1, SUM(id) FROM ${dbName}.${tableName} + WHERE id = 1 GROUP BY k1;""" + exception 'denied' + } + } + sql """grant ALTER_PRIV on ${dbName}.${viewName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """alter VIEW ${dbName}.${viewName} (k1, v1) + AS + SELECT id as k1, SUM(id) FROM ${dbName}.${tableName} + WHERE id = 1 GROUP BY k1;""" + } + + // dml show + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """select * from ${dbName}.${viewName};""" + exception "denied" + } + } + sql """grant select_PRIV on ${dbName}.${viewName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """select * from ${dbName}.${viewName};""" + } + + 
// ddl drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """drop table ${dbName}.${viewName};""" + exception 'denied' + } + } + sql """grant DROP_PRIV on ${dbName}.${viewName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """drop view ${viewName};""" + def ctl_res = sql """show tables;""" + assertTrue(ctl_res.size() == 1) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_ddl_workload_group_auth.groovy b/regression-test/suites/auth_call/test_ddl_workload_group_auth.groovy new file mode 100644 index 00000000000000..7657055d9d4fcb --- /dev/null +++ b/regression-test/suites/auth_call/test_ddl_workload_group_auth.groovy @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_ddl_workload_group_auth","p0,auth_call") { + String user = 'test_ddl_wg_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_ddl_wg_auth_db' + String workloadGroupName = 'test_ddl_wg_auth_wg' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql("""drop workload group if exists ${workloadGroupName};""") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + // ddl create,show,drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """CREATE WORKLOAD GROUP "${workloadGroupName}" + PROPERTIES ( + "cpu_share"="10" + );""" + exception "denied" + } + test { + sql """alter workload group ${workloadGroupName} + properties ( + "cpu_share"="20" + );""" + exception "denied" + } + + def res = sql """show workload groups like '${workloadGroupName}'""" + assertTrue(res.size() == 0) + + test { + sql """drop workload group if exists ${workloadGroupName};""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """CREATE WORKLOAD GROUP "${workloadGroupName}" + PROPERTIES ( + "cpu_share"="10" + );""" + def res = sql """show workload groups like '${workloadGroupName}'""" + assertTrue(res.size() > 0) + sql """alter workload group ${workloadGroupName} + properties ( + "cpu_share"="20" + );""" + sql """drop workload group if exists ${workloadGroupName};""" + res = sql """show workload groups like '${workloadGroupName}'""" + assertTrue(res.size() == 0) + 
} + + try_sql("""drop workload group if exists ${workloadGroupName};""") + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_analyze_auth.groovy b/regression-test/suites/auth_call/test_dml_analyze_auth.groovy new file mode 100644 index 00000000000000..5db5da61f945f0 --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_analyze_auth.groovy @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_dml_analyze_auth","p0,auth_call") { + + String user = 'test_dml_analyze_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_analyze_auth_db' + String tableName = 'test_dml_analyze_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ + analyze table ${dbName}.${tableName} with sync; + """ + exception "denied" + } + test { + sql """show table stats ${dbName}.${tableName};""" + exception "denied" + } + test { + sql """show table status from ${dbName};""" + exception "denied" + } + test { + sql """show column stats ${dbName}.${tableName};""" + exception "denied" + } + } + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """ + analyze table ${dbName}.${tableName} with sync; + """ + def col_stats = sql """show column stats ${dbName}.${tableName};""" + logger.info("col_stats: " + col_stats) + assertTrue(col_stats.size() == 2) + + sql """show table stats ${dbName}.${tableName};""" + } + sql """grant select_priv on ${dbName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """show table status from ${dbName};""" + } + + def res = sql """show column stats ${dbName}.${tableName};""" + logger.info("res: " + res) + assertTrue(res.size() == 2) + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_broker_load_auth.groovy 
b/regression-test/suites/auth_call/test_dml_broker_load_auth.groovy new file mode 100644 index 00000000000000..524513a570c0dd --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_broker_load_auth.groovy @@ -0,0 +1,180 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_dml_broker_load_auth","p0,auth_call") { + + UUID uuid = UUID.randomUUID() + String randomValue = uuid.toString() + int hashCode = randomValue.hashCode() + hashCode = hashCode > 0 ? hashCode : hashCode * (-1) + + String user = 'test_dml_broker_load_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_broker_load_auth_db' + String tableName = 'test_dml_broker_load_auth_tb' + String loadLabelName = 'test_dml_broker_load_auth_label' + hashCode.toString() + + String ak = getS3AK() + String sk = getS3SK() + String endpoint = getS3Endpoint() + String region = getS3Region() + String bucket = getS3BucketName() + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """CREATE TABLE IF NOT EXISTS ${dbName}.${tableName} ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME VARCHAR(25) NOT NULL, + C_ADDRESS VARCHAR(40) NOT NULL, + C_NATIONKEY INTEGER NOT NULL, + C_PHONE CHAR(15) NOT NULL, + C_ACCTBAL DECIMAL(15,2) NOT NULL, + C_MKTSEGMENT CHAR(10) NOT NULL, + C_COMMENT VARCHAR(117) NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 32 + PROPERTIES ( + "replication_num" = "1" + );""" + + sql """use ${dbName}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ + LOAD LABEL ${loadLabelName} ( + DATA INFILE("s3://${bucket}/regression/tpch/sf0.01/customer.csv.gz") + INTO TABLE ${tableName} + COLUMNS TERMINATED BY "|" + (c_custkey, c_name, c_address, c_nationkey, c_phone, c_acctbal, c_mktsegment, c_comment, temp) + ) + WITH S3 ( + "AWS_ACCESS_KEY" = "$ak", + "AWS_SECRET_KEY" = "$sk", + "AWS_ENDPOINT" = "$endpoint", + "AWS_REGION" = "$region", + "compress_type" = "GZ" + ) + properties( + "timeout" = "28800", + "exec_mem_limit" = "8589934592" + ) + """ + exception "denied" + } + + def res = sql """SHOW LOAD FROM ${dbName} WHERE LABEL LIKE '${loadLabelName}'""" + assertTrue(res.size() == 0) + + res = sql """SHOW STREAM LOAD FROM ${dbName} WHERE LABEL = "${loadLabelName}";""" + assertTrue(res.size() == 0) + + test { + 
sql """CLEAN LABEL ${loadLabelName} FROM ${dbName};""" + exception "denied" + } + } + sql """grant load_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName};""" + sql """ + LOAD LABEL ${loadLabelName} ( + DATA INFILE("s3://${bucket}/regression/tpch/sf0.01/customer.csv.gz") + INTO TABLE ${tableName} + COLUMNS TERMINATED BY "|" + (c_custkey, c_name, c_address, c_nationkey, c_phone, c_acctbal, c_mktsegment, c_comment, temp) + ) + WITH S3 ( + "AWS_ACCESS_KEY" = "$ak", + "AWS_SECRET_KEY" = "$sk", + "AWS_ENDPOINT" = "$endpoint", + "AWS_REGION" = "$region", + "compress_type" = "GZ" + ) + properties( + "timeout" = "28800", + "exec_mem_limit" = "8589934592" + ) + """ + + def res = sql """SHOW LOAD FROM ${dbName} WHERE LABEL LIKE '${loadLabelName}'""" + logger.info("res: " + res) + assertTrue(res.size() == 1) + + try { + sql """CANCEL LOAD + FROM ${dbName} + WHERE LABEL = "${loadLabelName}";""" + } catch (Exception e) { + log.info(e.getMessage()) + assertTrue(e.getMessage().contains("does not exist")) + } + + test { + sql """CLEAN LABEL ${loadLabelName} FROM ${dbName};""" + exception "denied" + } + + def warn_res = sql """SHOW LOAD WARNINGS FROM ${dbName} WHERE LABEL = '${loadLabelName}';""" + logger.info("warn_res: " + warn_res) + } + + sql """grant load_priv on ${dbName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """SHOW LOAD FROM ${dbName} WHERE LABEL LIKE '${loadLabelName}'""" + logger.info("res: " + res) + assertTrue(res.size() == 1) + + test { + sql """SHOW TRANSACTION WHERE LABEL = "${loadLabelName}";""" + exception "denied" + } + + try { + sql """CANCEL LOAD + FROM ${dbName} + WHERE LABEL = "${loadLabelName}";""" + } catch (Exception e) { + log.info(e.getMessage()) + assertTrue(e.getMessage().contains("does not exist")) + } + + res = sql """SHOW LOAD FROM ${dbName} WHERE LABEL LIKE '${loadLabelName}'""" + logger.info("res: " + res) + + sql """CLEAN LABEL ${loadLabelName} FROM ${dbName};""" + } + sql """revoke load_priv on ${dbName} from ${user}""" + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_cancel_profile_auth.groovy b/regression-test/suites/auth_call/test_dml_cancel_profile_auth.groovy new file mode 100644 index 00000000000000..82656726e659ce --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_cancel_profile_auth.groovy @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_dml_cancel_profile_auth","p0,auth_call,nonConcurrent") { + + String user = 'test_dml_cancel_profile_auth_user' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ + CLEAN ALL PROFILE; + """ + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """ + CLEAN ALL PROFILE; + """ + } + + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_delete_table_auth.groovy b/regression-test/suites/auth_call/test_dml_delete_table_auth.groovy new file mode 100644 index 00000000000000..405e9c830ebe03 --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_delete_table_auth.groovy @@ -0,0 +1,151 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
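+// Covers DELETE: a single-table DELETE needs both load_priv and select_priv on the target table;
+// the DELETE ... USING form additionally needs select_priv on the target table and on every joined table.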
+ +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_dml_delete_table_auth","p0,auth_call") { + + String user = 'test_dml_delete_table_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_delete_table_auth_db' + String tableName = 'test_dml_delete_table_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """ + insert into ${dbName}.`${tableName}` values + (1, "111"), + (2, "222"), + (3, "333"); + """ + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """DELETE FROM ${dbName}.${tableName} WHERE id = 3;""" + exception "denied" + } + + def del_res = sql """show DELETE from ${dbName}""" + assertTrue(del_res.size() == 0) + } + sql """grant load_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """DELETE FROM ${dbName}.${tableName} WHERE id = 3;""" + exception "denied" + } + def del_res = sql """show DELETE from ${dbName}""" + assertTrue(del_res.size() == 0) + } + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """DELETE FROM ${dbName}.${tableName} WHERE id = 3;""" + def del_res = sql """show DELETE from ${dbName}""" + logger.info("del_res: " + del_res) + assertTrue(del_res.size() == 1) + } + + def res = sql """select count(*) from ${dbName}.${tableName};""" + assertTrue(res[0][0] == 2) + + String tableName1 = 'test_dml_delete_table_auth_tb1' + String tableName2 = 'test_dml_delete_table_auth_tb2' + String tableName3 = 'test_dml_delete_table_auth_tb3' + sql """CREATE TABLE ${dbName}.${tableName1} + (id INT, c1 BIGINT, c2 STRING, c3 DOUBLE, c4 DATE) + UNIQUE KEY (id) + DISTRIBUTED BY HASH (id) + PROPERTIES('replication_num'='1', "function_column.sequence_col" = "c4");""" + sql """CREATE TABLE ${dbName}.${tableName2} + (id INT, c1 BIGINT, c2 STRING, c3 DOUBLE, c4 DATE) + DISTRIBUTED BY HASH (id) + PROPERTIES('replication_num'='1');""" + sql """CREATE TABLE ${dbName}.${tableName3} + (id INT) + DISTRIBUTED BY HASH (id) + PROPERTIES('replication_num'='1');""" + sql """INSERT INTO ${dbName}.${tableName1} VALUES + (1, 1, '1', 1.0, '2000-01-01'), + (2, 2, '2', 2.0, '2000-01-02'), + (3, 3, '3', 3.0, '2000-01-03');""" + sql """INSERT INTO ${dbName}.${tableName2} VALUES + (1, 10, '10', 10.0, '2000-01-10'), + (2, 20, '20', 20.0, '2000-01-20'), + (3, 30, '30', 30.0, '2000-01-30'), + (4, 4, '4', 4.0, '2000-01-04'), + (5, 5, '5', 5.0, '2000-01-05');""" + sql """INSERT INTO ${dbName}.${tableName3} VALUES + (1), + (4), + (5);""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """DELETE FROM ${dbName}.${tableName1} + USING ${dbName}.${tableName2} INNER JOIN ${dbName}.${tableName3} + ON ${dbName}.${tableName2}.id = ${dbName}.${tableName3}.id + WHERE ${dbName}.${tableName1}.id = ${dbName}.${tableName2}.id;""" + exception "denied" + } 
+ } + sql """grant load_priv on ${dbName}.${tableName1} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """DELETE FROM ${dbName}.${tableName1} + USING ${dbName}.${tableName2} INNER JOIN ${dbName}.${tableName3} + ON ${dbName}.${tableName2}.id = ${dbName}.${tableName3}.id + WHERE ${dbName}.${tableName1}.id = ${dbName}.${tableName2}.id;""" + exception "denied" + } + } + sql """grant select_priv on ${dbName}.${tableName1} to ${user}""" + sql """grant select_priv on ${dbName}.${tableName2} to ${user}""" + sql """grant select_priv on ${dbName}.${tableName3} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """DELETE FROM ${dbName}.${tableName1} + USING ${dbName}.${tableName2} INNER JOIN ${dbName}.${tableName3} + ON ${dbName}.${tableName2}.id = ${dbName}.${tableName3}.id + WHERE ${dbName}.${tableName1}.id = ${dbName}.${tableName2}.id;""" + } + res = sql """select count(*) from ${dbName}.${tableName1};""" + assertTrue(res[0][0] == 2) + + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_export_table_auth.groovy b/regression-test/suites/auth_call/test_dml_export_table_auth.groovy new file mode 100644 index 00000000000000..8d13bc4bddb71b --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_export_table_auth.groovy @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_dml_export_table_auth","p0,auth_call") { + + UUID uuid = UUID.randomUUID() + String randomValue = uuid.toString() + int hashCode = randomValue.hashCode() + hashCode = hashCode > 0 ? 
hashCode : hashCode * (-1) + + String user = 'test_dml_export_table_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_export_table_auth_db' + String tableName = 'test_dml_export_table_auth_tb' + String exportLabel = 'test_dml_export_table_auth_label' + hashCode.toString() + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + UNIQUE KEY (id) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """ + insert into ${dbName}.`${tableName}` values + (1, "111"), + (2, "222"), + (3, "333"); + """ + String ak = getS3AK() + String sk = getS3SK() + String endpoint = getS3Endpoint() + String region = getS3Region() + String bucket = context.config.otherConfigs.get("s3BucketName"); + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """EXPORT TABLE ${dbName}.${tableName} TO "s3://${bucket}/test_outfile/exp_${exportLabel}" + PROPERTIES( + "format" = "csv", + "max_file_size" = "2048MB" + ) + WITH s3 ( + "s3.endpoint" = "${endpoint}", + "s3.region" = "${region}", + "s3.secret_key"="${sk}", + "s3.access_key" = "${ak}" + );""" + exception "denied" + } + try { + sql """CANCEL EXPORT + FROM ${dbName} + WHERE STATE = "EXPORTING";""" + } catch (Exception e) { + log.info(e.getMessage()) + assertTrue(e.getMessage().indexOf("denied") == -1) + } + } + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """EXPORT TABLE ${dbName}.${tableName} TO "s3://${bucket}/test_outfile/exp_${exportLabel}" + PROPERTIES( + "format" = "csv", + "max_file_size" = "2048MB" + ) + WITH s3 ( + "s3.endpoint" = "${endpoint}", + "s3.region" = "${region}", + "s3.secret_key"="${sk}", + "s3.access_key" = "${ak}" + );""" + sql """use ${dbName}""" + def res = sql """show export;""" + logger.info("res: " + res) + assertTrue(res.size() == 1) + } + sql """grant select_priv on ${dbName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + def res = sql """show export;""" + logger.info("res: " + res) + assertTrue(res.size() == 1) + res = sql """show grants;""" + logger.info("res:" + res) + try { + sql """CANCEL EXPORT + FROM ${dbName} + WHERE STATE = "EXPORTING";""" + } catch (Exception e) { + log.info(e.getMessage()) + assertTrue(e.getMessage().indexOf("not exist") != -1) + } + + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_insert_auth.groovy b/regression-test/suites/auth_call/test_dml_insert_auth.groovy new file mode 100644 index 00000000000000..6a04281b1c0aeb --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_insert_auth.groovy @@ -0,0 +1,121 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_dml_insert_auth","p0,auth_call") { + + String user = 'test_dml_insert_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_insert_auth_db' + String tableName = 'test_dml_insert_auth_tb' + String srcTableName = 'test_dml_insert_auth_tb_src' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """create table ${dbName}.${srcTableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """ + INSERT INTO ${dbName}.${srcTableName} (id, username) + VALUES (1, "111"), + (2, "222"), + (3, "333") + """ + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def insert_res = sql """SHOW LAST INSERT""" + logger.info("insert_res: " + insert_res) + test { + sql """ + INSERT INTO ${dbName}.${tableName} (id, username) + VALUES (1, "111"), + (2, "222"), + (3, "333") + """ + exception "denied" + } + test { + sql """ + INSERT OVERWRITE table ${dbName}.${tableName} VALUES (4, "444"); + """ + exception "denied" + } + } + sql """grant load_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """ + INSERT INTO ${dbName}.${tableName} (id, username) + VALUES (1, "111"), + (2, "222"), + (3, "333") + """ + def insert_res = sql """SHOW LAST INSERT""" + logger.info("insert_res: " + insert_res) + test { + sql """select count() from ${dbName}.${tableName}""" + exception "denied" + } + } + def rows = sql """select count() from ${dbName}.${tableName}""" + assertTrue(rows[0][0] == 3) + + // insert overwrite + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """ + INSERT OVERWRITE table ${dbName}.${tableName} VALUES (4, "444"); + """ + } + rows = sql """select count() from ${dbName}.${tableName}""" + assertTrue(rows[0][0] == 1) + + sql """grant select_priv on ${dbName}.${srcTableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """INSERT OVERWRITE table ${dbName}.${tableName} SELECT * FROM ${dbName}.${srcTableName};""" + } + rows = sql """select count() from ${dbName}.${tableName}""" + assertTrue(rows[0][0] == 3) + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git 
a/regression-test/suites/auth_call/test_dml_multi_routine_load_auth.groovy b/regression-test/suites/auth_call/test_dml_multi_routine_load_auth.groovy new file mode 100644 index 00000000000000..1d9c6b8395603d --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_multi_routine_load_auth.groovy @@ -0,0 +1,160 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.clients.producer.ProducerConfig + +suite("test_dml_multi_routine_load_auth","p0,auth_call") { + + String user = 'test_dml_multi_routine_load_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_multi_routine_load_auth_db' + String tableName1 = 'test_dml_multi_routine_load_auth_tb1' + String tableName2 = 'test_dml_multi_routine_load_auth_tb2' + String labelName = 'test_dml_multi_routine_load_auth_label' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + String enabled = context.config.otherConfigs.get("enableKafkaTest") + String kafka_port = context.config.otherConfigs.get("kafka_port") + String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") + def kafka_broker = "${externalEnvIp}:${kafka_port}" + if (enabled != null && enabled.equalsIgnoreCase("true")) { + // define kafka + String topic = "zfr_test_dml_multi_routine_load_auth_topic" + def props = new Properties() + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "${kafka_broker}".toString()) + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer") + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer") + def producer = new KafkaProducer<>(props) + def txt = new File("""${context.file.parent}/data/multi_table_csv.csv""").text + def lines = txt.readLines() + lines.each { line -> + logger.info("=====${line}========") + def record = new ProducerRecord<>(topic, null, line) + producer.send(record) + } + + sql """use ${dbName}""" + sql "drop table if exists ${tableName1}" + sql "drop table if exists ${tableName2}" + sql new File("""${context.file.parent}/ddl/${tableName1}.sql""").text + sql new File("""${context.file.parent}/ddl/${tableName2}.sql""").text + + connect(user = user, password = "${pwd}", url = 
context.config.jdbcUrl) { + test { + sql """ + CREATE ROUTINE LOAD ${dbName}.${labelName} + COLUMNS TERMINATED BY "|" + PROPERTIES + ( + "max_batch_interval" = "5", + "max_batch_rows" = "300000", + "max_batch_size" = "209715200" + ) + FROM KAFKA + ( + "kafka_broker_list" = "${externalEnvIp}:${kafka_port}", + "kafka_topic" = "${topic}", + "property.kafka_default_offsets" = "OFFSET_BEGINNING" + ); + """ + exception "denied" + } + } + + sql """grant load_priv on ${dbName}.${tableName1} to ${user}""" + connect(user = user, password = "${pwd}", url = context.config.jdbcUrl) { + test { + sql """ + CREATE ROUTINE LOAD ${dbName}.${labelName} + COLUMNS TERMINATED BY "|" + PROPERTIES + ( + "max_batch_interval" = "5", + "max_batch_rows" = "300000", + "max_batch_size" = "209715200" + ) + FROM KAFKA + ( + "kafka_broker_list" = "${externalEnvIp}:${kafka_port}", + "kafka_topic" = "${topic}", + "property.kafka_default_offsets" = "OFFSET_BEGINNING" + ); + """ + exception "denied" + } + } + sql """grant load_priv on ${dbName}.${tableName2} to ${user}""" + connect(user = user, password = "${pwd}", url = context.config.jdbcUrl) { + test { + sql """ + CREATE ROUTINE LOAD ${dbName}.${labelName} + COLUMNS TERMINATED BY "|" + PROPERTIES + ( + "max_batch_interval" = "5", + "max_batch_rows" = "300000", + "max_batch_size" = "209715200" + ) + FROM KAFKA + ( + "kafka_broker_list" = "${externalEnvIp}:${kafka_port}", + "kafka_topic" = "${topic}", + "property.kafka_default_offsets" = "OFFSET_BEGINNING" + ); + """ + exception "denied" + } + } + sql """grant load_priv on ${dbName}.* to ${user}""" + connect(user = user, password = "${pwd}", url = context.config.jdbcUrl) { + sql """ + CREATE ROUTINE LOAD ${dbName}.${labelName} + COLUMNS TERMINATED BY "|" + PROPERTIES + ( + "max_batch_interval" = "5", + "max_batch_rows" = "300000", + "max_batch_size" = "209715200" + ) + FROM KAFKA + ( + "kafka_broker_list" = "${externalEnvIp}:${kafka_port}", + "kafka_topic" = "${topic}", + "property.kafka_default_offsets" = "OFFSET_BEGINNING" + );""" + } + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_mysql_load_auth.groovy b/regression-test/suites/auth_call/test_dml_mysql_load_auth.groovy new file mode 100644 index 00000000000000..6010b4b8b744d9 --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_mysql_load_auth.groovy @@ -0,0 +1,84 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
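+// Verifies MySQL LOAD DATA LOCAL INFILE: denied without load_priv on the target table,
+// and after the grant the load succeeds and the three rows from the local csv file are inserted.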
+ +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_dml_mysql_load_auth","p0,auth_call") { + + String user = 'test_dml_mysql_load_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_mysql_load_auth_db' + String tableName = 'test_dml_mysql_load_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + sql """use ${dbName}""" + def path_file = "${context.file.parent}/../../data/auth_call/stream_load_data.csv" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ + LOAD DATA LOCAL + INFILE '${path_file}' + INTO TABLE ${dbName}.${tableName} + COLUMNS TERMINATED BY ',' + (a,b) + PROPERTIES ("timeout"="100") + """ + exception "denied" + } + } + sql """grant load_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName};""" + sql """ + LOAD DATA LOCAL + INFILE '${path_file}' + INTO TABLE ${dbName}.${tableName} + COLUMNS TERMINATED BY ',' + (a,b) + PROPERTIES ("timeout"="100") + """ + } + + def rows = sql """select count() from ${dbName}.${tableName}""" + assertTrue(rows[0][0] == 3) + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_outfile_auth.groovy b/regression-test/suites/auth_call/test_dml_outfile_auth.groovy new file mode 100644 index 00000000000000..5318b1dbe31ccf --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_outfile_auth.groovy @@ -0,0 +1,92 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_dml_outfile_auth","p0,auth_call") { + UUID uuid = UUID.randomUUID() + String randomValue = uuid.toString() + int hashCode = randomValue.hashCode() + hashCode = hashCode > 0 ? 
hashCode : hashCode * (-1) + + String user = 'test_dml_outfile_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_outfile_auth_db' + String tableName = 'test_dml_outfile_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + String ak = getS3AK() + String sk = getS3SK() + String s3_endpoint = getS3Endpoint() + String region = getS3Region() + String bucket = context.config.otherConfigs.get("s3BucketName") + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ + SELECT * FROM ${dbName}.${tableName} t ORDER BY id + INTO OUTFILE "s3://${bucket}/outfile/auth/exp_${hashCode}" + FORMAT AS parquet + PROPERTIES ( + "s3.endpoint" = "${s3_endpoint}", + "s3.region" = "${region}", + "s3.secret_key"="${sk}", + "s3.access_key" = "${ak}" + );""" + exception "denied" + } + } + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """ + SELECT * FROM ${dbName}.${tableName} t ORDER BY id + INTO OUTFILE "s3://${bucket}/outfile/auth/exp_" + FORMAT AS parquet + PROPERTIES ( + "s3.endpoint" = "${s3_endpoint}", + "s3.region" = "${region}", + "s3.secret_key"="${sk}", + "s3.access_key" = "${ak}" + );""" + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_routine_load_auth.groovy b/regression-test/suites/auth_call/test_dml_routine_load_auth.groovy new file mode 100644 index 00000000000000..8ee5236960baa7 --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_routine_load_auth.groovy @@ -0,0 +1,153 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.clients.producer.ProducerConfig + +suite("test_dml_routine_load_auth","p0,auth_call") { + + String user = 'test_dml_routine_load_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_routine_load_auth_db' + String tableName = 'test_dml_routine_load_auth_tb' + String labelName = 'test_dml_routine_load_auth_label' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + String enabled = context.config.otherConfigs.get("enableKafkaTest") + String kafka_port = context.config.otherConfigs.get("kafka_port") + String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") + def kafka_broker = "${externalEnvIp}:${kafka_port}" + if (enabled != null && enabled.equalsIgnoreCase("true")) { + // define kafka + String topic = "zfr_test_dml_routine_load_auth_topic" + def props = new Properties() + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "${kafka_broker}".toString()) + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer") + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer") + def producer = new KafkaProducer<>(props) + def filepath = getLoalFilePath "routine_load_data.csv" + def txt = new File("${filepath}").text + def lines = txt.readLines() + lines.each { line -> + logger.info("=====${line}========") + def record = new ProducerRecord<>(topic, null, line) + producer.send(record) + } + sql "drop table if exists ${tableName}" + sql """create table ${dbName}.${tableName}(a int,c double generated always as (abs(a+b)) not null,b int, d int generated always as(c+1)) + DISTRIBUTED BY HASH(a) + PROPERTIES("replication_num" = "1"); + ;""" + + connect(user = user, password = "${pwd}", url = context.config.jdbcUrl) { + test { + sql """ + CREATE ROUTINE LOAD ${dbName}.${labelName} ON ${tableName} + COLUMNS(a,b), + COLUMNS TERMINATED BY "," + FROM KAFKA + ( + "kafka_broker_list" = "${externalEnvIp}:${kafka_port}", + "kafka_topic" = "${topic}", + "property.kafka_default_offsets" = "OFFSET_BEGINNING" + ); + """ + exception "denied" + } + } + + sql """grant load_priv on ${dbName}.${tableName} to ${user}""" + connect(user = user, password = "${pwd}", url = context.config.jdbcUrl) { + sql """ + CREATE ROUTINE LOAD ${dbName}.${labelName} ON ${tableName} + COLUMNS(a,b), + COLUMNS TERMINATED BY "," + FROM KAFKA + ( + "kafka_broker_list" = "${externalEnvIp}:${kafka_port}", + "kafka_topic" = "${topic}", + "property.kafka_default_offsets" = "OFFSET_BEGINNING" + ); + """ + } + sql """revoke load_priv on ${dbName}.${tableName} from ${user}""" + + connect(user = user, password = "${pwd}", url = context.config.jdbcUrl) { + test { + sql """ + ALTER ROUTINE LOAD FOR ${dbName}.${labelName} + PROPERTIES + ( + "desired_concurrent_number" = "1" + ); + """ + exception "denied" + } + test { + sql """PAUSE ROUTINE LOAD FOR ${dbName}.${labelName};""" + exception "denied" + } + test { + sql """RESUME ROUTINE LOAD FOR 
${dbName}.${labelName};""" + exception "denied" + } + test { + sql """STOP ROUTINE LOAD FOR ${dbName}.${labelName};""" + exception "denied" + } + test { + sql """show routine load for ${dbName}.${labelName}""" + exception "no job" + } + } + sql """grant load_priv on ${dbName}.${tableName} to ${user}""" + connect(user = user, password = "${pwd}", url = context.config.jdbcUrl) { + sql """PAUSE ROUTINE LOAD FOR ${dbName}.${labelName};""" + sql """ + ALTER ROUTINE LOAD FOR ${dbName}.${labelName} + PROPERTIES + ( + "desired_concurrent_number" = "1" + ); + """ + sql """RESUME ROUTINE LOAD FOR ${dbName}.${labelName};""" + sql """STOP ROUTINE LOAD FOR ${dbName}.${labelName};""" + sql """use ${dbName};""" + def res = sql """show all routine load""" + assertTrue(res.size() == 1) + } + sql """revoke load_priv on ${dbName}.${tableName} from ${user}""" + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_select_udf_auth.groovy b/regression-test/suites/auth_call/test_dml_select_udf_auth.groovy new file mode 100644 index 00000000000000..72413b3e292ceb --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_select_udf_auth.groovy @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.codehaus.groovy.runtime.IOGroovyMethods + +import java.nio.charset.StandardCharsets +import java.nio.file.Files +import java.nio.file.Paths + +suite("test_dml_select_udf_auth","p0,auth_call") { + + String user = 'test_dml_select_udf_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_select_udf_auth_db' + String tableName = 'test_dml_select_udf_auth_tb' + String udfName = 'test_dml_select_udf_auth_udf' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + def jarPath = """${context.file.parent}/../javaudf_p0/jars/java-udf-case-jar-with-dependencies.jar""" + scp_udf_file_to_all_be(jarPath) + log.info("Jar path: ${jarPath}".toString()) + + sql """ CREATE FUNCTION ${dbName}.${udfName}(string) RETURNS int PROPERTIES ( + "file"="file://${jarPath}", + "symbol"="org.apache.doris.udf.collect.MurmurHash3UDF", + "type"="JAVA_UDF" + ); """ + + sql """ + CREATE TABLE ${dbName}.${tableName} ( + `col_1` varchar(10) NOT NULL + ) + DISTRIBUTED BY HASH(col_1) PROPERTIES("replication_num" = "1"); + """ + sql """ INSERT INTO ${dbName}.${tableName} VALUES ("abc"), ("123"), ("123"); """ + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ SELECT ${dbName}.${udfName}(col_1) as a FROM ${dbName}.${tableName} ORDER BY a; """ + exception "Can not found function" + } + } + sql """grant select_priv on ${dbName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """ SELECT ${dbName}.${udfName}(col_1) as a FROM ${dbName}.${tableName} ORDER BY a; """ + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_stream_load_auth.groovy b/regression-test/suites/auth_call/test_dml_stream_load_auth.groovy new file mode 100644 index 00000000000000..240fdde6f6926d --- /dev/null +++ b/regression-test/suites/auth_call/test_dml_stream_load_auth.groovy @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_dml_stream_load_auth","p0,auth_call") { + String user = 'test_dml_stream_load_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_stream_load_auth_db' + String tableName = 'test_dml_stream_load_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + def write_to_file = { cur_path, content -> + File file = new File(cur_path) + file.write(content) + } + + def jdbcUrl = context.config.jdbcUrl + def urlWithoutSchema = jdbcUrl.substring(jdbcUrl.indexOf("://") + 3) + def sql_ip = urlWithoutSchema.substring(0, urlWithoutSchema.indexOf(":")) + String feHttpAddress = context.config.feHttpAddress + def http_port = feHttpAddress.substring(feHttpAddress.indexOf(":") + 1) + + def path_file = "${context.file.parent}/../../data/auth_call/stream_load_data.csv" + def load_path = "${context.file.parent}/../../data/auth_call/stream_load_cm.sh" + def cm = """curl --location-trusted -u ${user}:${pwd} -H "column_separator:," -T ${path_file} http://${sql_ip}:${http_port}/api/${dbName}/${tableName}/_stream_load""" + logger.info("cm: " + cm) + write_to_file(load_path, cm) + cm = "bash " + load_path + logger.info("cm:" + cm) + + + def proc = cm.execute() + def sout = new StringBuilder(), serr = new StringBuilder() + proc.consumeProcessOutput(sout, serr) + proc.waitForOrKill(7200000) + logger.info("std out: " + sout + "std err: " + serr) + assertTrue(sout.toString().indexOf("FAILED") != -1) + assertTrue(sout.toString().indexOf("denied") != -1) + + + sql """grant load_priv on ${dbName}.${tableName} to ${user}""" + + proc = cm.execute() + sout = new StringBuilder() + serr = new StringBuilder() + proc.consumeProcessOutput(sout, serr) + proc.waitForOrKill(7200000) + logger.info("std out: " + sout + "std err: " + serr) + assertTrue(sout.toString().indexOf("Success") != -1) + + int pos1 = sout.indexOf("TxnId") + int pos2 = sout.indexOf(",", pos1) + int pos3 = sout.indexOf(":", pos1) + def tsc_id = sout.substring(pos3+2, pos2) + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """SHOW TRANSACTION FROM ${dbName} WHERE ID=${tsc_id};""" + exception "denied" + } + } + + def res = sql """select count() from ${dbName}.${tableName}""" + assertTrue(res[0][0] == 3) + + def stream_res = sql """SHOW STREAM LOAD FROM ${dbName};""" + logger.info("stream_res: " + stream_res) + + sql """grant admin_priv on *.*.* to ${user}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def transaction_res = sql """SHOW TRANSACTION FROM ${dbName} WHERE ID=${tsc_id};""" + assertTrue(transaction_res.size() == 1) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_dml_update_table_auth.groovy b/regression-test/suites/auth_call/test_dml_update_table_auth.groovy new file mode 100644 index 00000000000000..10c9a3fcb80f2a --- 
/dev/null +++ b/regression-test/suites/auth_call/test_dml_update_table_auth.groovy @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_dml_update_table_auth","p0,auth_call") { + + String user = 'test_dml_update_table_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_dml_update_table_auth_db' + String tableName = 'test_dml_update_table_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + UNIQUE KEY (id) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """ + insert into ${dbName}.`${tableName}` values + (1, "111"), + (2, "222"), + (3, "333"); + """ + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """UPDATE ${dbName}.${tableName} SET username = "444" WHERE id=1;""" + exception "denied" + } + } + sql """grant load_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """UPDATE ${dbName}.${tableName} SET username = "444" WHERE id=1;""" + exception "denied" + } + } + sql """grant select_priv(id) on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """UPDATE ${dbName}.${tableName} SET username = "444" WHERE id=1;""" + } + + def res = sql """select count(*) from ${dbName}.${tableName};""" + logger.info("res: " + res) + assertTrue(res[0][0] == 3) + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_hive_base_case_auth.groovy b/regression-test/suites/auth_call/test_hive_base_case_auth.groovy new file mode 100644 index 00000000000000..b1f432ddfb293d --- /dev/null +++ b/regression-test/suites/auth_call/test_hive_base_case_auth.groovy @@ -0,0 +1,200 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_hive_base_case_auth", "p0,auth_call") { + + String user = 'test_hive_base_case_auth_user' + String pwd = 'C123_567p' + String catalogName = 'test_hive_base_case_auth_catalog' + String dbName = 'test_hive_base_case_auth_db' + String tableName = 'test_hive_base_case_auth_tb' + String tableNameNew = 'test_hive_base_case_auth_tb_new' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + String enabled = context.config.otherConfigs.get("enableHiveTest") + if (enabled == null || !enabled.equalsIgnoreCase("true")) { + logger.info("disable Hive test.") + return; + } + + for (String hivePrefix : ["hive2", "hive3"]) { + setHivePrefix(hivePrefix) + + String hms_port = context.config.otherConfigs.get(hivePrefix + "HmsPort") + String hdfs_port = context.config.otherConfigs.get(hivePrefix + "HdfsPort") + String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") + + try_sql("DROP USER ${user}") + try_sql """drop catalog if exists ${catalogName}""" + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + // create catalog + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create catalog if not exists ${catalogName} properties ( + 'type'='hms' + );""" + exception "denied" + } + def ctl_res = sql """show catalogs;""" + assertTrue(ctl_res.size() == 1) + } + sql """create catalog if not exists ${catalogName} properties ( + 'type'='hms' + );""" + sql """grant Create_priv on ${catalogName}.*.* to ${user}""" + try_sql """drop catalog if exists ${catalogName}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """create catalog if not exists ${catalogName} properties ( + 'type'='hms', + 'hive.metastore.uris' = 'thrift://${externalEnvIp}:${hms_port}', + 'fs.defaultFS' = 'hdfs://${externalEnvIp}:${hdfs_port}', + 'use_meta_cache' = 'true' + );""" + sql """show create catalog ${catalogName}""" + def ctl_res = sql """show catalogs;""" + assertTrue(ctl_res.size() == 2) + } + sql """revoke Create_priv on ${catalogName}.*.* from ${user}""" + + // create database + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create database ${catalogName}.${dbName};""" + exception "denied" + } + } + sql """create database ${catalogName}.${dbName};""" + sql """grant Create_priv on ${catalogName}.${dbName}.* to ${user}""" + sql """drop table if exists ${catalogName}.${dbName}.${tableName};""" + sql """drop database ${catalogName}.${dbName};""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """create database ${catalogName}.${dbName};""" + } + sql """revoke Create_priv on ${catalogName}.${dbName}.* from ${user}""" + + // create table + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """create table ${catalogName}.${dbName}.${tableName} ( + id BIGINT, 
+ username VARCHAR(20) + ) ENGINE=hive + PROPERTIES ( + 'file_format'='parquet' + );""" + exception "denied" + } + } + sql """create table ${catalogName}.${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) ENGINE=hive + PROPERTIES ( + 'file_format'='parquet' + );""" + sql """grant Create_priv on ${catalogName}.${dbName}.${tableName} to ${user}""" + sql """drop table ${catalogName}.${dbName}.${tableName}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """create table ${catalogName}.${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) ENGINE=hive + PROPERTIES ( + 'file_format'='parquet' + );""" + sql """switch ${catalogName}""" + sql """use ${dbName}""" + sql """show create table ${tableName}""" + def db_res = sql """show tables;""" + assertTrue(db_res.size() == 1) + } + sql """revoke Create_priv on ${catalogName}.${dbName}.${tableName} from ${user}""" + + // load + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """ + insert into ${catalogName}.${dbName}.${tableName} values + (1, "111"), + (2, "222"); + """ + exception "denied" + } + } + sql """grant LOAD_PRIV on ${catalogName}.${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """ + insert into ${catalogName}.${dbName}.${tableName} values + (1, "111"), + (2, "222"); + """ + } + sql """revoke LOAD_PRIV on ${catalogName}.${dbName}.${tableName} from ${user}""" + + // alter +// connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { +// test { +// sql """ALTER table ${catalogName}.${dbName}.${tableName} RENAME ${tableNameNew};""" +// exception "denied" +// } +// } +// sql """grant ALTER_PRIV on ${catalogName}.${dbName}.${tableName} to ${user}""" +// connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { +// sql """ALTER table ${catalogName}.${dbName}.${tableName} RENAME ${tableNameNew};""" +// } +// sql """revoke ALTER_PRIV on ${catalogName}.${dbName}.${tableName} from ${user}""" +// sql """ALTER table ${catalogName}.${dbName}.${tableNameNew} RENAME ${tableName};""" + + // drop + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """drop catalog ${catalogName}""" + exception "denied" + } + test { + sql """drop database ${catalogName}.${dbName}""" + exception "denied" + } + test { + sql """drop table ${catalogName}.${dbName}.${tableName}""" + exception "denied" + } + } + sql """grant DROP_PRIV on ${catalogName}.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """drop table ${catalogName}.${dbName}.${tableName}""" + sql """drop database ${catalogName}.${dbName}""" + sql """drop catalog ${catalogName}""" + } + + sql """drop catalog if exists ${catalogName}""" + try_sql("DROP USER ${user}") + } + + + +} diff --git a/regression-test/suites/auth_call/test_show_backend_auth.groovy b/regression-test/suites/auth_call/test_show_backend_auth.groovy new file mode 100644 index 00000000000000..16df2a6c10b398 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_backend_auth.groovy @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_show_backend_auth","p0,auth_call") { + String user = 'test_show_backend_auth_user' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """SHOW BACKENDS""" + exception "denied" + } + } + sql """grant node_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """SHOW BACKENDS""" + assertTrue(res.size() > 0) + } + + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_broker_auth.groovy b/regression-test/suites/auth_call/test_show_broker_auth.groovy new file mode 100644 index 00000000000000..6ce1c4b361ac66 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_broker_auth.groovy @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_broker_auth","p0,auth_call") { + String user = 'test_show_broker_auth_user' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """SHOW BROKER;""" + exception "denied" + } + } + sql """grant node_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """SHOW BROKER;""" + } + + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_charset_auth.groovy b/regression-test/suites/auth_call/test_show_charset_auth.groovy new file mode 100644 index 00000000000000..803810dc204ab2 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_charset_auth.groovy @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_no_auth","p0,auth_call") { + String user = 'test_show_charset_auth_user' + String user1 = 'test_show_charset_auth_user1' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql("DROP USER ${user1}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """CREATE USER '${user1}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """SHOW CHARSET""" + sql """SHOW DATA TYPES""" + sql """SHOW ENGINES""" + sql """show collation;""" + sql """show variables;""" + sql """SHOW PROPERTY;""" + def res1 = sql """SHOW PROCESSLIST""" + logger.info("res1: " + res1) + assertTrue(res1.size() >= 1) + + test { + sql """show PROPERTY for ${user1}""" + exception "denied" + } + test { + sql """SHOW TRASH;""" + exception "denied" + } + } + sql """grant grant_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """show PROPERTY for ${user1}""" + logger.info("res: " + res) + assertTrue(res.size() > 0) + + def res1 = sql """SHOW PROCESSLIST""" + logger.info("res1: " + res1) + assertTrue(res1.size() == 1) + } + sql """revoke grant_priv on *.*.* from ${user}""" + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """SHOW TRASH;""" + logger.info("res: " + res) + assertTrue(res.size() >= 1) + } + + try_sql("DROP USER ${user}") + try_sql("DROP USER ${user1}") +} diff --git a/regression-test/suites/auth_call/test_show_convert_light_sc_auth.groovy b/regression-test/suites/auth_call/test_show_convert_light_sc_auth.groovy new file mode 100644 index 00000000000000..c7a765fe9a3599 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_convert_light_sc_auth.groovy @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_convert_light_sc_auth","p0,auth_call") { + String user = 'test_show_convert_light_sc_auth_user' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """SHOW CONVERT_LIGHT_SCHEMA_CHANGE_PROCESS;""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """SHOW CONVERT_LIGHT_SCHEMA_CHANGE_PROCESS;""" + } + + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_create_table_auth.groovy b/regression-test/suites/auth_call/test_show_create_table_auth.groovy new file mode 100644 index 00000000000000..6152806ce6fbb6 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_create_table_auth.groovy @@ -0,0 +1,92 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_create_table_auth","p0,auth_call") { + String user = 'test_show_create_table_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_show_create_table_auth_db' + String tableName = 'test_show_create_table_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """show create table ${dbName}.${tableName}""" + exception "denied" + } + test { + sql """SHOW DATA SKEW FROM ${dbName}.${tableName};""" + exception "denied" + } + } + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """show create table ${tableName}""" + sql """SHOW DATA SKEW FROM ${tableName};""" + } + sql """revoke select_priv on ${dbName}.${tableName} from ${user}""" + + sql """grant create_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """show create table ${tableName}""" + sql """SHOW DATA SKEW FROM ${tableName};""" + } + sql """revoke create_priv on ${dbName}.${tableName} from ${user}""" + + sql """grant drop_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """show create table ${tableName}""" + sql """SHOW DATA SKEW FROM ${tableName};""" + } + sql """revoke drop_priv on ${dbName}.${tableName} from ${user}""" + + sql """grant alter_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """show create table ${tableName}""" + sql """SHOW DATA SKEW FROM ${tableName};""" + } + sql """revoke alter_priv on ${dbName}.${tableName} from ${user}""" + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_data_auth.groovy b/regression-test/suites/auth_call/test_show_data_auth.groovy new file mode 100644 index 00000000000000..84bf9497f546e8 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_data_auth.groovy @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_show_data_auth","p0,auth_call") { + String user = 'test_show_data_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_show_data_auth_db' + String tableName = 'test_show_data_auth_tb' + String tableName2 = 'test_show_data_auth_tb2' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + sql """create table ${dbName}.${tableName2} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """show data from ${dbName}.${tableName}""" + exception "denied" + } + test { + sql """show data from ${dbName}.${tableName2}""" + exception "denied" + } + sql """SHOW DATA;""" + } + + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use ${dbName}""" + sql """show data from ${dbName}.${tableName}""" + test { + sql """show data from ${dbName}.${tableName2}""" + exception "denied" + } + sql """SHOW DATA;""" + } + sql """revoke select_priv on ${dbName}.${tableName} from ${user}""" + + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """SHOW DATA;""" + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_database_id_auth.groovy b/regression-test/suites/auth_call/test_show_database_id_auth.groovy new file mode 100644 index 00000000000000..0a0d44be07476d --- /dev/null +++ b/regression-test/suites/auth_call/test_show_database_id_auth.groovy @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_database_id_auth","p0,auth_call") { + String user = 'test_show_database_id_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_show_database_id_auth_db' + String tableName = 'test_show_database_id_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """show database 1001""" + exception "denied" + } + test { + sql """show table 1001""" + exception "denied" + } + test { + sql """SHOW CATALOG RECYCLE BIN;""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """show database 1001""" + sql """show table 1001""" + sql """SHOW CATALOG RECYCLE BIN;""" + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_dynamic_table_auth.groovy b/regression-test/suites/auth_call/test_show_dynamic_table_auth.groovy new file mode 100644 index 00000000000000..e144d87749c193 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_dynamic_table_auth.groovy @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_dynamic_table_auth","p0,auth_call") { + String user = 'test_show_dynamic_table_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_show_dynamic_table_auth_db' + String tableName = 'test_show_dynamic_table_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + PARTITION BY RANGE(id) () + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "MONTH", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "8", + "dynamic_partition.start_day_of_month" = "3" + );""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """SHOW DYNAMIC PARTITION TABLES from ${dbName};""" + assertTrue(res.size() == 0) + } + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """SHOW DYNAMIC PARTITION TABLES from ${dbName};""" + assertTrue(res.size() == 1) + } + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_frontend_auth.groovy b/regression-test/suites/auth_call/test_show_frontend_auth.groovy new file mode 100644 index 00000000000000..2844ee992c2a98 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_frontend_auth.groovy @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_frontend_auth","p0,auth_call") { + String user = 'test_show_frontend_auth_user' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """SHOW frontends""" + exception "denied" + } + test { + sql """show frontends disks""" + exception "denied" + } + } + sql """grant node_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """SHOW frontends""" + assertTrue(res.size() > 0) + + sql """show frontends disks""" + } + + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_grant_auth.groovy b/regression-test/suites/auth_call/test_show_grant_auth.groovy new file mode 100644 index 00000000000000..5c6c3f069e32bf --- /dev/null +++ b/regression-test/suites/auth_call/test_show_grant_auth.groovy @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_show_grant_auth","p0,auth_call") { + String user = 'test_show_grant_auth_user' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """show grants;""" + test { + sql """show all grants;""" + exception "denied" + } + + test { + sql """show roles;""" + exception "denied" + } + } + sql """grant grant_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """show grants;""" + sql """show all grants;""" + sql """show roles;""" + } + + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_proc_auth.groovy b/regression-test/suites/auth_call/test_show_proc_auth.groovy new file mode 100644 index 00000000000000..725a9785a7af5c --- /dev/null +++ b/regression-test/suites/auth_call/test_show_proc_auth.groovy @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("test_show_proc_auth","p0,auth_call") { + String user = 'test_show_proc_auth_user' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """SHOW PROC "/";""" + exception "denied" + } + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """SHOW PROC "/";""" + } + + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_query_stats_auth.groovy b/regression-test/suites/auth_call/test_show_query_stats_auth.groovy new file mode 100644 index 00000000000000..1a9e8898fff432 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_query_stats_auth.groovy @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_query_stats_auth","p0,auth_call") { + String user = 'test_show_query_stats_auth_user' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + logger.info("context.config.jdbcUrl: " + context.config.jdbcUrl) + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """show query stats;""" + logger.info("res:" + res) + } + + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_repository_auth.groovy b/regression-test/suites/auth_call/test_show_repository_auth.groovy new file mode 100644 index 00000000000000..5d7c51ed59c3b1 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_repository_auth.groovy @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_repository_auth","p0,auth_call") { + String user = 'test_show_repository_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_show_repository_auth_db' + String repositoryName = 'test_show_repository_auth_rps' + + String ak = getS3AK() + String sk = getS3SK() + String endpoint = getS3Endpoint() + String region = getS3Region() + String bucket = context.config.otherConfigs.get("s3BucketName"); + + //cloud-mode + if (isCloudMode()) { + return + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + try_sql("""DROP REPOSITORY `${repositoryName}`;""") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + + sql """CREATE REPOSITORY `${repositoryName}` + WITH S3 + ON LOCATION "s3://${bucket}/${repositoryName}" + PROPERTIES + ( + "s3.endpoint" = "http://${endpoint}", + "s3.region" = "${region}", + "s3.access_key" = "${ak}", + "s3.secret_key" = "${sk}" + )""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """SHOW CREATE REPOSITORY for ${repositoryName};""" + exception "denied" + } + test { + sql """SHOW REPOSITORIES;""" + exception "denied" + } + + } + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """SHOW CREATE REPOSITORY for ${repositoryName};""" + + def res = sql """SHOW REPOSITORIES;""" + logger.info("res: " + res) + } + + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_tablet_auth.groovy b/regression-test/suites/auth_call/test_show_tablet_auth.groovy new file mode 100644 index 00000000000000..03f8ed58a8ed40 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_tablet_auth.groovy @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_tablet_auth","p0,auth_call") { + String user = 'test_show_tablet_auth_user' + String pwd = 'C123_567p' + String dbName = 'test_show_tablet_auth_db' + String tableName = 'test_show_tablet_auth_tb' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + try_sql """drop database if exists ${dbName}""" + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + sql """create database ${dbName}""" + sql """create table ${dbName}.${tableName} ( + id BIGINT, + username VARCHAR(20) + ) + DISTRIBUTED BY HASH(id) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1" + );""" + sql """ + insert into ${dbName}.`${tableName}` values + (1, "111"), + (2, "222"), + (3, "333"); + """ + + sql """grant select_priv on ${dbName}.${tableName} to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + test { + sql """SHOW TABLETS FROM ${dbName}.${tableName}""" + exception "denied" + } + test { + sql """SHOW TABLET 1000""" + exception "denied" + } + test { + sql """SHOW TABLETS BELONG 1000""" + exception "denied" + } + test { + sql """SHOW PLUGINS""" + exception "denied" + } + } + sql """revoke select_priv on ${dbName}.${tableName} from ${user}""" + + sql """grant admin_priv on *.*.* to ${user}""" + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + def res = sql """SHOW TABLETS FROM ${dbName}.${tableName}""" + + def tablet_res = sql """SHOW TABLET ${res[0][0]}""" + assertTrue(tablet_res.size() == 1) + + tablet_res = sql """SHOW TABLETS BELONG ${res[0][0]}""" + assertTrue(tablet_res.size() == 1) + + sql """SHOW PLUGINS""" + } + + + sql """drop database if exists ${dbName}""" + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/auth_call/test_show_typecast_auth.groovy b/regression-test/suites/auth_call/test_show_typecast_auth.groovy new file mode 100644 index 00000000000000..104e8107e1fb76 --- /dev/null +++ b/regression-test/suites/auth_call/test_show_typecast_auth.groovy @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert; + +suite("test_show_typecast_auth","p0,auth_call") { + String user = 'test_show_typecast_auth_user' + String pwd = 'C123_567p' + + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""; + } + + try_sql("DROP USER ${user}") + sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" + sql """grant select_priv on regression_test to ${user}""" + + connect(user=user, password="${pwd}", url=context.config.jdbcUrl) { + sql """use regression_test""" + sql """show type_cast;""" + } + + try_sql("DROP USER ${user}") +} diff --git a/regression-test/suites/bloom_filter_p0/test_bloom_filter.groovy b/regression-test/suites/bloom_filter_p0/test_bloom_filter.groovy index 89140593d0edf0..23e1c7ed596f62 100644 --- a/regression-test/suites/bloom_filter_p0/test_bloom_filter.groovy +++ b/regression-test/suites/bloom_filter_p0/test_bloom_filter.groovy @@ -115,4 +115,37 @@ suite("test_bloom_filter") { sql """ALTER TABLE ${test_map_tb} SET("bloom_filter_columns" = "k1,m1")""" exception "not supported in bloom filter index" } + + // bloom filter index for json column + def test_json_tb = "test_json_bloom_filter_tb" + sql """DROP TABLE IF EXISTS ${test_json_tb}""" + + test { + sql """CREATE TABLE IF NOT EXISTS ${test_json_tb} ( + `k1` int(11) NOT NULL, + `j1` json NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`k1`) + DISTRIBUTED BY HASH(`k1`) BUCKETS 5 + PROPERTIES ( + "replication_num" = "1", + "bloom_filter_columns" = "k1,j1" + )""" + exception "not supported in bloom filter index" + } + + sql """CREATE TABLE IF NOT EXISTS ${test_json_tb} ( + `k1` int(11) NOT NULL, + `j1` json NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`k1`) + DISTRIBUTED BY HASH(`k1`) BUCKETS 5 + PROPERTIES ( + "replication_num" = "1", + "bloom_filter_columns" = "k1" + )""" + test { + sql """ALTER TABLE ${test_json_tb} SET("bloom_filter_columns" = "k1,j1")""" + exception "not supported in bloom filter index" + } } diff --git a/regression-test/suites/bloom_filter_p0/test_bloom_filter_drop_column.groovy b/regression-test/suites/bloom_filter_p0/test_bloom_filter_drop_column.groovy index a2de2426832854..39c4cdb6141de1 100644 --- a/regression-test/suites/bloom_filter_p0/test_bloom_filter_drop_column.groovy +++ b/regression-test/suites/bloom_filter_p0/test_bloom_filter_drop_column.groovy @@ -50,6 +50,29 @@ suite("test_bloom_filter_drop_column") { } assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout") } + + def assertShowCreateTableWithRetry = { tableName, expectedCondition, maxRetries, waitSeconds -> + int attempt = 0 + while (attempt < maxRetries) { + def res = sql """SHOW CREATE TABLE ${tableName}""" + log.info("Attempt ${attempt + 1}: show table: ${res}") + if (res && res.size() > 0 && res[0][1].contains(expectedCondition)) { + logger.info("Attempt ${attempt + 1}: Condition met.") + return + } else { + logger.warn("Attempt ${attempt + 1}: Condition not met. 
Retrying after ${waitSeconds} second(s)...")
+            }
+            attempt++
+            if (attempt < maxRetries) {
+                sleep(waitSeconds * 1000)
+            }
+        }
+        def finalRes = sql """SHOW CREATE TABLE ${tableName}"""
+        log.info("Final attempt: show table: ${finalRes}")
+        assertTrue(finalRes && finalRes.size() > 0, "SHOW CREATE TABLE returned empty or null")
+        assertTrue(finalRes[0][1].contains(expectedCondition), "expected: \"${expectedCondition}\", actual: ${finalRes[0][1]}")
+    }
+
     sql """INSERT INTO ${table_name} values ('1', '1')"""
 
     qt_select """select * from ${table_name} order by a"""
@@ -58,10 +81,8 @@ suite("test_bloom_filter_drop_column") {
     sql """ALTER TABLE ${table_name} DROP COLUMN c1"""
     wait_for_latest_op_on_table_finish(table_name, timeout)
 
-    // show create table
-    def res = sql """SHOW CREATE TABLE ${table_name}"""
-    log.info("show table:{}", res);
-    assert res[0][1].contains("\"bloom_filter_columns\" = \"\"")
+    // show create table with retry logic
+    assertShowCreateTableWithRetry(table_name, "\"bloom_filter_columns\" = \"\"", 3, 30)
 
     // add new column c1
     sql """ALTER TABLE ${table_name} ADD COLUMN c1 ARRAY"""
diff --git a/regression-test/suites/bloom_filter_p0/test_bloom_filter_hit_with_renamed_column.groovy b/regression-test/suites/bloom_filter_p0/test_bloom_filter_hit_with_renamed_column.groovy
index 46d2e766109174..059d7c95445c0f 100644
--- a/regression-test/suites/bloom_filter_p0/test_bloom_filter_hit_with_renamed_column.groovy
+++ b/regression-test/suites/bloom_filter_p0/test_bloom_filter_hit_with_renamed_column.groovy
@@ -112,7 +112,7 @@ suite("test_bloom_filter_hit_with_renamed_column") {
         sql """ SET enable_profile = true """
         sql """ set parallel_scan_min_rows_per_scanner = 2097152; """
 
-        sql """ select C_COMMENT_NEW from ${tableName} where C_COMMENT_NEW='OK' """
+        //sql """ select C_COMMENT_NEW from ${tableName} where C_COMMENT_NEW='OK' """
 
         // get and check profile with retry logic
         def getProfileIdWithRetry = { query, maxRetries, waitSeconds ->
@@ -122,8 +122,9 @@ suite("test_bloom_filter_hit_with_renamed_column") {
 
             int attempt = 0
             while (attempt < maxRetries) {
+                sql """ ${query} """
                 profiles = httpGet(profileUrl)
-                log.debug("profiles attempt ${attempt + 1}: {}", profiles)
+                log.info("profiles attempt ${attempt + 1}: {}", profiles)
                 if (profiles == null) {
                     log.warn("Failed to fetch profiles on attempt ${attempt + 1}")
                 } else {
@@ -156,7 +157,7 @@ suite("test_bloom_filter_hit_with_renamed_column") {
         }
 
         def query = """select C_COMMENT_NEW from ${tableName} where C_COMMENT_NEW='OK'"""
-        def profileId = getProfileIdWithRetry(query, 3, 1)
+        def profileId = getProfileIdWithRetry(query, 3, 30)
         log.info("profileId:{}", profileId)
         def profileDetail = httpGet("/rest/v1/query_profile/" + profileId)
         log.info("profileDetail:{}", profileDetail)
diff --git a/regression-test/suites/cloud_p0/auth/test_set_default_cluster.groovy b/regression-test/suites/cloud_p0/auth/test_set_default_cluster.groovy
new file mode 100644
index 00000000000000..2dad8283cc692a
--- /dev/null
+++ b/regression-test/suites/cloud_p0/auth/test_set_default_cluster.groovy
@@ -0,0 +1,96 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import org.apache.doris.regression.suite.ClusterOptions
+import org.junit.Assert
+
+suite("test_default_cluster", "docker") {
+    def options = new ClusterOptions()
+    options.cloudMode = true
+
+    def getProperty = { property ->
+        def result = null
+        result = sql_return_maparray """SHOW PROPERTY"""
+        result.find {
+            it.Key == property as String
+        }
+    }
+
+    def setAndCheckDefaultCluster = { validCluster ->
+        sql """set property 'DEFAULT_CLOUD_CLUSTER' = '$validCluster'"""
+        def ret1 = getProperty("default_cloud_cluster")
+        def ret2 = getProperty("default_compute_group")
+        assertEquals(ret1.Value as String, validCluster)
+        assertEquals(ret1.Value as String, ret2.Value as String)
+    }
+
+    docker(options) {
+        // admin role
+        def user1 = "default_user1"
+        // not admin
+        def user2 = "default_user2"
+
+        sql """CREATE USER $user1 IDENTIFIED BY 'Cloud123456' DEFAULT ROLE 'admin'"""
+        sql """CREATE USER $user2 IDENTIFIED BY 'Cloud123456'"""
+        sql """GRANT SELECT_PRIV on *.*.* to ${user2}"""
+
+        def clusters = sql " SHOW CLUSTERS "
+        assertTrue(!clusters.isEmpty())
+        def validCluster = clusters[0][0]
+
+        // admin sets his own default cluster
+        setAndCheckDefaultCluster validCluster
+
+        // user1 (admin role) sets his own default cluster
+        connectInDocker(user = user1, password = 'Cloud123456') {
+            setAndCheckDefaultCluster validCluster
+            def ret = sql """show grants"""
+            log.info("ret = {}", ret)
+        }
+
+        connectInDocker(user = user2, password = 'Cloud123456') {
+            //java.sql.SQLException: errCode = 2, detailMessage = set default compute group failed, user default_user2 has no permission to use compute group 'compute_cluster', please
+            //grant use privilege first , ComputeGroupException: CURRENT_USER_NO_AUTH_TO_USE_COMPUTE_GROUP, you canuse SQL `GRANT USAGE_PRIV ON COMPUTE GROUP {compute_group_name} TO
+            //{user}`
+            try {
+                sql """set property 'DEFAULT_CLOUD_CLUSTER' = '$validCluster'"""
+            } catch (Exception e) {
+                log.info(e.getMessage())
+                assertTrue(e.getMessage().contains("CURRENT_USER_NO_AUTH_TO_USE_COMPUTE_GROUP"))
+            }
+        }
+
+        try {
+            // admin sets it for user2; fails because user2 has not been granted cluster auth yet
+            sql """set property for $user2 'DEFAULT_CLOUD_CLUSTER' = '$validCluster'"""
+        } catch (Exception e) {
+            assertTrue(e.getMessage().contains("CURRENT_USER_NO_AUTH_TO_USE_COMPUTE_GROUP"))
+        }
+        sql """GRANT USAGE_PRIV ON COMPUTE GROUP $validCluster TO $user2"""
+        // succeeds now
+        setAndCheckDefaultCluster validCluster
+        // admin cleans up
+        sql """set property for $user2 'DEFAULT_CLOUD_CLUSTER' = '' """
+
+        connectInDocker(user = user2, password = 'Cloud123456') {
+            // user2 sets his own default cluster
+            setAndCheckDefaultCluster validCluster
+            sql """set property 'DEFAULT_CLOUD_CLUSTER' = '' """
+            def ret = getProperty("default_cloud_cluster")
+            assertEquals(ret.Value as String, "")
+        }
+    }
+}
diff --git a/regression-test/suites/control_p0/test_tablet_io_error.groovy b/regression-test/suites/control_p0/test_tablet_io_error.groovy
new file mode 100644
index 00000000000000..3ce3aa216f7727
--- /dev/null
+++ b/regression-test/suites/control_p0/test_tablet_io_error.groovy
@@ -0,0 +1,111 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.apache.doris.regression.suite.ClusterOptions
+
+import groovy.io.FileType
+
+suite('test_tablet_io_error', 'docker') {
+    def runTest = { isRead, debugPointName, isDropTabletDir ->
+        GetDebugPoint().clearDebugPointsForAllBEs()
+        def tbl = 'tbl_test_tablet_io_error'
+        sql "create table ${tbl} (k int) distributed by hash(k) buckets 1 properties('replication_num' = '2')"
+        sql "insert into ${tbl} values (1)"
+        sql "insert into ${tbl} values (2)"
+        sql "insert into ${tbl} values (3)"
+        def tablets = sql_return_maparray "SHOW TABLETS FROM ${tbl}"
+        assertEquals(2, tablets.size())
+        def tabletId = tablets[0].TabletId.toLong()
+        def injectBe = cluster.getBeByBackendId(tablets[0].BackendId.toLong())
+        assertNotNull(injectBe)
+
+        sql 'set use_fix_replica = 0'
+
+        def tabletOnInjectBe = sql_return_maparray("SHOW TABLETS FROM ${tbl}").find { it.BackendId.toLong() == injectBe.backendId }
+        assertNotNull(tabletOnInjectBe)
+
+        if (debugPointName != null) {
+            GetDebugPoint().enableDebugPoint(injectBe.host, injectBe.httpPort, injectBe.getNodeType(),
+                debugPointName, [ sub_path : "/${tabletId}/" ])
+        }
+
+        if (isDropTabletDir) {
+            // the docker files are owned by root, so their permissions need to be changed first
+            cluster.addRWPermToAllFiles()
+            def dataPath = new File("${injectBe.path}/storage/1.HDD/data")
+            dataPath.eachFile(FileType.DIRECTORIES) { shardPath ->
+                shardPath.eachFile(FileType.DIRECTORIES) { tabletPath ->
+                    try {
+                        if (tabletPath.getName().toLong() == tabletId) {
+                            logger.info("delete tablet path: ${tabletPath}")
+                            tabletPath.deleteDir()
+                        }
+                    } catch (Throwable t) {
+                        logger.warn('delete tablet path exception: ', t)
+                    }
+                }
+            }
+        }
+
+        boolean hasExcept = false
+        try {
+            if (isRead) {
+                sql "select * from ${tbl}"
+            } else {
+                sql "insert into ${tbl} values (1)"
+            }
+        } catch (Throwable e) {
+            logger.info("exec exception: ${e.getMessage()}")
+            hasExcept = true
+        }
+        assertTrue(hasExcept)
+
+        sleep 8000
+
+        // the BE will report the tablet as bad, then the FE will drop it
+        tabletOnInjectBe = sql_return_maparray("SHOW TABLETS FROM ${tbl}").find { it.BackendId.toLong() == injectBe.backendId }
+        assertNull(tabletOnInjectBe)
+        sql "insert into ${tbl} values (1)"
+        sql "select * from ${tbl}"
+
+        sql "drop table ${tbl} force"
+    }
+
+    def options = new ClusterOptions()
+    options.cloudMode = false
+    options.enableDebugPoints()
+    options.feConfigs += [
+        'disable_balance=true',
+        'tablet_checker_interval_ms=500',
+        'schedule_batch_size=1000',
+        'schedule_slot_num_per_hdd_path=1000',
+    ]
+    options.beConfigs += [
+        'report_tablet_interval_seconds=1',
+        'max_tablet_io_errors=1',
+        'disable_page_cache=true',
+    ]
+
+    docker(options) {
+        runTest(true, 'LocalFileReader::read_at_impl.io_error', false)
+        runTest(true, 'LocalFileSystem.create_file_impl.open_file_failed', false)
+        runTest(true, null, true)
+        runTest(false,
'LocalFileWriter::appendv.io_error', false) + runTest(false, 'LocalFileSystem.create_file_impl.open_file_failed', false) + runTest(false, null, true) + } +} diff --git a/regression-test/suites/doc/table-design/auto-increment.md.groovy b/regression-test/suites/doc/table-design/auto-increment.md.groovy new file mode 100644 index 00000000000000..f78d53c1b13af3 --- /dev/null +++ b/regression-test/suites/doc/table-design/auto-increment.md.groovy @@ -0,0 +1,296 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.jupiter.api.Assertions; + +suite("docs/table-design/auto-increment.md") { + try { + multi_sql "create database if not exists demo; use demo;" + sql "drop table if exists `demo`.`tbl`" + sql """ + CREATE TABLE `demo`.`tbl` ( + `id` BIGINT NOT NULL AUTO_INCREMENT, + `value` BIGINT NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql "drop table if exists `demo`.`tbl`" + sql """ + CREATE TABLE `demo`.`tbl` ( + `id` BIGINT NOT NULL AUTO_INCREMENT(100), + `value` BIGINT NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql "drop table if exists `demo`.`tbl`" + sql """ + CREATE TABLE `demo`.`tbl` ( + `uid` BIGINT NOT NULL, + `name` BIGINT NOT NULL, + `id` BIGINT NOT NULL AUTO_INCREMENT, + `value` BIGINT NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`uid`, `name`) + DISTRIBUTED BY HASH(`uid`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql "drop table if exists `demo`.`tbl`" + sql """ + CREATE TABLE `demo`.`tbl` ( + `id` BIGINT NOT NULL AUTO_INCREMENT, + `name` varchar(65533) NOT NULL, + `value` int(11) NOT NULL + ) ENGINE=OLAP + UNIQUE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql "drop table if exists `demo`.`tbl`" + sql """ + CREATE TABLE `demo`.`tbl` ( + `text` varchar(65533) NOT NULL, + `id` BIGINT NOT NULL AUTO_INCREMENT, + ) ENGINE=OLAP + UNIQUE KEY(`text`) + DISTRIBUTED BY HASH(`text`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql "drop table if exists `demo`.`tbl`" + sql """ + CREATE TABLE `demo`.`tbl` ( + `text` varchar(65533) NOT NULL, + `id` BIGINT NOT NULL AUTO_INCREMENT, + ) ENGINE=OLAP + UNIQUE KEY(`text`) + DISTRIBUTED BY HASH(`text`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql "drop table if exists `demo`.`tbl`" + sql """ + CREATE TABLE `demo`.`tbl` ( + `id` BIGINT NOT NULL AUTO_INCREMENT, + `name` varchar(65533) NOT NULL, + `value` 
int(11) NOT NULL + ) ENGINE=OLAP + UNIQUE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + multi_sql """ + insert into tbl(name, value) values("Bob", 10), ("Alice", 20), ("Jack", 30); + select * from tbl order by id; + """ + + cmd """ + curl --location-trusted -u ${context.config.jdbcUser}:${context.config.jdbcPassword} -H "columns:name,value" -H "column_separator:," -T ${context.file.parent}/test_data/test.csv http://${context.config.feHttpAddress}/api/demo/tbl/_stream_load + """ + sql "select * from tbl order by id" + + multi_sql """ + insert into tbl(id, name, value) values(null, "Doris", 60), (null, "Nereids", 70); + select * from tbl order by id; + """ + + sql "drop table if exists `demo`.`tbl2`" + multi_sql """ + CREATE TABLE `demo`.`tbl2` ( + `id` BIGINT NOT NULL AUTO_INCREMENT, + `name` varchar(65533) NOT NULL, + `value` int(11) NOT NULL DEFAULT "0" + ) ENGINE=OLAP + UNIQUE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "enable_unique_key_merge_on_write" = "true" + ); + + insert into tbl2(id, name, value) values(1, "Bob", 10), (2, "Alice", 20), (3, "Jack", 30); + select * from tbl2 order by id; + + set enable_unique_key_partial_update=true; + set enable_insert_strict=false; + insert into tbl2(id, name) values(1, "modified"), (4, "added"); + + select * from tbl2 order by id; + """ + + sql "drop table if exists `demo`.`tbl3`" + multi_sql """ + CREATE TABLE `demo`.`tbl3` ( + `id` BIGINT NOT NULL, + `name` varchar(100) NOT NULL, + `score` BIGINT NOT NULL, + `aid` BIGINT NOT NULL AUTO_INCREMENT + ) ENGINE=OLAP + UNIQUE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "enable_unique_key_merge_on_write" = "true" + ); + + insert into tbl3(id, name, score) values(1, "Doris", 100), (2, "Nereids", 200), (3, "Bob", 300); + select * from tbl3 order by id; + + set enable_unique_key_partial_update=true; + set enable_insert_strict=false; + insert into tbl3(id, score) values(1, 999), (2, 888); + select * from tbl3 order by id; + + insert into tbl3(id, aid) values(1, 1000), (3, 500); + select * from tbl3 order by id; + """ + + sql "drop table if exists `demo`.`dwd_dup_tbl`" + sql """ + CREATE TABLE `demo`.`dwd_dup_tbl` ( + `user_id` varchar(50) NOT NULL, + `dim1` varchar(50) NOT NULL, + `dim2` varchar(50) NOT NULL, + `dim3` varchar(50) NOT NULL, + `dim4` varchar(50) NOT NULL, + `dim5` varchar(50) NOT NULL, + `visit_time` DATE NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`user_id`) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 32 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + sql "drop table if exists `demo`.`dictionary_tbl`" + sql """ + CREATE TABLE `demo`.`dictionary_tbl` ( + `user_id` varchar(50) NOT NULL, + `aid` BIGINT NOT NULL AUTO_INCREMENT + ) ENGINE=OLAP + UNIQUE KEY(`user_id`) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 32 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "enable_unique_key_merge_on_write" = "true" + ) + """ + sql """ + insert into dictionary_tbl(user_id) + select user_id from dwd_dup_tbl group by user_id + """ + sql """ + insert into dictionary_tbl(user_id) + select dwd_dup_tbl.user_id from dwd_dup_tbl left join dictionary_tbl + on dwd_dup_tbl.user_id = dictionary_tbl.user_id where dwd_dup_tbl.visit_time > '2023-12-10' and dictionary_tbl.user_id is NULL + """ + sql "drop table if exists 
`demo`.`dws_agg_tbl`" + sql """ + CREATE TABLE `demo`.`dws_agg_tbl` ( + `dim1` varchar(50) NOT NULL, + `dim3` varchar(50) NOT NULL, + `dim5` varchar(50) NOT NULL, + `user_id_bitmap` BITMAP BITMAP_UNION NOT NULL, + `pv` BIGINT SUM NOT NULL + ) ENGINE=OLAP + AGGREGATE KEY(`dim1`,`dim3`,`dim5`) + DISTRIBUTED BY HASH(`dim1`) BUCKETS 32 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + sql """ + insert into dws_agg_tbl + select dwd_dup_tbl.dim1, dwd_dup_tbl.dim3, dwd_dup_tbl.dim5, BITMAP_UNION(TO_BITMAP(dictionary_tbl.aid)), COUNT(1) + from dwd_dup_tbl INNER JOIN dictionary_tbl on dwd_dup_tbl.user_id = dictionary_tbl.user_id + group by dwd_dup_tbl.dim1, dwd_dup_tbl.dim3, dwd_dup_tbl.dim5 + """ + sql """ + select dim1, dim3, dim5, bitmap_count(user_id_bitmap) as uv, pv from dws_agg_tbl + """ + + sql "drop table if exists `demo`.`records_tbl`" + sql """ + CREATE TABLE `demo`.`records_tbl` ( + `user_id` int(11) NOT NULL COMMENT "", + `name` varchar(26) NOT NULL COMMENT "", + `address` varchar(41) NOT NULL COMMENT "", + `city` varchar(11) NOT NULL COMMENT "", + `nation` varchar(16) NOT NULL COMMENT "", + `region` varchar(13) NOT NULL COMMENT "", + `phone` varchar(16) NOT NULL COMMENT "", + `mktsegment` varchar(11) NOT NULL COMMENT "" + ) DUPLICATE KEY (`user_id`, `name`) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + sql "select * from records_tbl order by user_id, name limit 100" + sql "select * from records_tbl order by user_id, name limit 100 offset 100" + + sql "drop table if exists `demo`.`records_tbl2`" + sql """ + CREATE TABLE `demo`.`records_tbl2` ( + `user_id` int(11) NOT NULL COMMENT "", + `name` varchar(26) NOT NULL COMMENT "", + `address` varchar(41) NOT NULL COMMENT "", + `city` varchar(11) NOT NULL COMMENT "", + `nation` varchar(16) NOT NULL COMMENT "", + `region` varchar(13) NOT NULL COMMENT "", + `phone` varchar(16) NOT NULL COMMENT "", + `mktsegment` varchar(11) NOT NULL COMMENT "", + `unique_value` BIGINT NOT NULL AUTO_INCREMENT + ) DUPLICATE KEY (`user_id`, `name`) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + sql "select * from records_tbl2 order by unique_value limit 100" + sql "select * from records_tbl2 where unique_value > 99 order by unique_value limit 100" + sql """ + select user_id, name, address, city, nation, region, phone, mktsegment + from records_tbl2, (select unique_value as max_value from records_tbl2 order by unique_value limit 1 offset 9999) as previous_data + where records_tbl2.unique_value > previous_data.max_value + order by unique_value limit 100 + """ + } catch (Throwable t) { + Assertions.fail("examples in docs/table-design/auto-increment.md failed to exec, please fix it", t) + } +} diff --git a/regression-test/suites/doc/table-design/best-practice.md.groovy b/regression-test/suites/doc/table-design/best-practice.md.groovy new file mode 100644 index 00000000000000..44ce1c5a5ae8ac --- /dev/null +++ b/regression-test/suites/doc/table-design/best-practice.md.groovy @@ -0,0 +1,193 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.jupiter.api.Assertions; + +suite("docs/table-design/best-practice.md") { + try { + sql "drop table if exists session_data" + sql """ + -- 例如 允许 KEY 重复仅追加新数据的日志数据分析 + CREATE TABLE session_data + ( + visitorid SMALLINT, + sessionid BIGINT, + visittime DATETIME, + city CHAR(20), + province CHAR(20), + ip varchar(32), + brower CHAR(20), + url VARCHAR(1024) + ) + DUPLICATE KEY(visitorid, sessionid) -- 只用于指定排序列,相同的 KEY 行不会合并 + DISTRIBUTED BY HASH(sessionid, visitorid) BUCKETS 10 + PROPERTIES ("replication_num" = "1") + """ + + sql "drop table if exists site_visit" + sql """ + -- 例如 网站流量分析 + CREATE TABLE site_visit + ( + siteid INT, + city SMALLINT, + username VARCHAR(32), + pv BIGINT SUM DEFAULT '0' -- PV 浏览量计算 + ) + AGGREGATE KEY(siteid, city, username) -- 相同的 KEY 行会合并,非 KEY 列会根据指定的聚合函数进行聚合 + DISTRIBUTED BY HASH(siteid) BUCKETS 10 + PROPERTIES ("replication_num" = "1") + """ + + sql "drop table if exists sales_order" + sql """ + -- 例如 订单去重分析 + CREATE TABLE sales_order + ( + orderid BIGINT, + status TINYINT, + username VARCHAR(32), + amount BIGINT DEFAULT '0' + ) + UNIQUE KEY(orderid) -- 相同的 KEY 行会合并 + DISTRIBUTED BY HASH(orderid) BUCKETS 10 + PROPERTIES ("replication_num" = "1") + """ + + sql "drop table if exists sale_detail_bloom" + sql """ + -- 创建示例:通过在建表语句的 PROPERTIES 里加上"bloom_filter_columns"="k1,k2,k3" + -- 例如下面我们对表里的 saler_id,category_id 创建了 BloomFilter 索引。 + CREATE TABLE IF NOT EXISTS sale_detail_bloom ( + sale_date date NOT NULL COMMENT "销售时间", + customer_id int NOT NULL COMMENT "客户编号", + saler_id int NOT NULL COMMENT "销售员", + sku_id int NOT NULL COMMENT "商品编号", + category_id int NOT NULL COMMENT "商品分类", + sale_count int NOT NULL COMMENT "销售数量", + sale_price DECIMAL(12,2) NOT NULL COMMENT "单价", + sale_amt DECIMAL(20,2) COMMENT "销售总金额" + ) + Duplicate KEY(sale_date, customer_id,saler_id,sku_id,category_id) + DISTRIBUTED BY HASH(saler_id) BUCKETS 10 + PROPERTIES ( + "bloom_filter_columns"="saler_id,category_id", + "replication_num" = "1" + ) + """ + + sql "drop table if exists nb_table" + sql """ + -- 创建示例:表创建时指定 + CREATE TABLE `nb_table` ( + `siteid` int(11) NULL DEFAULT "10" COMMENT "", + `citycode` smallint(6) NULL COMMENT "", + `username` varchar(32) NULL DEFAULT "" COMMENT "", + INDEX idx_ngrambf (`username`) USING NGRAM_BF PROPERTIES("gram_size"="3", "bf_size"="256") COMMENT 'username ngram_bf index' + ) ENGINE=OLAP + AGGREGATE KEY(`siteid`, `citycode`, `username`) COMMENT "OLAP" + DISTRIBUTED BY HASH(`siteid`) BUCKETS 10 + PROPERTIES ( + "replication_num" = "1" + ) + -- PROPERTIES("gram_size"="3", "bf_size"="256"),分别表示 gram 的个数和 bloom filter 的字节数。 + -- gram 的个数跟实际查询场景相关,通常设置为大部分查询字符串的长度,bloom filter 字节数,可以通过测试得出,通常越大过滤效果越好,可以从 256 开始进行验证测试看看效果。当然字节数越大也会带来索引存储、内存 cost 上升。 + -- 如果数据基数比较高,字节数可以不用设置过大,如果基数不是很高,可以通过增加字节数来提升过滤效果。 + """ + + multi_sql """ + drop table if exists tbl_unique_merge_on_write; + drop table if exists tbl_unique_merge_on_write_p; + """ + multi_sql """ + -- 以 Unique 模型的 Merge-on-Write 表为例 + -- Unique 模型的写时合并实现,与聚合模型就是完全不同的两种模型了,查询性能更接近于 duplicate 模型, + -- 在有主键约束需求的场景上相比聚合模型有较大的查询性能优势,尤其是在聚合查询以及需要用索引过滤大量数据的查询中。 + + -- 非分区表 + 
CREATE TABLE IF NOT EXISTS tbl_unique_merge_on_write + ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `username` VARCHAR(50) NOT NULL COMMENT "用户昵称", + `register_time` DATE COMMENT "用户注册时间", + `city` VARCHAR(20) COMMENT "用户所在城市", + `age` SMALLINT COMMENT "用户年龄", + `sex` TINYINT COMMENT "用户性别", + `phone` LARGEINT COMMENT "用户电话", + `address` VARCHAR(500) COMMENT "用户地址" + ) + UNIQUE KEY(`user_id`, `username`) + -- 3-5G 的数据量 + DISTRIBUTED BY HASH(`user_id`) BUCKETS 10 + PROPERTIES ( + -- 在 1.2.0 版本中,作为一个新的 feature,写时合并默认关闭,用户可以通过添加下面的 property 来开启 + "enable_unique_key_merge_on_write" = "true" , + "replication_num" = "1" + ); + + -- 分区表 + CREATE TABLE IF NOT EXISTS tbl_unique_merge_on_write_p + ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `username` VARCHAR(50) NOT NULL COMMENT "用户昵称", + `register_time` DATE COMMENT "用户注册时间", + `city` VARCHAR(20) COMMENT "用户所在城市", + `age` SMALLINT COMMENT "用户年龄", + `sex` TINYINT COMMENT "用户性别", + `phone` LARGEINT COMMENT "用户电话", + `address` VARCHAR(500) COMMENT "用户地址" + ) + UNIQUE KEY(`user_id`, `username`, `register_time`) + PARTITION BY RANGE(`register_time`) ( + PARTITION p00010101_1899 VALUES [('0001-01-01'), ('1900-01-01')), + PARTITION p19000101 VALUES [('1900-01-01'), ('1900-01-02')), + PARTITION p19000102 VALUES [('1900-01-02'), ('1900-01-03')), + PARTITION p19000103 VALUES [('1900-01-03'), ('1900-01-04')), + PARTITION p19000104_1999 VALUES [('1900-01-04'), ('2000-01-01')), + FROM ("2000-01-01") TO ("2022-01-01") INTERVAL 1 YEAR, + PARTITION p30001231 VALUES [('3000-12-31'), ('3001-01-01')), + PARTITION p99991231 VALUES [('9999-12-31'), (MAXVALUE)) + ) + -- 默认 3-5G 的数据量 + DISTRIBUTED BY HASH(`user_id`) BUCKETS 10 + PROPERTIES ( + -- 在 1.2.0 版本中,作为一个新的 feature,写时合并默认关闭,用户可以通过添加下面的 property 来开启 + "enable_unique_key_merge_on_write" = "true", + -- 动态分区调度的单位。可指定为 HOUR、DAY、WEEK、MONTH、YEAR。分别表示按小时、按天、按星期、按月、按年进行分区创建或删除。 + "dynamic_partition.time_unit" = "MONTH", + -- 动态分区的起始偏移,为负数。根据 time_unit 属性的不同,以当天(星期/月)为基准,分区范围在此偏移之前的分区将会被删除(TTL)。如果不填写,则默认为 -2147483648,即不删除历史分区。 + "dynamic_partition.start" = "-3000", + -- 动态分区的结束偏移,为正数。根据 time_unit 属性的不同,以当天(星期/月)为基准,提前创建对应范围的分区。 + "dynamic_partition.end" = "10", + -- 动态创建的分区名前缀(必选)。 + "dynamic_partition.prefix" = "p", + -- 动态创建的分区所对应的分桶数量。 + "dynamic_partition.buckets" = "10", + "dynamic_partition.enable" = "true", + -- 动态创建的分区所对应的副本数量,如果不填写,则默认为该表创建时指定的副本数量 3。 + "dynamic_partition.replication_num" = "1", + "replication_num" = "1" + ); + + -- 分区创建查看 + -- 实际创建的分区数需要结合 dynamic_partition.start、end 以及 PARTITION BY RANGE 的设置共同决定 + show partitions from tbl_unique_merge_on_write_p; + """ + } catch (Throwable t) { + Assertions.fail("examples in docs/table-design/best-practice.md failed to exec, please fix it", t) + } +} diff --git a/regression-test/suites/doc/table-design/data-model/aggregate.md.groovy b/regression-test/suites/doc/table-design/data-model/aggregate.md.groovy new file mode 100644 index 00000000000000..cc6cc576e974e3 --- /dev/null +++ b/regression-test/suites/doc/table-design/data-model/aggregate.md.groovy @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.jupiter.api.Assertions; + +suite("docs/table-design/data-model/aggregate.md") { + try { + multi_sql """ + CREATE TABLE IF NOT EXISTS example_tbl_agg1 + ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `date` DATE NOT NULL COMMENT "数据灌入日期时间", + `city` VARCHAR(20) COMMENT "用户所在城市", + `age` SMALLINT COMMENT "用户年龄", + `sex` TINYINT COMMENT "用户性别", + `last_visit_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间", + `cost` BIGINT SUM DEFAULT "0" COMMENT "用户总消费", + `max_dwell_time` INT MAX DEFAULT "0" COMMENT "用户最大停留时间", + `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "用户最小停留时间" + ) + AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + """ + + multi_sql """ + insert into example_tbl_agg1 values + (10000,"2017-10-01","北京",20,0,"2017-10-01 06:00:00",20,10,10), + (10000,"2017-10-01","北京",20,0,"2017-10-01 07:00:00",15,2,2), + (10001,"2017-10-01","北京",30,1,"2017-10-01 17:05:45",2,22,22), + (10002,"2017-10-02","上海",20,1,"2017-10-02 12:59:12",200,5,5), + (10003,"2017-10-02","广州",32,0,"2017-10-02 11:20:00",30,11,11), + (10004,"2017-10-01","深圳",35,0,"2017-10-01 10:00:15",100,3,3), + (10004,"2017-10-03","深圳",35,0,"2017-10-03 10:20:22",11,6,6); + """ + + multi_sql """ + insert into example_tbl_agg1 values + (10004,"2017-10-03","深圳",35,0,"2017-10-03 11:22:00",44,19,19), + (10005,"2017-10-03","长沙",29,1,"2017-10-03 18:11:02",3,1,1); + """ + + sql "drop table if exists aggstate" + multi_sql """ + set enable_agg_state=true; + create table aggstate( + k1 int null, + k2 agg_state generic, + k3 agg_state generic + ) + aggregate key (k1) + distributed BY hash(k1) buckets 3 + properties("replication_num" = "1"); + """ + + multi_sql """ + insert into aggstate values(1,sum_state(1),group_concat_state('a')); + insert into aggstate values(1,sum_state(2),group_concat_state('b')); + insert into aggstate values(1,sum_state(3),group_concat_state('c')); + """ + + multi_sql "insert into aggstate values(2,sum_state(4),group_concat_state('d'));" + multi_sql "select sum_merge(k2) from aggstate;" + multi_sql "select group_concat_merge(k3) from aggstate;" + multi_sql "insert into aggstate select 3,sum_union(k2),group_concat_union(k3) from aggstate;" + multi_sql """ + select sum_merge(k2) , group_concat_merge(k3)from aggstate; + select sum_merge(k2) , group_concat_merge(k3)from aggstate where k1 != 2; + """ + } catch (Throwable t) { + Assertions.fail("examples in docs/table-design/data-model/aggregate.md failed to exec, please fix it", t) + } +} diff --git a/regression-test/suites/doc/table-design/data-model/duplicate.md.groovy b/regression-test/suites/doc/table-design/data-model/duplicate.md.groovy new file mode 100644 index 00000000000000..e8360f480f2145 --- /dev/null +++ b/regression-test/suites/doc/table-design/data-model/duplicate.md.groovy @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.jupiter.api.Assertions; + +suite("docs/table-design/data-model/duplicate.md") { + try { + multi_sql """ + CREATE TABLE IF NOT EXISTS example_tbl_by_default + ( + `timestamp` DATETIME NOT NULL COMMENT "日志时间", + `type` INT NOT NULL COMMENT "日志类型", + `error_code` INT COMMENT "错误码", + `error_msg` VARCHAR(1024) COMMENT "错误详细信息", + `op_id` BIGINT COMMENT "负责人id", + `op_time` DATETIME COMMENT "处理时间" + ) + DISTRIBUTED BY HASH(`type`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + desc example_tbl_by_default; + """ + + multi_sql """ + CREATE TABLE IF NOT EXISTS example_tbl_duplicate_without_keys_by_default + ( + `timestamp` DATETIME NOT NULL COMMENT "日志时间", + `type` INT NOT NULL COMMENT "日志类型", + `error_code` INT COMMENT "错误码", + `error_msg` VARCHAR(1024) COMMENT "错误详细信息", + `op_id` BIGINT COMMENT "负责人id", + `op_time` DATETIME COMMENT "处理时间" + ) + DISTRIBUTED BY HASH(`type`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "enable_duplicate_without_keys_by_default" = "true" + ); + desc example_tbl_duplicate_without_keys_by_default; + """ + + multi_sql """ + CREATE TABLE IF NOT EXISTS example_tbl_duplicate + ( + `timestamp` DATETIME NOT NULL COMMENT "日志时间", + `type` INT NOT NULL COMMENT "日志类型", + `error_code` INT COMMENT "错误码", + `error_msg` VARCHAR(1024) COMMENT "错误详细信息", + `op_id` BIGINT COMMENT "负责人id", + `op_time` DATETIME COMMENT "处理时间" + ) + DUPLICATE KEY(`timestamp`, `type`, `error_code`) + DISTRIBUTED BY HASH(`type`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + desc example_tbl_duplicate; + """ + } catch (Throwable t) { + Assertions.fail("examples in docs/table-design/data-model/duplicate.md failed to exec, please fix it", t) + } +} diff --git a/regression-test/suites/doc/table-design/data-model/unique.md.groovy b/regression-test/suites/doc/table-design/data-model/unique.md.groovy new file mode 100644 index 00000000000000..7c5e7783b065e9 --- /dev/null +++ b/regression-test/suites/doc/table-design/data-model/unique.md.groovy @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.jupiter.api.Assertions; + +suite("docs/table-design/data-model/unique.md") { + try { + multi_sql """ + CREATE TABLE IF NOT EXISTS example_tbl_unique + ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `username` VARCHAR(50) NOT NULL COMMENT "用户昵称", + `city` VARCHAR(20) COMMENT "用户所在城市", + `age` SMALLINT COMMENT "用户年龄", + `sex` TINYINT COMMENT "用户性别", + `phone` LARGEINT COMMENT "用户电话", + `address` VARCHAR(500) COMMENT "用户地址", + `register_time` DATETIME COMMENT "用户注册时间" + ) + UNIQUE KEY(`user_id`, `username`) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + """ + + multi_sql """ + CREATE TABLE IF NOT EXISTS example_tbl_unique_merge_on_write + ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `username` VARCHAR(50) NOT NULL COMMENT "用户昵称", + `city` VARCHAR(20) COMMENT "用户所在城市", + `age` SMALLINT COMMENT "用户年龄", + `sex` TINYINT COMMENT "用户性别", + `phone` LARGEINT COMMENT "用户电话", + `address` VARCHAR(500) COMMENT "用户地址", + `register_time` DATETIME COMMENT "用户注册时间" + ) + UNIQUE KEY(`user_id`, `username`) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "enable_unique_key_merge_on_write" = "true" + ); + """ + } catch (Throwable t) { + Assertions.fail("examples in docs/table-design/data-model/unique.md failed to exec, please fix it", t) + } +} diff --git a/regression-test/suites/doc/table-design/data-partition.md.groovy b/regression-test/suites/doc/table-design/data-partition.md.groovy new file mode 100644 index 00000000000000..ac81c6d8dbe059 --- /dev/null +++ b/regression-test/suites/doc/table-design/data-partition.md.groovy @@ -0,0 +1,311 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.jupiter.api.Assertions; + +suite("docs/table-design/data-partition.md") { + try { + sql "drop table if exists example_range_tbl" + multi_sql """ + -- Range Partition + CREATE TABLE IF NOT EXISTS example_range_tbl + ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `date` DATE NOT NULL COMMENT "数据灌入日期时间", + `timestamp` DATETIME NOT NULL COMMENT "数据灌入的时间戳", + `city` VARCHAR(20) COMMENT "用户所在城市", + `age` SMALLINT COMMENT "用户年龄", + `sex` TINYINT COMMENT "用户性别", + `last_visit_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间", + `cost` BIGINT SUM DEFAULT "0" COMMENT "用户总消费", + `max_dwell_time` INT MAX DEFAULT "0" COMMENT "用户最大停留时间", + `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "用户最小停留时间" + ) + ENGINE=OLAP + AGGREGATE KEY(`user_id`, `date`, `timestamp`, `city`, `age`, `sex`) + PARTITION BY RANGE(`date`) + ( + PARTITION `p201701` VALUES LESS THAN ("2017-02-01"), + PARTITION `p201702` VALUES LESS THAN ("2017-03-01"), + PARTITION `p201703` VALUES LESS THAN ("2017-04-01"), + PARTITION `p2018` VALUES [("2018-01-01"), ("2019-01-01")) + ) + DISTRIBUTED BY HASH(`user_id`) BUCKETS 16 + PROPERTIES + ( + "replication_num" = "1" + ); + """ + + sql "show create table example_range_tbl" + sql "show partitions from example_range_tbl" + sql """ALTER TABLE example_range_tbl ADD PARTITION p201704 VALUES LESS THAN("2020-05-01") DISTRIBUTED BY HASH(`user_id`) BUCKETS 5""" + + sql "drop table if exists null_list" + multi_sql """ + create table null_list( + k0 varchar null + ) + partition by list (k0) + ( + PARTITION pX values in ((NULL)) + ) + DISTRIBUTED BY HASH(`k0`) BUCKETS 1 + properties("replication_num" = "1"); + insert into null_list values (null); + select * from null_list; + """ + + sql "drop table if exists null_range" + multi_sql """ + create table null_range( + k0 int null + ) + partition by range (k0) + ( + PARTITION p10 values less than (10), + PARTITION p100 values less than (100), + PARTITION pMAX values less than (maxvalue) + ) + DISTRIBUTED BY HASH(`k0`) BUCKETS 1 + properties("replication_num" = "1"); + insert into null_range values (null); + select * from null_range partition(p10); + """ + + sql "drop table if exists null_range2" + sql """ + create table null_range2( + k0 int null + ) + partition by range (k0) + ( + PARTITION p200 values [("100"), ("200")) + ) + DISTRIBUTED BY HASH(`k0`) BUCKETS 1 + properties("replication_num" = "1") + """ + try { + sql " insert into null_range2 values (null) " + Assertions.fail("The SQL above should throw an exception as follows:\n\t\terrCode = 2, detailMessage = Insert has filtered data in strict mode. url: http://127.0.0.1:8040/api/_load_error_log?file=__shard_0/error_log_insert_stmt_b3a6d1f1fac74750-b3bb5d6e92a66da4_b3a6d1f1fac74750_b3bb5d6e92a66da4") + } catch (Exception e) { + assertTrue(e.getMessage().contains("errCode = 2, detailMessage = Insert has filtered data in strict mode. 
url:")) + } + + sql "drop table if exists tbl1" + sql """ + CREATE TABLE tbl1 + ( + k1 DATE + ) + PARTITION BY RANGE(k1) () + DISTRIBUTED BY HASH(k1) + PROPERTIES + ( + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "DAY", + "dynamic_partition.start" = "-7", + "dynamic_partition.end" = "3", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "32", + "replication_num" = "1" + ) + """ + + sql "drop table if exists tbl1" + sql """ + CREATE TABLE tbl1 + ( + k1 DATETIME, + ) + PARTITION BY RANGE(k1) () + DISTRIBUTED BY HASH(k1) + PROPERTIES + ( + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "WEEK", + "dynamic_partition.start" = "-2", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "8", + "replication_num" = "1" + ) + """ + + sql "drop table if exists tbl1" + sql """ + CREATE TABLE tbl1 + ( + k1 DATE + ) + PARTITION BY RANGE(k1) () + DISTRIBUTED BY HASH(k1) + PROPERTIES + ( + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "MONTH", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "8", + "dynamic_partition.start_day_of_month" = "3", + "replication_num" = "1" + ) + """ + + sql "SHOW DYNAMIC PARTITION TABLES" + sql """ ADMIN SET FRONTEND CONFIG ("dynamic_partition_enable" = "true") """ + cmd """ curl --location-trusted -u ${context.config.jdbcUser}:${context.config.jdbcPassword} -XGET http://${context.config.feHttpAddress}/api/_set_config?dynamic_partition_enable=true """ + + sql """ ADMIN SET FRONTEND CONFIG ("dynamic_partition_check_interval_seconds" = "7200") """ + cmd """ curl --location-trusted -u ${context.config.jdbcUser}:${context.config.jdbcPassword} -XGET http://${context.config.feHttpAddress}/api/_set_config?dynamic_partition_check_interval_seconds=432000 """ + + sql "drop table if exists `DAILY_TRADE_VALUE`" + sql """ + CREATE TABLE `DAILY_TRADE_VALUE` + ( + `TRADE_DATE` datev2 NOT NULL COMMENT '交易日期', + `TRADE_ID` varchar(40) NOT NULL COMMENT '交易编号', + ) + UNIQUE KEY(`TRADE_DATE`, `TRADE_ID`) + PARTITION BY RANGE(`TRADE_DATE`) + ( + PARTITION p_2000 VALUES [('2000-01-01'), ('2001-01-01')), + PARTITION p_2001 VALUES [('2001-01-01'), ('2002-01-01')), + PARTITION p_2002 VALUES [('2002-01-01'), ('2003-01-01')), + PARTITION p_2003 VALUES [('2003-01-01'), ('2004-01-01')), + PARTITION p_2004 VALUES [('2004-01-01'), ('2005-01-01')), + PARTITION p_2005 VALUES [('2005-01-01'), ('2006-01-01')), + PARTITION p_2006 VALUES [('2006-01-01'), ('2007-01-01')), + PARTITION p_2007 VALUES [('2007-01-01'), ('2008-01-01')), + PARTITION p_2008 VALUES [('2008-01-01'), ('2009-01-01')), + PARTITION p_2009 VALUES [('2009-01-01'), ('2010-01-01')), + PARTITION p_2010 VALUES [('2010-01-01'), ('2011-01-01')), + PARTITION p_2011 VALUES [('2011-01-01'), ('2012-01-01')), + PARTITION p_2012 VALUES [('2012-01-01'), ('2013-01-01')), + PARTITION p_2013 VALUES [('2013-01-01'), ('2014-01-01')), + PARTITION p_2014 VALUES [('2014-01-01'), ('2015-01-01')), + PARTITION p_2015 VALUES [('2015-01-01'), ('2016-01-01')), + PARTITION p_2016 VALUES [('2016-01-01'), ('2017-01-01')), + PARTITION p_2017 VALUES [('2017-01-01'), ('2018-01-01')), + PARTITION p_2018 VALUES [('2018-01-01'), ('2019-01-01')), + PARTITION p_2019 VALUES [('2019-01-01'), ('2020-01-01')), + PARTITION p_2020 VALUES [('2020-01-01'), ('2021-01-01')), + PARTITION p_2021 VALUES [('2021-01-01'), ('2022-01-01')) + ) + DISTRIBUTED BY HASH(`TRADE_DATE`) BUCKETS 10 + PROPERTIES ( + 
"replication_num" = "1" + ) + """ + + sql "drop table if exists `date_table`" + sql """ + CREATE TABLE `date_table` ( + `TIME_STAMP` datev2 NOT NULL COMMENT '采集日期' + ) ENGINE=OLAP + DUPLICATE KEY(`TIME_STAMP`) + AUTO PARTITION BY RANGE (date_trunc(`TIME_STAMP`, 'month')) + ( + ) + DISTRIBUTED BY HASH(`TIME_STAMP`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql "drop table if exists `str_table`" + sql """ + CREATE TABLE `str_table` ( + `str` varchar not null + ) ENGINE=OLAP + DUPLICATE KEY(`str`) + AUTO PARTITION BY LIST (`str`) + ( + ) + DISTRIBUTED BY HASH(`str`) BUCKETS 10 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql "drop table if exists auto_null_list" + multi_sql """ + create table auto_null_list( + k0 varchar null + ) + auto partition by list (k0) + ( + ) + DISTRIBUTED BY HASH(`k0`) BUCKETS 1 + properties("replication_num" = "1"); + + insert into auto_null_list values (null); + select * from auto_null_list; + select * from auto_null_list partition(pX); + """ + + try { + sql "drop table if exists `range_table_nullable`" + sql """ + CREATE TABLE `range_table_nullable` ( + `k1` INT, + `k2` DATETIMEV2(3), + `k3` DATETIMEV2(6) + ) ENGINE=OLAP + DUPLICATE KEY(`k1`) + AUTO PARTITION BY RANGE (date_trunc(`k2`, 'day')) + ( + ) + DISTRIBUTED BY HASH(`k1`) BUCKETS 16 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + Assertions.fail("The SQL above should throw an exception as follows:\n\t\terrCode = 2, detailMessage = AUTO RANGE PARTITION doesn't support NULL column") + } catch (Exception e) { + assertTrue(e.getMessage().contains("errCode = 2, detailMessage = AUTO RANGE PARTITION doesn't support NULL column")) + } + + sql "drop table if exists `DAILY_TRADE_VALUE`" + sql """ + CREATE TABLE `DAILY_TRADE_VALUE` + ( + `TRADE_DATE` datev2 NOT NULL COMMENT '交易日期', + `TRADE_ID` varchar(40) NOT NULL COMMENT '交易编号', + ) + UNIQUE KEY(`TRADE_DATE`, `TRADE_ID`) + AUTO PARTITION BY RANGE (date_trunc(`TRADE_DATE`, 'year')) + ( + ) + DISTRIBUTED BY HASH(`TRADE_DATE`) BUCKETS 10 + PROPERTIES ( + "replication_num" = "1" + ) + """ + def res1 = sql "show partitions from `DAILY_TRADE_VALUE`" + assertTrue(res1.isEmpty()) + + def res2 = multi_sql """ + insert into `DAILY_TRADE_VALUE` values ('2012-12-13', 1), ('2008-02-03', 2), ('2014-11-11', 3); + show partitions from `DAILY_TRADE_VALUE`; + """ + assertTrue(res2[1].size() == 3) + + } catch (Throwable t) { + Assertions.fail("examples in docs/table-design/data-partition.md failed to exec, please fix it", t) + } +} diff --git a/regression-test/suites/doc/table-design/index/bloomfilter.md.groovy b/regression-test/suites/doc/table-design/index/bloomfilter.md.groovy new file mode 100644 index 00000000000000..7e0b44359d9327 --- /dev/null +++ b/regression-test/suites/doc/table-design/index/bloomfilter.md.groovy @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.jupiter.api.Assertions; + +suite("docs/table-design/index/bloomfilter.md") { + try { + multi_sql """ + CREATE TABLE IF NOT EXISTS sale_detail_bloom ( + sale_date date NOT NULL COMMENT "Sale date", + customer_id int NOT NULL COMMENT "Customer ID", + saler_id int NOT NULL COMMENT "Salesperson", + sku_id int NOT NULL COMMENT "Product ID", + category_id int NOT NULL COMMENT "Product category", + sale_count int NOT NULL COMMENT "Sales quantity", + sale_price DECIMAL(12,2) NOT NULL COMMENT "Unit price", + sale_amt DECIMAL(20,2) COMMENT "Total sales amount" + ) + DUPLICATE KEY(sale_date, customer_id, saler_id, sku_id, category_id) + DISTRIBUTED BY HASH(saler_id) BUCKETS 10 + PROPERTIES ( + "replication_num" = "1", + "bloom_filter_columns"="saler_id,category_id" + ); + """ + } catch (Throwable t) { + Assertions.fail("examples in docs/table-design/index/bloomfilter.md failed to exec, please fix it", t) + } +} diff --git a/regression-test/suites/doc/table-design/index/inverted-index.md.groovy b/regression-test/suites/doc/table-design/index/inverted-index.md.groovy new file mode 100644 index 00000000000000..0359245afdc042 --- /dev/null +++ b/regression-test/suites/doc/table-design/index/inverted-index.md.groovy @@ -0,0 +1,122 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+import org.junit.jupiter.api.Assertions;
+
+suite("docs/table-design/index/inverted-index.md") {
+    def waitUntilSchemaChangeDone = { tbl ->
+        waitForSchemaChangeDone({
+            sql " SHOW ALTER TABLE COLUMN FROM test_inverted_index WHERE TableName='${tbl}' ORDER BY createtime DESC LIMIT 1 "
+        })
+    }
+    try {
+        sql """ SELECT TOKENIZE('武汉长江大桥','"parser"="chinese","parser_mode"="fine_grained"') """
+        sql """ SELECT TOKENIZE('武汉市长江大桥','"parser"="chinese","parser_mode"="fine_grained"') """
+        sql """ SELECT TOKENIZE('武汉市长江大桥','"parser"="chinese","parser_mode"="coarse_grained"') """
+        sql """ SELECT TOKENIZE('I love CHINA','"parser"="english"') """
+        sql """ SELECT TOKENIZE('I love CHINA 我爱我的祖国','"parser"="unicode"') """
+
+        sql "DROP DATABASE IF EXISTS test_inverted_index;"
+        multi_sql """
+            CREATE DATABASE test_inverted_index;
+
+            USE test_inverted_index;
+
+            -- The inverted index idx_comment on the comment column is created together with the table
+            -- USING INVERTED specifies that the index type is an inverted index
+            -- PROPERTIES("parser" = "english") selects the "english" tokenizer; "chinese" (Chinese) and "unicode" (mixed Chinese-English) tokenizers are also supported, and omitting the "parser" property means no tokenization
+
+            CREATE TABLE hackernews_1m
+            (
+                `id` BIGINT,
+                `deleted` TINYINT,
+                `type` String,
+                `author` String,
+                `timestamp` DateTimeV2,
+                `comment` String,
+                `dead` TINYINT,
+                `parent` BIGINT,
+                `poll` BIGINT,
+                `children` Array,
+                `url` String,
+                `score` INT,
+                `title` String,
+                `parts` Array,
+                `descendants` INT,
+                INDEX idx_comment (`comment`) USING INVERTED PROPERTIES("parser" = "english") COMMENT 'inverted index for comment'
+            )
+            DUPLICATE KEY(`id`)
+            DISTRIBUTED BY HASH(`id`) BUCKETS 10
+            PROPERTIES ("replication_num" = "1");
+        """
+
+        sql """ SELECT count() FROM hackernews_1m """
+        sql """ SELECT count() FROM hackernews_1m WHERE comment LIKE '%OLAP%' """
+        sql """ SELECT count() FROM hackernews_1m WHERE comment MATCH_ANY 'OLAP' """
+        multi_sql """
+            SELECT count() FROM hackernews_1m WHERE comment LIKE '%OLTP%';
+            SELECT count() FROM hackernews_1m WHERE comment MATCH_ANY 'OLTP';
+        """
+        multi_sql """
+            SELECT count() FROM hackernews_1m WHERE comment LIKE '%OLAP%' AND comment LIKE '%OLTP%';
+            SELECT count() FROM hackernews_1m WHERE comment MATCH_ALL 'OLAP OLTP';
+        """
+        multi_sql """
+            SELECT count() FROM hackernews_1m WHERE comment LIKE '%OLAP%' OR comment LIKE '%OLTP%';
+            SELECT count() FROM hackernews_1m WHERE comment MATCH_ANY 'OLAP OLTP';
+        """
+        sql """ SELECT count() FROM hackernews_1m WHERE timestamp > '2007-08-23 04:17:00' """
+        sql """ CREATE INDEX idx_timestamp ON hackernews_1m(timestamp) USING INVERTED """
+        waitUntilSchemaChangeDone("hackernews_1m")
+        if (!isCloudMode()) {
+            sql """ BUILD INDEX idx_timestamp ON hackernews_1m """
+        }
+        sql """ SHOW ALTER TABLE COLUMN """
+        sql """ SHOW BUILD INDEX """
+        sql """ SELECT count() FROM hackernews_1m WHERE timestamp > '2007-08-23 04:17:00' """
+
+        multi_sql """
+            SELECT count() FROM hackernews_1m WHERE parent = 11189;
+            ALTER TABLE hackernews_1m ADD INDEX idx_parent(parent) USING INVERTED;
+        """
+
+        waitUntilSchemaChangeDone("hackernews_1m")
+        if (!isCloudMode()) {
+            sql "BUILD INDEX idx_parent ON hackernews_1m;"
+        }
+        multi_sql """
+            SHOW ALTER TABLE COLUMN;
+            SHOW BUILD INDEX;
+            SELECT count() FROM hackernews_1m WHERE parent = 11189;
+        """
+        multi_sql """
+            SELECT count() FROM hackernews_1m WHERE author = 'faster';
+            ALTER TABLE hackernews_1m ADD INDEX idx_author(author) USING INVERTED;
+        """
+        waitUntilSchemaChangeDone("hackernews_1m")
+        if (!isCloudMode()) {
+            sql "BUILD INDEX idx_author ON hackernews_1m"
+        }
+        multi_sql """
+            SHOW ALTER TABLE COLUMN;
+            SHOW BUILD INDEX order by CreateTime desc limit 1;
+            SELECT count() FROM hackernews_1m WHERE author = 'faster';
+        """
+    } catch (Throwable t) {
+        Assertions.fail("examples in docs/table-design/index/inverted-index.md failed to exec, please fix it", t)
+    }
+}
diff --git a/regression-test/suites/doc/table-design/index/ngram-bloomfilter-index.md.groovy b/regression-test/suites/doc/table-design/index/ngram-bloomfilter-index.md.groovy
new file mode 100644
index 00000000000000..f42b455559b713
--- /dev/null
+++ b/regression-test/suites/doc/table-design/index/ngram-bloomfilter-index.md.groovy
@@ -0,0 +1,80 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.junit.jupiter.api.Assertions
+
+suite("docs/table-design/index/ngram-bloomfilter-index.md") {
+    try {
+        sql "DROP TABLE IF EXISTS `amazon_reviews`"
+        sql """
+            CREATE TABLE `amazon_reviews` (
+                `review_date` int(11) NULL,
+                `marketplace` varchar(20) NULL,
+                `customer_id` bigint(20) NULL,
+                `review_id` varchar(40) NULL,
+                `product_id` varchar(10) NULL,
+                `product_parent` bigint(20) NULL,
+                `product_title` varchar(500) NULL,
+                `product_category` varchar(50) NULL,
+                `star_rating` smallint(6) NULL,
+                `helpful_votes` int(11) NULL,
+                `total_votes` int(11) NULL,
+                `vine` boolean NULL,
+                `verified_purchase` boolean NULL,
+                `review_headline` varchar(500) NULL,
+                `review_body` string NULL
+            ) ENGINE=OLAP
+            DUPLICATE KEY(`review_date`)
+            COMMENT 'OLAP'
+            DISTRIBUTED BY HASH(`review_date`) BUCKETS 16
+            PROPERTIES (
+                "replication_allocation" = "tag.location.default: 1",
+                "compression" = "ZSTD"
+            )
+        """
+
+        var f = new File("amazon_reviews_2010.snappy.parquet") // remove any stale copy before downloading a fresh file
+        if (f.exists()) {
+            f.delete()
+        }
+        cmd("wget ${getS3Url()}/regression/doc/amazon_reviews_2010.snappy.parquet")
+        cmd("""curl --location-trusted -u ${context.config.jdbcUser}:${context.config.jdbcPassword} -T amazon_reviews_2010.snappy.parquet -H "format:parquet" http://${context.config.feHttpAddress}/api/${curDbName}/amazon_reviews/_stream_load""")
+
+        sql " SELECT COUNT() FROM amazon_reviews "
+        sql """
+            SELECT
+                product_id,
+                any(product_title),
+                AVG(star_rating) AS rating,
+                COUNT() AS count
+            FROM
+                amazon_reviews
+            WHERE
+                review_body LIKE '%is super awesome%'
+            GROUP BY
+                product_id
+            ORDER BY
+                count DESC,
+                rating DESC,
+                product_id
+            LIMIT 5
+        """
+        sql """ ALTER TABLE amazon_reviews ADD INDEX review_body_ngram_idx(review_body) USING NGRAM_BF PROPERTIES("gram_size"="10", "bf_size"="10240") """
+    } catch (Throwable t) {
+        Assertions.fail("examples in docs/table-design/index/ngram-bloomfilter-index.md failed to exec, please fix it", t)
+    }
+}
diff --git a/regression-test/suites/doc/table-design/index/prefix-index.md.groovy b/regression-test/suites/doc/table-design/index/prefix-index.md.groovy
new file mode 100644
index 00000000000000..b2740eb361c197
--- /dev/null
+++ b/regression-test/suites/doc/table-design/index/prefix-index.md.groovy
@@ -0,0 +1,50 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.junit.jupiter.api.Assertions;
+
+suite("docs/table-design/index/prefix-index.md") {
+    try {
+        sql "DROP TABLE IF EXISTS table1"
+        sql "DROP TABLE IF EXISTS table2"
+
+        sql """
+            CREATE TABLE IF NOT EXISTS `table1` (
+                user_id BIGINT,
+                age INT,
+                message VARCHAR(100),
+                max_dwell_time BIGINT,
+                min_dwell_time DATETIME
+            ) PROPERTIES ("replication_num" = "1")
+        """
+
+        sql """
+            CREATE TABLE IF NOT EXISTS `table2` (
+                user_id VARCHAR(20),
+                age INT,
+                message VARCHAR(100),
+                max_dwell_time BIGINT,
+                min_dwell_time DATETIME
+            ) PROPERTIES ("replication_num" = "1")
+        """
+
+        sql "SELECT * FROM table1 WHERE user_id=1829239 and age=20"
+        sql "SELECT * FROM table1 WHERE age=20"
+    } catch (Throwable t) {
+        Assertions.fail("examples in docs/table-design/index/prefix-index.md failed to exec, please fix it", t)
+    }
+}
diff --git a/regression-test/suites/doc/table-design/row-store.md.groovy b/regression-test/suites/doc/table-design/row-store.md.groovy
new file mode 100644
index 00000000000000..6a8d89d637405f
--- /dev/null
+++ b/regression-test/suites/doc/table-design/row-store.md.groovy
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +import org.junit.jupiter.api.Assertions; + +suite("docs/table-design/row-store.md") { + try { + sql "DROP TABLE IF EXISTS `tbl_point_query`" + multi_sql """ + CREATE TABLE `tbl_point_query` ( + `key` int(11) NULL, + `v1` decimal(27, 9) NULL, + `v2` varchar(30) NULL, + `v3` varchar(30) NULL, + `v4` date NULL, + `v5` datetime NULL, + `v6` float NULL, + `v7` datev2 NULL + ) ENGINE=OLAP + UNIQUE KEY(`key`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`key`) BUCKETS 1 + PROPERTIES ( + "enable_unique_key_merge_on_write" = "true", + "light_schema_change" = "true", + "row_store_columns" = "key,v1,v3,v5,v7", + "row_store_page_size" = "4096", + "replication_num" = "1" + ); + """ + } catch (Throwable t) { + Assertions.fail("examples in docs/table-design/row-store.md failed to exec, please fix it", t) + } +} diff --git a/regression-test/suites/doc/table-design/schema-change.md.groovy b/regression-test/suites/doc/table-design/schema-change.md.groovy new file mode 100644 index 00000000000000..c23fc69f03404d --- /dev/null +++ b/regression-test/suites/doc/table-design/schema-change.md.groovy @@ -0,0 +1,211 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.jupiter.api.Assertions; + +suite("docs/table-design/schema-change.md") { + try { + def waitUntilSchemaChangeDone = { tbl -> + waitForSchemaChangeDone({ + sql " SHOW ALTER TABLE COLUMN FROM example_db WHERE TableName='${tbl}' ORDER BY createtime DESC LIMIT 1 " + }) + } + + multi_sql "create database if not exists example_db; use example_db; drop table if exists my_table;" + sql """ + CREATE TABLE IF NOT EXISTS example_db.my_table( + col1 int, + col2 int, + col3 int, + col4 int, + col5 int + ) DUPLICATE KEY(col1, col2, col3) + DISTRIBUTED BY RANDOM BUCKETS 1 + ROLLUP ( + example_rollup_index (col1, col3, col4, col5) + ) + PROPERTIES ( + "replication_num" = "1" + ) + """ + sql """ + ALTER TABLE example_db.my_table + ADD COLUMN new_key_col INT KEY DEFAULT "0" AFTER col1 + TO example_rollup_index + """ + waitUntilSchemaChangeDone("my_table") + sql """ + ALTER TABLE example_db.my_table + ADD COLUMN new_val_col INT DEFAULT "0" AFTER col4 + TO example_rollup_index + """ + waitUntilSchemaChangeDone("my_table") + + sql "drop table if exists example_db.my_table" + sql """ + CREATE TABLE IF NOT EXISTS example_db.my_table( + col1 int, + col2 int, + col3 int, + col4 int SUM, + col5 int MAX + ) AGGREGATE KEY(col1, col2, col3) + DISTRIBUTED BY HASH(col1) BUCKETS 1 + ROLLUP ( + example_rollup_index (col1, col3, col4, col5) + ) + PROPERTIES ( + "replication_num" = "1" + ) + """ + sql """ + ALTER TABLE example_db.my_table + ADD COLUMN new_key_col INT DEFAULT "0" AFTER col1 + TO example_rollup_index + """ + waitUntilSchemaChangeDone("my_table") + sql """ + ALTER TABLE example_db.my_table + ADD COLUMN new_val_col INT SUM DEFAULT "0" AFTER col4 + TO example_rollup_index + """ + waitUntilSchemaChangeDone("my_table") + + sql """ + ALTER TABLE example_db.my_table + ADD COLUMN (c1 INT DEFAULT "1", c2 FLOAT SUM DEFAULT "0") + TO example_rollup_index + """ + waitUntilSchemaChangeDone("my_table") + + sql """ + ALTER TABLE example_db.my_table + DROP COLUMN col3 + FROM example_rollup_index + """ + waitUntilSchemaChangeDone("my_table") + + + sql "drop table if exists example_db.my_table" + sql """ + CREATE TABLE IF NOT EXISTS example_db.my_table( + col0 int, + col1 int DEFAULT "1", + col2 int, + col3 varchar(32), + col4 int SUM, + col5 varchar(32) REPLACE DEFAULT "abc" + ) AGGREGATE KEY(col0, col1, col2, col3) + DISTRIBUTED BY HASH(col0) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1" + ) + """ + sql """ + ALTER TABLE example_db.my_table + MODIFY COLUMN col1 BIGINT KEY DEFAULT "1" AFTER col2 + """ + waitUntilSchemaChangeDone("my_table") + sql """ + ALTER TABLE example_db.my_table + MODIFY COLUMN col5 VARCHAR(64) REPLACE DEFAULT "abc" + """ + waitUntilSchemaChangeDone("my_table") + sql """ + ALTER TABLE example_db.my_table + MODIFY COLUMN col3 varchar(50) KEY NULL comment 'to 50' + """ + waitUntilSchemaChangeDone("my_table") + + sql "drop table if exists my_table" + sql """ + CREATE TABLE IF NOT EXISTS example_db.my_table( + k1 int DEFAULT "1", + k2 int, + k3 varchar(32), + k4 date, + v1 int SUM, + v2 int MAX, + ) AGGREGATE KEY(k1, k2, k3, k4) + DISTRIBUTED BY HASH(k1) BUCKETS 1 + ROLLUP ( + example_rollup_index(k1, k2, k3, v1, v2) + ) + PROPERTIES ( + "replication_num" = "1" + ) + """ + sql """ + ALTER TABLE example_db.my_table + ORDER BY (k3,k1,k2,v2,v1) + FROM example_rollup_index + """ + waitUntilSchemaChangeDone("my_table") + + sql "drop table if exists example_db.tbl1" + sql """ + CREATE TABLE IF NOT EXISTS example_db.tbl1( + k1 int, + k2 int, + k3 int + ) AGGREGATE KEY(k1, k2, k3) + 
DISTRIBUTED BY HASH(k1) BUCKETS 1 + ROLLUP ( + rollup1 (k1, k2), + rollup2 (k2) + ) + PROPERTIES ( + "replication_num" = "1" + ) + """ + sql """ + ALTER TABLE tbl1 + ADD COLUMN k4 INT default "1" to rollup1, + ADD COLUMN k4 INT default "1" to rollup2, + ADD COLUMN k5 INT default "1" to rollup2 + """ + waitUntilSchemaChangeDone("tbl1") + + sql "drop table if exists example_db.my_table" + sql """ + CREATE TABLE IF NOT EXISTS example_db.my_table( + k1 int DEFAULT "1", + k2 int, + k3 varchar(32), + k4 date, + v1 int SUM, + ) AGGREGATE KEY(k1, k2, k3, k4) + DISTRIBUTED BY HASH(k1) BUCKETS 1 + ROLLUP ( + example_rollup_index(k1, k3, k2, v1) + ) + PROPERTIES ( + "replication_num" = "1" + ) + """ + sql """ + ALTER TABLE example_db.my_table + ADD COLUMN v2 INT MAX DEFAULT "0" TO example_rollup_index, + ORDER BY (k3,k1,k2,v2,v1) FROM example_rollup_index + """ + waitUntilSchemaChangeDone("my_table") + + sql "SHOW ALTER TABLE COLUMN" + } catch (Throwable t) { + Assertions.fail("examples in docs/table-design/schema-change.md failed to exec, please fix it", t) + } +} diff --git a/regression-test/suites/doc/table-design/test_data/test.csv b/regression-test/suites/doc/table-design/test_data/test.csv new file mode 100644 index 00000000000000..c34e65603ab347 --- /dev/null +++ b/regression-test/suites/doc/table-design/test_data/test.csv @@ -0,0 +1,2 @@ +Tom,40 +John,50 diff --git a/regression-test/suites/external_table_p0/hive/ddl/test_hive_ddl.groovy b/regression-test/suites/external_table_p0/hive/ddl/test_hive_ddl.groovy index 626f6b2bfbf33b..ed605f15d08dce 100644 --- a/regression-test/suites/external_table_p0/hive/ddl/test_hive_ddl.groovy +++ b/regression-test/suites/external_table_p0/hive/ddl/test_hive_ddl.groovy @@ -96,8 +96,10 @@ suite("test_hive_ddl", "p0,external,hive,external_docker,external_docker_hive") // create and insert default value is supported on hive3, we can test default hive version 2.3 sql """switch ${catalog_name}""" - sql """ create database if not exists `test_hive_default_val` - """ + sql """ drop database if exists `test_hive_default_val` """ + + sql """ create database if not exists `test_hive_default_val` """ + sql """use `test_hive_default_val`""" test { sql """ @@ -111,7 +113,6 @@ suite("test_hive_ddl", "p0,external,hive,external_docker,external_docker_hive") """ exception "failed to create table from hms client. reason: java.lang.UnsupportedOperationException: Table with default values is not supported if the hive version is less than 3.0. Can set 'hive.version' to 3.0 in properties." } - sql """DROP DATABASE `test_hive_default_val`""" test { sql """ diff --git a/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_clickhouse_jdbc_catalog_pool_test.groovy b/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_clickhouse_jdbc_catalog_pool_test.groovy deleted file mode 100644 index 4115b8321f73cb..00000000000000 --- a/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_clickhouse_jdbc_catalog_pool_test.groovy +++ /dev/null @@ -1,71 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_clickhouse_jdbc_catalog_pool_test", "p0,external,clickhouse,external_docker,external_docker_clickhouse") { - String enabled = context.config.otherConfigs.get("enableJdbcTest") - if (enabled != null && enabled.equalsIgnoreCase("true")) { - String ex_db_name = "doris_test"; - String clickhouse_port = context.config.otherConfigs.get("clickhouse_22_port"); - String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") - String s3_endpoint = getS3Endpoint() - String bucket = getS3BucketName() - String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/clickhouse-jdbc-0.4.2-all.jar" - - def poolOptions = [true, false] - - poolOptions.each { poolEnabled -> - String poolState = poolEnabled ? "true" : "false" - String catalog_name = "clickhouse_catalog_pool_${poolState}"; - - sql """ drop catalog if exists ${catalog_name} """ - sql """ create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="default", - "password"="123456", - "jdbc_url" = "jdbc:clickhouse://${externalEnvIp}:${clickhouse_port}/doris_test", - "driver_url" = "${driver_url}", - "driver_class" = "com.clickhouse.jdbc.ClickHouseDriver", - "enable_connection_pool" = "${poolState}" - );""" - - def tasks = (1..5).collect { - Thread.start { - sql """ switch ${catalog_name} """ - sql """ use ${ex_db_name} """ - order_qt_type """ select * from type order by k1; """ - } - } - - tasks*.join() - - sql """refresh catalog ${catalog_name}""" - - def refreshTasks = (1..5).collect { - Thread.start { - sql """ switch ${catalog_name} """ - sql """ use ${ex_db_name} """ - order_qt_type_refresh """ select * from type order by k1; """ - } - } - - refreshTasks*.join() - - sql """ drop catalog if exists ${catalog_name} """ - } - } -} - diff --git a/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_mysql_jdbc_catalog_pool_test.groovy b/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_mysql_jdbc_catalog_pool_test.groovy deleted file mode 100644 index 3b1150b3e8adc5..00000000000000 --- a/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_mysql_jdbc_catalog_pool_test.groovy +++ /dev/null @@ -1,73 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -suite("test_mysql_jdbc_catalog_pool_test", "p0,external,mysql,external_docker,external_docker_mysql") { - String enabled = context.config.otherConfigs.get("enableJdbcTest") - String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") - String s3_endpoint = getS3Endpoint() - String bucket = getS3BucketName() - String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mysql-connector-j-8.3.0.jar" - - if (enabled != null && enabled.equalsIgnoreCase("true")) { - String ex_db_name = "doris_test"; - String mysql_port = context.config.otherConfigs.get("mysql_57_port"); - - def poolOptions = [true, false] - - poolOptions.each { poolEnabled -> - String poolState = poolEnabled ? "pool_true" : "pool_false" - String catalog_name = "mysql_jdbc_catalog_${poolState}"; - - sql """ drop catalog if exists ${catalog_name} """ - sql """ create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false&zeroDateTimeBehavior=convertToNull", - "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver", - "enable_connection_pool" = "${poolEnabled}" - );""" - - def tasks = (1..5).collect { - Thread.start { - sql """ switch ${catalog_name} """ - sql """ use doris_test; """ - qt_mysql_all_types """ select * from all_types order by tinyint_u; """ - } - } - - tasks*.join() - - sql """ refresh catalog ${catalog_name} """ - - def refreshTasks = (1..5).collect { - Thread.start { - sql """ switch ${catalog_name} """ - sql """ use doris_test; """ - qt_mysql_all_types_refresh """ select * from all_types order by tinyint_u; """ - } - } - - refreshTasks*.join() - - sql """ drop catalog if exists ${catalog_name} """ - } - } -} - - diff --git a/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_oracle_jdbc_catalog_pool_test.groovy b/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_oracle_jdbc_catalog_pool_test.groovy deleted file mode 100644 index 8ec0da5c0ea739..00000000000000 --- a/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_oracle_jdbc_catalog_pool_test.groovy +++ /dev/null @@ -1,111 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -suite("test_oracle_jdbc_catalog_pool_test", "p0,external,oracle,external_docker,external_docker_oracle") { - String enabled = context.config.otherConfigs.get("enableJdbcTest"); - String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") - String s3_endpoint = getS3Endpoint() - String bucket = getS3BucketName() - String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/ojdbc8.jar" - String driver6_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/ojdbc6.jar" - - if (enabled != null && enabled.equalsIgnoreCase("true")) { - String ex_db_name = "DORIS_TEST"; - String oracle_port = context.config.otherConfigs.get("oracle_11_port"); - String SID = "XE"; - String test_all_types = "TEST_ALL_TYPES"; - - def poolOptions = [true, false] - - poolOptions.each { poolEnabled -> - String poolState = poolEnabled ? "pool_true" : "pool_false" - String catalog_name = "oracle_catalog_${poolState}"; - - sql """drop catalog if exists ${catalog_name} """ - - sql """create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="doris_test", - "password"="123456", - "jdbc_url" = "jdbc:oracle:thin:@${externalEnvIp}:${oracle_port}:${SID}", - "driver_url" = "${driver_url}", - "driver_class" = "oracle.jdbc.driver.OracleDriver", - "enable_connection_pool" = "${poolEnabled}" - );""" - - def tasks = (1..5).collect { - Thread.start { - sql """switch ${catalog_name}""" - sql """ use ${ex_db_name}""" - order_qt_select_all_types """select * from ${test_all_types}; """ - } - } - tasks*.join() - - sql """refresh catalog ${catalog_name}""" - - def refreshTasks = (1..5).collect { - Thread.start { - sql """switch ${catalog_name}""" - sql """ use ${ex_db_name}""" - order_qt_select_all_types_refresh """select * from ${test_all_types}; """ - } - } - refreshTasks*.join() - - sql """drop catalog if exists ${catalog_name} """ - } - - poolOptions.each { poolEnabled -> - String poolState = poolEnabled ? 
"pool_true" : "pool_false" - String catalog_name = "oracle_ojdbc6_${poolState}"; - - sql """drop catalog if exists ${catalog_name} """ - - sql """create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="doris_test", - "password"="123456", - "jdbc_url" = "jdbc:oracle:thin:@${externalEnvIp}:${oracle_port}:${SID}", - "driver_url" = "${driver6_url}", - "driver_class" = "oracle.jdbc.OracleDriver", - "enable_connection_pool" = "${poolEnabled}" - );""" - - - def tasks6 = (1..5).collect { - Thread.start { - sql """ use ${catalog_name}.DORIS_TEST; """ - qt_query_ojdbc6_all_types """ select * from ${catalog_name}.DORIS_TEST.TEST_ALL_TYPES order by 1; """ - } - } - tasks6*.join() - - sql """refresh catalog ${catalog_name};""" - - def refreshTasks6 = (1..5).collect { - Thread.start { - sql """ use ${catalog_name}.DORIS_TEST; """ - qt_query_ojdbc6_all_types_refresh """ select * from ${catalog_name}.DORIS_TEST.TEST_ALL_TYPES order by 1; """ - } - } - refreshTasks6*.join() - - sql """drop catalog if exists ${catalog_name}; """ - } - } -} diff --git a/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_pg_jdbc_catalog_pool_test.groovy b/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_pg_jdbc_catalog_pool_test.groovy deleted file mode 100644 index 91f5c61b0baf9c..00000000000000 --- a/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_pg_jdbc_catalog_pool_test.groovy +++ /dev/null @@ -1,71 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_pg_jdbc_catalog_pool_test", "p0,external,pg,external_docker,external_docker_pg") { - String enabled = context.config.otherConfigs.get("enableJdbcTest") - String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") - String s3_endpoint = getS3Endpoint() - String bucket = getS3BucketName() - String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/postgresql-42.5.0.jar" - - if (enabled != null && enabled.equalsIgnoreCase("true")) { - String ex_schema_name = "catalog_pg_test"; - String pg_port = context.config.otherConfigs.get("pg_14_port"); - String test_all_types = "test_all_types"; - - def poolOptions = [true, false] - - poolOptions.each { poolEnabled -> - String poolState = poolEnabled ? 
"pool_true" : "pool_false" - String catalog_name = "pg_jdbc_catalog_${poolState}"; - - sql """drop catalog if exists ${catalog_name} """ - - sql """create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="postgres", - "password"="123456", - "jdbc_url" = "jdbc:postgresql://${externalEnvIp}:${pg_port}/postgres?currentSchema=doris_test&useSSL=false", - "driver_url" = "${driver_url}", - "driver_class" = "org.postgresql.Driver", - "enable_connection_pool" = "${poolEnabled}" - );""" - - def tasks = (1..5).collect { - Thread.start { - sql """switch ${catalog_name}""" - sql """ use ${ex_schema_name}""" - order_qt_select_all_types """select * from ${test_all_types}; """ - } - } - tasks*.join() - - sql """refresh catalog ${catalog_name}""" - - def refreshTasks = (1..5).collect { - Thread.start { - sql """switch ${catalog_name}""" - sql """ use ${ex_schema_name}""" - order_qt_select_all_types_refresh """select * from ${test_all_types}; """ - } - } - refreshTasks*.join() - - sql """ drop catalog if exists ${catalog_name} """ - } - } -} diff --git a/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_sqlserver_jdbc_catalog_pool_test.groovy b/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_sqlserver_jdbc_catalog_pool_test.groovy deleted file mode 100644 index a8ebc8952fb850..00000000000000 --- a/regression-test/suites/external_table_p0/jdbc/connection_pool_test/test_sqlserver_jdbc_catalog_pool_test.groovy +++ /dev/null @@ -1,70 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_sqlserver_jdbc_catalog_pool_test", "p0,external,sqlserver,external_docker,external_docker_sqlserver") { - String enabled = context.config.otherConfigs.get("enableJdbcTest"); - String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") - String s3_endpoint = getS3Endpoint() - String bucket = getS3BucketName() - String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mssql-jdbc-11.2.3.jre8.jar" - - if (enabled != null && enabled.equalsIgnoreCase("true")) { - String ex_db_name = "dbo"; - String sqlserver_port = context.config.otherConfigs.get("sqlserver_2022_port"); - - def poolOptions = [true, false] - - poolOptions.each { poolEnabled -> - String poolState = poolEnabled ? 
"pool_true" : "pool_false" - String catalog_name = "sqlserver_catalog_${poolState}"; - - sql """ drop catalog if exists ${catalog_name} """ - - sql """ create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="sa", - "password"="Doris123456", - "jdbc_url" = "jdbc:sqlserver://${externalEnvIp}:${sqlserver_port};encrypt=false;databaseName=doris_test;", - "driver_url" = "${driver_url}", - "driver_class" = "com.microsoft.sqlserver.jdbc.SQLServerDriver", - "enable_connection_pool" = "${poolEnabled}" - );""" - - def tasks = (1..5).collect { - Thread.start { - sql """ switch ${catalog_name} """ - sql """ use ${ex_db_name} """ - order_qt_all_type """ select * from all_type order by id; """ - } - } - tasks*.join() - - sql """refresh catalog ${catalog_name}""" - - def refreshTasks = (1..5).collect { - Thread.start { - sql """ switch ${catalog_name} """ - sql """ use ${ex_db_name} """ - order_qt_all_type_refresh """ select * from all_type order by id; """ - } - } - refreshTasks*.join() - - sql """ drop catalog if exists ${catalog_name} """ - } - } -} diff --git a/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_catalog.groovy b/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_catalog.groovy index ce23a4f658c859..a88ba550eea87f 100644 --- a/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_catalog.groovy +++ b/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_catalog.groovy @@ -24,7 +24,16 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc String bucket = getS3BucketName() String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mysql-connector-java-8.0.25.jar" // String driver_url = "mysql-connector-java-8.0.25.jar" - if (enabled != null && enabled.equalsIgnoreCase("true")) { + if (enabled == null || !enabled.equalsIgnoreCase("true")) { + return; + } + + for (String driver_class : ["com.mysql.cj.jdbc.Driver","com.mysql.jdbc.Driver" ]) { + if (driver_class.equals("com.mysql.jdbc.Driver")) { + driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mysql-connector-java-5.1.49.jar" + } else { + driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mysql-connector-java-8.0.25.jar" + } String user = "test_jdbc_user"; String pwd = '123456'; def tokens = context.config.jdbcUrl.split('/') @@ -78,7 +87,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false&zeroDateTimeBehavior=convertToNull", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver" + "driver_class" = "${driver_class}" );""" sql """use ${internal_db_name}""" @@ -226,13 +235,13 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver", + "driver_class" = "${driver_class}", "only_specified_database" = "true" );""" sql """switch ${catalog_name}""" - qt_specified_database_1 """ show databases; """ + qt_specified_database_1 """ show databases; """ // only has doris_test sql """ drop catalog if exists ${catalog_name} """ @@ -243,14 +252,14 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}?useSSL=false", "driver_url" = 
"${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver", + "driver_class" = "${driver_class}", "only_specified_database" = "true", "include_database_list" = "doris_test" );""" sql """switch ${catalog_name}""" - qt_specified_database_2 """ show databases; """ + qt_specified_database_2 """ show databases; """ // only has doris_test sql """ drop catalog if exists ${catalog_name} """ @@ -261,14 +270,16 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}?useSSL=false", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver", + "driver_class" = "${driver_class}", "only_specified_database" = "true", "exclude_database_list" = "doris_test" );""" sql """switch ${catalog_name}""" - qt_specified_database_3 """ show databases; """ + List> show_result3 = sql "show databases" + assertTrue(show_result3.size() > 0) // has other databases + qt_specified_database_3 """ show databases like "doris_test"; """ // does not have doris_test sql """ drop catalog if exists ${catalog_name} """ @@ -279,7 +290,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}?useSSL=false", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver", + "driver_class" = "${driver_class}", "only_specified_database" = "true", "include_database_list" = "doris_test", "exclude_database_list" = "doris_test" @@ -287,7 +298,9 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc sql """switch ${catalog_name}""" - qt_specified_database_4 """ show databases; """ + List> show_result4 = sql "show databases" + assertTrue(show_result4.size() > 0) // has other databases + qt_specified_database_4 """ show databases like "doris_test"; """ // does not have doris_test sql """ drop catalog if exists ${catalog_name} """ @@ -298,7 +311,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "jdbc.password"="123456", "jdbc.jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", "jdbc.driver_url" = "${driver_url}", - "jdbc.driver_class" = "com.mysql.cj.jdbc.Driver"); + "jdbc.driver_class" = "${driver_class}"); """ sql """ switch ${catalog_name} """ @@ -335,7 +348,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "jdbc.password"="123456", "jdbc.jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", "jdbc.driver_url" = "${driver_url}", - "jdbc.driver_class" = "com.mysql.cj.jdbc.Driver"); + "jdbc.driver_class" = "${driver_class}"); """ qt_mysql_view """ select * from view_catalog.doris_test.mysql_view order by col_1;""" sql """ drop catalog if exists view_catalog; """ @@ -347,7 +360,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "jdbc.password"="123456", "jdbc.jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", "jdbc.driver_url" = "${driver_url}", - "jdbc.driver_class" = "com.mysql.cj.jdbc.Driver"); + "jdbc.driver_class" = "${driver_class}"); """ sql """switch mysql_fun_push_catalog""" @@ -494,7 +507,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver" + 
"driver_class" = "${driver_class}" );""" sql """switch ${catalog_name}""" @@ -525,7 +538,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver", + "driver_class" = "${driver_class}", "lower_case_meta_names" = "true", "meta_names_mapping" = '{"databases": [{"remoteDatabase": "DORIS","mapping": "doris_1"},{"remoteDatabase": "Doris","mapping": "doris_2"},{"remoteDatabase": "doris","mapping": "doris_3"}],"tables": [{"remoteDatabase": "Doris","remoteTable": "DORIS","mapping": "doris_1"},{"remoteDatabase": "Doris","remoteTable": "Doris","mapping": "doris_2"},{"remoteDatabase": "Doris","remoteTable": "doris","mapping": "doris_3"}]}' ); @@ -546,7 +559,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver", + "driver_class" = "${driver_class}", "lower_case_table_names" = "true", "meta_names_mapping" = '{"databases": [{"remoteDatabase": "DORIS","mapping": "doris_1"},{"remoteDatabase": "Doris","mapping": "doris_2"},{"remoteDatabase": "doris","mapping": "doris_3"}],"tables": [{"remoteDatabase": "Doris","remoteTable": "DORIS","mapping": "doris_1"},{"remoteDatabase": "Doris","remoteTable": "Doris","mapping": "doris_2"},{"remoteDatabase": "Doris","remoteTable": "doris","mapping": "doris_3"}]}' ); @@ -561,7 +574,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver", + "driver_class" = "${driver_class}", "lower_case_meta_names" = "true", "meta_names_mapping" = "{\\\"databases\\\": [{\\\"remoteDatabase\\\": \\\"DORIS\\\",\\\"mapping\\\": \\\"doris_1\\\"},{\\\"remoteDatabase\\\": \\\"Doris\\\",\\\"mapping\\\": \\\"doris_2\\\"},{\\\"remoteDatabase\\\": \\\"doris\\\",\\\"mapping\\\": \\\"doris_3\\\"}],\\\"tables\\\": [{\\\"remoteDatabase\\\": \\\"Doris\\\",\\\"remoteTable\\\": \\\"DORIS\\\",\\\"mapping\\\": \\\"doris_1\\\"},{\\\"remoteDatabase\\\": \\\"Doris\\\",\\\"remoteTable\\\": \\\"Doris\\\",\\\"mapping\\\": \\\"doris_2\\\"},{\\\"remoteDatabase\\\": \\\"Doris\\\",\\\"remoteTable\\\": \\\"doris\\\",\\\"mapping\\\": \\\"doris_3\\\"}]}" ); @@ -576,7 +589,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false&zeroDateTimeBehavior=convertToNull", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver", + "driver_class" = "${driver_class}", "metadata_refresh_interval_sec" = "5" );""" @@ -588,7 +601,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false&zeroDateTimeBehavior=convertToNull", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver" + "driver_class" = "${driver_class}" );""" qt_sql """select count(*) from mysql_rename1.doris_test.ex_tb1;""" @@ -607,7 +620,7 @@ suite("test_mysql_jdbc_catalog", "p0,external,mysql,external_docker,external_doc "password"="123456", 
"jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false&zeroDateTimeBehavior=convertToNull", "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.cj.jdbc.Driver" + "driver_class" = "${driver_class}" );""" order_qt_sql """SELECT * FROM mysql_conjuncts.doris_test.compoundpredicate_test WHERE (pk > 4) OR ((pk < 6 OR pk > 7) AND col_int_undef_signed < 1);""" diff --git a/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_driver5_catalog.groovy b/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_driver5_catalog.groovy deleted file mode 100644 index c82c3aafcd8e39..00000000000000 --- a/regression-test/suites/external_table_p0/jdbc/test_mysql_jdbc_driver5_catalog.groovy +++ /dev/null @@ -1,566 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("test_mysql_jdbc_driver5_catalog", "p0,external,mysql,external_docker,external_docker_mysql") { - qt_sql """select current_catalog()""" - - String enabled = context.config.otherConfigs.get("enableJdbcTest") - String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") - String s3_endpoint = getS3Endpoint() - String bucket = getS3BucketName() - String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mysql-connector-java-5.1.49.jar" - if (enabled != null && enabled.equalsIgnoreCase("true")) { - String user = "test_jdbc_user"; - String pwd = '123456'; - def tokens = context.config.jdbcUrl.split('/') - def url = tokens[0] + "//" + tokens[2] + "/" + "information_schema" + "?" 
- String catalog_name = "mysql_jdbc5_catalog"; - String internal_db_name = "regression_test_jdbc_catalog_p0"; - String ex_db_name = "doris_test"; - String mysql_port = context.config.otherConfigs.get("mysql_57_port"); - String inDorisTable = "test_mysql_jdbc_doris_in_tb"; - String ex_tb0 = "ex_tb0"; - String ex_tb1 = "ex_tb1"; - String ex_tb2 = "ex_tb2"; - String ex_tb3 = "ex_tb3"; - String ex_tb4 = "ex_tb4"; - String ex_tb5 = "ex_tb5"; - String ex_tb6 = "ex_tb6"; - String ex_tb7 = "ex_tb7"; - String ex_tb8 = "ex_tb8"; - String ex_tb9 = "ex_tb9"; - String ex_tb10 = "ex_tb10"; - String ex_tb11 = "ex_tb11"; - String ex_tb12 = "ex_tb12"; - String ex_tb13 = "ex_tb13"; - String ex_tb14 = "ex_tb14"; - String ex_tb15 = "ex_tb15"; - String ex_tb16 = "ex_tb16"; - String ex_tb17 = "ex_tb17"; - String ex_tb18 = "ex_tb18"; - String ex_tb19 = "ex_tb19"; - String ex_tb20 = "ex_tb20"; - String ex_tb21 = "test_key_word"; - String test_insert = "test_insert"; - String test_insert2 = "test_insert2"; - String test_insert_all_types = "test_mysql_insert_all_types"; - String test_ctas = "test_mysql_ctas"; - String auto_default_t = "auto_default_t"; - String dt = "dt"; - String dt_null = "dt_null"; - String test_zd = "test_zd" - - try_sql("DROP USER ${user}") - sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'""" - - sql """create database if not exists ${internal_db_name}; """ - - sql """drop catalog if exists ${catalog_name} """ - - sql """create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false&zeroDateTimeBehavior=convertToNull", - "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.jdbc.Driver" - );""" - order_qt_show_db """ show databases from ${catalog_name}; """ - - sql """use ${internal_db_name}""" - sql """ drop table if exists ${internal_db_name}.${inDorisTable} """ - sql """ - CREATE TABLE ${internal_db_name}.${inDorisTable} ( - `id` INT NULL COMMENT "主键id", - `name` string NULL COMMENT "名字" - ) DISTRIBUTED BY HASH(id) BUCKETS 10 - PROPERTIES("replication_num" = "1"); - """ - - // used for testing all types - sql """ drop table if exists ${internal_db_name}.${test_insert_all_types} """ - sql """ - CREATE TABLE ${internal_db_name}.${test_insert_all_types} ( - `tinyint_u` SMALLINT, - `smallint_u` INT, - `mediumint_u` INT, - `int_u` BIGINT, - `bigint_u` LARGEINT, - `decimal_u` DECIMAL(18, 5), - `double_u` DOUBLE, - `float_u` FLOAT, - `boolean` TINYINT, - `tinyint` TINYINT, - `smallint` SMALLINT, - `year` SMALLINT, - `mediumint` INT, - `int` INT, - `bigint` BIGINT, - `date` DATE, - `timestamp` DATETIME(4) null, - `datetime` DATETIME, - `float` FLOAT, - `double` DOUBLE, - `decimal` DECIMAL(12, 4), - `char` CHAR(5), - `varchar` VARCHAR(10), - `time` STRING, - `text` STRING, - `blob` STRING, - `json` JSON, - `set` STRING, - `bit` STRING, - `binary` STRING, - `varbinary` STRING, - `enum` STRING - ) DISTRIBUTED BY HASH(tinyint_u) BUCKETS 10 - PROPERTIES("replication_num" = "1"); - """ - - qt_sql """select current_catalog()""" - sql """switch ${catalog_name}""" - qt_sql """select current_catalog()""" - sql """ use ${ex_db_name}""" - - order_qt_ex_tb0 """ select id, name from ${ex_tb0} order by id; """ - sql """ insert into internal.${internal_db_name}.${inDorisTable} select id, name from ${ex_tb0}; """ - order_qt_in_tb """ select id, name from internal.${internal_db_name}.${inDorisTable} order by id; """ - - order_qt_ex_tb1 """ select * from ${ex_tb1} order by 
id; """ - order_qt_ex_tb2 """ select * from ${ex_tb2} order by id; """ - order_qt_ex_tb3 """ select * from ${ex_tb3} order by game_code; """ - order_qt_ex_tb4 """ select * from ${ex_tb4} order by products_id; """ - order_qt_ex_tb5 """ select * from ${ex_tb5} order by id; """ - order_qt_ex_tb6 """ select * from ${ex_tb6} order by id; """ - order_qt_ex_tb7 """ select * from ${ex_tb7} order by id; """ - order_qt_ex_tb8 """ select * from ${ex_tb8} order by uid; """ - order_qt_ex_tb9 """ select * from ${ex_tb9} order by c_date; """ - order_qt_ex_tb10 """ select * from ${ex_tb10} order by aa; """ - order_qt_ex_tb11 """ select * from ${ex_tb11} order by aa; """ - order_qt_ex_tb12 """ select * from ${ex_tb12} order by cc; """ - order_qt_ex_tb13 """ select * from ${ex_tb13} order by name; """ - order_qt_ex_tb14 """ select * from ${ex_tb14} order by tid; """ - order_qt_ex_tb15 """ select * from ${ex_tb15} order by col1; """ - order_qt_ex_tb16 """ select * from ${ex_tb16} order by id; """ - order_qt_ex_tb17 """ select * from ${ex_tb17} order by id; """ - order_qt_ex_tb18 """ select * from ${ex_tb18} order by num_tinyint; """ - order_qt_ex_tb19 """ select * from ${ex_tb19} order by date_value; """ - order_qt_ex_tb20 """ select * from ${ex_tb20} order by decimal_normal; """ - order_qt_ex_tb21_1 """ select `key`, `id` from ${ex_tb21} where `key` = 2 order by id;""" - order_qt_ex_tb21_2 """ select `key`, `id` from ${ex_tb21} where `key` like 2 order by id;""" - order_qt_ex_tb21_3 """ select `key`, `id` from ${ex_tb21} where `key` in (1,2) order by id;""" - order_qt_ex_tb21_4 """ select `key`, `id` from ${ex_tb21} where abs(`key`) = 2 order by id;""" - order_qt_ex_tb21_5 """ select `key`, `id` from ${ex_tb21} where `key` between 1 and 2 order by id;""" - order_qt_ex_tb21_6 """ select `key`, `id` from ${ex_tb21} where `key` = case when id = 1 then 1 else 0 end order by id;""" - order_qt_ex_tb21_7 """ select (`key` +1) as k, `id` from ${ex_tb21} having abs(k) = 2 order by id;""" - order_qt_ex_tb21_8 """ select `key` as k, `id` from ${ex_tb21} having abs(k) = 2 order by id;""" - order_qt_information_schema """ show tables from information_schema like "processlist"; """ - order_qt_dt """select * from ${dt}; """ - order_qt_dt_null """select * from ${dt_null} order by 1; """ - order_qt_test_dz """select * from ${test_zd} order by 1; """ - order_qt_test_filter_not """select * from ${ex_tb13} where name not like '%张三0%' order by 1; """ - explain { - sql("select `datetime` from all_types where to_date(`datetime`) = '2012-10-25';") - contains """ SELECT `datetime` FROM `doris_test`.`all_types` WHERE (date(`datetime`) = '2012-10-25')""" - } - - explain { - sql("select /*+ SET_VAR(enable_ext_func_pred_pushdown = false) */ `datetime` from all_types where to_date(`datetime`) = '2012-10-25';") - contains """SELECT `datetime` FROM `doris_test`.`all_types`""" - } - - // test insert - String uuid1 = UUID.randomUUID().toString(); - connect(user=user, password="${pwd}", url=url) { - try { - sql """ insert into ${catalog_name}.${ex_db_name}.${test_insert} values ('${uuid1}', 'doris1', 18) """ - fail() - } catch (Exception e) { - log.info(e.getMessage()) - } - } - - sql """GRANT LOAD_PRIV ON ${catalog_name}.${ex_db_name}.${test_insert} TO ${user}""" - - connect(user=user, password="${pwd}", url=url) { - try { - sql """ insert into ${catalog_name}.${ex_db_name}.${test_insert} values ('${uuid1}', 'doris1', 18) """ - } catch (Exception e) { - fail(); - } - } - order_qt_test_insert1 """ select name, age from ${test_insert} where 
id = '${uuid1}' order by age """ - - String uuid2 = UUID.randomUUID().toString(); - sql """ insert into ${test_insert} values ('${uuid2}', 'doris2', 19), ('${uuid2}', 'doris3', 20) """ - order_qt_test_insert2 """ select name, age from ${test_insert} where id = '${uuid2}' order by age """ - - sql """ insert into ${test_insert} select * from ${test_insert} where id = '${uuid2}' """ - order_qt_test_insert3 """ select name, age from ${test_insert} where id = '${uuid2}' order by age """ - - String uuid3 = UUID.randomUUID().toString(); - sql """ INSERT INTO ${test_insert2} VALUES - ('${uuid3}', true, 'abcHa1.12345', '1.123450xkalowadawd', '2022-10-01', 3.14159, 1, 2, 0, 100000, 1.2345678, 24.000, '07:09:51', '2022', '2022-11-27 07:09:51', '2022-11-27 07:09:51'); """ - order_qt_test_insert4 """ select k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15 from ${test_insert2} where id = '${uuid3}' """ - - sql """ drop catalog if exists ${catalog_name} """ - - // test only_specified_database argument - sql """create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", - "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.jdbc.Driver", - "only_specified_database" = "true" - );""" - - sql """switch ${catalog_name}""" - - qt_specified_database_1 """ show databases; """ - - sql """ drop catalog if exists ${catalog_name} """ - - // test only_specified_database and include_database_list argument - sql """create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}?useSSL=false", - "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.jdbc.Driver", - "only_specified_database" = "true", - "include_database_list" = "doris_test" - );""" - - sql """switch ${catalog_name}""" - - qt_specified_database_2 """ show databases; """ - - sql """ drop catalog if exists ${catalog_name} """ - - // test only_specified_database and exclude_database_list argument - sql """create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}?useSSL=false", - "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.jdbc.Driver", - "only_specified_database" = "true", - "exclude_database_list" = "doris_test" - );""" - - sql """switch ${catalog_name}""" - - qt_specified_database_3 """ show databases; """ - - sql """ drop catalog if exists ${catalog_name} """ - - // test include_database_list and exclude_database_list have overlapping items case - sql """create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}?useSSL=false", - "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.jdbc.Driver", - "only_specified_database" = "true", - "include_database_list" = "doris_test", - "exclude_database_list" = "doris_test" - );""" - - sql """switch ${catalog_name}""" - - qt_specified_database_4 """ show databases; """ - - sql """ drop catalog if exists ${catalog_name} """ - - // test old create-catalog syntax for compatibility - sql """ CREATE CATALOG ${catalog_name} PROPERTIES ( - "type"="jdbc", - "jdbc.user"="root", - "jdbc.password"="123456", - "jdbc.jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", - 
"jdbc.driver_url" = "${driver_url}", - "jdbc.driver_class" = "com.mysql.jdbc.Driver"); - """ - sql """ switch ${catalog_name} """ - - sql """ use ${ex_db_name} """ - order_qt_ex_tb1 """ select * from ${ex_tb1} order by id; """ - - // test all types supported by MySQL - sql """use doris_test;""" - qt_mysql_all_types """select * from all_types order by tinyint_u;""" - - // test insert into internal.db.table select * from all_types - sql """ insert into internal.${internal_db_name}.${test_insert_all_types} select * from all_types; """ - order_qt_select_insert_all_types """ select * from internal.${internal_db_name}.${test_insert_all_types} order by tinyint_u; """ - - // test CTAS - sql """ drop table if exists internal.${internal_db_name}.${test_ctas} """ - sql """ create table internal.${internal_db_name}.${test_ctas} - PROPERTIES("replication_num" = "1") - AS select * from all_types; - """ - - order_qt_ctas """select * from internal.${internal_db_name}.${test_ctas} order by tinyint_u;""" - - order_qt_ctas_desc """desc internal.${internal_db_name}.${test_ctas};""" - - sql """ drop catalog if exists ${catalog_name} """ - - // test mysql view - sql """ drop catalog if exists view_catalog """ - sql """ CREATE CATALOG view_catalog PROPERTIES ( - "type"="jdbc", - "jdbc.user"="root", - "jdbc.password"="123456", - "jdbc.jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", - "jdbc.driver_url" = "${driver_url}", - "jdbc.driver_class" = "com.mysql.jdbc.Driver"); - """ - qt_mysql_view """ select * from view_catalog.doris_test.mysql_view order by col_1;""" - sql """ drop catalog if exists view_catalog; """ - - sql """ drop catalog if exists mysql_fun_push_catalog """ - sql """ CREATE CATALOG mysql_fun_push_catalog PROPERTIES ( - "type"="jdbc", - "jdbc.user"="root", - "jdbc.password"="123456", - "jdbc.jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", - "jdbc.driver_url" = "${driver_url}", - "jdbc.driver_class" = "com.mysql.jdbc.Driver"); - """ - - sql """switch mysql_fun_push_catalog""" - sql """ use ${ex_db_name}""" - sql """ set enable_ext_func_pred_pushdown = "true"; """ - order_qt_filter1 """select * from ${ex_tb17} where id = 1; """ - order_qt_filter2 """select * from ${ex_tb17} where 1=1 order by 1; """ - order_qt_filter3 """select * from ${ex_tb17} where id = 1 and 1 = 1; """ - order_qt_date_trunc """ SELECT timestamp0 from dt where DATE_TRUNC(date_sub(timestamp0,INTERVAL 9 HOUR),'hour') > '2011-03-03 17:39:05'; """ - order_qt_money_format """ select k8 from test1 where money_format(k8) = '1.00'; """ - explain { - sql("select k8 from test1 where money_format(k8) = '1.00';") - - contains "QUERY: SELECT `k8` FROM `doris_test`.`test1`" - } - explain { - sql ("SELECT timestamp0 from dt where DATE_TRUNC(date_sub(timestamp0,INTERVAL 9 HOUR),'hour') > '2011-03-03 17:39:05';") - - contains "QUERY: SELECT `timestamp0` FROM `doris_test`.`dt`" - } - explain { - sql ("SELECT timestamp0 from dt where DATE_TRUNC(date_sub(timestamp0,INTERVAL 9 HOUR),'hour') > '2011-03-03 17:39:05' and timestamp0 > '2022-01-01';") - - contains "QUERY: SELECT `timestamp0` FROM `doris_test`.`dt` WHERE (`timestamp0` > '2022-01-01 00:00:00')" - } - explain { - sql ("select k6, k8 from test1 where nvl(k6, null) = 1;") - - contains "QUERY: SELECT `k6`, `k8` FROM `doris_test`.`test1` WHERE ((ifnull(`k6`, NULL) = 1))" - } - explain { - sql ("select k6, k8 from test1 where nvl(nvl(k6, null),null) = 1;") - - contains "QUERY: SELECT `k6`, `k8` FROM `doris_test`.`test1` WHERE 
((ifnull(ifnull(`k6`, NULL), NULL) = 1))" - } - sql """ set enable_ext_func_pred_pushdown = "false"; """ - explain { - sql ("select k6, k8 from test1 where nvl(k6, null) = 1 and k8 = 1;") - - contains "QUERY: SELECT `k6`, `k8` FROM `doris_test`.`test1` WHERE ((`k8` = 1))" - } - sql """ set enable_ext_func_pred_pushdown = "true"; """ - // test date_add - order_qt_date_add_year """ select * from test_zd where date_add(d_z,interval 1 year) = '2023-01-01' order by 1; """ - explain { - sql("select * from test_zd where date_add(d_z,interval 1 year) = '2023-01-01' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_add_month """ select * from test_zd where date_add(d_z,interval 1 month) = '2022-02-01' order by 1; """ - explain { - sql("select * from test_zd where date_add(d_z,interval 1 month) = '2022-02-01' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_add_week """ select * from test_zd where date_add(d_z,interval 1 week) = '2022-01-08' order by 1; """ - explain { - sql("select * from test_zd where date_add(d_z,interval 1 week) = '2022-01-08' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_add_day """ select * from test_zd where date_add(d_z,interval 1 day) = '2022-01-02' order by 1; """ - explain { - sql("select * from test_zd where date_add(d_z,interval 1 day) = '2022-01-02' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_add_hour """ select * from test_zd where date_add(d_z,interval 1 hour) = '2022-01-01 01:00:00' order by 1; """ - explain { - sql("select * from test_zd where date_add(d_z,interval 1 hour) = '2022-01-01 01:00:00' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_add_min """ select * from test_zd where date_add(d_z,interval 1 minute) = '2022-01-01 00:01:00' order by 1; """ - explain { - sql("select * from test_zd where date_add(d_z,interval 1 minute) = '2022-01-01 00:01:00' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_add_sec """ select * from test_zd where date_add(d_z,interval 1 second) = '2022-01-01 00:00:01' order by 1; """ - explain { - sql("select * from test_zd where date_add(d_z,interval 1 second) = '2022-01-01 00:00:01' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - // date_sub - order_qt_date_sub_year """ select * from test_zd where date_sub(d_z,interval 1 year) = '2021-01-01' order by 1; """ - explain { - sql("select * from test_zd where date_sub(d_z,interval 1 year) = '2021-01-01' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_sub_month """ select * from test_zd where date_sub(d_z,interval 1 month) = '2021-12-01' order by 1; """ - explain { - sql("select * from test_zd where date_sub(d_z,interval 1 month) = '2021-12-01' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_sub_week """ select * from test_zd where date_sub(d_z,interval 1 week) = '2021-12-25' order by 1; """ - explain { - sql("select * from test_zd where 
date_sub(d_z,interval 1 week) = '2021-12-25' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_sub_day """ select * from test_zd where date_sub(d_z,interval 1 day) = '2021-12-31' order by 1; """ - explain { - sql("select * from test_zd where date_sub(d_z,interval 1 day) = '2021-12-31' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_sub_hour """ select * from test_zd where date_sub(d_z,interval 1 hour) = '2021-12-31 23:00:00' order by 1; """ - explain { - sql("select * from test_zd where date_sub(d_z,interval 1 hour) = '2021-12-31 23:00:00' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_sub_min """ select * from test_zd where date_sub(d_z,interval 1 minute) = '2021-12-31 23:59:00' order by 1; """ - explain { - sql("select * from test_zd where date_sub(d_z,interval 1 minute) = '2021-12-31 23:59:00' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - order_qt_date_sub_sec """ select * from test_zd where date_sub(d_z,interval 1 second) = '2021-12-31 23:59:59' order by 1; """ - explain { - sql("select * from test_zd where date_sub(d_z,interval 1 second) = '2021-12-31 23:59:59' order by 1;") - - contains " QUERY: SELECT `id`, `d_z` FROM `doris_test`.`test_zd` WHERE (`d_z` = '2022-01-01')" - } - - sql """ drop catalog if exists mysql_fun_push_catalog; """ - - // test insert null - - sql """drop catalog if exists ${catalog_name} """ - - sql """create catalog if not exists ${catalog_name} properties( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", - "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.jdbc.Driver" - );""" - - sql """switch ${catalog_name}""" - sql """ use ${ex_db_name}""" - - order_qt_auto_default_t1 """insert into ${auto_default_t}(name) values('a'); """ - test { - sql "insert into ${auto_default_t}(name,dt) values('a', null);" - exception "Column `dt` is not nullable, but the inserted value is nullable." - } - test { - sql "insert into ${auto_default_t}(name,dt) select '1', null;" - exception "Column `dt` is not nullable, but the inserted value is nullable." 
- } - explain { - sql "insert into ${auto_default_t}(name,dt) select col1,col12 from ex_tb15;" - contains "PreparedStatement SQL: INSERT INTO `doris_test`.`auto_default_t`(`name`,`dt`) VALUES (?, ?)" - } - order_qt_auto_default_t2 """insert into ${auto_default_t}(name,dt) select col1, coalesce(col12,'2022-01-01 00:00:00') from ex_tb15 limit 1;""" - sql """drop catalog if exists ${catalog_name} """ - - // test lower_case_meta_names - - sql """ drop catalog if exists mysql_lower_case_catalog """ - sql """ CREATE CATALOG mysql_lower_case_catalog PROPERTIES ( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", - "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.jdbc.Driver", - "lower_case_meta_names" = "true", - "meta_names_mapping" = '{"databases": [{"remoteDatabase": "DORIS","mapping": "doris_1"},{"remoteDatabase": "Doris","mapping": "doris_2"},{"remoteDatabase": "doris","mapping": "doris_3"}],"tables": [{"remoteDatabase": "Doris","remoteTable": "DORIS","mapping": "doris_1"},{"remoteDatabase": "Doris","remoteTable": "Doris","mapping": "doris_2"},{"remoteDatabase": "Doris","remoteTable": "doris","mapping": "doris_3"}]}' - ); - """ - - qt_sql "show databases from mysql_lower_case_catalog;" - qt_sql "show tables from mysql_lower_case_catalog.doris_2;" - qt_sql "select * from mysql_lower_case_catalog.doris_2.doris_1 order by id;" - qt_sql "select * from mysql_lower_case_catalog.doris_2.doris_2 order by id;" - qt_sql "select * from mysql_lower_case_catalog.doris_2.doris_3 order by id;" - - sql """ drop catalog if exists mysql_lower_case_catalog; """ - sql """ drop catalog if exists mysql_lower_case_catalog2; """ - test { - sql """ CREATE CATALOG mysql_lower_case_catalog2 PROPERTIES ( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", - "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.jdbc.Driver", - "lower_case_table_names" = "true", - "meta_names_mapping" = '{"databases": [{"remoteDatabase": "DORIS","mapping": "doris_1"},{"remoteDatabase": "Doris","mapping": "doris_2"},{"remoteDatabase": "doris","mapping": "doris_3"}],"tables": [{"remoteDatabase": "Doris","remoteTable": "DORIS","mapping": "doris_1"},{"remoteDatabase": "Doris","remoteTable": "Doris","mapping": "doris_2"},{"remoteDatabase": "Doris","remoteTable": "doris","mapping": "doris_3"}]}' - ); - """ - exception "Jdbc catalog property lower_case_table_names is not supported, please use lower_case_meta_names instead" - } - sql """ drop catalog if exists mysql_lower_case_catalog2; """ - sql """ drop catalog if exists mysql_lower_case_catalog3; """ - sql """ CREATE CATALOG mysql_lower_case_catalog3 PROPERTIES ( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}/doris_test?useSSL=false", - "driver_url" = "${driver_url}", - "driver_class" = "com.mysql.jdbc.Driver", - "lower_case_meta_names" = "true", - "meta_names_mapping" = "{\\\"databases\\\": [{\\\"remoteDatabase\\\": \\\"DORIS\\\",\\\"mapping\\\": \\\"doris_1\\\"},{\\\"remoteDatabase\\\": \\\"Doris\\\",\\\"mapping\\\": \\\"doris_2\\\"},{\\\"remoteDatabase\\\": \\\"doris\\\",\\\"mapping\\\": \\\"doris_3\\\"}],\\\"tables\\\": [{\\\"remoteDatabase\\\": \\\"Doris\\\",\\\"remoteTable\\\": \\\"DORIS\\\",\\\"mapping\\\": \\\"doris_1\\\"},{\\\"remoteDatabase\\\": \\\"Doris\\\",\\\"remoteTable\\\": \\\"Doris\\\",\\\"mapping\\\": 
\\\"doris_2\\\"},{\\\"remoteDatabase\\\": \\\"Doris\\\",\\\"remoteTable\\\": \\\"doris\\\",\\\"mapping\\\": \\\"doris_3\\\"}]}" - ); - """ - sql """ drop catalog if exists mysql_lower_case_catalog3; """ - } -} - diff --git a/regression-test/suites/fault_injection_p0/test_build_index_with_clone_fault.groovy b/regression-test/suites/fault_injection_p0/test_build_index_with_clone_fault.groovy index 156a582f55362b..ec175792d48de3 100644 --- a/regression-test/suites/fault_injection_p0/test_build_index_with_clone_fault.groovy +++ b/regression-test/suites/fault_injection_p0/test_build_index_with_clone_fault.groovy @@ -60,8 +60,8 @@ suite("test_build_index_with_clone_fault_injection", "nonConcurrent"){ if (show_build_index && show_build_index.size() > 0) { def currentState = show_build_index[0].State def currentMsg = show_build_index[0].Msg - if (currentState == expectedState && currentMsg == expectedMsg) { - logger.info("Attempt ${attempt + 1}: State and Msg match expected values.") + if ((currentState == expectedState && currentMsg == expectedMsg) || currentState == "FINISHED") { + logger.info(currentState+" "+currentMsg) return } else { logger.warn("Attempt ${attempt + 1}: Expected State='${expectedState}' and Msg='${expectedMsg}', but got State='${currentState}' and Msg='${currentMsg}'. Retrying after ${waitSeconds} second(s)...") @@ -109,10 +109,8 @@ suite("test_build_index_with_clone_fault_injection", "nonConcurrent"){ // create index on table sql """ create index idx_k2 on ${tbl}(k2) using inverted """ sql """ build index idx_k2 on ${tbl} """ - // sleep 5s to wait for the build index job report table is unstable - sleep(5000) - assertShowBuildIndexWithRetry(tbl, 'WAITING_TXN', 'table is unstable', 3, 5) + assertShowBuildIndexWithRetry(tbl, 'WAITING_TXN', 'table is unstable', 3, 10) def state = wait_for_last_build_index_on_table_finish(tbl, timeout) assertEquals(state, "FINISHED") diff --git a/regression-test/suites/fault_injection_p0/test_fix_tablet_stat_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_fix_tablet_stat_fault_injection.groovy new file mode 100644 index 00000000000000..d96f6f0ec48cd3 --- /dev/null +++ b/regression-test/suites/fault_injection_p0/test_fix_tablet_stat_fault_injection.groovy @@ -0,0 +1,159 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.codehaus.groovy.runtime.IOGroovyMethods +import org.apache.doris.regression.util.Http + +suite("test_fix_tablet_stat_fault_injection", "nonConcurrent") { + if(isCloudMode()){ + def tableName = "test_fix_tablet_stat_fault_injection" + def bucketSize = 10 + def partitionSize = 100 + def maxPartition = partitionSize + 1 + def create_table_sql = """ + CREATE TABLE IF NOT EXISTS ${tableName} + ( + `k1` INT NULL, + `v1` INT NULL, + `v2` INT NULL + ) + UNIQUE KEY (k1) + PARTITION BY RANGE(`k1`) + ( + FROM (1) TO (${maxPartition}) INTERVAL 1 + ) + DISTRIBUTED BY HASH(`k1`) BUCKETS ${bucketSize} + PROPERTIES ( + "replication_num" = "1", + "disable_auto_compaction" = "true" + ); + """ + def insertData = { + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + try { + // enable debug point + GetDebugPoint().enableDebugPointForAllBEs("CloudFullCompaction::modify_rowsets.wrong_compaction_data_size") + // insert data + sql """ DROP TABLE IF EXISTS ${tableName} """ + + sql "${create_table_sql}" + (1..partitionSize).each { i -> + sql "insert into ${tableName} values (${i},1,1);" + sql "insert into ${tableName} values (${i},2,2);" + sql "insert into ${tableName} values (${i},3,3);" + sql "insert into ${tableName} values (${i},4,4);" + sql "insert into ${tableName} values (${i},5,5);" + } + + sql "select count(*) from ${tableName};" + sleep(60000) + qt_select_1 "show data from ${tableName};" + + // check rowsets num + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ + // before full compaction, there are 6 rowsets. + int rowsetCount = 0 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) + logger.info("Show tablets status after insert data: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def tabletJson = parseJson(out.trim()) + assert tabletJson.rowsets instanceof List + rowsetCount +=((List) tabletJson.rowsets).size() + } + assert (rowsetCount == 6 * bucketSize * partitionSize) + + // trigger full compactions for all tablets in ${tableName} + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId + times = 1 + + do{ + (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) + ++times + } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10) + + def compactJson = parseJson(out.trim()) + assertEquals("success", compactJson.status.toLowerCase()) + } + + // wait for full compaction done + for (def tablet in tablets) { + boolean running = true + do { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId + (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def compactionStatus = parseJson(out.trim()) + assertEquals("success", compactionStatus.status.toLowerCase()) + running = compactionStatus.run_status + } while (running) + } + + sleep(60000) + // after full compaction, there are 2 rowsets. 
+ rowsetCount = 0 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) + logger.info("Show tablets status after full compaction: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def tabletJson = parseJson(out.trim()) + assert tabletJson.rowsets instanceof List + rowsetCount +=((List) tabletJson.rowsets).size() + } + // assert (rowsetCount == 2 * bucketSize * partitionSize) + + // data size should be very large + sql "select count(*) from ${tableName};" + qt_select_2 "show data from ${tableName};" + + + fix_tablet_stats(getTableId(tableName)) + + sleep(60000) + // after fix, there are 2 rowsets. + rowsetCount = 0 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) + //logger.info("Show tablets status after fix stats: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def tabletJson = parseJson(out.trim()) + assert tabletJson.rowsets instanceof List + rowsetCount +=((List) tabletJson.rowsets).size() + } + // assert (rowsetCount == 2 * bucketSize * partitionSize) + // after fix table stats, data size should be normal + sql "select count(*) from ${tableName};" + qt_select_3 "show data from ${tableName};" + } finally { + //try_sql("DROP TABLE IF EXISTS ${tableName}") + GetDebugPoint().disableDebugPointForAllBEs("CloudFullCompaction::modify_rowsets.wrong_compaction_data_size") + } + } + insertData() + } +} + diff --git a/regression-test/suites/fault_injection_p0/test_writer_v2_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_writer_v2_fault_injection.groovy index c0cbd1686eb685..f05a084035cb3f 100644 --- a/regression-test/suites/fault_injection_p0/test_writer_v2_fault_injection.groovy +++ b/regression-test/suites/fault_injection_p0/test_writer_v2_fault_injection.groovy @@ -73,8 +73,10 @@ suite("test_writer_v2_fault_injection", "nonConcurrent") { sql "insert into test select * from baseall where k1 <= 3" } catch(Exception e) { logger.info(e.getMessage()) - assertTrue(e.getMessage().contains(error_msg)) + assertTrue(e.getMessage().contains(error_msg), + String.format("expected '%s', actual '%s'", error_msg, e.getMessage())) } finally { + sleep 1000 // wait some time for instance finish before disable injection GetDebugPoint().disableDebugPointForAllBEs(injection) } } @@ -84,15 +86,15 @@ suite("test_writer_v2_fault_injection", "nonConcurrent") { // VTabletWriterV2 _vec_output_expr_ctxs not equal _output_tuple_slot load_with_injection("VTabletWriterV2._init._vec_output_expr_ctxs_not_equal_output_tuple_slot", "should be equal to output_expr_num") // VTabletWriterV2 node_info is null - load_with_injection("VTabletWriterV2._open_streams_to_backend.node_info_null", "Unknown node") + load_with_injection("VTabletWriterV2._open_streams_to_backend.node_info_null", "failed to open streams to any BE") // VTabletWriterV2 do not get tablet schema on open_streams load_with_injection("VTabletWriterV2._open_streams_to_backend.no_schema_when_open_streams", "success") // VTabletWriterV2 tablet_location is null load_with_injection("VTabletWriterV2._build_tablet_node_mapping.tablet_location_null", "unknown tablet location") // VTabletWriterV2 location is null - load_with_injection("VTabletWriterV2._select_streams.location_null", "failed to open DeltaWriter for tablet") + load_with_injection("VTabletWriterV2._select_streams.location_null", "failed to open DeltaWriter") // VTabletWriterV2 index not 
found - load_with_injection("VTabletWriterV2._write_memtable.index_not_found", "failed to open DeltaWriter for tablet") + load_with_injection("VTabletWriterV2._write_memtable.index_not_found", "failed to open DeltaWriter") // VTabletWriterV2 cancel load_with_injection("VTabletWriterV2.close.cancel", "load cancel") // VTabletWriterV2 load timeout before close_wait diff --git a/regression-test/suites/insert_overwrite_p0/test_iot_overwrite_and_create.groovy b/regression-test/suites/insert_overwrite_p0/test_iot_overwrite_and_create.groovy new file mode 100644 index 00000000000000..4d0b667dd44b84 --- /dev/null +++ b/regression-test/suites/insert_overwrite_p0/test_iot_overwrite_and_create.groovy @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_iot_overwrite_and_create") { + sql "set enable_auto_create_when_overwrite = true;" + + sql " drop table if exists auto_list; " + sql """ + create table auto_list( + k0 varchar null + ) + auto partition by list (k0) + ( + PARTITION p1 values in (("Beijing"), ("BEIJING")), + PARTITION p2 values in (("Shanghai"), ("SHANGHAI")), + PARTITION p3 values in (("xxx"), ("XXX")), + PARTITION p4 values in (("list"), ("LIST")), + PARTITION p5 values in (("1234567"), ("7654321")) + ) + DISTRIBUTED BY HASH(`k0`) BUCKETS 1 + properties("replication_num" = "1"); + """ + sql """ insert into auto_list values ("Beijing"),("Shanghai"),("xxx"),("list"),("1234567"); """ + qt_origin "select * from auto_list order by k0;" + + sql """insert overwrite table auto_list values ("SHANGHAI"), ("zzz");""" + qt_0 "select * from auto_list order by k0;" + sql """insert overwrite table auto_list values ("zzz2");""" + qt_1 "select * from auto_list order by k0;" + + test{ + sql """insert overwrite table auto_list partition(p1, p2) values ("zzz");""" + exception "Insert has filtered data in strict mode." + } + test{ + sql """insert overwrite table auto_list partition(p3) values ("zzz3");""" + exception "Insert has filtered data in strict mode." + } + + sql """ insert into auto_list values ("Beijing"),("Shanghai"),("xxx"),("list"),("1234567"); """ + sql """insert overwrite table auto_list partition(*) values ("abcd"), ("BEIJING");""" + qt_2 "select * from auto_list order by k0;" + + sql "set enable_auto_create_when_overwrite = false;" + test{ + sql """insert overwrite table auto_list values ("zzz3");""" + exception "Insert has filtered data in strict mode." + } + test{ + sql """insert overwrite table auto_list partition(p1, p2) values ("zzz");""" + exception "Insert has filtered data in strict mode." 
+ } + test{ + sql """insert overwrite table auto_list partition(*) values ("zzz3");""" + exception "Cannot found origin partitions in auto detect overwriting" + } +} diff --git a/regression-test/suites/insert_overwrite_p0/test_iot_overwrite_and_create_many.groovy b/regression-test/suites/insert_overwrite_p0/test_iot_overwrite_and_create_many.groovy new file mode 100644 index 00000000000000..dcade3ce211453 --- /dev/null +++ b/regression-test/suites/insert_overwrite_p0/test_iot_overwrite_and_create_many.groovy @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_iot_overwrite_and_create_many") { + sql "set enable_auto_create_when_overwrite = true;" + + sql " drop table if exists target; " + sql """ + create table target( + k0 varchar null + ) + auto partition by list (k0) + ( + PARTITION p1 values in (("Beijing"), ("BEIJING")), + PARTITION p2 values in (("Shanghai"), ("SHANGHAI")), + PARTITION p3 values in (("xxx"), ("XXX")), + PARTITION p4 values in (("list"), ("LIST")), + PARTITION p5 values in (("1234567"), ("7654321")) + ) + DISTRIBUTED BY HASH(`k0`) BUCKETS 2 + properties("replication_num" = "1"); + """ + sql """ insert into target values ("Beijing"),("Shanghai"),("xxx"),("list"),("1234567"); """ + + sql " drop table if exists source; " + sql """ + create table source( + k0 varchar null + ) + DISTRIBUTED BY HASH(`k0`) BUCKETS 10 + properties("replication_num" = "1"); + """ + + sql """ insert into source select "Beijing" from numbers("number" = "20000"); """ + sql """ insert into source select "Shanghai" from numbers("number" = "20000"); """ + sql """ insert into source select "zzz" from numbers("number"= "20000"); """ + def result + result = sql " show partitions from target; " + logger.info("origin: ${result}") + + sql " insert overwrite table target partition(*) select * from source; " + result = sql " show partitions from target; " + logger.info("changed: ${result}") + + qt_sql1 " select k0, count(k0) from target group by k0 order by k0; " + + sql """ insert into source select "yyy" from numbers("number" = "20000"); """ + sql " insert overwrite table target select * from source; " + qt_sql2 " select k0, count(k0) from target group by k0 order by k0; " +} diff --git a/regression-test/suites/insert_p0/transaction/txn_insert.groovy b/regression-test/suites/insert_p0/transaction/txn_insert.groovy index 44ed52f098e1c9..9459297a00b325 100644 --- a/regression-test/suites/insert_p0/transaction/txn_insert.groovy +++ b/regression-test/suites/insert_p0/transaction/txn_insert.groovy @@ -503,7 +503,7 @@ suite("txn_insert") { assertFalse(true, "should not reach here") } catch (Exception e) { logger.info("exception: " + e) - assertTrue(e.getMessage().contains("The transaction is already timeout") || 
e.getMessage().contains("Execute timeout")) + assertTrue(e.getMessage().contains("The transaction is already timeout") || e.getMessage().contains("timeout")) } finally { try { sql "rollback" diff --git a/regression-test/suites/inverted_index_p0/array_contains/test_add_index_for_arr.groovy b/regression-test/suites/inverted_index_p0/array_contains/test_add_index_for_arr.groovy new file mode 100644 index 00000000000000..6f3e772dd08151 --- /dev/null +++ b/regression-test/suites/inverted_index_p0/array_contains/test_add_index_for_arr.groovy @@ -0,0 +1,155 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + + +suite("test_add_index_for_arr") { + // prepare test table + def timeout = 60000 + def delta_time = 1000 + def alter_res = "null" + def useTime = 0 + // here some variable to control inverted index query + sql """ set enable_profile=true""" + sql """ set enable_pipeline_x_engine=true;""" + sql """ set enable_inverted_index_query=false""" + sql """ set enable_common_expr_pushdown=true """ + + def wait_for_latest_op_on_table_finish = { table_name, OpTimeout -> + for(int t = delta_time; t <= OpTimeout; t += delta_time){ + alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;""" + alter_res = alter_res.toString() + if(alter_res.contains("FINISHED")) { + sleep(3000) // wait change table state to normal + logger.info(table_name + " latest alter job finished, detail: " + alter_res) + break + } + useTime = t + sleep(delta_time) + } + assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout") + } + + def wait_for_build_index_on_partition_finish = { table_name, OpTimeout -> + for(int t = delta_time; t <= OpTimeout; t += delta_time){ + alter_res = sql """SHOW BUILD INDEX WHERE TableName = "${table_name}";""" + def expected_finished_num = alter_res.size(); + def finished_num = 0; + for (int i = 0; i < expected_finished_num; i++) { + logger.info(table_name + " build index job state: " + alter_res[i][7] + i) + if (alter_res[i][7] == "FINISHED") { + ++finished_num; + } + } + if (finished_num == expected_finished_num) { + logger.info(table_name + " all build index jobs finished, detail: " + alter_res) + break + } + useTime = t + sleep(delta_time) + } + assertTrue(useTime <= OpTimeout, "wait_for_latest_build_index_on_partition_finish timeout") + } + + + sql "DROP TABLE IF EXISTS my_test_array" + // create table without any index + sql """ + CREATE TABLE IF NOT EXISTS my_test_array ( + `id` int(11) NULL, + `name` ARRAY NULL, + `description` ARRAY NULL, + ) + DUPLICATE KEY(`id`) + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + properties("replication_num" = "1"); + """ + + def var_result = sql "show variables" + logger.info("show variales result: " + var_result ) + + // stream_load with csv 
data + streamLoad { + table "my_test_array" + + file "arr_null_test_data.csv" // import csv file + time 10000 // limit inflight 10s + set 'column_separator', '|' + set 'format', 'csv' + + // if declared a check callback, the default check condition will ignore. + // So you must check all condition + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals(200, json.NumberTotalRows) + assertEquals(200, json.NumberLoadedRows) + assertTrue(json.LoadBytes > 0) + } + } + + // query without inverted index + // query rows with array_contains + def sql_query_name1 = sql "select id, name[1], description[1] from my_test_array where array_contains(name,'text7')" + // query rows with !array_contains + def sql_query_name2 = sql "select id, name[1], description[1] from my_test_array where !array_contains(name,'text7')" + + // add index for name + sql "ALTER TABLE my_test_array ADD INDEX name_idx (name) USING INVERTED;" + wait_for_latest_op_on_table_finish("my_test_array", timeout) + // build index for name that name data can using inverted index + if (!isCloudMode()) { + sql "BUILD INDEX name_idx ON my_test_array" + wait_for_build_index_on_partition_finish("my_test_array", timeout) + } + + // query with inverted index + sql "set enable_inverted_index_query=true" + // query rows with array_contains + def sql_query_name1_inverted = sql "select id, name[1], description[1] from my_test_array where array_contains(name,'text7')" + // query rows with !array_contains + def sql_query_name2_inverted = sql "select id, name[1], description[1] from my_test_array where !array_contains(name,'text7')" + + // check result for query without inverted index and with inverted index + def size1 = sql_query_name1.size(); + log.info("sql_query_name1 query without inverted index rows size: ${size1}") + for (int i = 0; i < sql_query_name1.size(); i++) { + assertEquals(sql_query_name1[i][0], sql_query_name1_inverted[i][0]) + assertEquals(sql_query_name1[i][1], sql_query_name1_inverted[i][1]) + assertEquals(sql_query_name1[i][2], sql_query_name1_inverted[i][2]) + } + def size2 = sql_query_name2.size(); + log.info("sql_query_name2 query without inverted index rows size: ${size2}") + for (int i = 0; i < sql_query_name2.size(); i++) { + assertEquals(sql_query_name2[i][0], sql_query_name2_inverted[i][0]) + assertEquals(sql_query_name2[i][1], sql_query_name2_inverted[i][1]) + assertEquals(sql_query_name2[i][2], sql_query_name2_inverted[i][2]) + } + + // drop index + // add index on column description + sql "drop index name_idx on my_test_array" + wait_for_latest_op_on_table_finish("my_test_array", timeout) + + def sql_query_name1_without_inverted = sql "select id, name[1], description[1] from my_test_array where array_contains(name,'text7')" + def sql_query_name2_without_inverted = sql "select id, name[1], description[1] from my_test_array where !array_contains(name,'text7')" + + assertEquals(sql_query_name1.size(), sql_query_name1_without_inverted.size()) + assertEquals(sql_query_name2.size(), sql_query_name2_without_inverted.size()) +} diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy new file mode 100644 index 00000000000000..d25c4f149eb834 --- /dev/null +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_p0.groovy @@ 
-0,0 +1,151 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import java.util.concurrent.Executors +import java.util.concurrent.TimeUnit +import org.awaitility.Awaitility + +suite("test_index_compaction_p0", "p0, nonConcurrent") { + + def compaction_table_name = "httplogs" + + def load_json_data = {table_name, file_name -> + // load the json data + streamLoad { + table "${table_name}" + + // set http request header params + set 'read_json_by_line', 'true' + set 'format', 'json' + set 'max_filter_ratio', '0.1' + file file_name // import json file + time 10000 // limit inflight 10s + + // if declared a check callback, the default check condition will ignore. + // So you must check all condition + + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + logger.info("Stream load ${file_name} result: ${result}".toString()) + def json = parseJson(result) + assertEquals("success", json.Status.toLowerCase()) + // assertEquals(json.NumberTotalRows, json.NumberLoadedRows + json.NumberUnselectedRows) + assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0) + } + } + } + sql "DROP TABLE IF EXISTS ${compaction_table_name}" + sql """ + CREATE TABLE ${compaction_table_name} ( + `@timestamp` int(11) NULL, + `clientip` varchar(20) NULL, + `request` varchar(500) NULL, + `status` int NULL, + `size` int NULL, + INDEX clientip_idx (`clientip`) USING INVERTED COMMENT '', + INDEX request_idx (`request`) USING INVERTED PROPERTIES("parser" = "unicode") COMMENT '', + INDEX status_idx (`status`) USING INVERTED COMMENT '', + INDEX size_idx (`size`) USING INVERTED COMMENT '' + ) ENGINE=OLAP + DISTRIBUTED BY HASH(`@timestamp`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "compaction_policy" = "time_series", + "time_series_compaction_file_count_threshold" = "10", + "disable_auto_compaction" = "true" + ); + """ + def executor = Executors.newFixedThreadPool(20) + (1..20).each { i -> + executor.submit { + def fileName = "documents-" + i + ".json" + load_json_data.call(compaction_table_name, """${getS3Url()}/regression/inverted_index_cases/httplogs/${fileName}""") + } + } + executor.shutdown() + executor.awaitTermination(1, TimeUnit.MINUTES) + + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + + def set_be_config = { key, value -> + for (String backend_id: backendId_to_backendIP.keySet()) { + def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) + logger.info("update config: code=" + code + ", out=" + out + ", err=" + err) + } + } + set_be_config.call("inverted_index_compaction_enable", "true") + 
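+
+    // With auto compaction disabled and 20 single-segment stream loads into a
+    // single-bucket table, 20 segments are expected before cumulative compaction
+    // and 1 segment after it; the counts below verify that.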
//TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus + def tablets = sql_return_maparray """ show tablets from ${compaction_table_name}; """ + + int beforeSegmentCount = 0 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) + logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def tabletJson = parseJson(out.trim()) + assert tabletJson.rowsets instanceof List + for (String rowset in (List) tabletJson.rowsets) { + beforeSegmentCount += Integer.parseInt(rowset.split(" ")[1]) + } + } + assertEquals(beforeSegmentCount, 20) + + // trigger compactions for all tablets in ${tableName} + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId + (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def compactJson = parseJson(out.trim()) + assertEquals("success", compactJson.status.toLowerCase()) + } + + // wait for all compactions done + for (def tablet in tablets) { + Awaitility.await().atMost(1, TimeUnit.MINUTES).untilAsserted(() -> { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId + (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def compactionStatus = parseJson(out.trim()) + assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) + return compactionStatus.run_status; + }); + } + + int afterSegmentCount = 0 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) + logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def tabletJson = parseJson(out.trim()) + assert tabletJson.rowsets instanceof List + for (String rowset in (List) tabletJson.rowsets) { + logger.info("rowset is: " + rowset) + afterSegmentCount += Integer.parseInt(rowset.split(" ")[1]) + } + } + assertEquals(afterSegmentCount, 1) +} diff --git a/regression-test/suites/inverted_index_p0/test_count_on_index.groovy b/regression-test/suites/inverted_index_p0/test_count_on_index.groovy index 0143f37997a424..89d1e8f93b25ea 100644 --- a/regression-test/suites/inverted_index_p0/test_count_on_index.groovy +++ b/regression-test/suites/inverted_index_p0/test_count_on_index.groovy @@ -145,9 +145,39 @@ suite("test_count_on_index_httplogs", "p0") { sql """set experimental_enable_nereids_planner=true;""" sql """set enable_fallback_to_original_planner=false;""" sql """analyze table ${testTable_dup} with sync"""; - // wait BE report every partition's row count - sleep(10000) // case1: test duplicate table + def executeSqlWithRetry = { String sqlQuery, int maxRetries = 3, int waitSeconds = 1 -> + def attempt = 0 + def success = false + + while (attempt < maxRetries && !success) { + try { + explain { + // Wait for BE to report every partition's row count + sleep(10000) + sql(sqlQuery) + notContains("cardinality=0") + } 
+ success = true + } catch (Exception e) { + attempt++ + log.error("Attempt ${attempt} failed: ${e.message}") + if (attempt < maxRetries) { + log.info("Retrying... (${attempt + 1}/${maxRetries}) after ${waitSeconds} second(s).") + sleep(waitSeconds * 1000) + } else { + log.error("All ${maxRetries} attempts failed.") + throw e + } + } + } + } + // make sure row count stats is not 0 for duplicate table + executeSqlWithRetry("SELECT COUNT() FROM ${testTable_dup}") + // make sure row count stats is not 0 for unique table + sql """analyze table ${testTable_unique} with sync"""; + executeSqlWithRetry("SELECT COUNT() FROM ${testTable_unique}") + explain { sql("select COUNT() from ${testTable_dup} where request match 'GET'") contains "pushAggOp=COUNT_ON_INDEX" diff --git a/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy b/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy new file mode 100644 index 00000000000000..a64e4da7d82a19 --- /dev/null +++ b/regression-test/suites/inverted_index_p1/index_compaction/test_index_compaction_p1.groovy @@ -0,0 +1,151 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import java.util.concurrent.Executors +import java.util.concurrent.TimeUnit +import org.awaitility.Awaitility + +suite("test_index_compaction_p1", "p1, nonConcurrent") { + + def compaction_table_name = "httplogs" + + def load_json_data = {table_name, file_name -> + // load the json data + streamLoad { + table "${table_name}" + + // set http request header params + set 'read_json_by_line', 'true' + set 'format', 'json' + set 'max_filter_ratio', '0.1' + file file_name // import json file + time 10000 // limit inflight 10s + + // if declared a check callback, the default check condition will ignore. 
+ // So you must check all condition + + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + logger.info("Stream load ${file_name} result: ${result}".toString()) + def json = parseJson(result) + assertEquals("success", json.Status.toLowerCase()) + // assertEquals(json.NumberTotalRows, json.NumberLoadedRows + json.NumberUnselectedRows) + assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0) + } + } + } + sql "DROP TABLE IF EXISTS ${compaction_table_name}" + sql """ + CREATE TABLE ${compaction_table_name} ( + `@timestamp` int(11) NULL, + `clientip` varchar(20) NULL, + `request` varchar(500) NULL, + `status` int NULL, + `size` int NULL, + INDEX clientip_idx (`clientip`) USING INVERTED COMMENT '', + INDEX request_idx (`request`) USING INVERTED PROPERTIES("parser" = "unicode") COMMENT '', + INDEX status_idx (`status`) USING INVERTED COMMENT '', + INDEX size_idx (`size`) USING INVERTED COMMENT '' + ) ENGINE=OLAP + DISTRIBUTED BY HASH(`@timestamp`) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "compaction_policy" = "time_series", + "time_series_compaction_file_count_threshold" = "20", + "disable_auto_compaction" = "true" + ); + """ + def executor = Executors.newFixedThreadPool(50) + (1..110).each { i -> + executor.submit { + def fileName = "documents-" + i + ".json" + load_json_data.call(compaction_table_name, """${getS3Url()}/regression/inverted_index_cases/httplogs/${fileName}""") + } + } + executor.shutdown() + executor.awaitTermination(10, TimeUnit.MINUTES) + + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + + def set_be_config = { key, value -> + for (String backend_id: backendId_to_backendIP.keySet()) { + def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) + logger.info("update config: code=" + code + ", out=" + out + ", err=" + err) + } + } + set_be_config.call("inverted_index_compaction_enable", "true") + //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus + def tablets = sql_return_maparray """ show tablets from ${compaction_table_name}; """ + + int beforeSegmentCount = 0 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) + logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def tabletJson = parseJson(out.trim()) + assert tabletJson.rowsets instanceof List + for (String rowset in (List) tabletJson.rowsets) { + beforeSegmentCount += Integer.parseInt(rowset.split(" ")[1]) + } + } + assertEquals(beforeSegmentCount, 110) + + // trigger compactions for all tablets in ${tableName} + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId + (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def compactJson = parseJson(out.trim()) + assertEquals("success", compactJson.status.toLowerCase()) + } + + // wait for all compactions done + for (def tablet in tablets) { + 
Awaitility.await().atMost(10, TimeUnit.MINUTES).untilAsserted(() -> { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId + (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def compactionStatus = parseJson(out.trim()) + assertEquals("compaction task for this tablet is not running", compactionStatus.msg.toLowerCase()) + return compactionStatus.run_status; + }); + } + + int afterSegmentCount = 0 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) + logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def tabletJson = parseJson(out.trim()) + assert tabletJson.rowsets instanceof List + for (String rowset in (List) tabletJson.rowsets) { + logger.info("rowset is: " + rowset) + afterSegmentCount += Integer.parseInt(rowset.split(" ")[1]) + } + } + assertEquals(afterSegmentCount, 1) +} diff --git a/regression-test/suites/load_p0/stream_load/test_stream_load_job_status.groovy b/regression-test/suites/load_p0/stream_load/test_stream_load_job_status.groovy new file mode 100644 index 00000000000000..9cb38747e22811 --- /dev/null +++ b/regression-test/suites/load_p0/stream_load/test_stream_load_job_status.groovy @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
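+
+// Run the same stream load twice with an identical label: the first load should
+// return Status "Success"; the second should return "Label Already Exists" with
+// ExistingJobStatus "FINISHED".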
+ +suite("test_stream_load_job_status", "p0") { + def tableName = "test_stream_load_job_status" + + sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} ( + `k1` bigint(20) NULL, + `k2` bigint(20) NULL, + `v1` tinyint(4) SUM NULL, + `v2` tinyint(4) REPLACE NULL, + `v3` tinyint(4) REPLACE_IF_NOT_NULL NULL, + `v4` smallint(6) REPLACE_IF_NOT_NULL NULL, + `v5` int(11) REPLACE_IF_NOT_NULL NULL, + `v6` bigint(20) REPLACE_IF_NOT_NULL NULL, + `v7` largeint(40) REPLACE_IF_NOT_NULL NULL, + `v8` datetime REPLACE_IF_NOT_NULL NULL, + `v9` date REPLACE_IF_NOT_NULL NULL, + `v10` char(10) REPLACE_IF_NOT_NULL NULL, + `v11` varchar(6) REPLACE_IF_NOT_NULL NULL, + `v12` decimal(27, 9) REPLACE_IF_NOT_NULL NULL + ) ENGINE=OLAP + AGGREGATE KEY(`k1`, `k2`) + COMMENT 'OLAP' + PARTITION BY RANGE(`k1`) + (PARTITION partition_a VALUES [("-9223372036854775808"), ("100000")), + PARTITION partition_b VALUES [("100000"), ("1000000000")), + PARTITION partition_c VALUES [("1000000000"), ("10000000000")), + PARTITION partition_d VALUES [("10000000000"), (MAXVALUE))) + DISTRIBUTED BY HASH(`k1`, `k2`) BUCKETS 3 + PROPERTIES ("replication_allocation" = "tag.location.default: 1"); + """ + + streamLoad { + table "${tableName}" + set 'column_separator', '\t' + set 'label', 'test_stream_load_job_status' + set 'columns', 'k1, k2, v2, v10, v11' + set 'partitions', 'partition_a, partition_b, partition_c, partition_d' + set 'strict_mode', 'true' + + file 'test_strict_mode.csv' + time 10000 // limit inflight 10s + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals("success", json.Status.toLowerCase()) + } + } + + streamLoad { + table "${tableName}" + set 'column_separator', '\t' + set 'label', 'test_stream_load_job_status' + set 'columns', 'k1, k2, v2, v10, v11' + set 'partitions', 'partition_a, partition_b, partition_c, partition_d' + set 'strict_mode', 'true' + + file 'test_strict_mode.csv' + time 10000 // limit inflight 10s + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals("label already exists", json.Status.toLowerCase()) + assertEquals("finished", json.ExistingJobStatus.toLowerCase()) + } + } +} \ No newline at end of file diff --git a/regression-test/suites/manager/test_manager_interface_3.groovy b/regression-test/suites/manager/test_manager_interface_3.groovy index 21b4853da9aa31..52e5f5f99db55b 100644 --- a/regression-test/suites/manager/test_manager_interface_3.groovy +++ b/regression-test/suites/manager/test_manager_interface_3.groovy @@ -424,7 +424,7 @@ suite('test_manager_interface_3',"p0") { x ++ } } - assertTrue(x == 21) + assertTrue(x == 20) connect(user=user, password="${pwd}", url=url) { result = sql """ show resources """ @@ -435,7 +435,7 @@ suite('test_manager_interface_3',"p0") { x ++ } } - assertTrue(x == 21) + assertTrue(x == 20) } @@ -469,7 +469,7 @@ suite('test_manager_interface_3',"p0") { } sql """grant USAGE_PRIV on RESOURCE ${resource_name} TO '${user}' """ - connect(user=user, password="${pwd}", url=url) { + connect(user=user, password="${pwd}", url=url) { result = sql """ show resources """ x = 0 for(int i = 0;i 1 + and o_orderkey IN (1, 3) then o_custkey else null end + ) + ) cnt_1, + bitmap_union( + to_bitmap( + case when o_shippriority > 2 + and o_orderkey IN (2) 
then o_custkey else null end + ) + ) as cnt_2 + from + lineitem_1 + inner join orders_1 on lineitem_1.l_orderkey = orders_1.o_orderkey + where + lineitem_1.l_shipdate >= "2023-10-17" + group by + l_orderkey, + l_partkey, + l_suppkey, + o_orderkey, + o_custkey + ) as t + inner join partsupp_1 on t.l_partkey = partsupp_1.ps_partkey + and t.l_suppkey = partsupp_1.ps_suppkey + where + partsupp_1.ps_suppkey > 1 + group by + l_orderkey, + l_partkey, + l_suppkey, + o_orderkey, + o_custkey, + ps_partkey, + ps_suppkey, + agg1, + agg3, + agg4, + agg5, + agg6 + ) as t1 + left join ( + select + l_orderkey, + l_partkey, + l_suppkey, + o_orderkey, + o_custkey, + ps_partkey, + ps_suppkey, + t.agg1 as agg1, + t.sum_total as agg3, + t.max_total as agg4, + t.min_total as agg5, + t.count_all as agg6, + cast( + sum( + IFNULL(ps_suppkey, 0) * IFNULL(ps_partkey, 0) + ) as decimal(28, 8) + ) as agg2 + from + ( + select + l_orderkey, + l_partkey, + l_suppkey, + o_orderkey, + o_custkey, + cast( + sum( + IFNULL(o_orderkey, 0) * IFNULL(o_custkey, 0) + ) as decimal(28, 8) + ) as agg1, + sum(o_totalprice) as sum_total, + max(o_totalprice) as max_total, + min(o_totalprice) as min_total, + count(*) as count_all, + bitmap_union( + to_bitmap( + case when o_shippriority > 1 + and o_orderkey IN (1, 3) then o_custkey else null end + ) + ) cnt_1, + bitmap_union( + to_bitmap( + case when o_shippriority > 2 + and o_orderkey IN (2) then o_custkey else null end + ) + ) as cnt_2 + from + lineitem_1 + inner join orders_1 on lineitem_1.l_orderkey = orders_1.o_orderkey + where + lineitem_1.l_shipdate >= "2023-10-17" + group by + l_orderkey, + l_partkey, + l_suppkey, + o_orderkey, + o_custkey + ) as t + inner join partsupp_1 on t.l_partkey = partsupp_1.ps_partkey + and t.l_suppkey = partsupp_1.ps_suppkey + where + partsupp_1.ps_suppkey > 1 + group by + l_orderkey, + l_partkey, + l_suppkey, + o_orderkey, + o_custkey, + ps_partkey, + ps_suppkey, + agg1, + agg3, + agg4, + agg5, + agg6 + ) as t2 on t1.l_orderkey = t2.l_orderkey + where + t1.l_orderkey > 1 + group by + t1.l_orderkey, + t2.l_partkey, + t1.l_suppkey, + t2.o_orderkey, + t1.o_custkey, + t2.ps_partkey, + t1.ps_suppkey, + t2.agg1, + t1.agg2, + t2.agg3, + t1.agg4, + t2.agg5, + t1.agg6 + order by 1, 2, 3, 4, 5, 6,7, 8, 9; + """ + order_qt_shuffle "${query}" +} \ No newline at end of file diff --git a/regression-test/suites/nereids_p0/join/test_join_on.groovy b/regression-test/suites/nereids_p0/join/test_join_on.groovy index 02b04479e98688..2415a3496cf718 100644 --- a/regression-test/suites/nereids_p0/join/test_join_on.groovy +++ b/regression-test/suites/nereids_p0/join/test_join_on.groovy @@ -39,7 +39,7 @@ suite("test_join_on", "nereids_p0") { qt_sql """ select * from join_on order by k1; """ test { sql """ select * from join_on as j1 inner join join_on as j2 on j1.d_array = j2.d_array; """ - exception "Method get_max_row_byte_size is not supported for Array" + exception "meet invalid type, type=Array(Nullable(Int32))" } test { sql """ select * from join_on as j1 inner join join_on as j2 on j1.hll_col = j2.hll_col; """ diff --git a/regression-test/suites/nereids_p0/union/test_union.groovy b/regression-test/suites/nereids_p0/union/test_union.groovy index 5e9a9e71bf38b0..2d6f84235fe727 100644 --- a/regression-test/suites/nereids_p0/union/test_union.groovy +++ b/regression-test/suites/nereids_p0/union/test_union.groovy @@ -21,6 +21,8 @@ suite("test_union") { SET enable_fallback_to_original_planner=false; set disable_nereids_rules='PRUNE_EMPTY_PARTITION'; """ + String suiteName = 
"nereids_union_test_union" + String viewName = "${suiteName}_view" def db = "nereids_test_query_db" sql "use ${db}" @@ -177,14 +179,14 @@ suite("test_union") { // test_union_bug // PALO-3617 qt_union36 """select * from (select 1 as a, 2 as b union select 3, 3) c where a = 1""" - sql """drop view if exists nullable""" - sql """CREATE VIEW `nullable` AS SELECT `a`.`k1` AS `n1`, `b`.`k2` AS `n2` + sql """drop view if exists ${viewName}""" + sql """CREATE VIEW `${viewName}` AS SELECT `a`.`k1` AS `n1`, `b`.`k2` AS `n2` FROM `${db}`.`baseall` a LEFT OUTER JOIN `${db}`.`bigtable` b ON `a`.`k1` = `b`.`k1` + 10 WHERE `b`.`k2` IS NULL""" - order_qt_union37 """select n1 from nullable union all select n2 from nullable""" - qt_union38 """(select n1 from nullable) union all (select n2 from nullable order by n1) order by n1""" - qt_union39 """(select n1 from nullable) union all (select n2 from nullable) order by n1""" + order_qt_union37 """select n1 from ${viewName} union all select n2 from ${viewName}""" + qt_union38 """(select n1 from ${viewName}) union all (select n2 from ${viewName} order by n1) order by n1""" + qt_union39 """(select n1 from ${viewName}) union all (select n2 from ${viewName}) order by n1""" // test_union_different_column diff --git a/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/create_commit_mtmv_many_task.groovy b/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/create_commit_mtmv_many_task.groovy new file mode 100644 index 00000000000000..b61918fe713ef3 --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/create_commit_mtmv_many_task.groovy @@ -0,0 +1,200 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import java.time.LocalDate + +suite("create_commit_mtmv_many_tasks", "p2") { + + def src_database_name = context.config.getDbNameByFile(context.file) + sql """drop database if exists ${src_database_name};""" + sql """create database ${src_database_name};""" + sql """use ${src_database_name};""" + + def table_name1 = "lineitem" + def table_name2 = "orders" + sql """drop table if exists ${table_name1}""" + sql """drop table if exists ${table_name2}""" + sql """CREATE TABLE lineitem ( + l_orderkey bigint NOT NULL, + l_linenumber int not null, + l_partkey int NOT NULL, + l_suppkey int not null, + l_quantity decimal(15, 2) NOT NULL, + l_extendedprice decimal(15, 2) NOT NULL, + l_discount decimal(15, 2) NOT NULL, + l_tax decimal(15, 2) NOT NULL, + l_returnflag VARCHAR(1) NOT NULL, + l_linestatus VARCHAR(1) NOT NULL, + l_shipdate DATE NOT NULL, + l_commitdate DATE NOT NULL, + l_receiptdate DATE NOT NULL, + l_shipinstruct VARCHAR(25) NOT NULL, + l_shipmode VARCHAR(10) NOT NULL, + l_comment VARCHAR(44) NOT NULL, + l_null VARCHAR(1) NULL + )ENGINE=OLAP + UNIQUE KEY(`l_orderkey`) + COMMENT "OLAP" + DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_num" = "1", + "colocate_with" = "lineitem_orders", + "enable_unique_key_merge_on_write" = "true" + );""" + sql """CREATE TABLE orders ( + o_orderkey bigint NOT NULL, + o_custkey int NOT NULL, + o_orderstatus VARCHAR(1) NOT NULL, + o_totalprice decimal(15, 2) NOT NULL, + o_orderdate DATE NOT NULL, + o_orderpriority VARCHAR(15) NOT NULL, + o_clerk VARCHAR(15) NOT NULL, + o_shippriority int NOT NULL, + o_comment VARCHAR(79) NOT NULL, + o_null VARCHAR(1) NULL + )ENGINE=OLAP + UNIQUE KEY(`o_orderkey`) + COMMENT "OLAP" + DISTRIBUTED BY HASH(`o_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_num" = "1", + "colocate_with" = "lineitem_orders", + "enable_unique_key_merge_on_write" = "false" + );""" + + def stream_load_job = { table_name, src_file_name -> + streamLoad { + table table_name + set 'column_separator', '|' + file """${getS3Url() + '/regression/tpch/sf1/'}${src_file_name}""" + + check { result, exception, startTime, endTime -> + if (exception != null) { + throw exception + } + log.info("Stream load result: ${result}".toString()) + def json = parseJson(result) + assertEquals("success", json.Status.toLowerCase()) + assertEquals(json.NumberTotalRows, json.NumberLoadedRows) + assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0) + } + + } + sql "select count(*) from ${table_name}" + } + + for (int i = 1; i <= 10; i++) { + stream_load_job(table_name1, "lineitem.tbl.${i}") + stream_load_job(table_name2, "orders.tbl.${i}") + } + + def dst_database_name = "wz_tpch_mtmv_hit_property" + sql """drop database if exists ${dst_database_name};""" + sql """create database ${dst_database_name};""" + sql """use ${dst_database_name};""" + + sql """drop table if exists ${table_name1}""" + sql """drop table if exists ${table_name2}""" + sql """CREATE TABLE `lineitem` ( + `l_orderkey` BIGINT NOT NULL, + `l_linenumber` INT NOT NULL, + `l_partkey` INT NOT NULL, + `l_suppkey` INT NOT NULL, + `l_quantity` DECIMAL(15, 2) NOT NULL, + `l_extendedprice` DECIMAL(15, 2) NOT NULL, + `l_discount` DECIMAL(15, 2) NOT NULL, + `l_tax` DECIMAL(15, 2) NOT NULL, + `l_returnflag` VARCHAR(1) NOT NULL, + `l_linestatus` VARCHAR(1) NOT NULL, + `l_commitdate` DATE NOT NULL, + `l_receiptdate` DATE NOT NULL, + `l_shipinstruct` VARCHAR(25) NOT NULL, + `l_shipmode` VARCHAR(10) NOT NULL, + `l_comment` VARCHAR(44) NOT NULL, + `l_shipdate` DATE NOT NULL + ) ENGINE=OLAP + 
DUPLICATE KEY(l_orderkey, l_linenumber, l_partkey, l_suppkey ) + COMMENT 'OLAP' + AUTO PARTITION BY range(date_trunc(`l_shipdate`, 'day')) () + DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + sql """CREATE TABLE `orders` ( + `o_orderkey` BIGINT NOT NULL, + `o_custkey` INT NOT NULL, + `o_orderstatus` VARCHAR(1) NOT NULL, + `o_totalprice` DECIMAL(15, 2) NOT NULL, + `o_orderpriority` VARCHAR(15) NOT NULL, + `o_clerk` VARCHAR(15) NOT NULL, + `o_shippriority` INT NOT NULL, + `o_comment` VARCHAR(79) NOT NULL, + `o_orderdate` DATE NOT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`o_orderkey`, `o_custkey`) + COMMENT 'OLAP' + AUTO PARTITION BY range(date_trunc(`o_orderdate`, 'day')) () + DISTRIBUTED BY HASH(`o_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + sql """drop MATERIALIZED VIEW if exists mv1;""" + sql """ + CREATE MATERIALIZED VIEW mv1 + BUILD IMMEDIATE REFRESH ON COMMIT + partition by(l_shipdate) + DISTRIBUTED BY RANDOM BUCKETS 2 + PROPERTIES ('replication_num' = '1') AS + select l_shipdate, l_orderkey from lineitem as t1 left join orders as t2 on t1.l_orderkey = t2.o_orderkey group by l_shipdate, l_orderkey; + """ + + def insert_into_select = { date_it -> + sql """INSERT INTO ${dst_database_name}.${table_name1} + SELECT l_orderkey, l_linenumber, l_partkey, l_suppkey, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment, '${date_it}' AS new_date_column + FROM ${src_database_name}.${table_name1};""" + + sql """INSERT INTO ${dst_database_name}.${table_name2} + SELECT o_orderkey, o_custkey, o_orderstatus, o_totalprice, o_orderpriority, o_clerk, o_shippriority, o_comment, '${date_it}' AS new_date_column + FROM ${src_database_name}.${table_name2}""" + } + + def get_next_day = { def date_it -> + def date = LocalDate.parse(date_it) + def next_day = date.plusDays(1) + return next_day + } + + def start_date = "2023-12-01" + while (true) { + if (start_date.toString() == "2024-03-11") { + break + } + logger.info("task load start") + insert_into_select(start_date) + start_date = get_next_day(start_date.toString()) + } + + def job_name = getJobName(dst_database_name, "mv1") + waitingMTMVTaskFinished(job_name) + def task_num = sql """select count(*) from tasks("type"="mv") where JobName="${job_name}";""" + assertTrue(task_num[0][0] < 100) + + def mv_row_count = sql """select count(1) from mv1;""" + def real_row_count = sql """select count(1) from (select l_shipdate, l_orderkey from lineitem as t1 left join orders as t2 on t1.l_orderkey = t2.o_orderkey group by l_shipdate, l_orderkey) t;""" + assertTrue(mv_row_count[0][0] == real_row_count[0][0]) + +} diff --git a/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_date_datetrunc_part_up.groovy b/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_date_datetrunc_part_up.groovy new file mode 100644 index 00000000000000..c86f5d6a141f7f --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_date_datetrunc_part_up.groovy @@ -0,0 +1,142 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("mtmv_range_date_datetrunc_date_part_up") { + + String db = context.config.getDbNameByFile(context.file) + sql "use ${db}" + sql "SET enable_nereids_planner=true" + sql "SET enable_fallback_to_original_planner=false" + sql "SET enable_materialized_view_rewrite=true" + sql "SET enable_nereids_timeout = false" + String mv_prefix = "range_datetrunc_date_up" + String tb_name = mv_prefix + "_tb" + String mv_name = mv_prefix + "_mv" + + sql """ + drop table if exists ${tb_name} + """ + + sql """CREATE TABLE `${tb_name}` ( + `l_orderkey` BIGINT NULL, + `l_linenumber` INT NULL, + `l_partkey` INT NULL, + `l_suppkey` INT NULL, + `l_quantity` DECIMAL(15, 2) NULL, + `l_extendedprice` DECIMAL(15, 2) NULL, + `l_discount` DECIMAL(15, 2) NULL, + `l_tax` DECIMAL(15, 2) NULL, + `l_returnflag` VARCHAR(1) NULL, + `l_linestatus` VARCHAR(1) NULL, + `l_commitdate` DATE NULL, + `l_receiptdate` DATE NULL, + `l_shipinstruct` VARCHAR(25) NULL, + `l_shipmode` VARCHAR(10) NULL, + `l_comment` VARCHAR(44) NULL, + `l_shipdate` DATEtime not NULL + ) ENGINE=OLAP + DUPLICATE KEY(l_orderkey, l_linenumber, l_partkey, l_suppkey ) + COMMENT 'OLAP' + partition by range (`l_shipdate`) ( + partition p1 values [("2023-10-29 00:00:00"), ("2023-10-29 01:00:00")), + partition p2 values [("2023-10-29 01:00:00"), ("2023-10-29 02:00:00")), + partition p3 values [("2023-10-29 02:00:00"), ("2023-10-29 03:00:00")), + partition p4 values [("2023-10-29 03:00:00"), ("2023-10-29 04:00:00")), + partition p5 values [("2023-10-29 04:00:00"), ("2023-10-29 05:00:00")) + ) + DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + + sql """ + insert into ${tb_name} values + (null, 1, 2, 3, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29 00:00:00'), + (1, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-10-29 01:00:00'), + (3, 3, null, 2, 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', '2023-10-19', 'c', 'd', 'xxxxxxxxx', '2023-10-29 02:00:00'), + (1, 2, 3, null, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29 03:00:00'), + (2, 3, 2, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', null, '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-10-29 04:00:00'); + """ + + def compare_res = { def stmt -> + sql "SET enable_materialized_view_rewrite=false" + def origin_res = sql stmt + logger.info("origin_res: " + origin_res) + sql "SET enable_materialized_view_rewrite=true" + def mv_origin_res = sql stmt + logger.info("mv_origin_res: " + mv_origin_res) + assertTrue((mv_origin_res == [] && origin_res == []) || (mv_origin_res.size() == origin_res.size())) + for (int row = 0; row < mv_origin_res.size(); row++) { + assertTrue(mv_origin_res[row].size() == origin_res[row].size()) + for (int col = 0; col < mv_origin_res[row].size(); col++) { + assertTrue(mv_origin_res[row][col] == 
origin_res[row][col]) + } + } + } + + def create_mv = { cur_mv_name, mv_sql, col_name, date_trunc_range -> + sql """DROP MATERIALIZED VIEW IF EXISTS ${cur_mv_name};""" + sql """DROP TABLE IF EXISTS ${cur_mv_name}""" + sql""" + CREATE MATERIALIZED VIEW ${cur_mv_name} + BUILD IMMEDIATE REFRESH AUTO ON MANUAL + partition by(date_trunc(`${col_name}`, '${date_trunc_range}')) + DISTRIBUTED BY RANDOM BUCKETS 2 + PROPERTIES ('replication_num' = '1') + AS + ${mv_sql} + """ + } + + def select_list1_1 = "l_shipdate" + def select_list1_2 = "date_trunc(`l_shipdate`, 'day') as col1" + def select_list1_3 = "DATE_FORMAT(`l_shipdate`, '%Y-%m-%d')" + def select_list2_1 = "date_trunc(`l_shipdate`, 'day') as col1, l_shipdate" + def select_list2_2 = "date_trunc(`l_shipdate`, 'day') as col1, DATE_FORMAT(`l_shipdate`, '%Y-%m-%d')" + def select_list2_3 = "l_shipdate, DATE_FORMAT(`l_shipdate`, '%Y-%m-%d')" + def select_list3_1 = "date_trunc(`l_shipdate`, 'day') as col1, l_shipdate, DATE_FORMAT(`l_shipdate`, '%Y-%m-%d')" + def select_list3_2 = "date_trunc(`l_shipdate`, 'day') as col1, DATE_FORMAT(`l_shipdate`, '%Y-%m-%d'), l_shipdate" + def select_list3_3 = "l_shipdate, DATE_FORMAT(`l_shipdate`, '%Y-%m-%d'), date_trunc(`l_shipdate`, 'day') as col1" + + def select_lists = [select_list1_1, select_list1_2, select_list1_3, select_list2_1, select_list2_2, + select_list2_3, select_list3_1, select_list3_2, select_list3_3] + for (int i = 0; i < select_lists.size(); i++) { + for (int j = 0; j < select_lists.size(); j++) { + if (i == j || j > 5) { + def group_by_str = select_lists[j].replaceAll("as col1", "") + def str = "select " + select_lists[i] + " from ${tb_name} group by " + group_by_str + sql str + + if (select_lists[i].replaceAll("`l_shipdate`", "").indexOf("l_shipdate") != -1) { + create_mv(mv_name, str, "l_shipdate", "day") + waitingMTMVTaskFinishedByMvName(mv_name) + mv_rewrite_success(str, mv_name) + compare_res(str + " order by 1,2,3") + } + + if (select_lists[i].indexOf("col1") != -1) { + create_mv(mv_name, str, "col1", "day") + waitingMTMVTaskFinishedByMvName(mv_name) + mv_rewrite_success(str, mv_name) + compare_res(str + " order by 1,2,3") + } + + } + } + } + +} diff --git a/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_date_part_up.groovy b/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_date_part_up.groovy new file mode 100644 index 00000000000000..5be6b1afdcdc44 --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_date_part_up.groovy @@ -0,0 +1,207 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
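+ +// Covered scenario: a day-range-partitioned lineitem table feeds ten MTMVs whose partition expressions use
+// l_shipdate directly or date_trunc by day/month/year (mv8 is expected to fail creation); after each base-table
+// partition change the suite refreshes every MTMV and asserts either the expected partition count or a FAILED
+// refresh task when the new base partition (spanning a month or a year) cannot be mapped to the MTMV partition unit.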
+ +suite("mtmv_range_date_part_up") { + + String db = context.config.getDbNameByFile(context.file) + sql "use ${db}" + sql "SET enable_nereids_planner=true" + sql "SET enable_fallback_to_original_planner=false" + sql "SET enable_materialized_view_rewrite=true" + sql "SET enable_nereids_timeout = false" + String mv_prefix = "range_date_up" + + sql """ + drop table if exists lineitem_range_date + """ + + sql """CREATE TABLE `lineitem_range_date` ( + `l_orderkey` BIGINT NULL, + `l_linenumber` INT NULL, + `l_partkey` INT NULL, + `l_suppkey` INT NULL, + `l_quantity` DECIMAL(15, 2) NULL, + `l_extendedprice` DECIMAL(15, 2) NULL, + `l_discount` DECIMAL(15, 2) NULL, + `l_tax` DECIMAL(15, 2) NULL, + `l_returnflag` VARCHAR(1) NULL, + `l_linestatus` VARCHAR(1) NULL, + `l_commitdate` DATE NULL, + `l_receiptdate` DATE NULL, + `l_shipinstruct` VARCHAR(25) NULL, + `l_shipmode` VARCHAR(10) NULL, + `l_comment` VARCHAR(44) NULL, + `l_shipdate` DATE not NULL + ) ENGINE=OLAP + DUPLICATE KEY(l_orderkey, l_linenumber, l_partkey, l_suppkey ) + COMMENT 'OLAP' + partition by range (`l_shipdate`) ( + partition p1 values [("2023-10-29"), ("2023-10-30")), + partition p2 values [("2023-10-30"), ("2023-10-31")), + partition p3 values [("2023-10-31"), ("2023-11-01"))) + DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv1;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv1 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(col1) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select l_shipdate as col1 from lineitem_range_date;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv2_1;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv2_1 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(col1) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'day') as col1 from lineitem_range_date;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv2_2;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv2_2 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(l_shipdate) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'day') as col1, l_shipdate from lineitem_range_date;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv3;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv3 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`l_shipdate`, 'day')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select l_shipdate from lineitem_range_date;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv4_1;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv4_1 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`l_shipdate`, 'day')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'day') as col1, l_shipdate from lineitem_range_date;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv4_2;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv4_2 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`col1`, 'day')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'day') as col1 from lineitem_range_date;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv5;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv5 BUILD IMMEDIATE REFRESH AUTO ON MANUAL 
partition by(col1) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'month') as col1 from lineitem_range_date;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv6;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv6 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`l_shipdate`, 'month')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select l_shipdate from lineitem_range_date;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv7_1;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv7_1 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`col1`, 'year')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'month') as col1 from lineitem_range_date;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv7_2;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv7_2 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`l_shipdate`, 'year')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'month') as col1, l_shipdate from lineitem_range_date;""" + + // don't create + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv8;""" + try { + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv8 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`col1`, 'month')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'year') as col1, l_shipdate from lineitem_range_date;""" + } catch (Exception e) { + log.info(e.getMessage()) + assertTrue(e.getMessage().contains("Unable to find a suitable base table for partitioning")) + } + + sql """ + insert into lineitem_range_date values + (null, 1, 2, 3, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29'), + (1, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-10-29'), + (3, 3, null, 2, 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', '2023-10-19', 'c', 'd', 'xxxxxxxxx', '2023-10-31'), + (1, 2, 3, null, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29'), + (2, 3, 2, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', null, '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-10-30'), + (3, 1, 1, 2, 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', null, 'c', 'd', 'xxxxxxxxx', '2023-10-31'), + (1, 3, 2, 2, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29'); + """ + + def get_part = { def mv_name -> + def part_res = sql """show partitions from ${mv_name}""" + return part_res.size() + } + + def localWaitingMTMVTaskFinished = { def jobName -> + Thread.sleep(2000); + String showTasks = "select TaskId,JobId,JobName,MvId,Status,MvName,MvDatabaseName,ErrorMsg from tasks('type'='mv') where JobName = '${jobName}' order by CreateTime ASC" + String status = "NULL" + List<List<Object>> result + long startTime = System.currentTimeMillis() + long timeoutTimestamp = startTime + 5 * 60 * 1000 // 5 min + do { + result = sql(showTasks) + logger.info("result: " + result.toString()) + if (!result.isEmpty()) { + status = result.last().get(4) + } + logger.info("The state of ${showTasks} is ${status}") + Thread.sleep(1000); + } while (timeoutTimestamp > System.currentTimeMillis() && (status == 'PENDING' || status == 'RUNNING' || status == 'NULL')) + if (status != "SUCCESS") { + logger.info("status is not success") + } + } + + def mv_name_list = 
["${mv_prefix}_mv1", "${mv_prefix}_mv2_1", "${mv_prefix}_mv2_2", "${mv_prefix}_mv3", "${mv_prefix}_mv4_1", "${mv_prefix}_mv4_2", "${mv_prefix}_mv5", "${mv_prefix}_mv6", "${mv_prefix}_mv7_1", "${mv_prefix}_mv7_2"] + def mv_part = [3, 3, 3, 3, 3, 3, 1, 1, 1, 1] + for (int i = 0; i < mv_name_list.size(); i++) { + sql """refresh MATERIALIZED VIEW ${mv_name_list[i]} auto;""" + def job_name = getJobName(db, mv_name_list[i]) + waitingMTMVTaskFinished(job_name) + assertEquals(get_part(mv_name_list[i]), mv_part[i]) + } + + sql """alter table lineitem_range_date add partition p4 values [("2023-11-01"), ("2023-11-02"));""" + sql """insert into lineitem_range_date values + (1, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-11-01')""" + + mv_part = [4, 4, 4, 4, 4, 4, 2, 2, 1, 1] + for (int i = 0; i < mv_name_list.size(); i++) { + sql """refresh MATERIALIZED VIEW ${mv_name_list[i]} auto;""" + def job_name = getJobName(db, mv_name_list[i]) + waitingMTMVTaskFinished(job_name) + assertEquals(get_part(mv_name_list[i]), mv_part[i]) + } + + sql """alter table lineitem_range_date add partition p5 values [("2023-11-02"), ("2023-12-02"));""" + sql """insert into lineitem_range_date values + (1, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-11-02')""" + + mv_part = [5, -1, 5, -1, -1, -1, -1, -1, 1, 1] + for (int i = 0; i < mv_name_list.size(); i++) { + sql """refresh MATERIALIZED VIEW ${mv_name_list[i]} auto;""" + def job_name = getJobName(db, mv_name_list[i]) + if (i in [1, 3, 4, 5, 6, 7]) { + localWaitingMTMVTaskFinished(job_name) + def mv_task = sql "select TaskId,JobId,JobName,MvId,Status,MvName,MvDatabaseName,ErrorMsg from tasks('type'='mv') where JobName = '${job_name}' order by CreateTime DESC" + logger.info("mv_task: " + mv_task) + assertEquals("FAILED", mv_task[0][4]) + } else { + waitingMTMVTaskFinished(job_name) + assertEquals(get_part(mv_name_list[i]), mv_part[i]) + } + } + + sql """alter table lineitem_range_date add partition p6 values [("2023-12-02"), ("2024-12-02"));""" + sql """insert into lineitem_range_date values + (1, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2024-12-01')""" + + mv_part = [6, -1, 6, -1, -1, -1, -1, -1, -1, -1] + for (int i = 0; i < mv_name_list.size(); i++) { + sql """refresh MATERIALIZED VIEW ${mv_name_list[i]} auto;""" + if (i in [1, 3, 4, 5, 6, 7, 8, 9]) { + def job_name = getJobName(db, mv_name_list[i]) + localWaitingMTMVTaskFinished(job_name) + def mv_task = sql "select TaskId,JobId,JobName,MvId,Status,MvName,MvDatabaseName,ErrorMsg from tasks('type'='mv') where JobName = '${job_name}' order by CreateTime DESC" + assertEquals("FAILED", mv_task[0][4]) + } else { + def job_name = getJobName(db, mv_name_list[i]) + waitingMTMVTaskFinished(job_name) + assertEquals(get_part(mv_name_list[i]), mv_part[i]) + } + } + +} diff --git a/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_date_part_up_rewrite.groovy b/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_date_part_up_rewrite.groovy new file mode 100644 index 00000000000000..8bf629d93841fb --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_date_part_up_rewrite.groovy @@ -0,0 +1,213 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("mtmv_range_date_part_up_rewrite") { + + String db = context.config.getDbNameByFile(context.file) + sql "use ${db}" + sql "SET enable_nereids_planner=true" + sql "SET enable_fallback_to_original_planner=false" + sql "SET enable_materialized_view_rewrite=true" + sql "SET enable_materialized_view_nest_rewrite=true" + sql "SET enable_materialized_view_union_rewrite=true" + sql "SET enable_nereids_timeout = false" + String mv_prefix = "range_date_up_union" + + sql """ + drop table if exists lineitem_range_date_union + """ + + sql """CREATE TABLE `lineitem_range_date_union` ( + `l_orderkey` BIGINT NULL, + `l_linenumber` INT NULL, + `l_partkey` INT NULL, + `l_suppkey` INT NULL, + `l_quantity` DECIMAL(15, 2) NULL, + `l_extendedprice` DECIMAL(15, 2) NULL, + `l_discount` DECIMAL(15, 2) NULL, + `l_tax` DECIMAL(15, 2) NULL, + `l_returnflag` VARCHAR(1) NULL, + `l_linestatus` VARCHAR(1) NULL, + `l_commitdate` DATE NULL, + `l_receiptdate` DATE NULL, + `l_shipinstruct` VARCHAR(25) NULL, + `l_shipmode` VARCHAR(10) NULL, + `l_comment` VARCHAR(44) NULL, + `l_shipdate` DATE not NULL + ) ENGINE=OLAP + DUPLICATE KEY(l_orderkey, l_linenumber, l_partkey, l_suppkey ) + COMMENT 'OLAP' + partition by range (`l_shipdate`) ( + partition p1 values [("2023-10-29"), ("2023-10-30")), + partition p2 values [("2023-10-30"), ("2023-10-31")), + partition p3 values [("2023-10-31"), ("2023-11-01"))) + DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + + sql """ + drop table if exists orders_range_date_union + """ + + sql """CREATE TABLE `orders_range_date_union` ( + `o_orderkey` BIGINT NULL, + `o_custkey` INT NULL, + `o_orderstatus` VARCHAR(1) NULL, + `o_totalprice` DECIMAL(15, 2) NULL, + `o_orderpriority` VARCHAR(15) NULL, + `o_clerk` VARCHAR(15) NULL, + `o_shippriority` INT NULL, + `o_comment` VARCHAR(79) NULL, + `o_orderdate` DATE not NULL + ) ENGINE=OLAP + DUPLICATE KEY(`o_orderkey`, `o_custkey`) + COMMENT 'OLAP' + partition by range (`o_orderdate`) ( + partition p1 values [("2023-10-29"), ("2023-10-30")), + partition p2 values [("2023-10-30"), ("2023-10-31")), + partition p3 values [("2023-10-31"), ("2023-11-01")), + partition p4 values [("2023-11-01"), ("2023-11-02")), + partition p5 values [("2023-11-02"), ("2023-11-03"))) + DISTRIBUTED BY HASH(`o_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + + sql """ + insert into lineitem_range_date_union values + (null, 1, 2, 3, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29'), + (1, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-10-29'), + (3, 3, null, 2, 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', '2023-10-19', 'c', 'd', 'xxxxxxxxx', '2023-10-31'), + (1, 2, 3, null, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29'), + (2, 
3, 2, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', null, '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-10-30'), + (3, 1, 1, 2, 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', null, 'c', 'd', 'xxxxxxxxx', '2023-10-31'), + (1, 3, 2, 2, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29'); + """ + + sql """ + insert into orders_range_date_union values + (null, 1, 'k', 99.5, 'a', 'b', 1, 'yy', '2023-10-29'), + (1, null, 'o', 109.2, 'c','d',2, 'mm', '2023-10-29'), + (3, 3, null, 99.5, 'a', 'b', 1, 'yy', '2023-10-30'), + (1, 2, 'o', null, 'a', 'b', 1, 'yy', '2023-11-01'), + (2, 3, 'k', 109.2, null,'d',2, 'mm', '2023-11-02'), + (3, 1, 'k', 99.5, 'a', null, 1, 'yy', '2023-11-02'), + (1, 3, 'o', 99.5, 'a', 'b', null, 'yy', '2023-10-31'), + (2, 1, 'o', 109.2, 'c','d',2, null, '2023-10-30'), + (3, 2, 'k', 99.5, 'a', 'b', 1, 'yy', '2023-10-29'), + (4, 5, 'k', 99.5, 'a', 'b', 1, 'yy', '2023-10-31'); + """ + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv1;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv1 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`col1`, 'month')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'day') as col1, l_shipdate, l_orderkey from lineitem_range_date_union as t1 left join orders_range_date_union as t2 on t1.l_orderkey = t2.o_orderkey group by col1, l_shipdate, l_orderkey;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv2;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv2 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`col1`, 'month')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'hour') as col1, l_shipdate, l_orderkey from lineitem_range_date_union as t1 left join orders_range_date_union as t2 on t1.l_orderkey = t2.o_orderkey group by col1, l_shipdate, l_orderkey;""" + + def sql1 = """select date_trunc(`l_shipdate`, 'day') as col1, l_shipdate, l_orderkey from lineitem_range_date_union as t1 left join orders_range_date_union as t2 on t1.l_orderkey = t2.o_orderkey group by col1, l_shipdate, l_orderkey""" + def sql2 = """select date_trunc(`l_shipdate`, 'hour') as col1, l_shipdate, l_orderkey from lineitem_range_date_union as t1 left join orders_range_date_union as t2 on t1.l_orderkey = t2.o_orderkey group by col1, l_shipdate, l_orderkey""" + + def localWaitingMTMVTaskFinished = { def jobName -> + Thread.sleep(2000); + String showTasks = "select TaskId,JobId,JobName,MvId,Status,MvName,MvDatabaseName,ErrorMsg from tasks('type'='mv') where JobName = '${jobName}' order by CreateTime ASC" + String status = "NULL" + List<List<Object>> result + long startTime = System.currentTimeMillis() + long timeoutTimestamp = startTime + 5 * 60 * 1000 // 5 min + do { + result = sql(showTasks) + logger.info("result: " + result.toString()) + if (!result.isEmpty()) { + status = result.last().get(4) + } + logger.info("The state of ${showTasks} is ${status}") + Thread.sleep(1000); + } while (timeoutTimestamp > System.currentTimeMillis() && (status == 'PENDING' || status == 'RUNNING' || status == 'NULL')) + if (status != "SUCCESS") { + logger.info("status is not success") + } + } + + def compare_res = { def stmt -> + sql "SET enable_materialized_view_rewrite=false" + def origin_res = sql stmt + logger.info("origin_res: " + origin_res) + sql "SET enable_materialized_view_rewrite=true" + def mv_origin_res = sql stmt + logger.info("mv_origin_res: " + mv_origin_res) + assertTrue((mv_origin_res == [] && origin_res ==
[]) || (mv_origin_res.size() == origin_res.size())) + for (int row = 0; row < mv_origin_res.size(); row++) { + assertTrue(mv_origin_res[row].size() == origin_res[row].size()) + for (int col = 0; col < mv_origin_res[row].size(); col++) { + assertTrue(mv_origin_res[row][col] == origin_res[row][col]) + } + } + } + + def query_stmt_list = [sql1, sql2] + def mv_name_list = ["${mv_prefix}_mv1", "${mv_prefix}_mv2"] + for (int i = 0; i < mv_name_list.size(); i++) { + def job_name = getJobName(db, mv_name_list[i]) + waitingMTMVTaskFinished(job_name) + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + + sql """alter table lineitem_range_date_union add partition p4 values [("2023-11-01"), ("2023-11-02"));""" + sql """insert into lineitem_range_date_union values + (1, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-11-01')""" + for (int i = 0; i < mv_name_list.size(); i++) { + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + for (int i = 0; i < mv_name_list.size(); i++) { + sql """refresh MATERIALIZED VIEW ${mv_name_list[i]} auto;""" + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + sql """insert into lineitem_range_date_union values + (2, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-11-01');""" + for (int i = 0; i < mv_name_list.size(); i++) { + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + for (int i = 0; i < mv_name_list.size(); i++) { + sql """refresh MATERIALIZED VIEW ${mv_name_list[i]} auto;""" + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + sql """ALTER TABLE lineitem_range_date_union DROP PARTITION IF EXISTS p4 FORCE""" + for (int i = 0; i < mv_name_list.size(); i++) { + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + for (int i = 0; i < mv_name_list.size(); i++) { + sql """refresh MATERIALIZED VIEW ${mv_name_list[i]} auto;""" + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + +} diff --git a/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_datetime_part_up_rewrite.groovy b/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_datetime_part_up_rewrite.groovy new file mode 100644 index 00000000000000..b7e9562249d789 --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/mv/create_part_and_up/range_datetime_part_up_rewrite.groovy @@ -0,0 +1,213 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("mtmv_range_datetime_part_up_rewrite") { + + String db = context.config.getDbNameByFile(context.file) + sql "use ${db}" + sql "SET enable_nereids_planner=true" + sql "SET enable_fallback_to_original_planner=false" + sql "SET enable_materialized_view_rewrite=true" + sql "SET enable_materialized_view_nest_rewrite=true" + sql "SET enable_materialized_view_union_rewrite=true" + sql "SET enable_nereids_timeout = false" + String mv_prefix = "range_datetime_up_union" + + sql """ + drop table if exists lineitem_range_datetime_union + """ + + sql """CREATE TABLE `lineitem_range_datetime_union` ( + `l_orderkey` BIGINT NULL, + `l_linenumber` INT NULL, + `l_partkey` INT NULL, + `l_suppkey` INT NULL, + `l_quantity` DECIMAL(15, 2) NULL, + `l_extendedprice` DECIMAL(15, 2) NULL, + `l_discount` DECIMAL(15, 2) NULL, + `l_tax` DECIMAL(15, 2) NULL, + `l_returnflag` VARCHAR(1) NULL, + `l_linestatus` VARCHAR(1) NULL, + `l_commitdate` DATE NULL, + `l_receiptdate` DATE NULL, + `l_shipinstruct` VARCHAR(25) NULL, + `l_shipmode` VARCHAR(10) NULL, + `l_comment` VARCHAR(44) NULL, + `l_shipdate` DATEtime not NULL + ) ENGINE=OLAP + DUPLICATE KEY(l_orderkey, l_linenumber, l_partkey, l_suppkey ) + COMMENT 'OLAP' + partition by range (`l_shipdate`) ( + partition p1 values [("2023-10-29 00:00:00"), ("2023-10-29 01:00:00")), + partition p2 values [("2023-10-29 01:00:00"), ("2023-10-29 02:00:00")), + partition p3 values [("2023-10-29 02:00:00"), ("2023-10-29 03:00:00"))) + DISTRIBUTED BY HASH(`l_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + + sql """ + drop table if exists orders_range_datetime_union + """ + + sql """CREATE TABLE `orders_range_datetime_union` ( + `o_orderkey` BIGINT NULL, + `o_custkey` INT NULL, + `o_orderstatus` VARCHAR(1) NULL, + `o_totalprice` DECIMAL(15, 2) NULL, + `o_orderpriority` VARCHAR(15) NULL, + `o_clerk` VARCHAR(15) NULL, + `o_shippriority` INT NULL, + `o_comment` VARCHAR(79) NULL, + `o_orderdate` DATEtime not NULL + ) ENGINE=OLAP + DUPLICATE KEY(`o_orderkey`, `o_custkey`) + COMMENT 'OLAP' + partition by range (`o_orderdate`) ( + partition p1 values [("2023-10-29 00:00:00"), ("2023-10-29 01:00:00")), + partition p2 values [("2023-10-29 01:00:00"), ("2023-10-29 02:00:00")), + partition p3 values [("2023-10-29 02:00:00"), ("2023-10-29 03:00:00")), + partition p4 values [("2023-10-29 03:00:00"), ("2023-10-29 04:00:00")), + partition p5 values [("2023-10-29 04:00:00"), ("2023-10-29 05:00:00"))) + DISTRIBUTED BY HASH(`o_orderkey`) BUCKETS 96 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + + sql """ + insert into lineitem_range_datetime_union values + (null, 1, 2, 3, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29 00:00:00'), + (1, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-10-29 00:00:00'), + (3, 3, null, 2, 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', '2023-10-19', 'c', 'd', 'xxxxxxxxx', '2023-10-29 02:00:00'), + (1, 2, 3, null, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29 00:00:00'), + (2, 3, 2, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', null, '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-10-29 01:00:00'), + (3, 1, 1, 2, 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', null, 'c', 'd', 'xxxxxxxxx', '2023-10-29 02:00:00'), + (1, 3, 2, 2, 5.5, 6.5, 7.5, 8.5, 'o', 'k', 
'2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-29 00:00:00'); + """ + + sql """ + insert into orders_range_datetime_union values + (null, 1, 'k', 99.5, 'a', 'b', 1, 'yy', '2023-10-29 00:00:00'), + (1, null, 'o', 109.2, 'c','d',2, 'mm', '2023-10-29 00:00:00'), + (3, 3, null, 99.5, 'a', 'b', 1, 'yy', '2023-10-29 01:00:00'), + (1, 2, 'o', null, 'a', 'b', 1, 'yy', '2023-10-29 03:00:00'), + (2, 3, 'k', 109.2, null,'d',2, 'mm', '2023-10-29 04:00:00'), + (3, 1, 'k', 99.5, 'a', null, 1, 'yy', '2023-10-29 04:00:00'), + (1, 3, 'o', 99.5, 'a', 'b', null, 'yy', '2023-10-29 02:00:00'), + (2, 1, 'o', 109.2, 'c','d',2, null, '2023-10-29 01:00:00'), + (3, 2, 'k', 99.5, 'a', 'b', 1, 'yy', '2023-10-29 00:00:00'), + (4, 5, 'k', 99.5, 'a', 'b', 1, 'yy', '2023-10-29 02:00:00'); + """ + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv1;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv1 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`col1`, 'month')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'day') as col1, l_shipdate, l_orderkey from lineitem_range_datetime_union as t1 left join orders_range_datetime_union as t2 on t1.l_orderkey = t2.o_orderkey group by col1, l_shipdate, l_orderkey;""" + + sql """DROP MATERIALIZED VIEW if exists ${mv_prefix}_mv2;""" + sql """CREATE MATERIALIZED VIEW ${mv_prefix}_mv2 BUILD IMMEDIATE REFRESH AUTO ON MANUAL partition by(date_trunc(`col1`, 'month')) DISTRIBUTED BY RANDOM BUCKETS 2 PROPERTIES ('replication_num' = '1') AS + select date_trunc(`l_shipdate`, 'hour') as col1, l_shipdate, l_orderkey from lineitem_range_datetime_union as t1 left join orders_range_datetime_union as t2 on t1.l_orderkey = t2.o_orderkey group by col1, l_shipdate, l_orderkey;""" + + def sql1 = """select date_trunc(`l_shipdate`, 'day') as col1, l_shipdate, l_orderkey from lineitem_range_datetime_union as t1 left join orders_range_datetime_union as t2 on t1.l_orderkey = t2.o_orderkey group by col1, l_shipdate, l_orderkey""" + def sql2 = """select date_trunc(`l_shipdate`, 'hour') as col1, l_shipdate, l_orderkey from lineitem_range_datetime_union as t1 left join orders_range_datetime_union as t2 on t1.l_orderkey = t2.o_orderkey group by col1, l_shipdate, l_orderkey""" + + def localWaitingMTMVTaskFinished = { def jobName -> + Thread.sleep(2000); + String showTasks = "select TaskId,JobId,JobName,MvId,Status,MvName,MvDatabaseName,ErrorMsg from tasks('type'='mv') where JobName = '${jobName}' order by CreateTime ASC" + String status = "NULL" + List<List<Object>> result + long startTime = System.currentTimeMillis() + long timeoutTimestamp = startTime + 5 * 60 * 1000 // 5 min + do { + result = sql(showTasks) + logger.info("result: " + result.toString()) + if (!result.isEmpty()) { + status = result.last().get(4) + } + logger.info("The state of ${showTasks} is ${status}") + Thread.sleep(1000); + } while (timeoutTimestamp > System.currentTimeMillis() && (status == 'PENDING' || status == 'RUNNING' || status == 'NULL')) + if (status != "SUCCESS") { + logger.info("status is not success") + } + } + + def compare_res = { def stmt -> + sql "SET enable_materialized_view_rewrite=false" + def origin_res = sql stmt + logger.info("origin_res: " + origin_res) + sql "SET enable_materialized_view_rewrite=true" + def mv_origin_res = sql stmt + logger.info("mv_origin_res: " + mv_origin_res) + assertTrue((mv_origin_res == [] && origin_res == []) || (mv_origin_res.size() == origin_res.size())) + for (int row = 0; row < mv_origin_res.size(); row++) { + 
assertTrue(mv_origin_res[row].size() == origin_res[row].size()) + for (int col = 0; col < mv_origin_res[row].size(); col++) { + assertTrue(mv_origin_res[row][col] == origin_res[row][col]) + } + } + } + + def query_stmt_list = [sql1, sql2] + def mv_name_list = ["${mv_prefix}_mv1", "${mv_prefix}_mv2"] + for (int i = 0; i < mv_name_list.size(); i++) { + def job_name = getJobName(db, mv_name_list[i]) + waitingMTMVTaskFinished(job_name) + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + + sql """alter table lineitem_range_datetime_union add partition p4 values [("2023-11-29 03:00:00"), ("2023-11-29 04:00:00"));""" + sql """insert into lineitem_range_datetime_union values + (1, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-11-29 03:00:00')""" + for (int i = 0; i < mv_name_list.size(); i++) { + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + for (int i = 0; i < mv_name_list.size(); i++) { + sql """refresh MATERIALIZED VIEW ${mv_name_list[i]} auto;""" + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + sql """insert into lineitem_range_datetime_union values + (3, null, 3, 1, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-11-29 03:00:00');""" + for (int i = 0; i < mv_name_list.size(); i++) { + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + for (int i = 0; i < mv_name_list.size(); i++) { + sql """refresh MATERIALIZED VIEW ${mv_name_list[i]} auto;""" + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + sql """ALTER TABLE lineitem_range_datetime_union DROP PARTITION IF EXISTS p4 FORCE""" + for (int i = 0; i < mv_name_list.size(); i++) { + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + + for (int i = 0; i < mv_name_list.size(); i++) { + sql """refresh MATERIALIZED VIEW ${mv_name_list[i]} auto;""" + mv_rewrite_success(query_stmt_list[i], mv_name_list[i]) + compare_res(query_stmt_list[i] + " order by 1,2,3") + } + +} diff --git a/regression-test/suites/nereids_rules_p0/mv/union_rewrite/partition_curd_union_rewrite.groovy b/regression-test/suites/nereids_rules_p0/mv/union_rewrite/partition_curd_union_rewrite.groovy index 2b00c8200d2589..cbeb6768edec1a 100644 --- a/regression-test/suites/nereids_rules_p0/mv/union_rewrite/partition_curd_union_rewrite.groovy +++ b/regression-test/suites/nereids_rules_p0/mv/union_rewrite/partition_curd_union_rewrite.groovy @@ -141,7 +141,6 @@ suite ("partition_curd_union_rewrite") { ${mv_def_sql} """ - def compare_res = { def stmt -> sql "SET enable_materialized_view_rewrite=false" def origin_res = sql stmt @@ -171,7 +170,6 @@ suite ("partition_curd_union_rewrite") { is_partition_statistics_ready(db, ["lineitem", "orders", mv_name])) compare_res(partition_sql + order_by_stmt) - /* // Part partition is invalid, test can not use partition 2023-10-17 to rewrite sql """ insert into lineitem values @@ -179,15 +177,9 @@ suite ("partition_curd_union_rewrite") { """ // wait partition is invalid sleep(5000) - explain { - sql("${all_partition_sql}") - contains("${mv_name}(${mv_name})") - } + mv_rewrite_success(all_partition_sql, mv_name) compare_res(all_partition_sql + order_by_stmt) - 
explain { - sql("${partition_sql}") - contains("${mv_name}(${mv_name})") - } + mv_rewrite_success(partition_sql, mv_name) compare_res(partition_sql + order_by_stmt) sql "REFRESH MATERIALIZED VIEW ${mv_name} AUTO" @@ -199,15 +191,9 @@ suite ("partition_curd_union_rewrite") { """ // Wait partition is invalid sleep(5000) - explain { - sql("${all_partition_sql}") - contains("${mv_name}(${mv_name})") - } + mv_rewrite_success(all_partition_sql, mv_name) compare_res(all_partition_sql + order_by_stmt) - explain { - sql("${partition_sql}") - contains("${mv_name}(${mv_name})") - } + mv_rewrite_success(partition_sql, mv_name) compare_res(partition_sql + order_by_stmt) // Test when base table delete partition test @@ -217,15 +203,8 @@ suite ("partition_curd_union_rewrite") { """ // Wait partition is invalid sleep(3000) - explain { - sql("${all_partition_sql}") - contains("${mv_name}(${mv_name})") - } + mv_rewrite_success(all_partition_sql, mv_name) compare_res(all_partition_sql + order_by_stmt) - explain { - sql("${partition_sql}") - contains("${mv_name}(${mv_name})") - } + mv_rewrite_success(partition_sql, mv_name) compare_res(partition_sql + order_by_stmt) - */ } diff --git a/regression-test/suites/nereids_rules_p0/mv/union_rewrite/partition_curd_union_rewrite_hive.groovy b/regression-test/suites/nereids_rules_p0/mv/union_rewrite/partition_curd_union_rewrite_hive.groovy new file mode 100644 index 00000000000000..14170bf21f40fa --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/mv/union_rewrite/partition_curd_union_rewrite_hive.groovy @@ -0,0 +1,256 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
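+ +// Covered scenario: Hive-catalog variant of partition_curd_union_rewrite. It creates hive2/hive3 catalogs,
+// LIST-partitioned Hive orders/lineitem tables, and an internal MTMV partitioned by l_shipdate over their join,
+// then checks transparent rewrite and result consistency both while the MTMV is fresh and after new Hive data
+// makes some partitions stale (compare_res is expected to return false until the MTMV is refreshed).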
+ +suite ("partition_curd_union_rewrite_hive") { + String enabled = context.config.otherConfigs.get("enableHiveTest") + if (enabled == null || !enabled.equalsIgnoreCase("true")) { + logger.info("diable Hive test.") + return + } + + sql """SET materialized_view_rewrite_enable_contain_external_table = true;""" + + def create_mv = { mv_name, mv_sql -> + sql """DROP MATERIALIZED VIEW IF EXISTS ${mv_name};""" + sql """DROP TABLE IF EXISTS ${mv_name}""" + sql""" + CREATE MATERIALIZED VIEW ${mv_name} + BUILD IMMEDIATE REFRESH AUTO ON MANUAL + partition by(l_shipdate) + DISTRIBUTED BY RANDOM BUCKETS 2 + PROPERTIES ('replication_num' = '1') + AS + ${mv_sql} + """ + } + def compare_res = { def stmt -> + def mark = true + sql "SET materialized_view_rewrite_enable_contain_external_table=false" + def origin_res = sql stmt + logger.info("origin_res: " + origin_res) + sql "SET materialized_view_rewrite_enable_contain_external_table=true" + def mv_origin_res = sql stmt + logger.info("mv_origin_res: " + mv_origin_res) + if (!((mv_origin_res == [] && origin_res == []) || (mv_origin_res.size() == origin_res.size()))) { + mark = false + return mark + } + assertTrue((mv_origin_res == [] && origin_res == []) || (mv_origin_res.size() == origin_res.size())) + for (int row = 0; row < mv_origin_res.size(); row++) { + if (!(mv_origin_res[row].size() == origin_res[row].size())) { + mark = false + return mark + } + assertTrue(mv_origin_res[row].size() == origin_res[row].size()) + for (int col = 0; col < mv_origin_res[row].size(); col++) { + if (!(mv_origin_res[row][col] == origin_res[row][col])) { + mark = false + return mark + } + assertTrue(mv_origin_res[row][col] == origin_res[row][col]) + } + } + return mark + } + + + String db = context.config.getDbNameByFile(context.file) + String ctl = "partition_curd_union_hive" + for (String hivePrefix : ["hive2", "hive3"]) { + String hms_port = context.config.otherConfigs.get(hivePrefix + "HmsPort") + String hdfs_port = context.config.otherConfigs.get(hivePrefix + "HdfsPort") + String catalog_name = ctl + "_" + hivePrefix + String externalEnvIp = context.config.otherConfigs.get("externalEnvIp") + + sql """drop catalog if exists ${catalog_name}""" + sql """create catalog if not exists ${catalog_name} properties ( + 'type'='hms', + 'hive.metastore.uris' = 'thrift://${externalEnvIp}:${hms_port}', + 'fs.defaultFS' = 'hdfs://${externalEnvIp}:${hdfs_port}', + 'use_meta_cache' = 'true' + );""" + sql """switch ${catalog_name}""" + sql """create database if not exists ${db}""" + sql """use `${db}`""" + String orders_tb_name = catalog_name + "_orders" + String lineitem_tb_name = catalog_name + "_lineitem" + def mv_name = catalog_name + "_test_mv" + + sql """drop table if exists ${orders_tb_name}""" + sql """CREATE TABLE IF NOT EXISTS ${orders_tb_name} ( + o_orderkey int, + o_custkey int, + o_orderstatus VARCHAR(1), + o_totalprice DECIMAL(15, 2), + o_orderpriority VARCHAR(15), + o_clerk VARCHAR(15), + o_shippriority int, + o_comment VARCHAR(15), + o_orderdate date + ) + ENGINE=hive + PARTITION BY LIST (`o_orderdate`) () + PROPERTIES ( + "replication_num" = "1", + 'file_format'='orc' + ); + """ + + sql """ + insert into ${orders_tb_name} values + (1, 1, 'o', 99.5, 'a', 'b', 1, 'yy', '2023-10-17'), + (2, 2, 'k', 109.2, 'c','d',2, 'mm', '2023-10-18'), + (3, 3, 'o', 99.5, 'a', 'b', 1, 'yy', '2023-10-19'); + """ + + sql """drop table if exists ${lineitem_tb_name}""" + sql""" + CREATE TABLE IF NOT EXISTS ${lineitem_tb_name} ( + l_orderkey INT, + l_partkey INT, + l_suppkey INT, + l_linenumber 
INT, + l_quantity DECIMAL(15, 2), + l_extendedprice DECIMAL(15, 2), + l_discount DECIMAL(15, 2), + l_tax DECIMAL(15, 2), + l_returnflag VARCHAR(1), + l_linestatus VARCHAR(1), + l_commitdate date, + l_receiptdate date, + l_shipinstruct VARCHAR(10), + l_shipmode VARCHAR(10), + l_comment VARCHAR(44), + l_shipdate date + ) ENGINE=hive + PARTITION BY LIST (`l_shipdate`) () + PROPERTIES ( + "replication_num" = "1", + 'file_format'='orc' + ); + """ + + sql """ + insert into ${lineitem_tb_name} values + (1, 2, 3, 4, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-17'), + (2, 2, 3, 4, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-18', '2023-10-18', 'a', 'b', 'yyyyyyyyy', '2023-10-18'), + (3, 2, 3, 6, 7.5, 8.5, 9.5, 10.5, 'k', 'o', '2023-10-19', '2023-10-19', 'c', 'd', 'xxxxxxxxx', '2023-10-19'); + """ + + sql """switch internal""" + sql """create database if not exists ${db}""" + sql """use ${db}""" + + def mv_def_sql = """ + select l_shipdate, o_orderdate, l_partkey, + l_suppkey, sum(o_totalprice) as sum_total + from ${catalog_name}.${db}.${lineitem_tb_name} + left join ${catalog_name}.${db}.${orders_tb_name} on l_orderkey = o_orderkey and l_shipdate = o_orderdate + group by + l_shipdate, + o_orderdate, + l_partkey, + l_suppkey + """ + + def all_partition_sql = """ + select l_shipdate, o_orderdate, l_partkey, l_suppkey, sum(o_totalprice) as sum_total + from ${catalog_name}.${db}.${lineitem_tb_name} as t1 + left join ${catalog_name}.${db}.${orders_tb_name} as t2 on l_orderkey = o_orderkey and l_shipdate = o_orderdate + group by + l_shipdate, + o_orderdate, + l_partkey, + l_suppkey + """ + + def partition_sql = """ + select l_shipdate, o_orderdate, l_partkey, l_suppkey, sum(o_totalprice) as sum_total + from ${catalog_name}.${db}.${lineitem_tb_name} as t1 + left join ${catalog_name}.${db}.${orders_tb_name} as t2 on l_orderkey = o_orderkey and l_shipdate = o_orderdate + where (l_shipdate>= '2023-10-18' and l_shipdate <= '2023-10-19') + group by + l_shipdate, + o_orderdate, + l_partkey, + l_suppkey + """ + + sql """DROP MATERIALIZED VIEW IF EXISTS ${mv_name}""" + sql """DROP TABLE IF EXISTS ${mv_name}""" + sql """ + CREATE MATERIALIZED VIEW ${mv_name} + BUILD IMMEDIATE REFRESH AUTO ON MANUAL + partition by(l_shipdate) + DISTRIBUTED BY RANDOM BUCKETS 2 + PROPERTIES ('replication_num' = '1') + AS + ${mv_def_sql} + """ + + def order_by_stmt = " order by 1,2,3,4,5" + waitingMTMVTaskFinished(getJobName(db, mv_name)) + + // All partition is valid, test query and rewrite by materialized view + mv_rewrite_success(all_partition_sql, mv_name) + compare_res(all_partition_sql + order_by_stmt) + mv_rewrite_success(partition_sql, mv_name) + compare_res(partition_sql + order_by_stmt) + + // Part partition is invalid, test can not use partition 2023-10-17 to rewrite + sql """ + insert into ${catalog_name}.${db}.${lineitem_tb_name} values + (1, 2, 3, 4, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-17', '2023-10-17', 'a', 'b', 'yyyyyyyyy', '2023-10-17'); + """ + // wait partition is invalid + sleep(5000) + mv_rewrite_success(all_partition_sql, mv_name) + assertTrue(compare_res(all_partition_sql + order_by_stmt) == false) + mv_rewrite_success(partition_sql, mv_name) + assertTrue(compare_res(all_partition_sql + order_by_stmt) == false) + + + sql "REFRESH MATERIALIZED VIEW ${mv_name} AUTO" + waitingMTMVTaskFinished(getJobName(db, mv_name)) + mv_rewrite_success(all_partition_sql, mv_name) + compare_res(all_partition_sql + order_by_stmt) + mv_rewrite_success(partition_sql, mv_name) + 
compare_res(partition_sql + order_by_stmt) + + + // Test the case where the base table adds a new partition + sql """ + insert into ${catalog_name}.${db}.${lineitem_tb_name} values + (1, 2, 3, 4, 5.5, 6.5, 7.5, 8.5, 'o', 'k', '2023-10-21', '2023-10-21', 'a', 'b', 'yyyyyyyyy', '2023-10-21'); + """ + // Wait until the partition becomes invalid + sleep(5000) + mv_rewrite_success(all_partition_sql, mv_name) + assertTrue(compare_res(all_partition_sql + order_by_stmt) == false) + mv_rewrite_success(partition_sql, mv_name) + compare_res(partition_sql + order_by_stmt) + + // Test the case where the base table deletes a partition + sql "REFRESH MATERIALIZED VIEW ${mv_name} AUTO" + waitingMTMVTaskFinished(getJobName(db, mv_name)) + mv_rewrite_success(all_partition_sql, mv_name) + compare_res(all_partition_sql + order_by_stmt) + mv_rewrite_success(partition_sql, mv_name) + compare_res(partition_sql + order_by_stmt) + + } +} diff --git a/regression-test/suites/nereids_rules_p0/pull_up_join_from_union/pull_up_join_from_union.groovy b/regression-test/suites/nereids_rules_p0/pull_up_join_from_union/pull_up_join_from_union.groovy new file mode 100644 index 00000000000000..3eee499b2d865f --- /dev/null +++ b/regression-test/suites/nereids_rules_p0/pull_up_join_from_union/pull_up_join_from_union.groovy @@ -0,0 +1,429 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
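+// Suite overview (editorial comment): this file covers the Nereids "pull up join from union" rewrite, +// where a join that appears in every UNION ALL branch can be hoisted above the union. The qt_* cases +// below inspect the rewritten plan via `explain shape plan`; the matching order_qt_*_res cases re-run +// the same queries to confirm the rewrite does not change results.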
+ +suite("join_pull_up_union") { + sql "set runtime_filter_mode=OFF" + sql """ SET inline_cte_referenced_threshold=0 """ + sql "SET ignore_shape_nodes='PhysicalDistribute,PhysicalProject'" + sql "SET disable_join_reorder=true" + + sql """ + -- Create tables + DROP TABLE IF EXISTS table_a; + CREATE TABLE table_a ( + id INT, + name VARCHAR(50), + value VARCHAR(50) + )ENGINE = OLAP + DISTRIBUTED BY HASH(id) BUCKETS 4 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + DROP TABLE IF EXISTS table_b; + CREATE TABLE table_b ( + id INT, + name VARCHAR(50), + value VARCHAR(50) + )ENGINE = OLAP + DISTRIBUTED BY HASH(id) BUCKETS 4 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + DROP TABLE IF EXISTS table_c; + CREATE TABLE table_c ( + id INT, + name VARCHAR(50), + value VARCHAR(50), + value1 VARCHAR(50), + value2 VARCHAR(50) + )ENGINE = OLAP + DISTRIBUTED BY HASH(id) BUCKETS 4 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + DROP TABLE IF EXISTS table_d; + CREATE TABLE table_d ( + id INT, + name VARCHAR(50), + value VARCHAR(50) + )ENGINE = OLAP + DISTRIBUTED BY HASH(id) BUCKETS 4 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + """ + sql """INSERT INTO table_a (id, name, value) VALUES + (1, 'Alice', 'Value_A1'), + (2, 'Bob', 'Value_A2'), + (3, 'Charlie', 'Value_A3'), + (5, 'Eva', 'Value_A5');""" + + sql """INSERT INTO table_b (id, name, value) VALUES + (1, 'Alice', 'Value_B1'), + (2, 'Bob', 'Value_B2'), + (4, 'Daniel', 'Value_B4'), + (6, 'Fiona', 'Value_B6');""" + + sql """INSERT INTO table_c (id, name, value, value1, value2) VALUES + (1, 'Alice', 'Value_C1', 'Extra_C1_1', 'Extra_C1_2'), + (3, 'Charlie', 'Value_C3', 'Extra_C3_1', 'Extra_C3_2'), + (4, 'Daniel', 'Value_C4', 'Extra_C4_1', 'Extra_C4_2'), + (7, 'Grace', 'Value_C7', 'Extra_C7_1', 'Extra_C7_2');""" + + sql """INSERT INTO table_d (id, name, value) VALUES + (1, 'Alice', 'Value_D1'), + (2, 'Bob', 'Value_D2'), + (3, 'Charlie', 'Value_D3'), + (8, 'Henry', 'Value_D8');""" + + + // Simple case with two tables joined in a union + qt_basic_join_union """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON a.id = c.id) t + """ + + // Three-way union with common join + qt_three_way_union """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON a.id = c.id + UNION ALL + SELECT a.id, a.name, d.value FROM table_a a JOIN table_d d ON a.id = d.id) t + """ + + // Union with projections above joins + qt_union_with_projections """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, UPPER(b.value) AS upper_value FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, LOWER(c.value) AS lower_value FROM table_a a JOIN table_c c ON a.id = c.id) t + """ + + // Union with constant expressions + qt_union_with_constants """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value, 'B' AS source FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value, 'C' AS source FROM table_a a JOIN table_c c ON a.id = c.id) t + """ + + // Union with loss slots + qt_union_with_loss_slots """ + explain shape plan + SELECT t.id FROM + (SELECT a.id, a.name, b.value, 'B' AS source FROM table_a a JOIN table_b b ON a.id = b.id + UNION 
ALL + SELECT a.id, a.name, c.value, 'C' AS source FROM table_a a JOIN table_c c ON a.id = c.id) t + """ + + // Union with different join conditions + qt_different_join_conditions """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON a.name = c.name) t + """ + + // Union with multi-column join conditions + qt_multi_column_join """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON a.id = b.id AND a.name = b.name + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON a.id = c.id AND a.name = c.name) t + """ + + // Union with other joins + qt_left_joins """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a LEFT JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a LEFT JOIN table_c c ON a.id = c.id) t + """ + + // Union with subqueries in join + qt_subquery_join """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN (SELECT id, MAX(value) AS value FROM table_b GROUP BY id) b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN (SELECT id, MAX(value) AS value FROM table_c GROUP BY id) c ON a.id = c.id) t + """ + + // Union with complex expressions in join condition + qt_complex_join_condition1 """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON CAST(a.id AS INT) + 1 = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON CAST(a.id AS INT) + 1 = c.id) t + """ + + // Union with complex expressions in join condition + qt_complex_join_condition2 """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON CAST(a.id AS INT) + 1 = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON CAST(a.id AS DOUBLE) + 1 = c.id) t + """ + + qt_union_filter1 """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value, 'B' AS source FROM table_a a JOIN table_b b ON a.id = b.id where a.id = 1 + UNION ALL + SELECT a.id, a.name, c.value, 'C' AS source FROM table_a a JOIN table_c c ON a.id = c.id where a.id = 1) t + """ + + qt_union_filter2 """ + explain shape plan + SELECT * FROM + (SELECT a.id, a.name, b.value, 'B' AS source FROM table_a a JOIN table_b b ON a.id = b.id where a.value = 1 + UNION ALL + SELECT a.id, a.name, c.value, 'C' AS source FROM table_a a JOIN table_c c ON a.id = c.id where a.value = 1) t + """ + + + // Simple case with two tables joined in a union + order_qt_basic_join_union_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON a.id = c.id) t + """ + + // Three-way union with common join + order_qt_three_way_union_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON a.id = c.id + UNION ALL + SELECT a.id, a.name, d.value FROM table_a a JOIN table_d d ON a.id = d.id) t + """ + + // Union with projections above joins + order_qt_union_with_projections_res """ + SELECT * FROM + (SELECT a.id, a.name, UPPER(b.value) AS upper_value FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, LOWER(c.value) AS lower_value FROM table_a a JOIN 
table_c c ON a.id = c.id) t + """ + + // Union with constant expressions + order_qt_union_with_constants_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value, 'B' AS source FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value, 'C' AS source FROM table_a a JOIN table_c c ON a.id = c.id) t + """ + + // Union with loss slots + order_qt_union_with_loss_slots_res """ + SELECT t.id FROM + (SELECT a.id, a.name, b.value, 'B' AS source FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value, 'C' AS source FROM table_a a JOIN table_c c ON a.id = c.id) t + """ + + // Union with different join conditions + order_qt_different_join_conditions_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON a.name = c.name) t + """ + + // Union with multi-column join conditions + order_qt_multi_column_join_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON a.id = b.id AND a.name = b.name + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON a.id = c.id AND a.name = c.name) t + """ + + // Union with other joins + order_qt_left_joins_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a LEFT JOIN table_b b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a LEFT JOIN table_c c ON a.id = c.id) t + """ + + // Union with subqueries in join + order_qt_subquery_join_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN (SELECT id, MAX(value) AS value FROM table_b GROUP BY id) b ON a.id = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN (SELECT id, MAX(value) AS value FROM table_c GROUP BY id) c ON a.id = c.id) t + """ + + // Union with complex expressions in join condition + order_qt_complex_join_condition1_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON CAST(a.id AS INT) + 1 = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON CAST(a.id AS INT) + 1 = c.id) t + """ + + // Union with complex expressions in join condition + order_qt_complex_join_condition2_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value FROM table_a a JOIN table_b b ON CAST(a.id AS INT) + 1 = b.id + UNION ALL + SELECT a.id, a.name, c.value FROM table_a a JOIN table_c c ON CAST(a.id AS DOUBLE) + 1 = c.id) t + """ + + order_qt_union_filter1_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value, 'B' AS source FROM table_a a JOIN table_b b ON a.id = b.id where a.id = 1 + UNION ALL + SELECT a.id, a.name, c.value, 'C' AS source FROM table_a a JOIN table_c c ON a.id = c.id where a.id = 1) t + """ + + order_qt_union_filter2_res """ + SELECT * FROM + (SELECT a.id, a.name, b.value, 'B' AS source FROM table_a a JOIN table_b b ON a.id = b.id where a.value = 1 + UNION ALL + SELECT a.id, a.name, c.value, 'C' AS source FROM table_a a JOIN table_c c ON a.id = c.id where a.value = 1) t + """ + + sql """drop table if exists test_like1""" + sql """CREATE TABLE `test_like1` ( + `a` INT NULL, + `b` VARCHAR(10) NULL, + `c` INT NULL, + `d` INT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`a`, `b`) + DISTRIBUTED BY RANDOM BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + sql """drop table if exists test_like2""" + sql """CREATE TABLE `test_like2` ( + `a` INT NULL, + `b` VARCHAR(10) NULL, + `c` INT NULL, + `d` INT NULL + ) ENGINE=OLAP + 
DUPLICATE KEY(`a`, `b`) + DISTRIBUTED BY RANDOM BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + sql """drop table if exists test_like3""" + sql """CREATE TABLE `test_like3` ( + `a` INT NULL, + `b` VARCHAR(10) NULL, + `c` INT NULL, + `d` INT NULL + ) ENGINE=OLAP + DUPLICATE KEY(`a`, `b`) + DISTRIBUTED BY RANDOM BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + sql "drop table if exists test_like4" + sql """create table test_like4 (a bigint, b varchar(10), c int, d int) ENGINE=OLAP + DUPLICATE KEY(`a`, `b`) + DISTRIBUTED BY RANDOM BUCKETS AUTO + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + );""" + sql "insert into test_like1 values(100,'d2',3,5),(0,'d2',3,5),(null,null,9,3),(33,'d2',2,5),(null,'d2',3,55),(78,null,9,3),(12,null,9,3);" + sql "insert into test_like2 values(10,'d2',2,2),(0,'d2',2,2),(100,'d2',3,null),(null,null,9,3),(78,'d2',23,5),(33,'d2',23,5);" + sql "insert into test_like3 values(1,'d2',2,2),(33,'d2',99,5),(33,'d2',23,6),(33,'d2',3,5);" + sql "insert into test_like4 values(11,'d2',3,5),(1,'d2',3,5),(79,null,9,3),(33,'d2',2,5),(null,'d2',3,55),(78,null,9,3),(12,null,9,3);" + + qt_expr """select c1,c2 from + (select t2.a+1 c1,t2.c+2 c2 from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + select t3.a+1,t1.a+2 from test_like1 t1 join test_like3 t3 on t1.a=t3.a) t order by 1,2""" + qt_const """select c1,c2 from + (select t2.a+1 c1,2 c2 from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + select t3.a+1,3 from test_like1 t1 join test_like3 t3 on t1.a=t3.a) t order by 1,2""" + + qt_multi_condition """select c1,c2 from ( + select t2.a+1 c1,2 c2 from test_like1 t1 join test_like2 t2 on t1.c=t2.d AND t1.c=t2.d union ALL + select t3.a+1,3 from test_like1 t1 join test_like3 t3 on t1.c=t3.d AND t1.c=t3.d) t order by 1,2""" + + qt_multi_condition2 """select c1,c2 from ( + select t2.a+1 c1 ,2 c2 from test_like1 t1 join test_like2 t2 on t1.c=t2.d AND t1.c=t2.c union ALL + select t3.a+1,3 from test_like1 t1 join test_like3 t3 on t1.c=t3.d AND t1.c=t3.c) t order by 1,2""" + + qt_multi_differenct_count_condition """select c1,c2 from ( + select t2.a+1 c1,2 c2 from test_like1 t1 join test_like2 t2 on t1.a=t2.a AND t1.a=t2.c union ALL + select t3.a+1 c3,3 c4 from test_like1 t1 join test_like3 t3 on t1.a=t3.c) t order by 1,2""" + qt_no_common_side_project """select c1,c2 from ( + select t2.a+1 c1,t2.c+2 c2 from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + select t3.a+1,t3.a+2 from test_like1 t1 join test_like3 t3 on t1.a=t3.a) t order by 1,2""" + + qt_common_slot_differnt """select c1,c2 from ( + select t2.a+1 c1,t2.c+2 c2 from test_like1 t1 join test_like2 t2 on t1.a+1=t2.a union ALL + select t3.a+1,t3.c+2 from test_like1 t1 join test_like3 t3 on t1.a=t3.a) t order by 1,2""" + + qt_other_expr_differnt """select c1,c2 from ( + select t2.a+1 c1,t2.c+2 c2 from test_like1 t1 join test_like2 t2 on t1.a=t2.a union ALL + select t3.a+1,t3.c+100 from test_like1 t1 join test_like3 t3 on t1.a=t3.a) t order by 1,2""" + + qt_2_same_tables """select c1,c2 from ( + select t1.a c1,1 c2 from test_like1 t1 inner join test_like2 t2 on t1.a=t2.a + union ALL + select t1.a,t2.a from test_like1 t1 inner join test_like2 t2 on t1.a=t2.a) t order by 1,2""" + + qt_simple_column """select c1,c2 from ( + select t1.a c1,t2.a c2 from test_like1 t1 inner join test_like2 t2 on t1.a=t2.a + union ALL + select t1.a,t2.a from test_like1 t1 inner join test_like2 t2 on t1.a=t2.a) t order by 
1,2""" + + qt_func_column """select c1,c2 from ( + select t1.a+1 c1,length(t2.b) c2 from test_like1 t1 inner join test_like2 t2 on t1.a=t2.a + union ALL + select t1.a+1,length(t2.b)+1 from test_like1 t1 inner join test_like3 t2 on t1.a=t2.a) t order by 1,2""" + + qt_other_join_slot_differnt """select c1 from ( + select t1.a+1 c1 from test_like1 t1 inner join test_like2 t2 on t1.a=t2.c + union ALL + select t1.a+1 from test_like1 t1 inner join test_like3 t2 on t1.a=t2.a) t order by 1""" + + qt_join_common_slot_has_expr """select c1 from ( + select t1.a+1 c1 from test_like1 t1 inner join test_like2 t2 on t1.a+1=t2.a + union ALL + select t1.a+1 from test_like1 t1 inner join test_like2 t2 on t1.a+1=t2.a) t order by 1""" + + qt_can_not_transform """select c1,c2 from ( + select t2.c c1,t1.a c2 from test_like1 t1 inner join test_like2 t2 on t1.a=t2.a + union ALL + select t1.a,t2.c from test_like1 t1 inner join test_like2 t2 on t1.a=t2.a) t order by 1,2""" + + qt_other_side_condition_slot_has_expr_do_transform """ + select c1 from ( + select t1.a+1 c1 from test_like4 t1 inner join test_like2 t2 on t1.a=t2.a+2 + union ALL + select t1.a+1 from test_like4 t1 inner join test_like3 t2 on t1.a=t2.a+1) t order by 1""" +} \ No newline at end of file diff --git a/regression-test/suites/nereids_syntax_p0/alias_conflict.groovy b/regression-test/suites/nereids_syntax_p0/alias_conflict.groovy index a6bc70387fed34..ad3a04d9f1d872 100644 --- a/regression-test/suites/nereids_syntax_p0/alias_conflict.groovy +++ b/regression-test/suites/nereids_syntax_p0/alias_conflict.groovy @@ -17,6 +17,9 @@ suite("alias_conflict") { + String s3_endpoint = getS3Endpoint() + String bucket = getS3BucketName() + String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mysql-connector-java-8.0.25.jar" sql """ DROP TABLE IF EXISTS `test_alias_conflict1` """ sql """ DROP TABLE IF EXISTS `test_alias_conflict2` """ sql """ DROP TABLE IF EXISTS `test_alias_conflict3` """ @@ -163,7 +166,7 @@ suite("alias_conflict") { 'user'='${context.config.jdbcUser}', 'password'='${context.config.jdbcPassword}', 'jdbc_url' = '${context.config.jdbcUrl}', - 'driver_url' = 'https://repo1.maven.org/maven2/com/mysql/mysql-connector-j/8.4.0/mysql-connector-j-8.4.0.jar', + 'driver_url' = "${driver_url}", 'driver_class' = 'com.mysql.cj.jdbc.Driver' ); """ diff --git a/regression-test/suites/nereids_syntax_p0/distribute/local_shuffle.groovy b/regression-test/suites/nereids_syntax_p0/distribute/local_shuffle.groovy index 950b6171c7ca84..d701ad890d68b6 100644 --- a/regression-test/suites/nereids_syntax_p0/distribute/local_shuffle.groovy +++ b/regression-test/suites/nereids_syntax_p0/distribute/local_shuffle.groovy @@ -45,7 +45,7 @@ suite("local_shuffle") { insert into test_local_shuffle1 values (1, 1), (2, 2); insert into test_local_shuffle2 values (2, 2), (3, 3); - set enable_nereids_distribute_planner=true; + // set enable_nereids_distribute_planner=true; set enable_pipeline_x_engine=true; set disable_join_reorder=true; set enable_local_shuffle=true; diff --git a/regression-test/suites/nereids_syntax_p0/slow_fold_constant_case_when.groovy b/regression-test/suites/nereids_syntax_p0/slow_fold_constant_case_when.groovy new file mode 100644 index 00000000000000..eb8d9be0bafbbf --- /dev/null +++ b/regression-test/suites/nereids_syntax_p0/slow_fold_constant_case_when.groovy @@ -0,0 +1,824 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("slow_fold_constant_case_when") { + sql "drop table if exists slow_fold_constant_case_when" + + sql """ + CREATE TABLE `slow_fold_constant_case_when` ( + `c1` decimal(38,10) NULL, + `c2` decimal(38,10) NULL, + `c3` decimal(38,10) NULL, + `c4` decimal(38,10) NULL, + `c5` decimal(38,10) NULL, + `c6` decimal(38,10) NULL, + `c7` decimal(38,10) NULL, + `c8` decimal(38,10) NULL, + `c9` decimal(38,10) NULL, + `c10` decimal(38,10) NULL, + `c11` decimal(38,10) NULL, + `c12` decimal(38,10) NULL, + `c13` decimal(38,10) NULL, + `c14` decimal(38,10) NULL, + `c15` decimal(38,10) NULL, + `c16` decimal(38,10) NULL, + `c17` decimal(38,10) NULL, + `c18` decimal(38,10) NULL, + `c19` decimal(38,10) NULL, + `c20` decimal(38,10) NULL, + `c21` decimal(38,10) NULL, + `c22` decimal(38,10) NULL, + `c23` decimal(38,10) NULL, + `c24` decimal(38,10) NULL, + `c25` decimal(38,10) NULL, + `c26` decimal(38,10) NULL, + `c27` decimal(38,10) NULL, + `c28` decimal(38,10) NULL, + `c29` decimal(38,10) NULL, + `c30` decimal(38,10) NULL, + `c31` decimal(38,10) NULL, + `c32` decimal(38,10) NULL, + `c33` decimal(38,10) NULL, + `c34` decimal(38,10) NULL, + `c35` decimal(38,10) NULL, + `c36` decimal(38,10) NULL, + `c37` decimal(38,10) NULL, + `c38` decimal(38,10) NULL, + `c39` decimal(38,10) NULL, + `c40` decimal(38,10) NULL, + `c41` decimal(38,10) NULL, + `c42` decimal(38,10) NULL, + `c43` varchar(65532) NULL, + `c44` varchar(65532) NULL, + `c45` varchar(65532) NULL, + `c46` varchar(65532) NULL, + `c47` varchar(65532) NULL, + `c48` varchar(65532) NULL, + `c49` varchar(65532) NULL, + `c50` varchar(65532) NULL, + `c51` varchar(65532) NULL, + `c52` varchar(65532) NULL, + `c53` varchar(65532) NULL, + `c54` varchar(65532) NULL, + `c55` varchar(65532) NULL, + `c56` varchar(65532) NULL, + `c57` varchar(65532) NULL, + `c58` varchar(65532) NULL, + `c59` varchar(65532) NULL, + `c60` varchar(65532) NULL, + `c61` varchar(65532) NULL, + `c62` varchar(65532) NULL, + `c63` varchar(65532) NULL, + `c64` varchar(65532) NULL, + `c65` varchar(65532) NULL, + `c66` varchar(65532) NULL, + `c67` varchar(65532) NULL, + `c68` varchar(65532) NULL, + `c69` varchar(65532) NULL, + `c70` varchar(65532) NULL, + `c71` varchar(65532) NULL, + `c72` varchar(65532) NULL, + `c73` varchar(65532) NULL, + `c74` varchar(65532) NOT NULL, + ) ENGINE=OLAP + DUPLICATE KEY(`c1`, `c2`) + PARTITION BY LIST(`c74`) + (PARTITION P_201901 VALUES IN ("201901"), + PARTITION P_201902 VALUES IN ("201902"), + PARTITION P_201903 VALUES IN ("201903"), + PARTITION P_201904 VALUES IN ("201904"), + PARTITION P_201905 VALUES IN ("201905"), + PARTITION P_201906 VALUES IN ("201906"), + PARTITION P_201907 VALUES IN ("201907"), + PARTITION P_201908 VALUES IN ("201908"), + PARTITION P_201909 VALUES IN ("201909"), + PARTITION P_201910 VALUES IN ("201910"), + PARTITION P_201911 VALUES IN ("201911"), + 
PARTITION P_201912 VALUES IN ("201912"), + PARTITION P_202000 VALUES IN ("202000"), + PARTITION P_202001 VALUES IN ("202001"), + PARTITION P_202002 VALUES IN ("202002"), + PARTITION P_202003 VALUES IN ("202003"), + PARTITION P_202004 VALUES IN ("202004"), + PARTITION P_202005 VALUES IN ("202005"), + PARTITION P_202006 VALUES IN ("202006"), + PARTITION P_202007 VALUES IN ("202007"), + PARTITION P_202008 VALUES IN ("202008"), + PARTITION P_202009 VALUES IN ("202009"), + PARTITION P_202010 VALUES IN ("202010"), + PARTITION P_202011 VALUES IN ("202011"), + PARTITION P_202012 VALUES IN ("202012"), + PARTITION P_202100 VALUES IN ("202100"), + PARTITION P_202101 VALUES IN ("202101"), + PARTITION P_202102 VALUES IN ("202102"), + PARTITION P_202103 VALUES IN ("202103"), + PARTITION P_202104 VALUES IN ("202104"), + PARTITION P_202105 VALUES IN ("202105"), + PARTITION P_202106 VALUES IN ("202106"), + PARTITION P_202107 VALUES IN ("202107"), + PARTITION P_202108 VALUES IN ("202108"), + PARTITION P_202109 VALUES IN ("202109"), + PARTITION P_202110 VALUES IN ("202110"), + PARTITION P_202111 VALUES IN ("202111"), + PARTITION P_202112 VALUES IN ("202112"), + PARTITION P_202201 VALUES IN ("202201"), + PARTITION P_202202 VALUES IN ("202202"), + PARTITION P_202203 VALUES IN ("202203"), + PARTITION P_202204 VALUES IN ("202204"), + PARTITION P_202205 VALUES IN ("202205"), + PARTITION P_202206 VALUES IN ("202206"), + PARTITION P_202207 VALUES IN ("202207"), + PARTITION P_202208 VALUES IN ("202208"), + PARTITION P_202209 VALUES IN ("202209"), + PARTITION P_202210 VALUES IN ("202210"), + PARTITION P_202211 VALUES IN ("202211"), + PARTITION P_202212 VALUES IN ("202212"), + PARTITION P_202300 VALUES IN ("202300"), + PARTITION P_202301 VALUES IN ("202301"), + PARTITION P_202302 VALUES IN ("202302"), + PARTITION P_202303 VALUES IN ("202303"), + PARTITION P_202304 VALUES IN ("202304"), + PARTITION P_202305 VALUES IN ("202305"), + PARTITION P_202306 VALUES IN ("202306"), + PARTITION P_202307 VALUES IN ("202307"), + PARTITION P_202308 VALUES IN ("202308"), + PARTITION P_202309 VALUES IN ("202309"), + PARTITION P_202310 VALUES IN ("202310"), + PARTITION P_202311 VALUES IN ("202311"), + PARTITION P_202312 VALUES IN ("202312"), + PARTITION P_202401 VALUES IN ("202401"), + PARTITION P_202402 VALUES IN ("202402"), + PARTITION P_202403 VALUES IN ("202403"), + PARTITION P_202404 VALUES IN ("202404"), + PARTITION P_202405 VALUES IN ("202405"), + PARTITION P_202406 VALUES IN ("202406"), + PARTITION P_202407 VALUES IN ("202407"), + PARTITION P_202408 VALUES IN ("202408"), + PARTITION P_202409 VALUES IN ("202409")) + DISTRIBUTED BY HASH(`c44`) BUCKETS 3 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ); + """ + + + def sqlStr = """ + SELECT `_____` , + `c24` , + `c28` , + `c26` , + `c21` , + `c25` , + `c29` , + `c5` , + `c6` , + `c7` , + `c15` , + `c4` , + `c27` , + `c19` , + `c18` , + `c20` , + `c8` , + `c13` , + `c16` , + `c11` , + `c10` , + `c14` , + `c9` , + `c12` , + `c17` , + (c22)/(((c39)+(c40))/2) AS `_____` , + ((c39)+(c40))/2 AS `_______` +FROM + (SELECT sum(c22) AS `c22` , + sum(c16) AS `c16` , + sum(c6) AS `c6` , + sum(c26) AS `c26` , + sum(c10) AS `c10` , + sum(c19) AS `c19` , + sum(c17) AS `c17` , + sum(c39) AS `c39` , + sum(c21) AS `c21` , + sum(c12) AS `c12` , + sum(c5) AS `c5` , + sum(c11) AS `c11` , + sum(c4) AS `c4` , + sum(c27) AS `c27` , + sum(c29) AS `c29` , + sum(c20) AS `c20` , + sum(c28) AS `c28` , + sum(c13) AS `c13` , + sum(c25) AS `c25` , + sum(c9) AS `c9` , + sum(c24) AS `c24` , 
+ sum(c14) AS `c14` , + sum(c40) AS `c40` , + sum(c7) AS `c7` , + sum(c15) AS `c15` , + sum(c18) AS `c18` , + sum(c8) AS `c8` , + `_____` + FROM + (SELECT `_____` , + `c24` , + `c28` , + `c26` , + `c21` , + `c25` , + `c29` , + `c5` , + `c6` , + `c7` , + `c15` , + `c4` , + `c27` , + `c19` , + `c18` , + `c20` , + `c8` , + `c13` , + `c16` , + `c11` , + `c10` , + `c14` , + `c9` , + `c12` , + `c17` , + `c22` , + `c39` , + `c40` + FROM + (SELECT array ( `_______0` ) AS _______temp, + `c24` , + `c28` , + `c26` , + `c21` , + `c25` , + `c29` , + `c5` , + `c6` , + `c7` , + `c15` , + `c4` , + `c27` , + `c19` , + `c18` , + `c20` , + `c8` , + `c13` , + `c16` , + `c11` , + `c10` , + `c14` , + `c9` , + `c12` , + `c17` , + `c22` , + `c39` , + `c40` + FROM ( select + CASE + WHEN c57 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + 
'______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c63 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c73 = '______' THEN + '______' + ELSE ( + CASE + WHEN c72 = '______' THEN + '______' + ELSE ( + CASE + WHEN c72 = '______' THEN + '______' + ELSE ( + CASE + WHEN c72 = '______' THEN + '______' + ELSE '______' + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END ) + END AS `_______0` , sum(c24) AS `c24` , sum(c28) AS `c28` , sum(c26) AS `c26` , 
sum(c21) AS `c21` , sum(c25) AS `c25` , sum(c29) AS `c29` , sum(c5) AS `c5` , sum(c6) AS `c6` , sum(c7) AS `c7` , sum(c15) AS `c15` , sum(c4) AS `c4` , sum(c27) AS `c27` , sum(c19) AS `c19` , sum(c18) AS `c18` , sum(c20) AS `c20` , sum(c8) AS `c8` , sum(c13) AS `c13` , sum(c16) AS `c16` , sum(c11) AS `c11` , sum(c10) AS `c10` , sum(c14) AS `c14` , sum(c9) AS `c9` , sum(c12) AS `c12` , sum(c17) AS `c17` , sum(c22)c22 , sum(c39)c39 , sum(c40)c40 + FROM slow_fold_constant_case_when + WHERE slow_fold_constant_case_when.`c43` IN ('______') + AND (( ((`c72` IN ('______')) + OR (`c73` IN ('______')))) + OR (`c63` IN ('______'))) + GROUP BY _______0 limit 3000 ) t ) t lateral view explode ( `_______temp` ) c0 AS _____ + WHERE _____ is NOT null + AND _____ != '______' ) t + GROUP BY grouping sets((_____) ,()) + ORDER BY _____ nulls last limit 3000 ) t + ORDER BY _____ nulls last + """ + + sql "set enable_fold_constant_by_be=false" + test { + sql sqlStr + time 60000 + } + + sql "set enable_fold_constant_by_be=true" + test { + sql sqlStr + time 60000 + } +} diff --git a/regression-test/suites/nereids_syntax_p0/test_limit.groovy b/regression-test/suites/nereids_syntax_p0/test_limit.groovy index 5688097f35f813..567821c627cda0 100644 --- a/regression-test/suites/nereids_syntax_p0/test_limit.groovy +++ b/regression-test/suites/nereids_syntax_p0/test_limit.groovy @@ -34,6 +34,13 @@ suite("test_limit") { result([[1]]) } + test { + sql """ + select * from test1 t1 join (select * from test1 limit 1 offset 1) t2 + """ + result([[1,1],[1,1]]) + } + sql """ drop table if exists row_number_limit_tbl; """ @@ -57,9 +64,13 @@ suite("test_limit") { sql """ INSERT INTO row_number_limit_tbl VALUES (7788, 'SCOTT', 'ANALYST', 7566, '1987-04-19', 3000, 0, 20); """ sql """ INSERT INTO row_number_limit_tbl VALUES (7844, 'TURNER', 'SALESMAN', 7698, '1981-09-08', 1500, 0, 30); """ - qt_limit1 """ - select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t order by k6s limit 1 offset 1; + + test { + sql """ + select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t order by k6s limit 1 offset 1 """ + rowNum 1 + } sql """ truncate table row_number_limit_tbl; """ @@ -67,12 +78,18 @@ suite("test_limit") { sql """ INSERT INTO row_number_limit_tbl VALUES (7844, 'TURNER', 'SALESMAN', 7698, '1981-09-08', 1500, 0, 30); """ sql """ INSERT INTO row_number_limit_tbl VALUES (7934, 'MILLER', 'CLERK', 7782, '1982-01-23', 1300, 0, 10); """ - qt_lmit2 """ - select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t limit 1 offset 2; + test { + sql """ + select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t limit 1 offset 2 """ + rowNum 1 + } sql """ set parallel_pipeline_task_num = 1; """ - qt_lmit3 """ - select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t limit 1 offset 2; + test { + sql """ + select row_number() over(order by k6 desc) k6s, t.* from row_number_limit_tbl t limit 1 offset 2 """ + rowNum 1 + } } diff --git a/regression-test/suites/node_p0/test_frontend.groovy b/regression-test/suites/node_p0/test_frontend.groovy index 4478a1d3709c28..2ccc432460bd9c 100644 --- a/regression-test/suites/node_p0/test_frontend.groovy +++ b/regression-test/suites/node_p0/test_frontend.groovy @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
-suite("test_frontend") { +suite("test_frontend", "nonconcurrent") { def address = "127.0.0.1" def notExistPort = 12345 diff --git a/regression-test/suites/query_p0/cte/query_with_dup_column.groovy b/regression-test/suites/query_p0/cte/query_with_dup_column.groovy new file mode 100644 index 00000000000000..be88cb44bab196 --- /dev/null +++ b/regression-test/suites/query_p0/cte/query_with_dup_column.groovy @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert; + +suite("query_with_dup_column") { + String db = context.config.getDbNameByFile(context.file) + sql "use ${db}" + sql "set runtime_filter_mode=OFF"; + sql "SET ignore_shape_nodes='PhysicalDistribute,PhysicalProject'" + + sql """ + drop table if exists test_table; + """ + + sql """ + CREATE TABLE `test_table` ( + `unique_id` varchar(256) NULL, + `name` varchar(256) NULL + ) + PROPERTIES ( + "replication_num" = "1" + ); + """ + + sql """ + insert into test_table values ("yyyxxxzzz", "abc000000") + """ + + // should fail + try { + sql """ + with tmp1 as ( + select unique_id, unique_id from test_table + ) + select * from tmp1; + """ + } catch (Exception e) { + assertTrue(e.message.contains("Duplicated inline view column alias")) + } + + // should fail + try { + sql """ + with tmp1 as ( + select unique_id, unique_id from test_table + ) + select * from tmp1 t; + """ + } catch (Exception e) { + assertTrue(e.message.contains("Duplicated inline view column alias")) + } + + + try { + sql """ + with tmp1 as ( + select *, unique_id from test_table + ) + select * from tmp1; + """ + } catch (Exception e) { + assertTrue(e.message.contains("Duplicated inline view column alias")) + } + + // should fail + try { + sql """ + with tmp1 as ( + select *, unique_id from test_table + ) + select * from tmp1 t; + """ + } catch (Exception e) { + assertTrue(e.message.contains("Duplicated inline view column alias")) + } + + // should success + sql """ + select *, unique_id from test_table; + """ + + // should success + sql """ + select *, unique_id from test_table t; + """ + + // should success + sql """ + select unique_id, unique_id from test_table + """ + + // should success + sql """ + select unique_id, unique_id from test_table t + """ +} + diff --git a/regression-test/suites/query_p0/set/test_set_command.groovy b/regression-test/suites/query_p0/set/test_set_command.groovy new file mode 100644 index 00000000000000..8b29304fbe240f --- /dev/null +++ b/regression-test/suites/query_p0/set/test_set_command.groovy @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_set_command") { + // setSystemVariable + def default_value = sql """show variables where variable_name = 'insert_timeout';""" + sql """set insert_timeout=97531;""" + def modified_value = sql """show variables where variable_name = 'insert_timeout';""" + assertTrue(modified_value.toString().contains('97531')) + sql """set insert_timeout=DEFAULT;""" + def restored_value = sql """show variables where variable_name = 'insert_timeout';""" + assertEquals(default_value, restored_value) + + default_value = sql """show variables where variable_name = 'insert_timeout';""" + sql """set @@insert_timeout=97531;""" + modified_value = sql """show variables where variable_name = 'insert_timeout';""" + assertTrue(modified_value.toString().contains('97531')) + sql """set @@insert_timeout=DEFAULT;""" + restored_value = sql """show variables where variable_name = 'insert_timeout';""" + assertEquals(default_value, restored_value) + + default_value = sql """show variables where variable_name = 'insert_timeout';""" + sql """set @@session.insert_timeout=97531;""" + modified_value = sql """show variables where variable_name = 'insert_timeout';""" + assertTrue(modified_value.toString().contains('97531')) + sql """set @@session.insert_timeout=DEFAULT;""" + restored_value = sql """show variables where variable_name = 'insert_timeout';""" + assertEquals(default_value, restored_value) + + // setVariableWithType + default_value = sql """show variables where variable_name = 'insert_timeout';""" + sql """set session insert_timeout=97531;""" + modified_value = sql """show variables where variable_name = 'insert_timeout';""" + assertTrue(modified_value.toString().contains('97531')) + sql """set session insert_timeout=DEFAULT;""" + restored_value = sql """show variables where variable_name = 'insert_timeout';""" + assertEquals(default_value, restored_value) + + // setNames do nothing + sql """set names = utf8;""" + + // setCollate do nothing + sql """set names default collate utf_8_ci;""" + + // setTransaction do nothing + sql """set transaction read only;""" + sql """set transaction read write;""" + + // setCharset do nothing + sql """set charset utf8;""" + sql """set charset default;""" +} \ No newline at end of file diff --git a/regression-test/suites/query_p0/sql_functions/string_functions/test_url_functions.groovy b/regression-test/suites/query_p0/sql_functions/string_functions/test_url_functions.groovy new file mode 100644 index 00000000000000..389020b63e225e --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/string_functions/test_url_functions.groovy @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_url_functions") { + sql " drop table if exists test_url_functions" + sql """ + create table test_url_functions ( + id int, + s1 string not null, + s2 string null + ) + DISTRIBUTED BY HASH(id) + PROPERTIES + ( + "replication_num" = "1" + ); + """ + + // empty table + order_qt_empty_nullable1 "select top_level_domain(s2) from test_url_functions" + order_qt_empty_nullable2 "select first_significant_subdomain(s2) from test_url_functions" + order_qt_empty_nullable3 "select cut_to_first_significant_subdomain(s2) from test_url_functions" + order_qt_empty_not_nullable1 "select top_level_domain(s1) from test_url_functions" + order_qt_empty_not_nullable2 "select first_significant_subdomain(s1) from test_url_functions" + order_qt_empty_not_nullable3 "select cut_to_first_significant_subdomain(s1) from test_url_functions" + + // null / const arguments + order_qt_empty_null1 "select top_level_domain(NULL)" + order_qt_empty_null2 "select first_significant_subdomain(NULL)" + order_qt_empty_null3 "select cut_to_first_significant_subdomain(NULL)" + + // valid url constants + order_qt_empty_const1 "select top_level_domain('www.baidu.com')" + order_qt_empty_const2 "select first_significant_subdomain('www.baidu.com')" + order_qt_empty_const3 "select cut_to_first_significant_subdomain('www.baidu.com')" + order_qt_empty_const4 "select top_level_domain('www.google.com.cn')" + order_qt_empty_const5 "select first_significant_subdomain('www.google.com.cn')" + order_qt_empty_const6 "select cut_to_first_significant_subdomain('www.google.com.cn')" + + // invalid url constants + order_qt_empty_const7 "select top_level_domain('I am invaild url')" + order_qt_empty_const8 "select first_significant_subdomain('I am invaild url')" + order_qt_empty_const9 "select cut_to_first_significant_subdomain('I am invaild url')" + + + sql """ insert into test_url_functions values (1, 'www.baidu.com', 'www.baidu.com'); """ + sql """ insert into test_url_functions values (2, 'www.google.com.cn', 'www.google.com.cn'); """ + sql """ insert into test_url_functions values (3, 'invalid url', 'invalid url'); """ + sql """ insert into test_url_functions values (4, '', ''); """ + sql """ insert into test_url_functions values (5, ' ', ' '); """ + sql """ insert into test_url_functions values (6, ' ', NULL); """ + sql """ insert into test_url_functions values (7, 'xxxxxxxx', 'xxxxxxxx'); """ + sql """ insert into test_url_functions values (8, 'http://www.example.com/a/b/c?a=b', 'http://www.example.com/a/b/c?a=b'); """ + sql """ insert into test_url_functions values (9, 'https://news.clickhouse.com/', 'https://news.clickhouse.com/'); """ + sql """ insert into test_url_functions values (10, 'https://news.clickhouse.com.tr/', 'https://news.clickhouse.com.tr/'); """ + + order_qt_nullable1 "select id,s2,top_level_domain(s2) from test_url_functions order by id" + order_qt_nullable2 "select id,s2,first_significant_subdomain(s2) from test_url_functions order by id" + order_qt_nullable3 "select 
id,s2,cut_to_first_significant_subdomain(s2) from test_url_functions order by id" + + order_qt_not_nullable1 "select id,s1,top_level_domain(s1) from test_url_functions order by id" + order_qt_not_nullable2 "select id,s1,first_significant_subdomain(s1) from test_url_functions order by id" + order_qt_not_nullable3 "select id,s1,cut_to_first_significant_subdomain(s1) from test_url_functions order by id" + +} diff --git a/regression-test/suites/query_p0/sql_functions/window_functions/test_qualify_query.groovy b/regression-test/suites/query_p0/sql_functions/window_functions/test_qualify_query.groovy new file mode 100644 index 00000000000000..04ed0cd74157fc --- /dev/null +++ b/regression-test/suites/query_p0/sql_functions/window_functions/test_qualify_query.groovy @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +suite("test_qualify_query") { + sql "create database if not exists qualify_test" + sql "use qualify_test" + sql "DROP TABLE IF EXISTS sales" + sql """ + CREATE TABLE sales ( + year INT, + country STRING, + product STRING, + profit INT + ) + DISTRIBUTED BY HASH(`year`) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1" + ) + """ + sql """ + INSERT INTO sales VALUES + (2000,'Finland','Computer',1501), + (2000,'Finland','Phone',100), + (2001,'Finland','Phone',10), + (2000,'India','Calculator',75), + (2000,'India','Calculator',76), + (2000,'India','Computer',1201), + (2000,'USA','Calculator',77), + (2000,'USA','Computer',1502), + (2001,'USA','Calculator',50), + (2001,'USA','Computer',1503), + (2001,'USA','Computer',1202), + (2001,'USA','TV',150), + (2001,'USA','TV',101); + """ + + qt_select_1 "select year + 1 as year, country from sales where year >= 2000 qualify row_number() over (order by year) > 1 order by year,country;" + + qt_select_4 "select year, country, profit, row_number() over (order by year) as rk from (select * from sales) a where year = 2000 qualify rk = 1;" + + qt_select_5 "select year, country, product, profit, row_number() over (partition by year, country order by profit desc) as rk from sales where year = 2000 qualify rk = 1 order by year, country, product, profit;" + + qt_select_6 "select year, country, profit, row_number() over (partition by year, country order by profit desc) as rk from (select * from sales) a where year >= 2000 having profit > 200 qualify rk = 1 order by year, country;" + + qt_select_7 "select year, country, profit from (select year, country, profit from (select year, country, profit, row_number() over (partition by year, country order by profit desc) as rk from (select * from sales) a where year >= 2000 having profit > 200) t where rk = 1) a where year >= 2000 qualify row_number() over (order by profit) = 1;" + + qt_select_8 "select year, country, profit from (select year, country, 
profit from (select * from sales) a where year >= 2000 having profit > 200 qualify row_number() over (partition by year, country order by profit desc) = 1) a qualify row_number() over (order by profit) = 1;" + + qt_select_9 "select * except(year) replace(profit+1 as profit), row_number() over (order by profit) as rk from sales where year >= 2000 qualify rk = 1;" + + qt_select_10 "select * except(year) replace(profit+1 as profit) from sales where year >= 2000 qualify row_number() over (order by year) > profit;" + + qt_select_12 "select year + 1, if(country = 'USA', 'usa' , country), case when profit < 200 then 200 else profit end as new_profit, row_number() over (partition by year, country order by profit desc) as rk from (select * from sales) a where year >= 2000 having profit > 200 qualify rk = 1 order by new_profit;" + + qt_select_13 "select year + 1, if(country = 'USA', 'usa' , country), case when profit < 200 then 200 else profit end as new_profit from (select * from sales) a where year >= 2000 having profit > 200 qualify row_number() over (partition by year, country order by profit desc) = 1 order by new_profit;" + + qt_select_14 "select * from sales where year >= 2000 qualify row_number() over (partition by year order by profit desc, country) = 1 order by country,profit;" + + qt_select_15 "select *,row_number() over (partition by year order by profit desc, country) as rk from sales where year >= 2000 qualify rk = 1 order by country,profit;" + + qt_select_16 "select * from sales where year >= 2000 qualify row_number() over (partition by year order by if(profit > 200, profit, profit+200) desc, country) = profit order by country;" + + qt_select_17 "select * from sales where year >= 2000 qualify row_number() over (partition by year order by case when profit > 200 then profit else profit+200 end desc, country) = profit order by country;" + + qt_select_18 "select distinct x.year, x.country, x.product from sales x left join sales y on x.year = y.year left join sales z on x.year = z.year where x.year >= 2000 qualify row_number() over (partition by x.year order by x.profit desc) = x.profit order by year;" + + qt_select_19 "select year, country, profit, row_number() over (order by profit) as rk1, row_number() over (order by country) as rk2 from (select * from sales) a where year >= 2000 qualify rk1 = 1 and rk2 > 2;" + + qt_select_20 "select year, country, profit, row_number() over (order by year) as rk from (select * from sales) a where year >= 2000 qualify rk + 1 > 1 * 100;" + + qt_select_21 "select year, country, profit, row_number() over (order by profit) as rk from (select * from sales) a where year >= 2000 qualify rk in (1,2,3);" + + qt_select_22 "select year, country, profit, row_number() over (order by profit) as rk from (select * from sales) a where year >= 2000 qualify rk = (select 1);" + + qt_select_23 "select year, country, profit, row_number() over (order by year) as rk from (select * from sales) a where year >= 2000 qualify rk = (select max(year) from sales);" + + qt_select_24 "select year+1, country, sum(profit) as total from sales where year >= 2000 and country = 'Finland' group by year,country having sum(profit) > 100 qualify row_number() over (order by year) = 1;" + + qt_select_25 "select year, country, profit from (select * from sales) a where year >= 2000 qualify row_number() over (partition by year, country order by profit desc) = 1 order by year, country, profit;" + + qt_select_26 "select year + 1, country from sales where year >= 2000 and country = 'Finland' group by 
year,country qualify row_number() over (order by year) > 1;" + + qt_select_27 "select year + 1, country, row_number() over (order by year) as rk from sales where year >= 2000 and country = 'Finland' group by year,country qualify rk > 1;" + + qt_select_28 "select year + 1, country, sum(profit) as total from sales where year >= 2000 group by year,country having sum(profit) > 1700 qualify row_number() over (order by year) = 1;" + + qt_select_29 "select distinct year + 1,country from sales qualify row_number() over (order by profit + 1) = 1;" + + qt_select_30 "select distinct year,country, row_number() over (order by profit + 1) as rk from sales qualify row_number() over (order by profit + 1) = 1;" + + qt_select_31 "select distinct year + 1 as year,country from sales where country = 'Finland' group by year, country qualify row_number() over (order by year) = 1;" + + qt_select_32 "select distinct year,country from sales having sum(profit) > 100 qualify row_number() over (order by year) > 100;" + + qt_select_33 "select distinct year,country,rank() over (order by year) from sales where country = 'USA' having sum(profit) > 100 qualify row_number() over (order by year) > 1;" + + qt_select_34 "select distinct year,country,rank() over (order by year) from sales where country = 'India' having sum(profit) > 100;" + + qt_select_35 "select year + 1, country from sales having profit >= 100 qualify row_number() over (order by profit) = 6;" + + qt_select_36 "select year + 1, country, row_number() over (order by profit) rk from sales having profit >= 100 qualify rk = 6;" +} + + + + + diff --git a/regression-test/suites/query_p0/union/test_union.groovy b/regression-test/suites/query_p0/union/test_union.groovy index f6b9f1e329b1d8..87523ff81cc2d1 100644 --- a/regression-test/suites/query_p0/union/test_union.groovy +++ b/regression-test/suites/query_p0/union/test_union.groovy @@ -16,6 +16,8 @@ // under the License. 
suite("test_union") { + String suiteName = "query_union_test_union" + String viewName = "${suiteName}_view" def db = "test_query_db" sql "use ${db}" @@ -172,14 +174,14 @@ suite("test_union") { // test_union_bug // PALO-3617 qt_union36 """select * from (select 1 as a, 2 as b union select 3, 3) c where a = 1""" - sql """drop view if exists nullable""" - sql """CREATE VIEW `nullable` AS SELECT `a`.`k1` AS `n1`, `b`.`k2` AS `n2` + sql """drop view if exists ${viewName}""" + sql """CREATE VIEW `${viewName}` AS SELECT `a`.`k1` AS `n1`, `b`.`k2` AS `n2` FROM `${db}`.`baseall` a LEFT OUTER JOIN `${db}`.`bigtable` b ON `a`.`k1` = `b`.`k1` + 10 WHERE `b`.`k2` IS NULL""" - order_qt_union37 """select n1 from nullable union all select n2 from nullable""" - qt_union38 """(select n1 from nullable) union all (select n2 from nullable order by n1) order by n1""" - qt_union39 """(select n1 from nullable) union all (select n2 from nullable) order by n1""" + order_qt_union37 """select n1 from ${viewName} union all select n2 from ${viewName}""" + qt_union38 """(select n1 from ${viewName}) union all (select n2 from ${viewName} order by n1) order by n1""" + qt_union39 """(select n1 from ${viewName}) union all (select n2 from ${viewName}) order by n1""" // test_union_different_column diff --git a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_new_partial_update_delete.groovy b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_new_partial_update_delete.groovy index 99983dde6dfca7..8c338f6aa03dc9 100644 --- a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_new_partial_update_delete.groovy +++ b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_new_partial_update_delete.groovy @@ -90,6 +90,7 @@ suite('test_new_partial_update_delete') { "store_row_column" = "${use_row_store}"); """ def output1 = sql "show create table ${tableName1}" + logger.info("output1: ${output1}") assertTrue output1[0][1].contains("\"enable_mow_light_delete\" = \"false\""); sql "insert into ${tableName1} values(1,1,1,1,1)" // 1,1,1,1,1 @@ -113,6 +114,7 @@ suite('test_new_partial_update_delete') { sql "set enable_unique_key_partial_update=false;" sql "set enable_insert_strict=true;" def output2 = sql "show create table ${tableName1}" + logger.info("output2: ${output2}") assertTrue output2[0][1].contains("\"enable_mow_light_delete\" = \"true\""); sql "insert into ${tableName1} values(2,2,2,2,2)" // 1,2,NULL,NULL,NULL diff --git a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_delete.groovy b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_delete.groovy index 67aa6aa2f13b3a..6850faf2dd9ff2 100644 --- a/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_delete.groovy +++ b/regression-test/suites/unique_with_mow_c_p0/partial_update/test_partial_update_delete.groovy @@ -54,6 +54,9 @@ suite('test_partial_update_delete') { "disable_auto_compaction" = "true", "replication_num" = "1", "store_row_column" = "${use_row_store}"); """ + + def res = sql """ show create table ${tableName2}""" + logger.info("show: " + res) sql "insert into ${tableName1} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);" qt_sql "select * from ${tableName1} order by k1;" diff --git a/regression-test/suites/vault_p0/alter/test_alter_hdfs_vault.groovy b/regression-test/suites/vault_p0/alter/test_alter_hdfs_vault.groovy new file mode 100644 index 00000000000000..3893d43c02a34b --- /dev/null +++ 
b/regression-test/suites/vault_p0/alter/test_alter_hdfs_vault.groovy @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_alter_hdfs_vault", "nonConcurrent") { + def suiteName = name; + if (!isCloudMode()) { + logger.info("skip ${name} case, because not cloud mode") + return + } + + if (!enableStoragevault()) { + logger.info("skip ${name} case, because storage vault not enabled") + return + } + + sql """ + CREATE STORAGE VAULT IF NOT EXISTS ${suiteName} + PROPERTIES ( + "type"="HDFS", + "fs.defaultFS"="${getHmsHdfsFs()}", + "path_prefix" = "${suiteName}", + "hadoop.username" = "hadoop" + ); + """ + + expectExceptionLike({ + sql """ + ALTER STORAGE VAULT ${suiteName} + PROPERTIES ( + "type"="hdfs", + "path_prefix" = "${suiteName}" + ); + """ + }, "Alter property") + + expectExceptionLike({ + sql """ + ALTER STORAGE VAULT ${suiteName} + PROPERTIES ( + "type"="hdfs", + "fs.defaultFS" = "not_exist_vault" + ); + """ + }, "Alter property") + + def vaultName = suiteName + String properties; + + def vaultInfos = try_sql """show storage vault""" + + for (int i = 0; i < vaultInfos.size(); i++) { + def name = vaultInfos[i][0] + if (name.equals(vaultName)) { + properties = vaultInfos[i][2] + } + } + + def newVaultName = suiteName + "_new"; + sql """ + ALTER STORAGE VAULT ${vaultName} + PROPERTIES ( + "type"="hdfs", + "VAULT_NAME" = "${newVaultName}", + "hadoop.username" = "hdfs" + ); + """ + + vaultInfos = sql """ SHOW STORAGE VAULT; """ + boolean exist = false + + for (int i = 0; i < vaultInfos.size(); i++) { + def name = vaultInfos[i][0] + logger.info("name is ${name}, info ${vaultInfos[i]}") + if (name.equals(vaultName)) { + assertTrue(false); + } + if (name.equals(newVaultName)) { + assertTrue(vaultInfos[i][2].contains("""user: "hdfs" """)) + exist = true + } + } + assertTrue(exist) + expectExceptionLike({sql """insert into ${suiteName} values("2", "2");"""}, "") +} diff --git a/regression-test/suites/vault_p0/alter/test_alter_s3_vault.groovy b/regression-test/suites/vault_p0/alter/test_alter_s3_vault.groovy new file mode 100644 index 00000000000000..723422c6e0b84d --- /dev/null +++ b/regression-test/suites/vault_p0/alter/test_alter_s3_vault.groovy @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_alter_s3_vault", "nonConcurrent") { + def suiteName = name; + if (!isCloudMode()) { + logger.info("skip ${suiteName} case, because not cloud mode") + return + } + + if (!enableStoragevault()) { + logger.info("skip ${suiteName} case, because storage vault not enabled") + return + } + + sql """ + CREATE STORAGE VAULT IF NOT EXISTS ${suiteName} + PROPERTIES ( + "type"="S3", + "s3.endpoint"="${getS3Endpoint()}", + "s3.region" = "${getS3Region()}", + "s3.access_key" = "${getS3AK()}", + "s3.secret_key" = "${getS3SK()}", + "s3.root.path" = "${suiteName}", + "s3.bucket" = "${getS3BucketName()}", + "s3.external_endpoint" = "", + "provider" = "${getS3Provider()}" + ); + """ + + expectExceptionLike({ + sql """ + ALTER STORAGE VAULT ${suiteName} + PROPERTIES ( + "type"="S3", + "s3.bucket" = "error_bucket" + ); + """ + }, "Alter property") + + expectExceptionLike({ + sql """ + ALTER STORAGE VAULT ${suiteName} + PROPERTIES ( + "type"="S3", + "provider" = "${getS3Provider()}" + ); + """ + }, "Alter property") + + + def vaultName = suiteName + String properties; + + def vaultInfos = try_sql """show storage vault""" + + for (int i = 0; i < vaultInfos.size(); i++) { + def name = vaultInfos[i][0] + if (name.equals(vaultName)) { + properties = vaultInfos[i][2] + } + } + + def newVaultName = suiteName + "_new"; + + sql """ + ALTER STORAGE VAULT ${vaultName} + PROPERTIES ( + "type"="S3", + "VAULT_NAME" = "${newVaultName}", + "s3.access_key" = "new_ak" + ); + """ + + vaultInfos = sql """SHOW STORAGE VAULT;""" + boolean exist = false + + for (int i = 0; i < vaultInfos.size(); i++) { + def name = vaultInfos[i][0] + logger.info("name is ${name}, info ${vaultInfos[i]}") + if (name.equals(vaultName)) { + assertTrue(false); + } + if (name.equals(newVaultName)) { + assertTrue(vaultInfos[i][2].contains("new_ak")) + exist = true + } + } + assertTrue(exist) + // failed to insert due to the wrong ak + expectExceptionLike({ sql """insert into alter_s3_vault_tbl values("2", "2");""" }, "") +} diff --git a/regression-test/suites/vaults/create/create.groovy b/regression-test/suites/vault_p0/create/test_create_vault.groovy similarity index 54% rename from regression-test/suites/vaults/create/create.groovy rename to regression-test/suites/vault_p0/create/test_create_vault.groovy index 32f22dbd89a48c..bf6ddc756dfac8 100644 --- a/regression-test/suites/vaults/create/create.groovy +++ b/regression-test/suites/vault_p0/create/test_create_vault.groovy @@ -15,9 +15,14 @@ // specific language governing permissions and limitations // under the License. 
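For context on the two new ALTER STORAGE VAULT suites above: both expect structural properties (path_prefix, fs.defaultFS, s3.bucket, provider) to be rejected with an "Alter property" error, while renaming the vault through VAULT_NAME and rotating a credential (hadoop.username or s3.access_key) are accepted and then verified via SHOW STORAGE VAULT. A minimal sketch of the accepted form, using illustrative vault names:

    -- rename the vault and rotate its access key; structural fields stay untouched
    ALTER STORAGE VAULT example_vault
    PROPERTIES (
        "type" = "S3",
        "VAULT_NAME" = "example_vault_new",
        "s3.access_key" = "new_ak"
    );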
-suite("create_vault", "nonConcurrent") { +suite("test_create_vault", "nonConcurrent") { + if (!isCloudMode()) { + logger.info("skip test_create_vault case because not cloud mode") + return + } + if (!enableStoragevault()) { - logger.info("skip create storgage vault case") + logger.info("skip test_create_vault case") return } @@ -25,65 +30,57 @@ suite("create_vault", "nonConcurrent") { sql """ CREATE STORAGE VAULT IF NOT EXISTS failed_vault PROPERTIES ( - "type"="S3", - "fs.defaultFS"="${getHmsHdfsFs()}", - "path_prefix" = "ssb_sf1_p2", - "hadoop.username" = "hadoop" + "type"="S3", + "fs.defaultFS"="${getHmsHdfsFs()}", + "path_prefix" = "ssb_sf1_p2", + "hadoop.username" = "hadoop" ); - """ - }, "Missing") + """ + }, "Missing [s3.endpoint] in properties") expectExceptionLike({ sql """ CREATE STORAGE VAULT IF NOT EXISTS failed_vault PROPERTIES ( - "type"="hdfs", - "s3.bucket"="${getHmsHdfsFs()}", - "path_prefix" = "ssb_sf1_p2", - "hadoop.username" = "hadoop" + "type"="hdfs", + "s3.bucket"="${getHmsHdfsFs()}", + "path_prefix" = "ssb_sf1_p2", + "hadoop.username" = "hadoop" ); - """ + """ }, "invalid fs_name") expectExceptionLike({ - sql """ - CREATE STORAGE VAULT IF NOT EXISTS failed_vault - PROPERTIES ( - ); - """ - }, "Encountered") + sql """ CREATE STORAGE VAULT IF NOT EXISTS failed_vault PROPERTIES (); """ + }, "mismatched input ')'") sql """ CREATE STORAGE VAULT IF NOT EXISTS create_hdfs_vault PROPERTIES ( - "type"="hdfs", - "fs.defaultFS"="${getHmsHdfsFs()}", - "path_prefix" = "default_vault_ssb_hdfs_vault", - "hadoop.username" = "hadoop" + "type"="hdfs", + "fs.defaultFS"="${getHmsHdfsFs()}", + "path_prefix" = "default_vault_ssb_hdfs_vault", + "hadoop.username" = "hadoop" ); - """ + """ + + try_sql """ DROP TABLE IF EXISTS create_table_use_vault FORCE; """ - try_sql """ - drop table create_table_use_vault - """ - sql """ CREATE TABLE IF NOT EXISTS create_table_use_vault ( C_CUSTKEY INTEGER NOT NULL, C_NAME INTEGER NOT NULL - ) - DUPLICATE KEY(C_CUSTKEY, C_NAME) - DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "storage_vault_name" = "create_hdfs_vault" - ) - """ + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = "create_hdfs_vault" + ) + """ - String create_stmt = sql """ - show create table create_table_use_vault - """ + String create_stmt = sql """ SHOW CREATE TABLE create_table_use_vault """ logger.info("the create table stmt is ${create_stmt}") assertTrue(create_stmt.contains("create_hdfs_vault")) @@ -92,9 +89,9 @@ suite("create_vault", "nonConcurrent") { sql """ CREATE STORAGE VAULT create_hdfs_vault PROPERTIES ( - "type"="hdfs", - "fs.defaultFS"="${getHmsHdfsFs()}", - "path_prefix" = "default_vault_ssb_hdfs_vault" + "type"="hdfs", + "fs.defaultFS"="${getHmsHdfsFs()}", + "path_prefix" = "default_vault_ssb_hdfs_vault" ); """ }, "already created") @@ -103,60 +100,54 @@ suite("create_vault", "nonConcurrent") { sql """ CREATE STORAGE VAULT IF NOT EXISTS create_s3_vault PROPERTIES ( - "type"="S3", - "s3.endpoint"="${getS3Endpoint()}", - "s3.region" = "${getS3Region()}", - "s3.access_key" = "${getS3AK()}", - "s3.secret_key" = "${getS3SK()}", - "s3.root.path" = "ssb_sf1_p2_s3", - "s3.bucket" = "${getS3BucketName()}", - "s3.external_endpoint" = "", - "provider" = "${getS3Provider()}" - ); - """ - - expectExceptionLike({ - sql """ - CREATE STORAGE VAULT create_s3_vault - PROPERTIES ( "type"="S3", "s3.endpoint"="${getS3Endpoint()}", "s3.region" = 
"${getS3Region()}", "s3.access_key" = "${getS3AK()}", "s3.secret_key" = "${getS3SK()}", - "s3.root.path" = "ssb_sf1_p2_s3", + "s3.root.path" = "test_create_s3_vault", "s3.bucket" = "${getS3BucketName()}", "s3.external_endpoint" = "", "provider" = "${getS3Provider()}" + ); + """ + + expectExceptionLike({ + sql """ + CREATE STORAGE VAULT create_s3_vault + PROPERTIES ( + "type"="S3", + "s3.endpoint"="${getS3Endpoint()}", + "s3.region" = "${getS3Region()}", + "s3.access_key" = "${getS3AK()}", + "s3.secret_key" = "${getS3SK()}", + "s3.root.path" = "test_create_s3_vault", + "s3.bucket" = "${getS3BucketName()}", + "s3.external_endpoint" = "", + "provider" = "${getS3Provider()}" ); """ }, "already created") sql """ CREATE TABLE IF NOT EXISTS create_table_use_s3_vault ( - C_CUSTKEY INTEGER NOT NULL, - C_NAME INTEGER NOT NULL - ) - DUPLICATE KEY(C_CUSTKEY, C_NAME) - DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "storage_vault_name" = "create_s3_vault" - ) + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = "create_s3_vault" + ) """ - sql """ - insert into create_table_use_s3_vault values(1,1); - """ + sql """ insert into create_table_use_s3_vault values(1,1); """ - sql """ - select * from create_table_use_s3_vault; - """ + sql """ select * from create_table_use_s3_vault; """ - def vaults_info = try_sql """ - show storage vault - """ + def vaults_info = try_sql """ show storage vault """ boolean create_hdfs_vault_exist = false; @@ -182,16 +173,32 @@ suite("create_vault", "nonConcurrent") { sql """ CREATE STORAGE VAULT IF NOT EXISTS built_in_storage_vault PROPERTIES ( - "type"="S3", - "s3.endpoint"="${getS3Endpoint()}", - "s3.region" = "${getS3Region()}", - "s3.access_key" = "${getS3AK()}", - "s3.secret_key" = "${getS3SK()}", - "s3.root.path" = "ssb_sf1_p2_s3", - "s3.bucket" = "${getS3BucketName()}", - "s3.external_endpoint" = "", - "provider" = "${getS3Provider()}" + "type"="S3", + "s3.endpoint"="${getS3Endpoint()}", + "s3.region" = "${getS3Region()}", + "s3.access_key" = "${getS3AK()}", + "s3.secret_key" = "${getS3SK()}", + "s3.root.path" = "test_built_in_storage_vault", + "s3.bucket" = "${getS3BucketName()}", + "s3.external_endpoint" = "", + "provider" = "${getS3Provider()}" ); """ }, "already created") + + + expectExceptionLike({ + sql """ + CREATE TABLE IF NOT EXISTS create_table_with_not_exist_vault ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = "not_exist_vault" + ) + """ + }, "Storage vault 'not_exist_vault' does not exist") } diff --git a/regression-test/suites/vault_p0/default/test_default_vault.groovy b/regression-test/suites/vault_p0/default/test_default_vault.groovy new file mode 100644 index 00000000000000..0ee871458b083d --- /dev/null +++ b/regression-test/suites/vault_p0/default/test_default_vault.groovy @@ -0,0 +1,136 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_default_vault", "nonConcurrent") { + if (!isCloudMode()) { + logger.info("skip ${name} case, because not cloud mode") + return + } + + if (!enableStoragevault()) { + logger.info("skip ${name} case") + return + } + + try { + sql """ UNSET DEFAULT STORAGE VAULT; """ + + expectExceptionLike({ + sql """ set not_exist as default storage vault """ + }, "invalid storage vault name") + + def tableName = "table_use_vault" + sql "DROP TABLE IF EXISTS ${tableName}" + + expectExceptionLike({ + sql """ + CREATE TABLE ${tableName} ( + `key` INT, + value INT + ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1 + PROPERTIES ('replication_num' = '1') + """ + }, "No default storage vault") + + sql """ + CREATE STORAGE VAULT IF NOT EXISTS create_s3_vault_for_default + PROPERTIES ( + "type"="S3", + "s3.endpoint"="${getS3Endpoint()}", + "s3.region" = "${getS3Region()}", + "s3.access_key" = "${getS3AK()}", + "s3.secret_key" = "${getS3SK()}", + "s3.root.path" = "create_s3_vault_for_default", + "s3.bucket" = "${getS3BucketName()}", + "s3.external_endpoint" = "", + "provider" = "${getS3Provider()}", + "set_as_default" = "true" + ); + """ + + sql """ set create_s3_vault_for_default as default storage vault """ + def vaultInfos = sql """ SHOW STORAGE VAULT """ + // check if create_s3_vault_for_default is set as default + for (int i = 0; i < vaultInfos.size(); i++) { + def name = vaultInfos[i][0] + if (name.equals("create_s3_vault_for_default")) { + // isDefault is true + assertEquals(vaultInfos[i][3], "true") + } + } + + sql """ UNSET DEFAULT STORAGE VAULT; """ + vaultInfos = sql """ SHOW STORAGE VAULT """ + for (int i = 0; i < vaultInfos.size(); i++) { + assertEquals(vaultInfos[i][3], "false") + } + + + sql """ set built_in_storage_vault as default storage vault """ + + sql "DROP TABLE IF EXISTS ${tableName} FORCE;" + sql """ + CREATE TABLE ${tableName} ( + `key` INT, + value INT + ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1 + PROPERTIES ('replication_num' = '1') + """ + + sql """ insert into ${tableName} values(1, 1); """ + def result = sql """ select * from ${tableName}; """ + assertEquals(result.size(), 1) + assertEquals(result[0][0], 1) + + def create_table_stmt = sql """ show create table ${tableName} """ + assertTrue(create_table_stmt[0][1].contains("built_in_storage_vault")) + + sql """ + CREATE STORAGE VAULT IF NOT EXISTS create_default_hdfs_vault + PROPERTIES ( + "type"="hdfs", + "fs.defaultFS"="${getHmsHdfsFs()}", + "path_prefix" = "default_vault_ssb_hdfs_vault", + "hadoop.username" = "hadoop" + ); + """ + + sql """ set create_default_hdfs_vault as default storage vault """ + + sql "DROP TABLE IF EXISTS ${tableName} FORCE;" + sql """ + CREATE TABLE ${tableName} ( + `key` INT, + value INT + ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1 + PROPERTIES ('replication_num' = '1') + """ + + create_table_stmt = sql """ show create table ${tableName} """ + assertTrue(create_table_stmt[0][1].contains("create_default_hdfs_vault")) + + expectExceptionLike({ + sql """ + alter table ${tableName} set("storage_vault_name" = "built_in_storage_vault"); + """ +
}, "You can not modify") + + } finally { + sql """ set built_in_storage_vault as default storage vault """ + sql """ set built_in_storage_vault as default storage vault """ + } +} diff --git a/regression-test/suites/vaults/forbid/forbid.groovy b/regression-test/suites/vault_p0/forbid/test_forbid_vault.groovy similarity index 89% rename from regression-test/suites/vaults/forbid/forbid.groovy rename to regression-test/suites/vault_p0/forbid/test_forbid_vault.groovy index 15fba18fc6da1f..da31ae532afe66 100644 --- a/regression-test/suites/vaults/forbid/forbid.groovy +++ b/regression-test/suites/vault_p0/forbid/test_forbid_vault.groovy @@ -15,14 +15,14 @@ // specific language governing permissions and limitations // under the License. -suite("forbid_vault") { - if (enableStoragevault()) { - logger.info("skip forbid storage vault case because storage vault enabled") +suite("test_forbid_vault") { + if (!isCloudMode()) { + logger.info("skip ${name} case, because not cloud mode") return } - if (!isCloudMode()) { - logger.info("skip forbid storage vault case because not cloud mode") + if (enableStoragevault()) { + logger.info("skip ${name} case, because storage vault enabled") return } diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege.groovy new file mode 100644 index 00000000000000..feedbadb3b714e --- /dev/null +++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege.groovy @@ -0,0 +1,190 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import java.util.stream.Collectors; + +suite("test_vault_privilege", "nonConcurrent") { + if (!isCloudMode()) { + logger.info("skip ${name} case, because not cloud mode") + return + } + + if (!enableStoragevault()) { + logger.info("skip ${name} case, because storage vault not enabled") + return + } + + try { + def vault1 = "test_privilege_vault1" + def table1 = "test_privilege_vault_t1" + def table2 = "test_privilege_vault_t2" + def table3 = "test_privilege_vault_t3" + + sql """ + CREATE STORAGE VAULT IF NOT EXISTS ${vault1} + PROPERTIES ( + "type"="hdfs", + "fs.defaultFS"="${getHmsHdfsFs()}", + "path_prefix" = "test_vault_privilege" + ); + """ + + def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> row[0]).collect(Collectors.toSet()) + assertTrue(storageVaults.contains(vault1)) + + sql """ + SET ${vault1} AS DEFAULT STORAGE VAULT + """ + sql """ + UNSET DEFAULT STORAGE VAULT + """ + + sql """ + DROP TABLE IF EXISTS ${table1}; + """ + + sql """ + CREATE TABLE IF NOT EXISTS ${table1} ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = ${vault1} + ) + """ + + def user1 = "test_privilege_vault_user1" + sql """drop user if exists ${user1}""" + sql """create user ${user1} identified by 'Cloud12345'""" + sql """ GRANT create_priv ON *.*.* TO '${user1}'; """ + + def vault2 = "test_privilege_vault2" + // Only users with admin role can create storage vault + connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { + expectExceptionLike({ + sql """ + CREATE STORAGE VAULT IF NOT EXISTS ${vault2} + PROPERTIES ( + "type"="hdfs", + "fs.defaultFS"="${getHmsHdfsFs()}", + "path_prefix" = "test_vault_privilege" + ); + """ + }, "denied") + } + + // Only users with admin role can set/unset default storage vault + connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { + expectExceptionLike({ + sql """ + SET ${vault1} AS DEFAULT STORAGE VAULT + """ + }, "denied") + } + connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { + expectExceptionLike({ + sql """ + UNSET DEFAULT STORAGE VAULT + """ + }, "denied") + } + + def result = connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { + sql " SHOW STORAGE VAULT; " + } + assertTrue(result.isEmpty()) + + sql """ + DROP TABLE IF EXISTS ${table2}; + """ + connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { + expectExceptionLike({ + sql """ + CREATE TABLE IF NOT EXISTS ${table2} ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = ${vault1} + ) + """ + }, "USAGE denied") + } + + sql """ + GRANT usage_priv ON STORAGE VAULT '${vault1}' TO '${user1}'; + """ + + result = connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { + sql " SHOW STORAGE VAULT; " + } + storageVaults = result.stream().map(row -> row[0]).collect(Collectors.toSet()) + assertTrue(storageVaults.contains(vault1)) + + connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """ + CREATE TABLE IF NOT EXISTS ${table2} ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + 
"storage_vault_name" = ${vault1} + ) + """ + } + + sql """ + REVOKE usage_priv ON STORAGE VAULT '${vault1}' FROM '${user1}'; + """ + + result = connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { + sql " SHOW STORAGE VAULT; " + } + assertTrue(result.isEmpty()) + + sql """ + DROP TABLE IF EXISTS ${table3}; + """ + connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { + expectExceptionLike({ + sql """ + CREATE TABLE IF NOT EXISTS ${table3} ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = ${vault1} + ) + """ + }, "USAGE denied") + } + } finally { + sql """ set built_in_storage_vault as default storage vault """ + } +} \ No newline at end of file diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege_restart.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege_restart.groovy new file mode 100644 index 00000000000000..7ff5ec0792b90d --- /dev/null +++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege_restart.groovy @@ -0,0 +1,187 @@ + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import java.util.stream.Collectors; + +// This test suite is intent to test the granted privilege for specific user will +// not disappear +suite("test_vault_privilege_restart", "nonConcurrent") { + if (!isCloudMode()) { + logger.info("skip ${name} case, because not cloud mode") + return + } + + if (!enableStoragevault()) { + logger.info("skip ${name} case, because storage vault not enabled") + return + } + + try { + // user1 will be kept before and after running this test in order to check + // the granted vault privilege is persisted well eventhough FE restarts many times + def user1 = "test_privilege_vault_restart_user1" + def passwd = "Cloud12345" + + def vault1 = "test_privilege_vault_restart_vault1" + // this vaule is derived from current file location: regression-test/vaults + def db = context.dbName + + def table1 = "test_privilege_vault_restart_t1" + def table2 = "test_privilege_vault_restart_t2" + def hdfsLinkWeDontReallyCare = "127.0.0.1:10086" // a dummy link, it doesn't need to work + + //========================================================================== + // prepare the basic vault and tables for further check + //========================================================================== + sql """ + CREATE STORAGE VAULT IF NOT EXISTS ${vault1} + PROPERTIES ( + "type"="hdfs", + "fs.defaultFS"="${hdfsLinkWeDontReallyCare}", + "path_prefix" = "test_vault_privilege_restart" + ); + """ + + def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> row[0]).collect(Collectors.toSet()) + logger.info("all vaults: ${storageVaults}") + org.junit.Assert.assertTrue("${vault1} is not present after creating, all vaults: ${storageVaults}", storageVaults.contains(vault1)) + + def allTables = (sql " SHOW tables").stream().map(row -> row[0]).collect(Collectors.toSet()) + logger.info("all tables ${allTables}") + + // table1 is the sign to check if the user1 has been created and granted well + def targetTableExist = allTables.contains(table1) + + if (targetTableExist) { + // the grant procedure at least run once before, user1 has been granted vault1 + logger.info("${user1} has been granted with usage_priv to ${vault1} before") + } else { + logger.info("this is the frist run, or there was a crash during the very first run, ${user1} has not been granted with usage_priv to ${vault1} before") + // create user and grant storage vault and create a table with that vault + sql """drop user if exists ${user1}""" + sql """create user ${user1} identified by '${passwd}'""" + sql """ + GRANT usage_priv ON storage vault ${vault1} TO '${user1}'; + """ + sql """ + GRANT create_priv ON *.*.* TO '${user1}'; + """ + + // ATTN: create table1, if successful, the sign has been set + // there wont be any execuse that user1 misses the privilege to vault1 from now on + sql """ + CREATE TABLE IF NOT EXISTS ${table1} ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = ${vault1} + ) + """ + } + + //========================================================================== + // check the prepared users and tables + //========================================================================== + def allUsers = (sql " SHOW all grants ").stream().map(row -> row[0]).collect(Collectors.toSet()) + logger.info("all users: ${allUsers}") + def userPresent = !(allUsers.stream().filter(i -> i.contains(user1)).collect(Collectors.toSet()).isEmpty()) + 
org.junit.Assert.assertTrue("${user1} is not in the priv table ${allUsers}", userPresent) + + allTables = (sql " SHOW tables").stream().map(row -> row[0]).collect(Collectors.toSet()) + logger.info("all tables: ${allTables}") + org.junit.Assert.assertTrue("${table1} is not present, all tables: ${allUsers}", allTables.contains(table1)) + + // Test user privilege, the newly created user cannot create or set default vault + // Only users with admin role can create storage vault + connect(user = user1, password = passwd, url = context.config.jdbcUrl) { + sql """use ${db}""" + expectExceptionLike({ + sql """ + CREATE STORAGE VAULT IF NOT EXISTS ${vault1} + PROPERTIES ( + "type"="hdfs", + "fs.defaultFS"="${hdfsLinkWeDontReallyCare}", + "path_prefix" = "test_vault_privilege" + ); + """ + }, "denied") + } + // Only users with admin role can set/unset default storage vault + connect(user = user1, password = passwd, url = context.config.jdbcUrl) { + sql """use ${db}""" + expectExceptionLike({ + sql """ + SET ${vault1} AS DEFAULT STORAGE VAULT + """ + }, "denied") + } + connect(user = user1, password = passwd, url = context.config.jdbcUrl) { + sql """use ${db}""" + expectExceptionLike({ + sql """ + UNSET DEFAULT STORAGE VAULT + """ + }, "denied") + } + + // user1 should see vault1 + def result = connect(user = user1, password = passwd, url = context.config.jdbcUrl) { + sql """use ${db}""" + sql " SHOW STORAGE VAULT; " + } + storageVaults = result.stream().map(row -> row[0]).collect(Collectors.toSet()) + org.junit.Assert.assertTrue("${user1} cannot see granted vault ${vault1} in result ${result}", storageVaults.contains(vault1)) + + + //========================================================================== + // to test that user1 has the privilege of vault1 to create new tables + // this is the main test for granted vault privilege after restarting FE + //========================================================================== + sql """ + DROP TABLE IF EXISTS ${table2} force; + """ + connect(user = user1, password = passwd, url = context.config.jdbcUrl) { + sql """use ${db}""" + sql """ + CREATE TABLE ${table2} ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = ${vault1} + ) + """ + } + + result = connect(user = user1, password = passwd, url = context.config.jdbcUrl) { + sql """use ${db}""" + sql " SHOW create table ${table2}; " + } + logger.info("show create table ${table2}, result ${result}") + org.junit.Assert.assertTrue("missing storage vault properties ${vault1} in table ${table2}", result.toString().contains(vault1)) + } finally { + sql """ set built_in_storage_vault as default storage vault """ + } +} diff --git a/regression-test/suites/vaults/alter/alter_hdfs.groovy b/regression-test/suites/vaults/alter/alter_hdfs.groovy deleted file mode 100644 index 1a1299a93cc8e5..00000000000000 --- a/regression-test/suites/vaults/alter/alter_hdfs.groovy +++ /dev/null @@ -1,119 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("alter_hdfs_vault", "nonConcurrent") { - if (!enableStoragevault()) { - logger.info("skip alter hdfs storgage vault case") - return - } - - sql """ - CREATE STORAGE VAULT IF NOT EXISTS alter_hdfs_vault - PROPERTIES ( - "type"="HDFS", - "fs.defaultFS"="${getHmsHdfsFs()}", - "path_prefix" = "ssb_sf1_p2", - "hadoop.username" = "hadoop" - ); - """ - - sql """ - CREATE TABLE IF NOT EXISTS alter_hdfs_vault_tbl ( - C_CUSTKEY INTEGER NOT NULL, - C_NAME INTEGER NOT NULL - ) - DUPLICATE KEY(C_CUSTKEY, C_NAME) - DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "storage_vault_name" = "alter_hdfs_vault" - ) - """ - - sql """ - insert into alter_hdfs_vault_tbl values("1", "1"); - """ - - expectExceptionLike({ - sql """ - ALTER STORAGE VAULT alter_hdfs_vault - PROPERTIES ( - "type"="hdfs", - "path_prefix" = "ssb_sf1_p3" - ); - """ - }, "Alter property") - - expectExceptionLike({ - sql """ - ALTER STORAGE VAULT alter_hdfs_vault - PROPERTIES ( - "type"="hdfs", - "fs.defaultFS" = "ssb_sf1_p3" - ); - """ - }, "Alter property") - - def vault_name = "alter_hdfs_vault" - String properties; - - def vaults_info = try_sql """ - show storage vault - """ - - for (int i = 0; i < vaults_info.size(); i++) { - def name = vaults_info[i][0] - if (name.equals(vault_name)) { - properties = vaults_info[i][2] - } - } - - sql """ - ALTER STORAGE VAULT alter_hdfs_vault - PROPERTIES ( - "type"="hdfs", - "VAULT_NAME" = "alter_hdfs_vault_new_name", - "hadoop.username" = "hdfs" - ); - """ - - def new_vault_name = "alter_hdfs_vault_new_name" - - vaults_info = sql """ - SHOW STORAGE VAULT; - """ - boolean exist = false - - for (int i = 0; i < vaults_info.size(); i++) { - def name = vaults_info[i][0] - logger.info("name is ${name}, info ${vaults_info[i]}") - if (name.equals(vault_name)) { - exist = true - } - if (name.equals(new_vault_name)) { - assertTrue(vaults_info[i][2].contains(""""hadoop.username" = "hdfs""""")) - } - } - assertFalse(exist) - - // failed to insert due to the wrong ak - expectExceptionLike({ - sql """ - insert into alter_hdfs_vault_tbl values("2", "2"); - """ - }, "") -} diff --git a/regression-test/suites/vaults/alter/alter_s3.groovy b/regression-test/suites/vaults/alter/alter_s3.groovy deleted file mode 100644 index 37f9edd0415857..00000000000000 --- a/regression-test/suites/vaults/alter/alter_s3.groovy +++ /dev/null @@ -1,124 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -suite("alter_s3_vault", "nonConcurrent") { - if (!enableStoragevault()) { - logger.info("skip alter s3 storgage vault case") - return - } - - sql """ - CREATE STORAGE VAULT IF NOT EXISTS alter_s3_vault - PROPERTIES ( - "type"="S3", - "s3.endpoint"="${getS3Endpoint()}", - "s3.region" = "${getS3Region()}", - "s3.access_key" = "${getS3AK()}", - "s3.secret_key" = "${getS3SK()}", - "s3.root.path" = "ssb_sf1_p2_s3", - "s3.bucket" = "${getS3BucketName()}", - "s3.external_endpoint" = "", - "provider" = "${getS3Provider()}" - ); - """ - - sql """ - CREATE TABLE IF NOT EXISTS alter_s3_vault_tbl ( - C_CUSTKEY INTEGER NOT NULL, - C_NAME INTEGER NOT NULL - ) - DUPLICATE KEY(C_CUSTKEY, C_NAME) - DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "storage_vault_name" = "alter_s3_vault" - ) - """ - - sql """ - insert into alter_s3_vault_tbl values("1", "1"); - """ - - expectExceptionLike({ - sql """ - ALTER STORAGE VAULT alter_s3_vault - PROPERTIES ( - "type"="S3", - "s3.bucket" = "error_bucket" - ); - """ - }, "Alter property") - expectExceptionLike({ - sql """ - ALTER STORAGE VAULT alter_s3_vault - PROPERTIES ( - "type"="S3", - "provider" = "${getS3Provider()}" - ); - """ - }, "Alter property") - - def vault_name = "alter_s3_vault" - String properties; - - def vaults_info = try_sql """ - show storage vault - """ - - for (int i = 0; i < vaults_info.size(); i++) { - def name = vaults_info[i][0] - if (name.equals(vault_name)) { - properties = vaults_info[i][2] - } - } - - sql """ - ALTER STORAGE VAULT alter_s3_vault - PROPERTIES ( - "type"="S3", - "VAULT_NAME" = "alter_s3_vault", - "s3.access_key" = "new_ak" - ); - """ - - def new_vault_name = "alter_s3_vault_new" - - vaults_info = sql """ - SHOW STORAGE VAULT; - """ - boolean exist = false - - for (int i = 0; i < vaults_info.size(); i++) { - def name = vaults_info[i][0] - logger.info("name is ${name}, info ${vaults_info[i]}") - if (name.equals(vault_name)) { - exist = true - } - if (name.equals(new_vault_name)) { - assertTrue(vaults_info[i][2].contains(""""s3.access_key" = "new_ak""""")) - } - } - assertFalse(exist) - - // failed to insert due to the wrong ak - expectExceptionLike({ - sql """ - insert into alter_s3_vault_tbl values("2", "2"); - """ - }, "") - -} diff --git a/regression-test/suites/vaults/default/default.groovy b/regression-test/suites/vaults/default/default.groovy deleted file mode 100644 index 6d3f5e3d3dedf1..00000000000000 --- a/regression-test/suites/vaults/default/default.groovy +++ /dev/null @@ -1,139 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -suite("default_vault", "nonConcurrent") { - if (!enableStoragevault()) { - logger.info("skip create storgage vault case") - return - } - expectExceptionLike({ - sql """ - set not_exist as default storage vault - """ - }, "invalid storage vault name") - - def tableName = "table_use_vault" - - expectExceptionLike({ - sql "DROP TABLE IF EXISTS ${tableName}" - sql """ - CREATE TABLE ${tableName} ( - `key` INT, - value INT - ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1 - PROPERTIES ('replication_num' = '1') - """ - }, "supply") - - sql """ - CREATE STORAGE VAULT IF NOT EXISTS create_s3_vault_for_default - PROPERTIES ( - "type"="S3", - "s3.endpoint"="${getS3Endpoint()}", - "s3.region" = "${getS3Region()}", - "s3.access_key" = "${getS3AK()}", - "s3.secret_key" = "${getS3SK()}", - "s3.root.path" = "ssb_sf1_p2_s3", - "s3.bucket" = "${getS3BucketName()}", - "s3.external_endpoint" = "", - "provider" = "${getS3Provider()}", - "set_as_default" = "true" - ); - """ - - def vaults_info = sql """ - show storage vault - """ - - // check if create_s3_vault_for_default is set as default - for (int i = 0; i < vaults_info.size(); i++) { - def name = vaults_info[i][0] - if (name.equals("create_s3_vault_for_default")) { - // isDefault is true - assertEquals(vaults_info[i][3], "true") - } - } - - - sql """ - set built_in_storage_vault as default storage vault - """ - - - sql "DROP TABLE IF EXISTS ${tableName}" - sql """ - CREATE TABLE ${tableName} ( - `key` INT, - value INT - ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1 - PROPERTIES ('replication_num' = '1') - """ - - - sql """ - set built_in_storage_vault as default storage vault - """ - - sql """ - CREATE STORAGE VAULT IF NOT EXISTS create_default_hdfs_vault - PROPERTIES ( - "type"="hdfs", - "fs.defaultFS"="${getHmsHdfsFs()}", - "path_prefix" = "default_vault_ssb_hdfs_vault", - "hadoop.username" = "hadoop" - ); - """ - - sql """ - set create_default_hdfs_vault as default storage vault - """ - - sql "DROP TABLE IF EXISTS ${tableName}" - sql """ - CREATE TABLE ${tableName} ( - `key` INT, - value INT - ) DUPLICATE KEY (`key`) DISTRIBUTED BY HASH (`key`) BUCKETS 1 - PROPERTIES ('replication_num' = '1') - """ - sql """ - insert into ${tableName} values(1, 1); - """ - sql """ - select * from ${tableName}; - """ - - def create_table_stmt = sql """ - show create table ${tableName} - """ - - assertTrue(create_table_stmt[0][1].contains("create_default_hdfs_vault")) - - expectExceptionLike({ - sql """ - alter table ${tableName} set("storage_vault_name" = "built_in_storage_vault"); - """ - }, "You can not modify") - - try { - sql """ - set null as default storage vault - """ - } catch (Exception e) { - } - -} diff --git a/regression-test/suites/vaults/privilege.groovy b/regression-test/suites/vaults/privilege.groovy deleted file mode 100644 index 3225c6a2915f5f..00000000000000 --- a/regression-test/suites/vaults/privilege.groovy +++ /dev/null @@ -1,183 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -import java.util.stream.Collectors; - -suite("test_privilege_vault", "nonConcurrent") { - if (!enableStoragevault()) { - logger.info("skip test_privilege_vault case") - return - } - - def vault1 = "test_privilege_vault1" - def table1 = "test_privilege_vault_t1" - def table2 = "test_privilege_vault_t2" - def table3 = "test_privilege_vault_t3" - - sql """ - CREATE STORAGE VAULT IF NOT EXISTS ${vault1} - PROPERTIES ( - "type"="hdfs", - "fs.defaultFS"="${getHmsHdfsFs()}", - "path_prefix" = "test_vault_privilege" - ); - """ - - def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> row[0]).collect(Collectors.toSet()) - assertTrue(storageVaults.contains(vault1)) - - sql """ - SET ${vault1} AS DEFAULT STORAGE VAULT - """ - sql """ - UNSET DEFAULT STORAGE VAULT - """ - - sql """ - DROP TABLE IF EXISTS ${table1}; - """ - - sql """ - CREATE TABLE IF NOT EXISTS ${table1} ( - C_CUSTKEY INTEGER NOT NULL, - C_NAME INTEGER NOT NULL - ) - DUPLICATE KEY(C_CUSTKEY, C_NAME) - DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "storage_vault_name" = ${vault1} - ) - """ - - def user1 = "test_privilege_vault_user1" - sql """drop user if exists ${user1}""" - sql """create user ${user1} identified by 'Cloud12345'""" - sql """ - GRANT create_priv ON *.*.* TO '${user1}'; - """ - - def vault2 = "test_privilege_vault2" - // Only users with admin role can create storage vault - connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { - expectExceptionLike({ - sql """ - CREATE STORAGE VAULT IF NOT EXISTS ${vault2} - PROPERTIES ( - "type"="hdfs", - "fs.defaultFS"="${getHmsHdfsFs()}", - "path_prefix" = "test_vault_privilege" - ); - """ - }, "denied") - } - - // Only users with admin role can set/unset default storage vault - connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { - expectExceptionLike({ - sql """ - SET ${vault1} AS DEFAULT STORAGE VAULT - """ - }, "denied") - } - connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { - expectExceptionLike({ - sql """ - UNSET DEFAULT STORAGE VAULT - """ - }, "denied") - } - - def result = connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { - sql " SHOW STORAGE VAULT; " - } - assertTrue(result.isEmpty()) - - sql """ - DROP TABLE IF EXISTS ${table2}; - """ - connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { - expectExceptionLike({ - sql """ - CREATE TABLE IF NOT EXISTS ${table2} ( - C_CUSTKEY INTEGER NOT NULL, - C_NAME INTEGER NOT NULL - ) - DUPLICATE KEY(C_CUSTKEY, C_NAME) - DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "storage_vault_name" = ${vault1} - ) - """ - }, "USAGE denied") - } - - sql """ - GRANT usage_priv ON STORAGE VAULT '${vault1}' TO '${user1}'; - """ - - result = connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { - sql " SHOW STORAGE VAULT; " - } - storageVaults = result.stream().map(row -> row[0]).collect(Collectors.toSet()) - assertTrue(storageVaults.contains(vault1)) - - connect(user = user1, 
password = 'Cloud12345', url = context.config.jdbcUrl) { - sql """ - CREATE TABLE IF NOT EXISTS ${table2} ( - C_CUSTKEY INTEGER NOT NULL, - C_NAME INTEGER NOT NULL - ) - DUPLICATE KEY(C_CUSTKEY, C_NAME) - DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "storage_vault_name" = ${vault1} - ) - """ - } - - sql """ - REVOKE usage_priv ON STORAGE VAULT '${vault1}' FROM '${user1}'; - """ - - result = connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { - sql " SHOW STORAGE VAULT; " - } - assertTrue(result.isEmpty()) - - sql """ - DROP TABLE IF EXISTS ${table3}; - """ - connect(user = user1, password = 'Cloud12345', url = context.config.jdbcUrl) { - expectExceptionLike({ - sql """ - CREATE TABLE IF NOT EXISTS ${table3} ( - C_CUSTKEY INTEGER NOT NULL, - C_NAME INTEGER NOT NULL - ) - DUPLICATE KEY(C_CUSTKEY, C_NAME) - DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "storage_vault_name" = ${vault1} - ) - """ - }, "USAGE denied") - } -} \ No newline at end of file diff --git a/regression-test/suites/vaults/privilege_restart.groovy b/regression-test/suites/vaults/privilege_restart.groovy deleted file mode 100644 index 4e8c8fcc04dade..00000000000000 --- a/regression-test/suites/vaults/privilege_restart.groovy +++ /dev/null @@ -1,178 +0,0 @@ - -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -import java.util.stream.Collectors; - -// This test suite is intent to test the granted privilege for specific user will -// not disappear -suite("test_privilege_vault_restart", "nonConcurrent") { - if (!enableStoragevault()) { - logger.info("skip test_privilege_vault_restart case") - return - } - - // user1 will be kept before and after running this test in order to check - // the granted vault privilege is persisted well eventhough FE restarts many times - def user1 = "test_privilege_vault_restart_user1" - def passwd = "Cloud12345" - - def vault1 = "test_privilege_vault_restart_vault1" - // this vaule is derived from current file location: regression-test/vaults - def db = "regression_test_vaults" - def table1 = "test_privilege_vault_restart_t1" - def table2 = "test_privilege_vault_restart_t2" - def hdfsLinkWeDontReallyCare = "127.0.0.1:10086" // a dummy link, it doesn't need to work - - //========================================================================== - // prepare the basic vault and tables for further check - //========================================================================== - sql """ - CREATE STORAGE VAULT IF NOT EXISTS ${vault1} - PROPERTIES ( - "type"="hdfs", - "fs.defaultFS"="${hdfsLinkWeDontReallyCare}", - "path_prefix" = "test_vault_privilege_restart" - ); - """ - - def storageVaults = (sql " SHOW STORAGE VAULT; ").stream().map(row -> row[0]).collect(Collectors.toSet()) - logger.info("all vaults: ${storageVaults}") - org.junit.Assert.assertTrue("${vault1} is not present after creating, all vaults: ${storageVaults}", storageVaults.contains(vault1)) - - def allTables = (sql " SHOW tables").stream().map(row -> row[0]).collect(Collectors.toSet()) - logger.info("all tables ${allTables}") - - // table1 is the sign to check if the user1 has been created and granted well - def targetTableExist = allTables.contains(table1) - - if (targetTableExist) { - // the grant procedure at least run once before, user1 has been granted vault1 - logger.info("${user1} has been granted with usage_priv to ${vault1} before") - } else { - logger.info("this is the frist run, or there was a crash during the very first run, ${user1} has not been granted with usage_priv to ${vault1} before") - // create user and grant storage vault and create a table with that vault - sql """drop user if exists ${user1}""" - sql """create user ${user1} identified by '${passwd}'""" - sql """ - GRANT usage_priv ON storage vault ${vault1} TO '${user1}'; - """ - sql """ - GRANT create_priv ON *.*.* TO '${user1}'; - """ - - // ATTN: create table1, if successful, the sign has been set - // there wont be any execuse that user1 misses the privilege to vault1 from now on - sql """ - CREATE TABLE IF NOT EXISTS ${table1} ( - C_CUSTKEY INTEGER NOT NULL, - C_NAME INTEGER NOT NULL - ) - DUPLICATE KEY(C_CUSTKEY, C_NAME) - DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "storage_vault_name" = ${vault1} - ) - """ - } - - //========================================================================== - // check the prepared users and tables - //========================================================================== - def allUsers = (sql " SHOW all grants ").stream().map(row -> row[0]).collect(Collectors.toSet()) - logger.info("all users: ${allUsers}") - def userPresent = !(allUsers.stream().filter(i -> i.contains(user1)).collect(Collectors.toSet()).isEmpty()) - org.junit.Assert.assertTrue("${user1} is not in the priv table ${allUsers}", userPresent) - - allTables = (sql " SHOW 
tables").stream().map(row -> row[0]).collect(Collectors.toSet()) - logger.info("all tables: ${allTables}") - org.junit.Assert.assertTrue("${table1} is not present, all tables: ${allUsers}", allTables.contains(table1)) - - // Test user privilege, the newly created user cannot create or set default vault - // Only users with admin role can create storage vault - connect(user = user1, password = passwd, url = context.config.jdbcUrl) { - sql """use ${db}""" - expectExceptionLike({ - sql """ - CREATE STORAGE VAULT IF NOT EXISTS ${vault1} - PROPERTIES ( - "type"="hdfs", - "fs.defaultFS"="${hdfsLinkWeDontReallyCare}", - "path_prefix" = "test_vault_privilege" - ); - """ - }, "denied") - } - // Only users with admin role can set/unset default storage vault - connect(user = user1, password = passwd, url = context.config.jdbcUrl) { - sql """use ${db}""" - expectExceptionLike({ - sql """ - SET ${vault1} AS DEFAULT STORAGE VAULT - """ - }, "denied") - } - connect(user = user1, password = passwd, url = context.config.jdbcUrl) { - sql """use ${db}""" - expectExceptionLike({ - sql """ - UNSET DEFAULT STORAGE VAULT - """ - }, "denied") - } - - // user1 should see vault1 - def result = connect(user = user1, password = passwd, url = context.config.jdbcUrl) { - sql """use ${db}""" - sql " SHOW STORAGE VAULT; " - } - storageVaults = result.stream().map(row -> row[0]).collect(Collectors.toSet()) - org.junit.Assert.assertTrue("${user1} cannot see granted vault ${vault1} in result ${result}", storageVaults.contains(vault1)) - - - //========================================================================== - // to test that user1 has the privilege of vault1 to create new tables - // this is the main test for granted vault privilege after restarting FE - //========================================================================== - sql """ - DROP TABLE IF EXISTS ${table2} force; - """ - connect(user = user1, password = passwd, url = context.config.jdbcUrl) { - sql """use ${db}""" - sql """ - CREATE TABLE ${table2} ( - C_CUSTKEY INTEGER NOT NULL, - C_NAME INTEGER NOT NULL - ) - DUPLICATE KEY(C_CUSTKEY, C_NAME) - DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 - PROPERTIES ( - "replication_num" = "1", - "storage_vault_name" = ${vault1} - ) - """ - } - - result = connect(user = user1, password = passwd, url = context.config.jdbcUrl) { - sql """use ${db}""" - sql " SHOW create table ${table2}; " - } - logger.info("show create table ${table2}, result ${result}") - org.junit.Assert.assertTrue("missing storage vault properties ${vault1} in table ${table2}", result.toString().contains(vault1)) - -} diff --git a/tools/auto-pick-script.py b/tools/auto-pick-script.py index 2431c76c6235d4..8f5376863d3043 100644 --- a/tools/auto-pick-script.py +++ b/tools/auto-pick-script.py @@ -89,11 +89,11 @@ # Create a new PR for the cherry-picked changes new_pr = repo.create_pull( title=f"{TARGET_BRANCH}: {pr.title}", # Prefix with branch name - body=pr.body, # Keep the original PR body + body=f"PR Body: {pr.body} \n Cherry-picked from #{pr.number}", # Keep the original PR body head=new_branch_name, base=TARGET_BRANCH ) - + new_pr.create_issue_comment("run buildall") print(f"Created a new PR #{new_pr.number} for cherry-picked changes.") else: print(f"Commit {merge_commit_sha} was not found in {new_branch_name} after cherry-picking.") diff --git a/tools/fdb/fdb_ctl.sh b/tools/fdb/fdb_ctl.sh index 09aaaaf3f2a0d8..5a334faae35077 100755 --- a/tools/fdb/fdb_ctl.sh +++ b/tools/fdb/fdb_ctl.sh @@ -81,17 +81,35 @@ function download_fdb() { return fi - 
local URL="https://github.com/apple/foundationdb/releases/download/${FDB_VERSION}/" - local TMP="${FDB_PKG_DIR}-tmp" - - rm -rf "${TMP}" - mkdir -p "${TMP}" + arch=$(uname -m) + if [[ "${arch}" == "x86_64" ]]; then + local URL="https://github.com/apple/foundationdb/releases/download/${FDB_VERSION}/" + local TMP="${FDB_PKG_DIR}-tmp" + + rm -rf "${TMP}" + mkdir -p "${TMP}" + + wget "${URL}/fdbbackup.x86_64" -O "${TMP}/fdbbackup" + wget "${URL}/fdbserver.x86_64" -O "${TMP}/fdbserver" + wget "${URL}/fdbcli.x86_64" -O "${TMP}/fdbcli" + wget "${URL}/fdbmonitor.x86_64" -O "${TMP}/fdbmonitor" + wget "${URL}/libfdb_c.x86_64.so" -O "${TMP}/libfdb_c.x86_64.so" + elif [[ "${arch}" == "aarch64" ]]; then + local URL="https://doris-build.oss-cn-beijing.aliyuncs.com/thirdparty/fdb/aarch64" + local TMP="${FDB_PKG_DIR}-tmp" + + rm -rf "${TMP}" + mkdir -p "${TMP}" + + wget "${URL}/fdbbackup" -O "${TMP}/fdbbackup" + wget "${URL}/fdbserver" -O "${TMP}/fdbserver" + wget "${URL}/fdbcli" -O "${TMP}/fdbcli" + wget "${URL}/fdbmonitor" -O "${TMP}/fdbmonitor" + wget "${URL}/libfdb_c.aarch64.so" -O "${TMP}/libfdb_c.aarch64.so" + else + echo "Unsupported architecture: ""${arch}" + fi - wget "${URL}/fdbbackup.x86_64" -O "${TMP}/fdbbackup" - wget "${URL}/fdbserver.x86_64" -O "${TMP}/fdbserver" - wget "${URL}/fdbcli.x86_64" -O "${TMP}/fdbcli" - wget "${URL}/fdbmonitor.x86_64" -O "${TMP}/fdbmonitor" - wget "${URL}/libfdb_c.x86_64.so" -O "${TMP}/libfdb_c.x86_64.so" chmod +x "${TMP}"/fdb* mv "${TMP}" "${FDB_PKG_DIR}"