Commit

Merge branch 'master' into 20240712_fix_stack
xinyiZzz authored Jul 15, 2024
2 parents 748014a + 01f00aa commit 97c8a0c
Showing 43 changed files with 1,773 additions and 500 deletions.
35 changes: 28 additions & 7 deletions be/src/olap/compaction.cpp
@@ -642,9 +642,13 @@ Status Compaction::do_inverted_index_compaction() {
// format: rowsetId_segmentId
std::vector<std::unique_ptr<InvertedIndexFileWriter>> inverted_index_file_writers(
dest_segment_num);
for (int i = 0; i < dest_segment_num; ++i) {

// Some columns have already been indexed
// key: seg_id, value: inverted index file size
std::unordered_map<int, int64_t> compacted_idx_file_size;
for (int seg_id = 0; seg_id < dest_segment_num; ++seg_id) {
std::string index_path_prefix {
InvertedIndexDescriptor::get_index_file_path_prefix(ctx.segment_path(i))};
InvertedIndexDescriptor::get_index_file_path_prefix(ctx.segment_path(seg_id))};
auto inverted_index_file_reader = std::make_unique<InvertedIndexFileReader>(
ctx.fs(), index_path_prefix,
_cur_tablet_schema->get_inverted_index_storage_format());
@@ -654,16 +658,31 @@ Status Compaction::do_inverted_index_compaction() {
if (st.ok()) {
auto index_not_need_to_compact =
DORIS_TRY(inverted_index_file_reader->get_all_directories());
// V1: each index is a separate file
// V2: all indexes are in a single file
if (_cur_tablet_schema->get_inverted_index_storage_format() !=
doris::InvertedIndexStorageFormatPB::V1) {
int64_t fsize = 0;
st = ctx.fs()->file_size(
InvertedIndexDescriptor::get_index_file_path_v2(index_path_prefix), &fsize);
if (!st.ok()) {
LOG(ERROR) << "file size error in index compaction, error:" << st.msg();
return st;
}
compacted_idx_file_size[seg_id] = fsize;
}
auto inverted_index_file_writer = std::make_unique<InvertedIndexFileWriter>(
ctx.fs(), index_path_prefix, ctx.rowset_id.to_string(), i,
ctx.fs(), index_path_prefix, ctx.rowset_id.to_string(), seg_id,
_cur_tablet_schema->get_inverted_index_storage_format());
RETURN_IF_ERROR(inverted_index_file_writer->initialize(index_not_need_to_compact));
inverted_index_file_writers[i] = std::move(inverted_index_file_writer);
inverted_index_file_writers[seg_id] = std::move(inverted_index_file_writer);
} else if (st.is<ErrorCode::INVERTED_INDEX_FILE_NOT_FOUND>()) {
auto inverted_index_file_writer = std::make_unique<InvertedIndexFileWriter>(
ctx.fs(), index_path_prefix, ctx.rowset_id.to_string(), i,
ctx.fs(), index_path_prefix, ctx.rowset_id.to_string(), seg_id,
_cur_tablet_schema->get_inverted_index_storage_format());
inverted_index_file_writers[i] = std::move(inverted_index_file_writer);
inverted_index_file_writers[seg_id] = std::move(inverted_index_file_writer);
// no index file
compacted_idx_file_size[seg_id] = 0;
} else {
LOG(ERROR) << "inverted_index_file_reader init failed in index compaction, error:"
<< st;
@@ -744,11 +763,13 @@ Status Compaction::do_inverted_index_compaction() {
}

uint64_t inverted_index_file_size = 0;
for (auto& inverted_index_file_writer : inverted_index_file_writers) {
for (int seg_id = 0; seg_id < dest_segment_num; ++seg_id) {
auto inverted_index_file_writer = inverted_index_file_writers[seg_id].get();
if (Status st = inverted_index_file_writer->close(); !st.ok()) {
status = Status::Error<INVERTED_INDEX_COMPACTION_ERROR>(st.msg());
} else {
inverted_index_file_size += inverted_index_file_writer->get_index_file_size();
inverted_index_file_size -= compacted_idx_file_size[seg_id];
}
}
// check index compaction status. If status is not ok, we should return error and end this compaction round.
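For illustration, a minimal standalone sketch of the size-accounting pattern this hunk introduces: the size of any inverted index file that already existed before compaction is recorded per destination segment in compacted_idx_file_size and later subtracted, so the reported inverted_index_file_size covers only newly written data. The containers and numbers below are simplified stand-ins, not the real Doris writer/reader classes.

#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <vector>

int main() {
    // Hypothetical sizes of index files that already existed before this
    // compaction round, keyed by destination segment id (0 means "no file").
    std::unordered_map<int, int64_t> compacted_idx_file_size = {{0, 4096}, {1, 0}};

    // Hypothetical sizes reported by each segment's index file writer on close().
    std::vector<int64_t> writer_index_file_size = {10240, 2048};

    uint64_t inverted_index_file_size = 0;
    for (int seg_id = 0; seg_id < static_cast<int>(writer_index_file_size.size()); ++seg_id) {
        inverted_index_file_size += writer_index_file_size[seg_id];
        // Subtract what was already on disk so only the newly written
        // index data is attributed to this compaction round.
        inverted_index_file_size -= compacted_idx_file_size[seg_id];
    }
    std::cout << "net index bytes written: " << inverted_index_file_size << std::endl;  // prints 8192
    return 0;
}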
15 changes: 9 additions & 6 deletions be/src/olap/rowset/segment_v2/segment_iterator.cpp
@@ -1925,7 +1925,8 @@ Status SegmentIterator::_read_columns(const std::vector<ColumnId>& column_ids,
}

Status SegmentIterator::_init_current_block(
vectorized::Block* block, std::vector<vectorized::MutableColumnPtr>& current_columns) {
vectorized::Block* block, std::vector<vectorized::MutableColumnPtr>& current_columns,
uint32_t nrows_read_limit) {
block->clear_column_data(_schema->num_column_ids());

for (size_t i = 0; i < _schema->num_column_ids(); i++) {
@@ -1945,7 +1946,7 @@ Status SegmentIterator::_init_current_block(
column_desc->path() == nullptr ? "" : column_desc->path()->get_path());
// TODO reuse
current_columns[cid] = file_column_type->create_column();
current_columns[cid]->reserve(_opts.block_row_max);
current_columns[cid]->reserve(nrows_read_limit);
} else {
// the column in block must clear() here to insert new data
if (_is_pred_column[cid] ||
@@ -1964,7 +1965,7 @@
} else if (column_desc->type() == FieldType::OLAP_FIELD_TYPE_DATETIME) {
current_columns[cid]->set_datetime_type();
}
current_columns[cid]->reserve(_opts.block_row_max);
current_columns[cid]->reserve(nrows_read_limit);
}
}
}
@@ -2378,14 +2379,16 @@ Status SegmentIterator::_next_batch_internal(vectorized::Block* block) {
}
}
}
RETURN_IF_ERROR(_init_current_block(block, _current_return_columns));
_converted_column_ids.assign(_schema->columns().size(), 0);

_current_batch_rows_read = 0;
uint32_t nrows_read_limit = _opts.block_row_max;
if (_can_opt_topn_reads()) {
nrows_read_limit = std::min(static_cast<uint32_t>(_opts.topn_limit), nrows_read_limit);
}

RETURN_IF_ERROR(_init_current_block(block, _current_return_columns, nrows_read_limit));
_converted_column_ids.assign(_schema->columns().size(), 0);

_current_batch_rows_read = 0;
RETURN_IF_ERROR(_read_columns_by_index(
nrows_read_limit, _current_batch_rows_read,
_lazy_materialization_read || _opts.record_rowids || _is_need_expr_eval));
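As an aside, a small self-contained sketch of the row-limit logic above: when a top-n read can be optimized, the per-batch limit shrinks from block_row_max to min(topn_limit, block_row_max), and that limit is now also what _init_current_block reserves per column instead of always reserving block_row_max. The struct and function names below are illustrative stand-ins.

#include <algorithm>
#include <cstdint>
#include <iostream>

struct ReadOptions {
    uint32_t block_row_max = 4096;
    int64_t topn_limit = 100;
};

// Mirrors the computation in _next_batch_internal: cap the batch size by the
// top-n limit only when the optimization applies.
uint32_t compute_nrows_read_limit(const ReadOptions& opts, bool can_opt_topn_reads) {
    uint32_t nrows_read_limit = opts.block_row_max;
    if (can_opt_topn_reads) {
        nrows_read_limit = std::min(static_cast<uint32_t>(opts.topn_limit), nrows_read_limit);
    }
    return nrows_read_limit;
}

int main() {
    ReadOptions opts;
    std::cout << compute_nrows_read_limit(opts, /*can_opt_topn_reads=*/true) << "\n";   // 100
    std::cout << compute_nrows_read_limit(opts, /*can_opt_topn_reads=*/false) << "\n";  // 4096
    return 0;
}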
3 changes: 2 additions & 1 deletion be/src/olap/rowset/segment_v2/segment_iterator.h
@@ -221,7 +221,8 @@ class SegmentIterator : public RowwiseIterator {
bool set_block_rowid);
void _replace_version_col(size_t num_rows);
Status _init_current_block(vectorized::Block* block,
std::vector<vectorized::MutableColumnPtr>& non_pred_vector);
std::vector<vectorized::MutableColumnPtr>& non_pred_vector,
uint32_t nrows_read_limit);
uint16_t _evaluate_vectorization_predicate(uint16_t* sel_rowid_idx, uint16_t selected_size);
uint16_t _evaluate_short_circuit_predicate(uint16_t* sel_rowid_idx, uint16_t selected_size);
void _output_non_pred_columns(vectorized::Block* block);
5 changes: 0 additions & 5 deletions be/src/pipeline/exec/analytic_source_operator.cpp
@@ -559,11 +559,6 @@ Status AnalyticLocalState::close(RuntimeState* state) {

std::vector<vectorized::MutableColumnPtr> tmp_result_window_columns;
_result_window_columns.swap(tmp_result_window_columns);
// Some kinds of source operators have a 1-1 relationship with a sink operator (such as AnalyticOperator).
// We must ensure AnalyticSinkOperator will not be blocked if AnalyticSourceOperator is already closed.
if (_shared_state && _shared_state->sink_deps.size() == 1) {
_shared_state->sink_deps.front()->set_always_ready();
}
return PipelineXLocalState<AnalyticSharedState>::close(state);
}

6 changes: 5 additions & 1 deletion be/src/pipeline/exec/hashjoin_build_sink.cpp
@@ -567,7 +567,11 @@ Status HashJoinBuildSinkOperatorX::sink(RuntimeState* state, vectorized::Block*
} else if (!local_state._should_build_hash_table) {
DCHECK(_shared_hashtable_controller != nullptr);
DCHECK(_shared_hash_table_context != nullptr);
CHECK(_shared_hash_table_context->signaled);
// An instance that does not build the hash table should wait for the signal that the hash table build has finished.
// However, if it is running while signaled == false, the source operator may already have closed due to a short circuit,
// so return an end-of-file status here instead of failing a CHECK.
if (!_shared_hash_table_context->signaled) {
return Status::Error<ErrorCode::END_OF_FILE>("source have closed");
}

if (!_shared_hash_table_context->status.ok()) {
return _shared_hash_table_context->status;
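A brief standalone sketch of the control-flow change above: the instance that does not build the hash table no longer CHECK-fails when the shared context was never signaled; it returns an end-of-file style status so the fragment can shut down cleanly. Status and SharedHashTableContext here are simplified stand-ins, not the real Doris types.

#include <iostream>
#include <string>
#include <utility>

struct Status {
    bool ok;
    std::string msg;
    static Status OK() { return {true, ""}; }
    static Status EndOfFile(std::string m) { return {false, std::move(m)}; }
};

struct SharedHashTableContext {
    bool signaled = false;  // set by the instance that actually builds the table
};

Status sink_non_building_instance(const SharedHashTableContext& ctx) {
    // The non-building instance normally runs only after the builder signals.
    // If it is running while signaled is still false, the source side may
    // already have closed (e.g. a short circuit), so bail out gracefully.
    if (!ctx.signaled) {
        return Status::EndOfFile("source operator already closed");
    }
    return Status::OK();
}

int main() {
    SharedHashTableContext ctx;  // never signaled in this example
    std::cout << sink_non_building_instance(ctx).msg << "\n";
    return 0;
}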
115 changes: 71 additions & 44 deletions be/src/pipeline/exec/multi_cast_data_streamer.cpp
@@ -23,63 +23,97 @@

namespace doris::pipeline {

MultiCastBlock::MultiCastBlock(vectorized::Block* block, int used_count, size_t mem_size)
: _used_count(used_count), _mem_size(mem_size) {
MultiCastBlock::MultiCastBlock(vectorized::Block* block, int used_count, int un_finish_copy,
size_t mem_size)
: _used_count(used_count), _un_finish_copy(un_finish_copy), _mem_size(mem_size) {
_block = vectorized::Block::create_unique(block->get_columns_with_type_and_name());
block->clear();
}

Status MultiCastDataStreamer::pull(int sender_idx, doris::vectorized::Block* block, bool* eos) {
std::lock_guard l(_mutex);
auto& pos_to_pull = _sender_pos_to_read[sender_idx];
if (pos_to_pull != _multi_cast_blocks.end()) {
if (pos_to_pull->_used_count == 1) {
DCHECK(pos_to_pull == _multi_cast_blocks.begin());
pos_to_pull->_block->swap(*block);

_cumulative_mem_size -= pos_to_pull->_mem_size;
pos_to_pull++;
_multi_cast_blocks.pop_front();
} else {
pos_to_pull->_block->create_same_struct_block(0)->swap(*block);
RETURN_IF_ERROR(vectorized::MutableBlock(block).merge(*pos_to_pull->_block));
pos_to_pull->_used_count--;
pos_to_pull++;
int* un_finish_copy = nullptr;
int use_count = 0;
{
std::lock_guard l(_mutex);
auto& pos_to_pull = _sender_pos_to_read[sender_idx];
const auto end = _multi_cast_blocks.end();
DCHECK(pos_to_pull != end);

*block = *pos_to_pull->_block;

_cumulative_mem_size -= pos_to_pull->_mem_size;

pos_to_pull->_used_count--;
use_count = pos_to_pull->_used_count;
un_finish_copy = &pos_to_pull->_un_finish_copy;

pos_to_pull++;

if (pos_to_pull == end) {
_block_reading(sender_idx);
}

*eos = _eos and pos_to_pull == end;
}
*eos = _eos and pos_to_pull == _multi_cast_blocks.end();
if (pos_to_pull == _multi_cast_blocks.end()) {
_block_reading(sender_idx);

if (use_count == 0) {
// last reader: wait for all in-flight copies, then pop the block from _multi_cast_blocks
_wait_copy_block(block, *un_finish_copy);
} else {
_copy_block(block, *un_finish_copy);
}

return Status::OK();
}

void MultiCastDataStreamer::_copy_block(vectorized::Block* block, int& un_finish_copy) {
const auto rows = block->rows();
for (int i = 0; i < block->columns(); ++i) {
block->get_by_position(i).column = block->get_by_position(i).column->clone_resized(rows);
}

std::unique_lock l(_mutex);
un_finish_copy--;
if (un_finish_copy == 0) {
l.unlock();
_cv.notify_one();
}
}

void MultiCastDataStreamer::_wait_copy_block(vectorized::Block* block, int& un_finish_copy) {
std::unique_lock l(_mutex);
_cv.wait(l, [&]() { return un_finish_copy == 0; });
_multi_cast_blocks.pop_front();
}

Status MultiCastDataStreamer::push(RuntimeState* state, doris::vectorized::Block* block, bool eos) {
auto rows = block->rows();
COUNTER_UPDATE(_process_rows, rows);

auto block_mem_size = block->allocated_bytes();
std::lock_guard l(_mutex);
int need_process_count = _cast_sender_count - _closed_sender_count;
if (need_process_count == 0) {
return Status::EndOfFile("All data streamer is EOF");
}
// TODO: if the [queue back block rows + block->rows()] < batch_size, better
// do merge block. but need check the need_process_count and used_count whether
// equal
_multi_cast_blocks.emplace_back(block, need_process_count, block_mem_size);
const auto block_mem_size = block->allocated_bytes();
_cumulative_mem_size += block_mem_size;
COUNTER_SET(_peak_mem_usage, std::max(_cumulative_mem_size, _peak_mem_usage->value()));

auto end = _multi_cast_blocks.end();
end--;
for (int i = 0; i < _sender_pos_to_read.size(); ++i) {
if (_sender_pos_to_read[i] == _multi_cast_blocks.end()) {
_sender_pos_to_read[i] = end;
_set_ready_for_read(i);
{
std::lock_guard l(_mutex);
_multi_cast_blocks.emplace_back(block, _cast_sender_count, _cast_sender_count - 1,
block_mem_size);
// last elem
auto end = std::prev(_multi_cast_blocks.end());
for (int i = 0; i < _sender_pos_to_read.size(); ++i) {
if (_sender_pos_to_read[i] == _multi_cast_blocks.end()) {
_sender_pos_to_read[i] = end;
_set_ready_for_read(i);
}
}
_eos = eos;
}

if (_eos) {
for (auto* read_dep : _dependencies) {
read_dep->set_always_ready();
}
}
_eos = eos;
return Status::OK();
}

@@ -92,13 +126,6 @@ void MultiCastDataStreamer::_set_ready_for_read(int sender_idx) {
dep->set_ready();
}

void MultiCastDataStreamer::_set_ready_for_read() {
for (auto* dep : _dependencies) {
DCHECK(dep);
dep->set_ready();
}
}

void MultiCastDataStreamer::_block_reading(int sender_idx) {
if (_dependencies.empty()) {
return;
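To make the new pull protocol easier to follow, here is a minimal standalone sketch (a plain std::vector payload instead of vectorized::Block, illustrative names): every reader decrements used_count under the lock; readers that are not last deep-copy the data outside the lock and then decrement un_finish_copy; the reader that brings used_count to zero waits on the condition variable until all copies have finished, and only then may the shared block be popped. This mirrors how push() seeds used_count with the sender count and un_finish_copy with sender count minus one.

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

struct SharedBlock {
    std::vector<int> data;
    int used_count;      // readers that still have to pull this block
    int un_finish_copy;  // readers whose deep copy is still in flight
};

std::mutex mtx;
std::condition_variable cv;
// Three readers in total: used_count starts at 3, un_finish_copy at 3 - 1 = 2.
SharedBlock shared{{1, 2, 3, 4}, 3, 2};

void reader(int id) {
    bool last_reader = false;
    {
        std::lock_guard<std::mutex> l(mtx);
        shared.used_count--;
        last_reader = (shared.used_count == 0);
    }
    if (last_reader) {
        // Last reader: wait until every in-flight copy has finished; only then
        // would it be safe to pop/destroy the shared block.
        std::unique_lock<std::mutex> l(mtx);
        cv.wait(l, [] { return shared.un_finish_copy == 0; });
        std::cout << "reader " << id << " releases the block\n";
    } else {
        // Other readers: deep-copy outside the lock, then report completion.
        std::vector<int> local_copy = shared.data;
        std::unique_lock<std::mutex> l(mtx);
        if (--shared.un_finish_copy == 0) {
            l.unlock();
            cv.notify_one();
        }
        std::cout << "reader " << id << " copied " << local_copy.size() << " values\n";
    }
}

int main() {
    std::thread t1(reader, 1), t2(reader, 2), t3(reader, 3);
    t1.join();
    t2.join();
    t3.join();
    return 0;
}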
16 changes: 7 additions & 9 deletions be/src/pipeline/exec/multi_cast_data_streamer.h
@@ -23,10 +23,11 @@ namespace doris::pipeline {

class Dependency;
struct MultiCastBlock {
MultiCastBlock(vectorized::Block* block, int used_count, size_t mem_size);
MultiCastBlock(vectorized::Block* block, int used_count, int need_copy, size_t mem_size);

std::unique_ptr<vectorized::Block> _block;
int _used_count;
int _un_finish_copy;
size_t _mem_size;
};

@@ -58,30 +59,27 @@ class MultiCastDataStreamer {

RuntimeProfile* profile() { return _profile; }

void set_eos() {
std::lock_guard l(_mutex);
_eos = true;
_set_ready_for_read();
}

void set_dep_by_sender_idx(int sender_idx, Dependency* dep) {
_dependencies[sender_idx] = dep;
_block_reading(sender_idx);
}

private:
void _set_ready_for_read(int sender_idx);
void _set_ready_for_read();
void _block_reading(int sender_idx);

void _copy_block(vectorized::Block* block, int& un_finish_copy);

void _wait_copy_block(vectorized::Block* block, int& un_finish_copy);

const RowDescriptor& _row_desc;
RuntimeProfile* _profile = nullptr;
std::list<MultiCastBlock> _multi_cast_blocks;
std::vector<std::list<MultiCastBlock>::iterator> _sender_pos_to_read;
std::condition_variable _cv;
std::mutex _mutex;
bool _eos = false;
int _cast_sender_count = 0;
int _closed_sender_count = 0;
int64_t _cumulative_mem_size = 0;

RuntimeProfile::Counter* _process_rows = nullptr;
5 changes: 5 additions & 0 deletions be/src/pipeline/exec/operator.cpp
@@ -511,6 +511,11 @@ Status PipelineXLocalState<SharedStateArg>::close(RuntimeState* state) {
_peak_memory_usage_counter->set(_mem_tracker->peak_consumption());
}
_closed = true;
// Some kinds of source operators have a 1-1 relationship with a sink operator (such as AnalyticOperator).
// We must ensure AnalyticSinkOperator will not be blocked if AnalyticSourceOperator is already closed.
if (_shared_state && _shared_state->sink_deps.size() == 1) {
_shared_state->sink_deps.front()->set_always_ready();
}
return Status::OK();
}

8 changes: 5 additions & 3 deletions be/src/util/bitmap_value.h
@@ -1252,8 +1252,7 @@ class BitmapValue {
std::vector<const detail::Roaring64Map*> bitmaps;
std::vector<uint64_t> single_values;
std::vector<const SetContainer<uint64_t>*> sets;
for (int i = 0; i < values.size(); ++i) {
auto* value = values[i];
for (const auto* value : values) {
switch (value->_type) {
case EMPTY:
break;
@@ -1280,7 +1279,9 @@
_bitmap->add(_sv);
break;
case BITMAP:
*_bitmap |= detail::Roaring64Map::fastunion(bitmaps.size(), bitmaps.data());
for (const auto* bitmap : bitmaps) {
*_bitmap |= *bitmap;
}
break;
case SET: {
*_bitmap = detail::Roaring64Map::fastunion(bitmaps.size(), bitmaps.data());
@@ -1315,6 +1316,7 @@
_bitmap->add(v);
}
_type = BITMAP;
_set.clear();
break;
case SET: {
break;
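As a rough illustration of the two fixes in this file, a standalone sketch with std::set<uint64_t> standing in for detail::Roaring64Map: the collected bitmaps are OR-ed into the result one at a time rather than through a single fastunion call, and once a SET-typed value has been converted to BITMAP the old set is cleared so it no longer pins memory. Everything below is a simplified stand-in, not the BitmapValue API.

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

enum class Type { EMPTY, SINGLE, BITMAP, SET };

struct Value {
    Type type = Type::SET;
    std::set<uint64_t> bitmap;  // stand-in for detail::Roaring64Map
    std::set<uint64_t> set;     // stand-in for the small-set representation
};

void union_into(Value& dst, const std::vector<const std::set<uint64_t>*>& bitmaps) {
    // (1) iterative union, the equivalent of: for (const auto* b : bitmaps) *_bitmap |= *b;
    for (const auto* b : bitmaps) {
        dst.bitmap.insert(b->begin(), b->end());
    }
}

void convert_set_to_bitmap(Value& v) {
    for (uint64_t x : v.set) {
        v.bitmap.insert(x);
    }
    v.type = Type::BITMAP;
    // (2) release the old representation after switching types.
    v.set.clear();
}

int main() {
    Value v;
    v.set = {1, 2, 3};
    convert_set_to_bitmap(v);

    std::set<uint64_t> a = {3, 4};
    std::set<uint64_t> b = {5};
    union_into(v, {&a, &b});

    std::cout << v.bitmap.size() << " " << v.set.size() << "\n";  // prints "5 0"
    return 0;
}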
2 changes: 1 addition & 1 deletion be/src/vec/exprs/vexpr.cpp
@@ -627,7 +627,7 @@ std::string VExpr::gen_predicate_result_sign(Block& block, const ColumnNumbers&
std::string column_name = block.get_by_position(arguments[0]).name;
pred_result_sign +=
BeConsts::BLOCK_TEMP_COLUMN_PREFIX + column_name + "_" + function_name + "_";
if (function_name == "in") {
if (function_name == "in" || function_name == "not_in") {
// Generating 'result_sign' from 'inlist' requires sorting the values.
std::set<std::string> values;
for (size_t i = 1; i < arguments.size(); i++) {
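A small self-contained sketch of the cache-key idea above: both "in" and "not_in" predicates derive their result signature from a sorted (and de-duplicated) copy of the in-list values, so the same list in a different order produces the same key. The "__TEMP__" prefix and the function shape below are illustrative stand-ins for BeConsts::BLOCK_TEMP_COLUMN_PREFIX and the real VExpr::gen_predicate_result_sign.

#include <iostream>
#include <set>
#include <string>
#include <vector>

std::string gen_predicate_result_sign(const std::string& column_name,
                                      const std::string& function_name,
                                      const std::vector<std::string>& in_list) {
    std::string sign = "__TEMP__" + column_name + "_" + function_name + "_";
    if (function_name == "in" || function_name == "not_in") {
        // std::set both deduplicates and orders the in-list values, giving a
        // canonical signature regardless of how the query wrote the list.
        std::set<std::string> values(in_list.begin(), in_list.end());
        for (const auto& v : values) {
            sign += v + ",";
        }
    }
    return sign;
}

int main() {
    std::cout << gen_predicate_result_sign("k1", "not_in", {"b", "a", "c"}) << "\n";
    std::cout << gen_predicate_result_sign("k1", "not_in", {"c", "b", "a"}) << "\n";
    // Both lines print the same signature: __TEMP__k1_not_in_a,b,c,
    return 0;
}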