update vendored sources #20

Merged
merged 2 commits on Jun 3, 2024
1 change: 1 addition & 0 deletions CMakeLists.txt.in
@@ -21,6 +21,7 @@ include_directories(${JAVA_INCLUDE_PATH} ${JAVA_INCLUDE_PATH2})

include_directories(${INCLUDE_FILES})
add_definitions(${DEFINES})
add_definitions(-DDUCKDB_EXTENSION_AUTOLOAD_DEFAULT=1 -DDUCKDB_EXTENSION_AUTOINSTALL_DEFAULT=1)

file(GLOB_RECURSE JAVA_SRC_FILES src/main/java/org/duckdb/*.java)
file(GLOB_RECURSE JAVA_TEST_FILES src/test/java/org/duckdb/*.java)
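The added add_definitions line builds the vendored sources with extension autoloading and autoinstall enabled by default. As a rough illustration only, the same behaviour can be requested at runtime through the C++ API; the option names below are assumed from DuckDB's DBConfig, not taken from this diff.

// Sketch: opt into extension autoload/autoinstall via the runtime config
// (option names assumed; the new defines merely change their compiled-in defaults).
#include "duckdb.hpp"

int main() {
	duckdb::DBConfig config;
	config.options.autoload_known_extensions = true;    // what -DDUCKDB_EXTENSION_AUTOLOAD_DEFAULT=1 defaults to
	config.options.autoinstall_known_extensions = true; // what -DDUCKDB_EXTENSION_AUTOINSTALL_DEFAULT=1 defaults to
	duckdb::DuckDB db(nullptr, &config);
	duckdb::Connection con(db);
	con.Query("SELECT 42");
	return 0;
}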
8 changes: 7 additions & 1 deletion src/duckdb/extension/parquet/parquet_extension.cpp
@@ -651,7 +651,13 @@ class ParquetScanFunction {
bool require_extra_columns =
result->multi_file_reader_state && result->multi_file_reader_state->RequiresExtraColumns();
if (input.CanRemoveFilterColumns() || require_extra_columns) {
result->projection_ids = input.projection_ids;
if (!input.projection_ids.empty()) {
result->projection_ids = input.projection_ids;
} else {
result->projection_ids.resize(input.column_ids.size());
iota(begin(result->projection_ids), end(result->projection_ids), 0);
}

const auto table_types = bind_data.types;
for (const auto &col_idx : input.column_ids) {
if (IsRowIdColumnId(col_idx)) {
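The new branch handles the case where filter columns can be removed (or extra columns are required) but the optimizer supplied no explicit projection_ids: it falls back to an identity mapping built with iota. A small self-contained sketch of what that call produces, with simplified types rather than DuckDB's real aliases:

// Sketch of the identity projection map built when input.projection_ids is empty.
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
	std::vector<uint64_t> column_ids = {3, 7, 9}; // hypothetical columns selected by the scan
	std::vector<uint64_t> projection_ids(column_ids.size());
	std::iota(projection_ids.begin(), projection_ids.end(), 0); // -> {0, 1, 2}
	// identity map: output column i simply reads the i-th scanned column
	for (auto id : projection_ids) {
		std::cout << id << " ";
	}
	std::cout << "\n"; // prints: 0 1 2
	return 0;
}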
4 changes: 4 additions & 0 deletions src/duckdb/src/catalog/dependency_manager.cpp
@@ -513,6 +513,10 @@ void DependencyManager::AlterObject(CatalogTransaction transaction, CatalogEntry
disallow_alter = false;
break;
}
case AlterTableType::ADD_COLUMN: {
disallow_alter = false;
break;
}
default:
break;
}
29 changes: 29 additions & 0 deletions src/duckdb/src/common/enum_util.cpp
@@ -18,6 +18,7 @@
#include "duckdb/common/enums/catalog_lookup_behavior.hpp"
#include "duckdb/common/enums/catalog_type.hpp"
#include "duckdb/common/enums/compression_type.hpp"
#include "duckdb/common/enums/copy_overwrite_mode.hpp"
#include "duckdb/common/enums/cte_materialize.hpp"
#include "duckdb/common/enums/date_part_specifier.hpp"
#include "duckdb/common/enums/debug_initialize.hpp"
@@ -1306,6 +1307,34 @@ ConstraintType EnumUtil::FromString<ConstraintType>(const char *value) {
throw NotImplementedException(StringUtil::Format("Enum value: '%s' not implemented", value));
}

template<>
const char* EnumUtil::ToChars<CopyOverwriteMode>(CopyOverwriteMode value) {
switch(value) {
case CopyOverwriteMode::COPY_ERROR_ON_CONFLICT:
return "COPY_ERROR_ON_CONFLICT";
case CopyOverwriteMode::COPY_OVERWRITE:
return "COPY_OVERWRITE";
case CopyOverwriteMode::COPY_OVERWRITE_OR_IGNORE:
return "COPY_OVERWRITE_OR_IGNORE";
default:
throw NotImplementedException(StringUtil::Format("Enum value: '%d' not implemented", value));
}
}

template<>
CopyOverwriteMode EnumUtil::FromString<CopyOverwriteMode>(const char *value) {
if (StringUtil::Equals(value, "COPY_ERROR_ON_CONFLICT")) {
return CopyOverwriteMode::COPY_ERROR_ON_CONFLICT;
}
if (StringUtil::Equals(value, "COPY_OVERWRITE")) {
return CopyOverwriteMode::COPY_OVERWRITE;
}
if (StringUtil::Equals(value, "COPY_OVERWRITE_OR_IGNORE")) {
return CopyOverwriteMode::COPY_OVERWRITE_OR_IGNORE;
}
throw NotImplementedException(StringUtil::Format("Enum value: '%s' not implemented", value));
}

template<>
const char* EnumUtil::ToChars<DataFileType>(DataFileType value) {
switch(value) {
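The new ToChars/FromString specializations above make CopyOverwriteMode round-trippable for serialization and error messages. For orientation, a sketch of the enum they assume; the real declaration lives in copy_overwrite_mode.hpp, is not part of this diff, and may differ in underlying type or values.

// Assumed shape of the enum serialized by the new EnumUtil helpers (illustrative only).
#include <cstdint>

enum class CopyOverwriteMode : uint8_t {
	COPY_ERROR_ON_CONFLICT = 0,  // default: fail if target files already exist
	COPY_OVERWRITE = 1,          // remove conflicting files first (local file systems only)
	COPY_OVERWRITE_OR_IGNORE = 2 // write alongside whatever is already there
};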
7 changes: 5 additions & 2 deletions src/duckdb/src/common/error_data.cpp
@@ -1,6 +1,6 @@
#include "duckdb/common/error_data.hpp"
#include "duckdb/common/exception.hpp"

#include "duckdb/common/exception.hpp"
#include "duckdb/common/string_util.hpp"
#include "duckdb/common/to_string.hpp"
#include "duckdb/common/types.hpp"
@@ -50,7 +50,10 @@ ErrorData::ErrorData(const string &message) : initialized(true), type(ExceptionT

const string &ErrorData::Message() {
if (final_message.empty()) {
final_message = Exception::ExceptionTypeToString(type) + " Error: " + raw_message;
if (type != ExceptionType::UNKNOWN_TYPE) {
final_message = Exception::ExceptionTypeToString(type) + " ";
}
final_message += "Error: " + raw_message;
if (type == ExceptionType::INTERNAL) {
final_message += "\nThis error signals an assertion failure within DuckDB. This usually occurs due to "
"unexpected conditions or errors in the program's logic.\nFor more information, see "
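Besides reordering the exception.hpp include, this change drops the exception-type prefix when the type is UNKNOWN_TYPE, so such errors render as "Error: ..." rather than something like "Unknown Error: ...". A simplified standalone sketch of the new rule; this is not the real ErrorData/Exception API.

// Sketch of the new message formatting: the "<Type> " prefix is only emitted for known types.
#include <iostream>
#include <string>

std::string FormatMessage(const std::string &type_name, bool type_known, const std::string &raw_message) {
	std::string final_message;
	if (type_known) {
		final_message = type_name + " ";
	}
	final_message += "Error: " + raw_message;
	return final_message;
}

int main() {
	std::cout << FormatMessage("Binder", true, "table not found") << "\n"; // Binder Error: table not found
	std::cout << FormatMessage("", false, "something odd") << "\n";        // Error: something odd
	return 0;
}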
5 changes: 5 additions & 0 deletions src/duckdb/src/common/local_file_system.cpp
@@ -485,9 +485,14 @@ int64_t LocalFileSystem::Write(FileHandle &handle, void *buffer, int64_t nr_byte

bool LocalFileSystem::Trim(FileHandle &handle, idx_t offset_bytes, idx_t length_bytes) {
#if defined(__linux__)
// FALLOC_FL_PUNCH_HOLE requires glibc 2.18 or up
#if __GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ < 18)
return false;
#else
int fd = handle.Cast<UnixFileHandle>().fd;
int res = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset_bytes, length_bytes);
return res == 0;
#endif
#else
return false;
#endif
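The guard exists because FALLOC_FL_PUNCH_HOLE only appeared in glibc 2.18, so older glibc builds now report Trim as unsupported instead of failing to compile. A minimal standalone sketch of the same pattern, not DuckDB's FileHandle API; real code would also handle EOPNOTSUPP from filesystems without hole support.

// Sketch: punch a hole with fallocate(2), compiled only when the libc actually provides the flag.
#if defined(__linux__)
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <fcntl.h>
#endif

bool PunchHole(int fd, long long offset, long long length) {
#if defined(__linux__) && defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 18))
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, length) == 0;
#else
	(void)fd;
	(void)offset;
	(void)length;
	return false; // trimming not supported on this platform / libc
#endif
}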
3 changes: 3 additions & 0 deletions src/duckdb/src/common/multi_file_reader.cpp
@@ -14,6 +14,9 @@

namespace duckdb {

MultiFileReaderGlobalState::~MultiFileReaderGlobalState() {
}

MultiFileReader::~MultiFileReader() {
}

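The empty out-of-line destructor mirrors what MultiFileReader already does: defining the virtual destructor in a single translation unit anchors the class's vtable there and keeps destruction through base pointers well-defined. A generic sketch of the idiom with illustrative names, not DuckDB's:

// Sketch: polymorphic base declares the virtual destructor in the header, defines it in one .cpp.
#include <memory>

struct GlobalStateBase {
	virtual ~GlobalStateBase(); // declared here, defined out of line below
	virtual int Size() const {
		return 0;
	}
};

GlobalStateBase::~GlobalStateBase() {
}

struct MyState : GlobalStateBase {
	int Size() const override {
		return 42;
	}
};

int main() {
	std::unique_ptr<GlobalStateBase> state = std::make_unique<MyState>();
	return state->Size(); // destroyed correctly through the base pointer
}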
2 changes: 1 addition & 1 deletion src/duckdb/src/common/printer.cpp
@@ -62,7 +62,7 @@ idx_t Printer::TerminalWidth() {
#ifndef DUCKDB_DISABLE_PRINT
#ifdef DUCKDB_WINDOWS
CONSOLE_SCREEN_BUFFER_INFO csbi;
int columns, rows;
int rows;

GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
rows = csbi.srWindow.Right - csbi.srWindow.Left + 1;
2 changes: 1 addition & 1 deletion src/duckdb/src/common/string_util.cpp
@@ -246,7 +246,7 @@ bool StringUtil::CIEquals(const string &l1, const string &l2) {
bool StringUtil::CILessThan(const string &s1, const string &s2) {
const auto charmap = UpperFun::ASCII_TO_UPPER_MAP;

unsigned char u1, u2;
unsigned char u1 {}, u2 {};

idx_t length = MinValue<idx_t>(s1.length(), s2.length());
length += s1.length() != s2.length();
src/duckdb/src/core_functions/aggregate/holistic/approx_quantile.cpp
@@ -181,16 +181,25 @@ unique_ptr<FunctionData> BindApproxQuantile(ClientContext &context, AggregateFun
throw BinderException("APPROXIMATE QUANTILE can only take constant quantile parameters");
}
Value quantile_val = ExpressionExecutor::EvaluateScalar(context, *arguments[1]);
if (quantile_val.IsNull()) {
throw BinderException("APPROXIMATE QUANTILE parameter list cannot be NULL");
}

vector<float> quantiles;
if (quantile_val.type().id() != LogicalTypeId::LIST) {
quantiles.push_back(CheckApproxQuantile(quantile_val));
} else if (quantile_val.IsNull()) {
throw BinderException("APPROXIMATE QUANTILE parameter list cannot be NULL");
} else {
switch (quantile_val.type().id()) {
case LogicalTypeId::LIST:
for (const auto &element_val : ListValue::GetChildren(quantile_val)) {
quantiles.push_back(CheckApproxQuantile(element_val));
}
break;
case LogicalTypeId::ARRAY:
for (const auto &element_val : ArrayValue::GetChildren(quantile_val)) {
quantiles.push_back(CheckApproxQuantile(element_val));
}
break;
default:
quantiles.push_back(CheckApproxQuantile(quantile_val));
break;
}

// remove the quantile argument so we can use the unary aggregate
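Both this binder and BindQuantile below gain an ARRAY branch, so quantile parameters may now arrive as a scalar, a LIST, or a fixed-size ARRAY, and the NULL check happens before the type is inspected. A hedged sketch of the resulting dispatch shape; it uses only the calls visible in the diff plus Value::GetValue<double>() for extraction, which is an assumption rather than something this diff shows.

// Sketch: flatten a scalar, LIST, or ARRAY quantile parameter into plain doubles.
#include "duckdb.hpp"

#include <vector>

std::vector<double> FlattenQuantileParameter(const duckdb::Value &quantile_val) {
	using duckdb::LogicalTypeId;
	std::vector<double> quantiles;
	switch (quantile_val.type().id()) {
	case LogicalTypeId::LIST:
		for (const auto &element_val : duckdb::ListValue::GetChildren(quantile_val)) {
			quantiles.push_back(element_val.GetValue<double>());
		}
		break;
	case LogicalTypeId::ARRAY:
		for (const auto &element_val : duckdb::ArrayValue::GetChildren(quantile_val)) {
			quantiles.push_back(element_val.GetValue<double>());
		}
		break;
	default:
		// plain scalar parameter
		quantiles.push_back(quantile_val.GetValue<double>());
		break;
	}
	return quantiles;
}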
14 changes: 11 additions & 3 deletions src/duckdb/src/core_functions/aggregate/holistic/quantile.cpp
@@ -1509,12 +1509,20 @@ unique_ptr<FunctionData> BindQuantile(ClientContext &context, AggregateFunction
throw BinderException("QUANTILE argument must not be NULL");
}
vector<Value> quantiles;
if (quantile_val.type().id() != LogicalTypeId::LIST) {
quantiles.push_back(CheckQuantile(quantile_val));
} else {
switch (quantile_val.type().id()) {
case LogicalTypeId::LIST:
for (const auto &element_val : ListValue::GetChildren(quantile_val)) {
quantiles.push_back(CheckQuantile(element_val));
}
break;
case LogicalTypeId::ARRAY:
for (const auto &element_val : ArrayValue::GetChildren(quantile_val)) {
quantiles.push_back(CheckQuantile(element_val));
}
break;
default:
quantiles.push_back(CheckQuantile(quantile_val));
break;
}

Function::EraseArgument(function, arguments, arguments.size() - 1);
4 changes: 2 additions & 2 deletions src/duckdb/src/execution/operator/helper/physical_load.cpp
@@ -17,14 +17,14 @@ static void InstallFromRepository(ClientContext &context, const LoadInfo &info)
}

ExtensionHelper::InstallExtension(context, info.filename, info.load_type == LoadType::FORCE_INSTALL, repository,
info.version);
true, info.version);
}

SourceResultType PhysicalLoad::GetData(ExecutionContext &context, DataChunk &chunk, OperatorSourceInput &input) const {
if (info->load_type == LoadType::INSTALL || info->load_type == LoadType::FORCE_INSTALL) {
if (info->repository.empty()) {
ExtensionHelper::InstallExtension(context.client, info->filename,
info->load_type == LoadType::FORCE_INSTALL, nullptr, info->version);
info->load_type == LoadType::FORCE_INSTALL, nullptr, true, info->version);
} else {
InstallFromRepository(context.client, *info);
}
src/duckdb/src/execution/operator/persistent/physical_copy_database.cpp
@@ -49,8 +49,10 @@ SourceResultType PhysicalCopyDatabase::GetData(ExecutionContext &context, DataCh
catalog.CreateTable(context.client, *bound_info);
break;
}
case CatalogType::INDEX_ENTRY:
default:
throw InternalException("Entry type not supported in PhysicalCopyDatabase");
throw NotImplementedException("Entry type %s not supported in PhysicalCopyDatabase",
CatalogTypeToString(create_info->type));
}
}
return SourceResultType::FINISHED;
src/duckdb/src/execution/operator/persistent/physical_copy_to_file.cpp
@@ -228,12 +228,16 @@ unique_ptr<LocalSinkState> PhysicalCopyToFile::GetLocalSinkState(ExecutionContex
return std::move(res);
}

void CheckDirectory(FileSystem &fs, const string &file_path, bool overwrite) {
if (fs.IsRemoteFile(file_path) && overwrite) {
// we only remove files for local file systems
// as remote file systems (e.g. S3) do not support RemoveFile
void CheckDirectory(FileSystem &fs, const string &file_path, CopyOverwriteMode overwrite_mode) {
if (overwrite_mode == CopyOverwriteMode::COPY_OVERWRITE_OR_IGNORE) {
// with overwrite or ignore we fully ignore the presence of any files instead of erasing them
return;
}
if (fs.IsRemoteFile(file_path) && overwrite_mode == CopyOverwriteMode::COPY_OVERWRITE) {
// we can only remove files for local file systems currently
// as remote file systems (e.g. S3) do not support RemoveFile
throw NotImplementedException("OVERWRITE is not supported for remote file systems");
}
vector<string> file_list;
vector<string> directory_list;
directory_list.push_back(file_path);
@@ -251,13 +255,12 @@ void CheckDirectory(FileSystem &fs, const string &file_path, bool overwrite) {
if (file_list.empty()) {
return;
}
if (overwrite) {
if (overwrite_mode == CopyOverwriteMode::COPY_OVERWRITE) {
for (auto &file : file_list) {
fs.RemoveFile(file);
}
} else {
throw IOException("Directory \"%s\" is not empty! Enable OVERWRITE_OR_IGNORE option to force writing",
file_path);
throw IOException("Directory \"%s\" is not empty! Enable OVERWRITE option to overwrite files", file_path);
}
}

@@ -272,11 +275,11 @@ unique_ptr<GlobalSinkState> PhysicalCopyToFile::GetGlobalSinkState(ClientContext
throw IOException("Cannot write to \"%s\" - it exists and is a file, not a directory!", file_path);
} else {
// for local files we can remove the file if OVERWRITE_OR_IGNORE is enabled
if (overwrite_or_ignore) {
if (overwrite_mode == CopyOverwriteMode::COPY_OVERWRITE) {
fs.RemoveFile(file_path);
} else {
throw IOException("Cannot write to \"%s\" - it exists and is a file, not a directory! Enable "
"OVERWRITE_OR_IGNORE option to force writing",
"OVERWRITE option to overwrite the file",
file_path);
}
}
Expand All @@ -285,7 +288,7 @@ unique_ptr<GlobalSinkState> PhysicalCopyToFile::GetGlobalSinkState(ClientContext
if (!fs.DirectoryExists(file_path)) {
fs.CreateDirectory(file_path);
} else {
CheckDirectory(fs, file_path, overwrite_or_ignore);
CheckDirectory(fs, file_path, overwrite_mode);
}

auto state = make_uniq<CopyToFunctionGlobalState>(nullptr);
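CheckDirectory now takes the full three-state CopyOverwriteMode instead of a boolean: OVERWRITE_OR_IGNORE skips the check entirely, OVERWRITE deletes conflicting local files (and is rejected for remote file systems), and the default errors out on a non-empty directory. A condensed sketch of that decision logic, assuming the enum shape sketched earlier; the real code also walks subdirectories and goes through DuckDB's FileSystem layer.

// Sketch of the per-mode behaviour of the directory check (simplified, standard types only).
#include <stdexcept>
#include <string>
#include <vector>

enum class CopyOverwriteMode { COPY_ERROR_ON_CONFLICT, COPY_OVERWRITE, COPY_OVERWRITE_OR_IGNORE };

void CheckTargetDirectory(const std::vector<std::string> &existing_files, CopyOverwriteMode mode) {
	if (mode == CopyOverwriteMode::COPY_OVERWRITE_OR_IGNORE) {
		return; // ignore whatever is already there and write alongside it
	}
	if (existing_files.empty()) {
		return; // nothing conflicts
	}
	if (mode == CopyOverwriteMode::COPY_OVERWRITE) {
		// OVERWRITE: the real code removes the conflicting files here (local file systems only)
		return;
	}
	// default COPY_ERROR_ON_CONFLICT: refuse to write into a non-empty directory
	throw std::runtime_error("Directory is not empty! Enable OVERWRITE option to overwrite files");
}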
4 changes: 2 additions & 2 deletions src/duckdb/src/execution/physical_plan/plan_copy_to_file.cpp
@@ -17,7 +17,7 @@ unique_ptr<PhysicalOperator> PhysicalPlanGenerator::CreatePlan(LogicalCopyToFile
op.file_path = fs.JoinPath(path, "tmp_" + base);
}
if (op.per_thread_output || op.file_size_bytes.IsValid() || op.partition_output || !op.partition_columns.empty() ||
op.overwrite_or_ignore) {
op.overwrite_mode != CopyOverwriteMode::COPY_ERROR_ON_CONFLICT) {
// hive-partitioning/per-thread output does not care about insertion order, and does not support batch indexes
preserve_insertion_order = false;
supports_batch_index = false;
@@ -42,7 +42,7 @@ unique_ptr<PhysicalOperator> PhysicalPlanGenerator::CreatePlan(LogicalCopyToFile
auto copy = make_uniq<PhysicalCopyToFile>(op.types, op.function, std::move(op.bind_data), op.estimated_cardinality);
copy->file_path = op.file_path;
copy->use_tmp_file = op.use_tmp_file;
copy->overwrite_or_ignore = op.overwrite_or_ignore;
copy->overwrite_mode = op.overwrite_mode;
copy->filename_pattern = op.filename_pattern;
copy->file_extension = op.file_extension;
copy->per_thread_output = op.per_thread_output;
5 changes: 2 additions & 3 deletions src/duckdb/src/function/table/system/duckdb_extensions.cpp
@@ -125,7 +125,7 @@ unique_ptr<GlobalTableFunctionState> DuckDBExtensionsInit(ClientContext &context
if (entry == installed_extensions.end()) {
installed_extensions[info.name] = std::move(info);
} else {
if (!entry->second.loaded) {
if (entry->second.install_mode != ExtensionInstallMode::STATICALLY_LINKED) {
entry->second.file_path = info.file_path;
entry->second.install_mode = info.install_mode;
entry->second.installed_from = info.installed_from;
@@ -144,13 +144,12 @@ unique_ptr<GlobalTableFunctionState> DuckDBExtensionsInit(ClientContext &context
auto &ext_info = e.second;
auto entry = installed_extensions.find(ext_name);
if (entry == installed_extensions.end() || !entry->second.installed) {
ExtensionInformation info;
ExtensionInformation &info = installed_extensions[ext_name];
info.name = ext_name;
info.loaded = true;
info.extension_version = ext_info.version;
info.installed = ext_info.mode == ExtensionInstallMode::STATICALLY_LINKED;
info.install_mode = ext_info.mode;
installed_extensions[ext_name] = std::move(info);
} else {
entry->second.loaded = true;
entry->second.extension_version = ext_info.version;
6 changes: 3 additions & 3 deletions src/duckdb/src/function/table/version/pragma_version.cpp
@@ -1,5 +1,5 @@
#ifndef DUCKDB_PATCH_VERSION
#define DUCKDB_PATCH_VERSION "3"
#define DUCKDB_PATCH_VERSION "4-dev105"
#endif
#ifndef DUCKDB_MINOR_VERSION
#define DUCKDB_MINOR_VERSION 10
Expand All @@ -8,10 +8,10 @@
#define DUCKDB_MAJOR_VERSION 0
#endif
#ifndef DUCKDB_VERSION
#define DUCKDB_VERSION "v0.10.3"
#define DUCKDB_VERSION "v0.10.4-dev105"
#endif
#ifndef DUCKDB_SOURCE_ID
#define DUCKDB_SOURCE_ID "70fd6a8a24"
#define DUCKDB_SOURCE_ID "1f98600c2c"
#endif
#include "duckdb/function/table/system_functions.hpp"
#include "duckdb/main/database.hpp"