diff --git a/.github/workflows/kvrocks.yaml b/.github/workflows/kvrocks.yaml index b7767575574..affa0a385fa 100644 --- a/.github/workflows/kvrocks.yaml +++ b/.github/workflows/kvrocks.yaml @@ -292,7 +292,7 @@ jobs: GOCASE_RUN_ARGS="" if [[ -n "${{ matrix.with_openssl }}" ]] && [[ "${{ matrix.os }}" == ubuntu* ]]; then git clone https://github.com/jsha/minica - cd minica && go build && cd .. + cd minica && git checkout 96a5c93723cf3d34b50b3e723a9f05cd3765bc67 && go build && cd .. ./minica/minica --domains localhost cp localhost/cert.pem tests/gocase/tls/cert/server.crt cp localhost/key.pem tests/gocase/tls/cert/server.key @@ -376,7 +376,7 @@ jobs: name: Check Docker image needs: [precondition, check-and-lint, check-typos] if: ${{ needs.precondition.outputs.docs_only != 'true' }} - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - name: Get core numbers diff --git a/NOTICE b/NOTICE index 8488d466c93..7e674d153d6 100644 --- a/NOTICE +++ b/NOTICE @@ -6,6 +6,10 @@ The Apache Software Foundation (http://www.apache.org/). ================================================================ +Thanks to designers Lingyu Tian and Shili Fan for contributing the logo of Kvrocks. + +================================================================ + This product includes a number of Dependencies with separate copyright notices and license terms. Your use of these submodules is subject to the terms and conditions of the following licenses. diff --git a/README.md b/README.md index f07c66b0ec4..e33cdabb29a 100644 --- a/README.md +++ b/README.md @@ -38,8 +38,6 @@ Kvrocks has the following key features: * High Availability: Support Redis sentinel to failover when master or slave was failed. * Cluster: Centralized management but accessible via any Redis cluster client. -Thanks to designers [Lingyu Tian](https://github.com/tianlingyu1997) and Shili Fan for contributing the logo of Kvrocks. - ## Who uses Kvrocks You can find Kvrocks users at [the Users page](https://kvrocks.apache.org/users/). @@ -185,41 +183,6 @@ Documents are hosted at the [official website](https://kvrocks.apache.org/docs/g Kvrocks community welcomes all forms of contribution and you can find out how to get involved on the [Community](https://kvrocks.apache.org/community/) and [How to Contribute](https://kvrocks.apache.org/community/contributing) pages. -## Performance - -### Hardware - -* CPU: 48 cores Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz -* Memory: 32 GiB -* NET: Intel Corporation I350 Gigabit Network Connection -* DISK: 2TB NVMe Intel SSD DC P4600 - -> Benchmark Client: multi-thread redis-benchmark(unstable branch) - -### 1. Commands QPS - -> kvrocks: workers = 16, benchmark: 8 threads/ 512 conns / 128 payload - -latency: 99.9% < 10ms - -![image](assets/chart-commands.png) - -### 2. QPS on different payloads - -> kvrocks: workers = 16, benchmark: 8 threads/ 512 conns - -latency: 99.9% < 10ms - -![image](assets/chart-values.png) - -### 3. QPS on different workers - -> kvrocks: workers = 16, benchmark: 8 threads/ 512 conns / 128 payload - -latency: 99.9% < 10ms - -![image](assets/chart-threads.png) - ## License Apache Kvrocks is licensed under the Apache License Version 2.0. See the [LICENSE](LICENSE) file for details. 
diff --git a/assets/KQIR.png b/assets/KQIR.png new file mode 100644 index 00000000000..e0a605d928a Binary files /dev/null and b/assets/KQIR.png differ diff --git a/assets/chart-commands.png b/assets/chart-commands.png deleted file mode 100644 index d9a250a633f..00000000000 Binary files a/assets/chart-commands.png and /dev/null differ diff --git a/assets/chart-threads.png b/assets/chart-threads.png deleted file mode 100644 index 591263664ed..00000000000 Binary files a/assets/chart-threads.png and /dev/null differ diff --git a/assets/chart-values.png b/assets/chart-values.png deleted file mode 100644 index f7e0d708f20..00000000000 Binary files a/assets/chart-values.png and /dev/null differ diff --git a/cmake/jsoncons.cmake b/cmake/jsoncons.cmake index 02024fa89b1..bdb5ee2d9fb 100644 --- a/cmake/jsoncons.cmake +++ b/cmake/jsoncons.cmake @@ -20,8 +20,8 @@ include_guard() include(cmake/utils.cmake) FetchContent_DeclareGitHubWithMirror(jsoncons - danielaparker/jsoncons v0.174.0 - MD5=1e620831477adbed19e85248c33cbb89 + danielaparker/jsoncons v0.175.0 + MD5=1ee4a655719dc3333b5c1fbf5a6e9321 ) FetchContent_MakeAvailableWithArgs(jsoncons diff --git a/src/cluster/cluster.cc b/src/cluster/cluster.cc index 8bdf9095b0e..d080ed34452 100644 --- a/src/cluster/cluster.cc +++ b/src/cluster/cluster.cc @@ -22,9 +22,11 @@ #include +#include #include #include #include +#include #include "cluster/cluster_defs.h" #include "commands/commander.h" @@ -37,11 +39,11 @@ #include "time_util.h" ClusterNode::ClusterNode(std::string id, std::string host, int port, int role, std::string master_id, - std::bitset slots) + const std::bitset &slots) : id(std::move(id)), host(std::move(host)), port(port), role(role), master_id(std::move(master_id)), slots(slots) {} Cluster::Cluster(Server *srv, std::vector binds, int port) - : srv_(srv), binds_(std::move(binds)), port_(port), size_(0), version_(-1), myself_(nullptr) { + : srv_(srv), binds_(std::move(binds)), port_(port) { for (auto &slots_node : slots_nodes_) { slots_node = nullptr; } @@ -53,10 +55,10 @@ Cluster::Cluster(Server *srv, std::vector binds, int port) // cluster data, so these commands should be executed exclusively, and ReadWriteLock // also can guarantee accessing data is safe. 
bool Cluster::SubCommandIsExecExclusive(const std::string &subcommand) { - for (auto v : {"setnodes", "setnodeid", "setslot", "import", "reset"}) { - if (util::EqualICase(v, subcommand)) return true; - } - return false; + std::array subcommands = {"setnodes", "setnodeid", "setslot", "import", "reset"}; + + return std::any_of(std::begin(subcommands), std::end(subcommands), + [&subcommand](const std::string &val) { return util::EqualICase(val, subcommand); }); } Status Cluster::SetNodeId(const std::string &node_id) { @@ -170,26 +172,26 @@ Status Cluster::SetClusterNodes(const std::string &nodes_str, int64_t version, b size_ = 0; // Update slots to nodes - for (const auto &n : slots_nodes) { - slots_nodes_[n.first] = nodes_[n.second]; + for (const auto &[slot, node_id] : slots_nodes) { + slots_nodes_[slot] = nodes_[node_id]; } // Update replicas info and size - for (auto &n : nodes_) { - if (n.second->role == kClusterSlave) { - if (nodes_.find(n.second->master_id) != nodes_.end()) { - nodes_[n.second->master_id]->replicas.push_back(n.first); + for (const auto &[node_id, node] : nodes_) { + if (node->role == kClusterSlave) { + if (nodes_.find(node->master_id) != nodes_.end()) { + nodes_[node->master_id]->replicas.push_back(node_id); } } - if (n.second->role == kClusterMaster && n.second->slots.count() > 0) { + if (node->role == kClusterMaster && node->slots.count() > 0) { size_++; } } if (myid_.empty() || force) { - for (auto &n : nodes_) { - if (n.second->port == port_ && util::MatchListeningIP(binds_, n.second->host)) { - myid_ = n.first; + for (const auto &[node_id, node] : nodes_) { + if (node->port == port_ && util::MatchListeningIP(binds_, node->host)) { + myid_ = node_id; break; } } @@ -210,9 +212,9 @@ Status Cluster::SetClusterNodes(const std::string &nodes_str, int64_t version, b // Clear data of migrated slots if (!migrated_slots_.empty()) { - for (auto &it : migrated_slots_) { - if (slots_nodes_[it.first] != myself_) { - auto s = srv_->slot_migrator->ClearKeysOfSlot(kDefaultNamespace, it.first); + for (const auto &[slot, _] : migrated_slots_) { + if (slots_nodes_[slot] != myself_) { + auto s = srv_->slot_migrator->ClearKeysOfSlot(kDefaultNamespace, slot); if (!s.ok()) { LOG(ERROR) << "failed to clear data of migrated slots: " << s.ToString(); } @@ -521,34 +523,32 @@ std::string Cluster::genNodesDescription() { auto now = util::GetTimeStampMS(); std::string nodes_desc; - for (const auto &item : nodes_) { - const std::shared_ptr n = item.second; - + for (const auto &[_, node] : nodes_) { std::string node_str; // ID, host, port - node_str.append(n->id + " "); - node_str.append(fmt::format("{}:{}@{} ", n->host, n->port, n->port + kClusterPortIncr)); + node_str.append(node->id + " "); + node_str.append(fmt::format("{}:{}@{} ", node->host, node->port, node->port + kClusterPortIncr)); // Flags - if (n->id == myid_) node_str.append("myself,"); - if (n->role == kClusterMaster) { + if (node->id == myid_) node_str.append("myself,"); + if (node->role == kClusterMaster) { node_str.append("master - "); } else { - node_str.append("slave " + n->master_id + " "); + node_str.append("slave " + node->master_id + " "); } // Ping sent, pong received, config epoch, link status node_str.append(fmt::format("{} {} {} connected", now - 1, now, version_)); - if (n->role == kClusterMaster) { - auto iter = slots_infos.find(n->id); - if (iter != slots_infos.end() && iter->second.size() > 0) { + if (node->role == kClusterMaster) { + auto iter = slots_infos.find(node->id); + if (iter != slots_infos.end() && 
!iter->second.empty()) { node_str.append(" " + iter->second); } } // Just for MYSELF node to show the importing/migrating slot - if (n->id == myid_) { + if (node->id == myid_) { if (srv_->slot_migrator) { auto migrating_slot = srv_->slot_migrator->GetMigratingSlot(); if (migrating_slot != -1) { @@ -567,10 +567,10 @@ std::string Cluster::genNodesDescription() { return nodes_desc; } -std::map Cluster::getClusterNodeSlots() const { +std::map> Cluster::getClusterNodeSlots() const { int start = -1; // node id => slots info string - std::map slots_infos; + std::map> slots_infos; std::shared_ptr n = nullptr; for (int i = 0; i <= kClusterSlots; i++) { @@ -600,30 +600,29 @@ std::map Cluster::getClusterNodeSlots() const { return slots_infos; } -std::string Cluster::genNodesInfo() { +std::string Cluster::genNodesInfo() const { auto slots_infos = getClusterNodeSlots(); std::string nodes_info; - for (const auto &item : nodes_) { - const std::shared_ptr &n = item.second; + for (const auto &[_, node] : nodes_) { std::string node_str; node_str.append("node "); // ID - node_str.append(n->id + " "); + node_str.append(node->id + " "); // Host + Port - node_str.append(fmt::format("{} {} ", n->host, n->port)); + node_str.append(fmt::format("{} {} ", node->host, node->port)); // Role - if (n->role == kClusterMaster) { + if (node->role == kClusterMaster) { node_str.append("master - "); } else { - node_str.append("slave " + n->master_id + " "); + node_str.append("slave " + node->master_id + " "); } // Slots - if (n->role == kClusterMaster) { - auto iter = slots_infos.find(n->id); - if (iter != slots_infos.end() && iter->second.size() > 0) { + if (node->role == kClusterMaster) { + auto iter = slots_infos.find(node->id); + if (iter != slots_infos.end() && !iter->second.empty()) { node_str.append(" " + iter->second); } } @@ -694,7 +693,7 @@ Status Cluster::LoadClusterNodes(const std::string &file_path) { Status Cluster::parseClusterNodes(const std::string &nodes_str, ClusterNodes *nodes, std::unordered_map *slots_nodes) { std::vector nodes_info = util::Split(nodes_str, "\n"); - if (nodes_info.size() == 0) { + if (nodes_info.empty()) { return {Status::ClusterInvalidInfo, errInvalidClusterNodeInfo}; } @@ -803,16 +802,17 @@ Status Cluster::parseClusterNodes(const std::string &nodes_str, ClusterNodes *no return Status::OK(); } -bool Cluster::IsWriteForbiddenSlot(int slot) { return srv_->slot_migrator->GetForbiddenSlot() == slot; } +bool Cluster::IsWriteForbiddenSlot(int slot) const { return srv_->slot_migrator->GetForbiddenSlot() == slot; } Status Cluster::CanExecByMySelf(const redis::CommandAttributes *attributes, const std::vector &cmd_tokens, redis::Connection *conn) { std::vector keys_indexes; - auto s = redis::CommandTable::GetKeysFromCommand(attributes, cmd_tokens, &keys_indexes); + // No keys - if (!s.IsOK()) return Status::OK(); + if (auto s = redis::CommandTable::GetKeysFromCommand(attributes, cmd_tokens, &keys_indexes); !s.IsOK()) + return Status::OK(); - if (keys_indexes.size() == 0) return Status::OK(); + if (keys_indexes.empty()) return Status::OK(); int slot = -1; for (auto i : keys_indexes) { diff --git a/src/cluster/cluster.h b/src/cluster/cluster.h index c98ea668082..335d5ef16b1 100644 --- a/src/cluster/cluster.h +++ b/src/cluster/cluster.h @@ -39,7 +39,7 @@ class ClusterNode { public: explicit ClusterNode(std::string id, std::string host, int port, int role, std::string master_id, - std::bitset slots); + const std::bitset &slots); std::string id; std::string host; int port; @@ -81,7 +81,7 @@ class 
Cluster { int64_t GetVersion() const { return version_; } static bool IsValidSlot(int slot) { return slot >= 0 && slot < kClusterSlots; } bool IsNotMaster(); - bool IsWriteForbiddenSlot(int slot); + bool IsWriteForbiddenSlot(int slot) const; Status CanExecByMySelf(const redis::CommandAttributes *attributes, const std::vector &cmd_tokens, redis::Connection *conn); Status SetMasterSlaveRepl(); @@ -97,16 +97,16 @@ class Cluster { private: std::string getNodeIDBySlot(int slot) const; std::string genNodesDescription(); - std::string genNodesInfo(); - std::map getClusterNodeSlots() const; + std::string genNodesInfo() const; + std::map> getClusterNodeSlots() const; SlotInfo genSlotNodeInfo(int start, int end, const std::shared_ptr &n); static Status parseClusterNodes(const std::string &nodes_str, ClusterNodes *nodes, std::unordered_map *slots_nodes); Server *srv_; std::vector binds_; int port_; - int size_; - int64_t version_; + int size_ = 0; + int64_t version_ = -1; std::string myid_; std::shared_ptr myself_; ClusterNodes nodes_; diff --git a/src/cluster/redis_slot.cc b/src/cluster/redis_slot.cc index 5934fd2d601..991b5d863e7 100644 --- a/src/cluster/redis_slot.cc +++ b/src/cluster/redis_slot.cc @@ -20,8 +20,6 @@ #include "redis_slot.h" -#include - #include #include #include diff --git a/src/cluster/replication.cc b/src/cluster/replication.cc index 4df05a47683..57d8b9bc17f 100644 --- a/src/cluster/replication.cc +++ b/src/cluster/replication.cc @@ -201,7 +201,7 @@ void ReplicationThread::CallbacksStateMachine::ReadWriteCB(bufferevent *bev) { assert(handler_idx_ <= handlers_.size()); DLOG(INFO) << "[replication] Execute handler[" << getHandlerName(handler_idx_) << "]"; auto st = getHandlerFunc(handler_idx_)(repl_, bev); - repl_->last_io_time_.store(util::GetTimeStamp(), std::memory_order_relaxed); + repl_->last_io_time_secs_.store(util::GetTimeStamp(), std::memory_order_relaxed); switch (st) { case CBState::NEXT: ++handler_idx_; diff --git a/src/cluster/replication.h b/src/cluster/replication.h index b7f49717cc1..b223bd6a0e5 100644 --- a/src/cluster/replication.h +++ b/src/cluster/replication.h @@ -98,7 +98,7 @@ class ReplicationThread : private EventCallbackBase { Status Start(std::function &&pre_fullsync_cb, std::function &&post_fullsync_cb); void Stop(); ReplState State() { return repl_state_.load(std::memory_order_relaxed); } - time_t LastIOTime() { return last_io_time_.load(std::memory_order_relaxed); } + int64_t LastIOTimeSecs() const { return last_io_time_secs_.load(std::memory_order_relaxed); } void TimerCB(int, int16_t); @@ -155,7 +155,7 @@ class ReplicationThread : private EventCallbackBase { Server *srv_ = nullptr; engine::Storage *storage_ = nullptr; std::atomic repl_state_; - std::atomic last_io_time_ = 0; + std::atomic last_io_time_secs_ = 0; bool next_try_old_psync_ = false; bool next_try_without_announce_ip_address_ = false; diff --git a/src/commands/cmd_json.cc b/src/commands/cmd_json.cc index e29bad83855..43421399e67 100644 --- a/src/commands/cmd_json.cc +++ b/src/commands/cmd_json.cc @@ -559,6 +559,11 @@ class CommandJsonStrLen : public Commander { Optionals results; auto s = json.StrLen(args_[1], path, &results); + if (s.IsNotFound()) { + *output = conn->NilString(); + return Status::OK(); + } + if (!s.ok()) return {Status::RedisExecErr, s.ToString()}; *output = OptionalsToString(conn, results); diff --git a/src/commands/cmd_key.cc b/src/commands/cmd_key.cc index 589fa1ed1ae..24d8fe29c4d 100644 --- a/src/commands/cmd_key.cc +++ b/src/commands/cmd_key.cc @@ -424,6 +424,120 @@ 
class CommandCopy : public Commander { bool replace_ = false; }; +template +class CommandSort : public Commander { + public: + Status Parse(const std::vector &args) override { + CommandParser parser(args, 2); + while (parser.Good()) { + if (parser.EatEqICase("BY")) { + if (!sort_argument_.sortby.empty()) return {Status::InvalidArgument, "don't use multiple BY parameters"}; + sort_argument_.sortby = GET_OR_RET(parser.TakeStr()); + + if (sort_argument_.sortby.find('*') == std::string::npos) { + sort_argument_.dontsort = true; + } else { + /* TODO: + * If BY is specified with a real pattern, we can't accept it in cluster mode, + * unless we can make sure the keys formed by the pattern are in the same slot + * as the key to sort. + * If BY is specified with a real pattern, we can't accept + * it if no full ACL key access is applied for this command. */ + } + } else if (parser.EatEqICase("LIMIT")) { + sort_argument_.offset = GET_OR_RET(parser.template TakeInt()); + sort_argument_.count = GET_OR_RET(parser.template TakeInt()); + } else if (parser.EatEqICase("GET")) { + /* TODO: + * If GET is specified with a real pattern, we can't accept it in cluster mode, + * unless we can make sure the keys formed by the pattern are in the same slot + * as the key to sort. */ + sort_argument_.getpatterns.push_back(GET_OR_RET(parser.TakeStr())); + } else if (parser.EatEqICase("ASC")) { + sort_argument_.desc = false; + } else if (parser.EatEqICase("DESC")) { + sort_argument_.desc = true; + } else if (parser.EatEqICase("ALPHA")) { + sort_argument_.alpha = true; + } else if (parser.EatEqICase("STORE")) { + if constexpr (ReadOnly) { + return {Status::RedisParseErr, "SORT_RO is read-only and does not support the STORE parameter"}; + } + sort_argument_.storekey = GET_OR_RET(parser.TakeStr()); + } else { + return parser.InvalidSyntax(); + } + } + + return Status::OK(); + } + + Status Execute(Server *srv, Connection *conn, std::string *output) override { + redis::Database redis(srv->storage, conn->GetNamespace()); + RedisType type = kRedisNone; + if (auto s = redis.Type(args_[1], &type); !s.ok()) { + return {Status::RedisExecErr, s.ToString()}; + } + + if (type != RedisType::kRedisList && type != RedisType::kRedisSet && type != RedisType::kRedisZSet) { + *output = Error("WRONGTYPE Operation against a key holding the wrong kind of value"); + return Status::OK(); + } + + /* When sorting a set with no sort specified, we must sort the output + * so the result is consistent across scripting and replication. + * + * The other types (list, sorted set) will retain their native order + * even if no sort order is requested, so they remain stable across + * scripting and replication. 
+ * + * TODO: support CLIENT_SCRIPT flag, (!storekey_.empty() || c->flags & CLIENT_SCRIPT)) */ + if (sort_argument_.dontsort && type == RedisType::kRedisSet && (!sort_argument_.storekey.empty())) { + /* Force ALPHA sorting */ + sort_argument_.dontsort = false; + sort_argument_.alpha = true; + sort_argument_.sortby = ""; + } + + std::vector> sorted_elems; + Database::SortResult res = Database::SortResult::DONE; + + if (auto s = redis.Sort(type, args_[1], sort_argument_, &sorted_elems, &res); !s.ok()) { + return {Status::RedisExecErr, s.ToString()}; + } + + switch (res) { + case Database::SortResult::UNKNOWN_TYPE: + *output = redis::Error("Unknown Type"); + break; + case Database::SortResult::DOUBLE_CONVERT_ERROR: + *output = redis::Error("One or more scores can't be converted into double"); + break; + case Database::SortResult::LIMIT_EXCEEDED: + *output = redis::Error("The number of elements to be sorted exceeds SORT_LENGTH_LIMIT = " + + std::to_string(SORT_LENGTH_LIMIT)); + break; + case Database::SortResult::DONE: + if (sort_argument_.storekey.empty()) { + std::vector output_vec; + output_vec.reserve(sorted_elems.size()); + for (const auto &elem : sorted_elems) { + output_vec.emplace_back(elem.has_value() ? redis::BulkString(elem.value()) : conn->NilString()); + } + *output = redis::Array(output_vec); + } else { + *output = Integer(sorted_elems.size()); + } + break; + } + + return Status::OK(); + } + + private: + SortArgument sort_argument_; +}; + REDIS_REGISTER_COMMANDS(MakeCmdAttr("ttl", 2, "read-only", 1, 1, 1), MakeCmdAttr("pttl", 2, "read-only", 1, 1, 1), MakeCmdAttr("type", 2, "read-only", 1, 1, 1), @@ -442,6 +556,8 @@ REDIS_REGISTER_COMMANDS(MakeCmdAttr("ttl", 2, "read-only", 1, 1, 1), MakeCmdAttr("unlink", -2, "write no-dbsize-check", 1, -1, 1), MakeCmdAttr("rename", 3, "write", 1, 2, 1), MakeCmdAttr("renamenx", 3, "write", 1, 2, 1), - MakeCmdAttr("copy", -3, "write", 1, 2, 1), ) + MakeCmdAttr("copy", -3, "write", 1, 2, 1), + MakeCmdAttr>("sort", -2, "write", 1, 1, 1), + MakeCmdAttr>("sort_ro", -2, "read-only", 1, 1, 1)) } // namespace redis diff --git a/src/commands/cmd_replication.cc b/src/commands/cmd_replication.cc index 0a86a9cc619..6beffea8c31 100644 --- a/src/commands/cmd_replication.cc +++ b/src/commands/cmd_replication.cc @@ -242,8 +242,8 @@ class CommandFetchMeta : public Commander { } else { LOG(WARNING) << "[replication] Fail to send full data file info " << ip << ", error: " << strerror(errno); } - auto now = static_cast(util::GetTimeStamp()); - srv->storage->SetCheckpointAccessTime(now); + auto now_secs = static_cast(util::GetTimeStamp()); + srv->storage->SetCheckpointAccessTimeSecs(now_secs); })); if (auto s = util::ThreadDetach(t); !s) { @@ -311,8 +311,8 @@ class CommandFetchFile : public Commander { usleep(shortest - duration); } } - auto now = static_cast(util::GetTimeStamp()); - srv->storage->SetCheckpointAccessTime(now); + auto now_secs = util::GetTimeStamp(); + srv->storage->SetCheckpointAccessTimeSecs(now_secs); srv->DecrFetchFileThread(); })); diff --git a/src/commands/cmd_stream.cc b/src/commands/cmd_stream.cc index 7492171f358..3c1329e4934 100644 --- a/src/commands/cmd_stream.cc +++ b/src/commands/cmd_stream.cc @@ -710,7 +710,7 @@ class CommandXInfo : public Commander { } output->append(redis::MultiLen(result_vector.size())); - auto now = util::GetTimeStampMS(); + auto now_ms = util::GetTimeStampMS(); for (auto const &it : result_vector) { output->append(conn->HeaderOfMap(4)); output->append(redis::BulkString("name")); @@ -718,9 +718,9 @@ class 
CommandXInfo : public Commander { output->append(redis::BulkString("pending")); output->append(redis::Integer(it.second.pending_number)); output->append(redis::BulkString("idle")); - output->append(redis::Integer(now - it.second.last_idle)); + output->append(redis::Integer(now_ms - it.second.last_idle_ms)); output->append(redis::BulkString("inactive")); - output->append(redis::Integer(now - it.second.last_active)); + output->append(redis::Integer(now_ms - it.second.last_active_ms)); } return Status::OK(); diff --git a/src/common/cron.cc b/src/common/cron.cc index f4c223bf507..2c4a03bae8b 100644 --- a/src/common/cron.cc +++ b/src/common/cron.cc @@ -52,7 +52,7 @@ Status Cron::SetScheduleTime(const std::vector &args) { return Status::OK(); } -bool Cron::IsTimeMatch(tm *tm) { +bool Cron::IsTimeMatch(const tm *tm) { if (tm->tm_min == last_tm_.tm_min && tm->tm_hour == last_tm_.tm_hour && tm->tm_mday == last_tm_.tm_mday && tm->tm_mon == last_tm_.tm_mon && tm->tm_wday == last_tm_.tm_wday) { return false; diff --git a/src/common/cron.h b/src/common/cron.h index cba6d275af3..5385a0efe85 100644 --- a/src/common/cron.h +++ b/src/common/cron.h @@ -43,7 +43,7 @@ class Cron { ~Cron() = default; Status SetScheduleTime(const std::vector &args); - bool IsTimeMatch(tm *tm); + bool IsTimeMatch(const tm *tm); std::string ToString() const; bool IsEnabled() const; diff --git a/src/common/encoding.h b/src/common/encoding.h index d497793068a..5d05e45614a 100644 --- a/src/common/encoding.h +++ b/src/common/encoding.h @@ -102,6 +102,21 @@ inline bool GetFixed16(rocksdb::Slice *input, uint16_t *value) { return GetFixed inline bool GetFixed32(rocksdb::Slice *input, uint32_t *value) { return GetFixed(input, value); } inline bool GetFixed64(rocksdb::Slice *input, uint64_t *value) { return GetFixed(input, value); } +inline void PutSizedString(std::string *dst, rocksdb::Slice value) { + PutFixed32(dst, value.size()); + dst->append(value.ToStringView()); +} + +inline bool GetSizedString(rocksdb::Slice *input, rocksdb::Slice *value) { + uint32_t size = 0; + if (!GetFixed32(input, &size)) return false; + + if (input->size() < size) return false; + *value = rocksdb::Slice(input->data(), size); + input->remove_prefix(size); + return true; +} + char *EncodeDouble(char *buf, double value); void PutDouble(std::string *dst, double value); double DecodeDouble(const char *ptr); diff --git a/src/common/time_util.h b/src/common/time_util.h index 1c8dc7b6272..9eb6daa4266 100644 --- a/src/common/time_util.h +++ b/src/common/time_util.h @@ -24,6 +24,7 @@ namespace util { +/// Get the system timestamp in seconds, milliseconds or microseconds. template auto GetTimeStamp() { return std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); diff --git a/src/search/README.md b/src/search/README.md new file mode 100644 index 00000000000..65c2baccb31 --- /dev/null +++ b/src/search/README.md @@ -0,0 +1,27 @@ +## KQIR: Kvrocks Query Intermediate Representation + +Here, *KQIR* refers to both +- the multiple-level *query intermediate representation* for Apache Kvrocks, and +- the *architecture and toolset* for the query optimization and execution. 
+ +### Architecture + +![Architecture of KQIR](../../assets/KQIR.png) + +### Components + +- User Interface: both SQL and Redis query syntax are supported as frontend languages for KQIR + - SQL Parser: A parser that accepts an extended subset of MySQL syntax + - Redis Query Parser: A parser that accepts [Redis query syntax](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/query_syntax/) (only DIALECT 2 or greater is planned to be supported) +- KQIR: a multiple-level query intermediate representation, currently comprising two levels (syntactical IR and planning IR) + - Syntactical IR: A high-level IR that syntactically represents the query language + - Planning IR: A low-level IR that represents plan operators for query execution +- KQIR passes: analysis and transformation procedures on KQIR + - Semantic Checker: to check whether there are any semantic errors in the IR + - Expression Passes: passes for query expressions, especially logical expressions + - Numeric Passes: passes for numeric & arithmetic properties + - Plan Passes: passes on the plan operators + - Pass Manager: to manage the execution order of the passes + - Cost Model: to analyze the cost of the current plan; used by some plan passes +- Plan Executor: a component for query execution via the iterator model (a minimal sketch of this model follows below) +- Indexer: to perform indexing for various field types as data changes diff --git a/src/search/executors/filter_executor.h b/src/search/executors/filter_executor.h new file mode 100644 index 00000000000..6820ae586eb --- /dev/null +++ b/src/search/executors/filter_executor.h @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
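For illustration, a minimal sketch of the iterator (volcano) execution model referenced in the README above, kept independent of the real KQIR types; Row, SourceOp, and LimitOp are hypothetical stand-ins, not names from this PR:

#include <memory>
#include <optional>
#include <vector>

using Row = int;  // stand-in for the executor's row type

struct Iterator {
  virtual ~Iterator() = default;
  // Returns the next row, or std::nullopt once this operator is exhausted.
  virtual std::optional<Row> Next() = 0;
};

struct SourceOp : Iterator {
  std::vector<Row> rows;
  size_t pos = 0;
  explicit SourceOp(std::vector<Row> r) : rows(std::move(r)) {}
  std::optional<Row> Next() override {
    if (pos == rows.size()) return std::nullopt;
    return rows[pos++];
  }
};

// Each operator pulls rows from its child on demand, so results stream
// row by row instead of being materialized all at once.
struct LimitOp : Iterator {
  std::unique_ptr<Iterator> child;
  size_t count;
  LimitOp(std::unique_ptr<Iterator> c, size_t n) : child(std::move(c)), count(n) {}
  std::optional<Row> Next() override {
    if (count == 0) return std::nullopt;
    --count;
    return child->Next();
  }
};

Driving the root operator by calling Next() until it reports exhaustion is how the executors added in this PR are consumed as well, with StatusOr and an end sentinel in place of std::optional.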
+ * + */ + +#pragma once + +#include + +#include "parse_util.h" +#include "search/ir.h" +#include "search/plan_executor.h" +#include "search/search_encoding.h" +#include "string_util.h" + +namespace kqir { + +struct QueryExprEvaluator { + ExecutorContext *ctx; + ExecutorNode::RowType &row; + + StatusOr Transform(QueryExpr *e) const { + if (auto v = dynamic_cast(e)) { + return Visit(v); + } + if (auto v = dynamic_cast(e)) { + return Visit(v); + } + if (auto v = dynamic_cast(e)) { + return Visit(v); + } + if (auto v = dynamic_cast(e)) { + return Visit(v); + } + if (auto v = dynamic_cast(e)) { + return Visit(v); + } + + CHECK(false) << "unreachable"; + } + + StatusOr Visit(AndExpr *v) const { + for (const auto &n : v->inners) { + if (!GET_OR_RET(Transform(n.get()))) return false; + } + + return true; + } + + StatusOr Visit(OrExpr *v) const { + for (const auto &n : v->inners) { + if (GET_OR_RET(Transform(n.get()))) return true; + } + + return false; + } + + StatusOr Visit(NotExpr *v) const { return !GET_OR_RET(Transform(v->inner.get())); } + + StatusOr Visit(TagContainExpr *v) const { + auto val = GET_OR_RET(ctx->Retrieve(row, v->field->info)); + auto meta = v->field->info->MetadataAs(); + + auto split = util::Split(val, std::string(1, meta->separator)); + return std::find(split.begin(), split.end(), v->tag->val) != split.end(); + } + + StatusOr Visit(NumericCompareExpr *v) const { + auto l_str = GET_OR_RET(ctx->Retrieve(row, v->field->info)); + + // TODO: reconsider how to handle failure case here + auto l = GET_OR_RET(ParseFloat(l_str)); + auto r = v->num->val; + + switch (v->op) { + case NumericCompareExpr::EQ: + return l == r; + case NumericCompareExpr::NE: + return l != r; + case NumericCompareExpr::LT: + return l < r; + case NumericCompareExpr::LET: + return l <= r; + case NumericCompareExpr::GT: + return l > r; + case NumericCompareExpr::GET: + return l >= r; + default: + CHECK(false) << "unreachable"; + __builtin_unreachable(); + } + } +}; + +struct FilterExecutor : ExecutorNode { + Filter *filter; + + FilterExecutor(ExecutorContext *ctx, Filter *filter) : ExecutorNode(ctx), filter(filter) {} + + StatusOr Next() override { + while (true) { + auto v = GET_OR_RET(ctx->Get(filter->source)->Next()); + + if (std::holds_alternative(v)) return end; + + QueryExprEvaluator eval{ctx, std::get(v)}; + + bool res = GET_OR_RET(eval.Transform(filter->filter_expr.get())); + + if (res) { + return v; + } + } + } +}; + +} // namespace kqir diff --git a/src/search/executors/full_index_scan_executor.h b/src/search/executors/full_index_scan_executor.h new file mode 100644 index 00000000000..0afeae0455b --- /dev/null +++ b/src/search/executors/full_index_scan_executor.h @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
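The QueryExprEvaluator above walks the filter expression tree recursively, short-circuiting AND and OR. A simplified, self-contained model of that evaluation; the plain structs here are hypothetical stand-ins for the real IR nodes:

#include <memory>
#include <vector>

struct Expr {
  virtual ~Expr() = default;
};
struct Leaf : Expr {
  bool value;
  explicit Leaf(bool v) : value(v) {}
};
struct And : Expr {
  std::vector<std::unique_ptr<Expr>> inners;
};
struct Not : Expr {
  std::unique_ptr<Expr> inner;
};

// Dispatch on the dynamic type, mirroring the dynamic_cast chain in Transform().
bool Eval(const Expr *e) {
  if (auto *v = dynamic_cast<const Leaf *>(e)) return v->value;
  if (auto *v = dynamic_cast<const And *>(e)) {
    for (const auto &n : v->inners) {
      if (!Eval(n.get())) return false;  // short-circuit at the first false operand
    }
    return true;
  }
  if (auto *v = dynamic_cast<const Not *>(e)) return !Eval(v->inner.get());
  return false;  // unreachable for well-formed trees; the real code CHECKs here
}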
+ * + */ + +#pragma once + +#include "db_util.h" +#include "search/plan_executor.h" +#include "storage/redis_db.h" +#include "storage/redis_metadata.h" +#include "storage/storage.h" + +namespace kqir { + +struct FullIndexScanExecutor : ExecutorNode { + FullIndexScan *scan; + redis::LatestSnapShot ss; + util::UniqueIterator iter{nullptr}; + const std::string *prefix_iter; + + FullIndexScanExecutor(ExecutorContext *ctx, FullIndexScan *scan) + : ExecutorNode(ctx), scan(scan), ss(ctx->storage), prefix_iter(scan->index->info->prefixes.begin()) {} + + std::string NSKey(const std::string &user_key) { + return ComposeNamespaceKey(scan->index->info->ns, user_key, ctx->storage->IsSlotIdEncoded()); + } + + StatusOr Next() override { + if (prefix_iter == scan->index->info->prefixes.end()) { + return end; + } + + auto ns_key = NSKey(*prefix_iter); + if (!iter) { + rocksdb::ReadOptions read_options = ctx->storage->DefaultScanOptions(); + read_options.snapshot = ss.GetSnapShot(); + iter = util::UniqueIterator(ctx->storage, read_options, + ctx->storage->GetCFHandle(engine::kMetadataColumnFamilyName)); + iter->Seek(ns_key); + } + + while (!iter->Valid() || !iter->key().starts_with(ns_key)) { + prefix_iter++; + if (prefix_iter == scan->index->info->prefixes.end()) { + return end; + } + + ns_key = NSKey(*prefix_iter); + iter->Seek(ns_key); + } + + auto [_, key] = ExtractNamespaceKey(iter->key(), ctx->storage->IsSlotIdEncoded()); + auto key_str = key.ToString(); + + iter->Next(); + return RowType{key_str, {}, scan->index->info}; + } +}; + +} // namespace kqir diff --git a/src/search/executors/limit_executor.h b/src/search/executors/limit_executor.h new file mode 100644 index 00000000000..8b1d4916c9f --- /dev/null +++ b/src/search/executors/limit_executor.h @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
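FullIndexScanExecutor above walks every key under each of the index's prefixes, reseeking whenever one prefix is exhausted. A toy model of that multi-prefix scan, using std::map::lower_bound as a stand-in for the RocksDB iterator (the keys and prefixes are made up):

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // Stand-in for the metadata column family, ordered by key like RocksDB.
  std::map<std::string, int> cf = {
      {"doc:1", 0}, {"doc:2", 0}, {"post:9", 0}, {"user:7", 0}};
  std::vector<std::string> prefixes = {"doc:", "user:"};

  // For each prefix, seek to its first key and walk until keys stop matching,
  // mirroring how FullIndexScanExecutor advances prefix_iter when a prefix is done.
  for (const auto &prefix : prefixes) {
    for (auto it = cf.lower_bound(prefix);
         it != cf.end() && it->first.compare(0, prefix.size(), prefix) == 0; ++it) {
      std::cout << it->first << "\n";  // prints doc:1, doc:2, user:7
    }
  }
}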
+ * + */ + +#pragma once + +#include "search/plan_executor.h" + +namespace kqir { + +struct LimitExecutor : ExecutorNode { + Limit *limit; + size_t step = 0; + + LimitExecutor(ExecutorContext *ctx, Limit *limit) : ExecutorNode(ctx), limit(limit) {} + + StatusOr Next() override { + auto offset = limit->limit->offset; + auto count = limit->limit->count; + + if (step == count) { + return end; + } + + if (step == 0) { + while (offset--) { + auto res = GET_OR_RET(ctx->Get(limit->op)->Next()); + + if (std::holds_alternative(res)) { + return end; + } + } + } + + auto res = GET_OR_RET(ctx->Get(limit->op)->Next()); + step++; + return res; + } +}; + +} // namespace kqir diff --git a/src/search/executors/merge_executor.h b/src/search/executors/merge_executor.h new file mode 100644 index 00000000000..66b7bb85650 --- /dev/null +++ b/src/search/executors/merge_executor.h @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +#pragma once + +#include + +#include "search/plan_executor.h" + +namespace kqir { + +struct MergeExecutor : ExecutorNode { + Merge *merge; + decltype(merge->ops)::iterator iter; + + MergeExecutor(ExecutorContext *ctx, Merge *merge) : ExecutorNode(ctx), merge(merge), iter(merge->ops.begin()) {} + + StatusOr Next() override { + if (iter == merge->ops.end()) { + return end; + } + + auto v = GET_OR_RET(ctx->Get(*iter)->Next()); + while (std::holds_alternative(v)) { + iter++; + if (iter == merge->ops.end()) { + return end; + } + + v = GET_OR_RET(ctx->Get(*iter)->Next()); + } + + return v; + } +}; + +} // namespace kqir diff --git a/src/search/executors/mock_executor.h b/src/search/executors/mock_executor.h new file mode 100644 index 00000000000..f9cdf57d131 --- /dev/null +++ b/src/search/executors/mock_executor.h @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
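LimitExecutor above implements OFFSET/LIMIT-style pagination: on its first call it discards offset rows from its child, then it serves at most count rows; MergeExecutor simply concatenates its children's streams, draining each in turn. A standalone sketch of the limit logic with made-up numbers:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> source = {10, 11, 12, 13, 14, 15};
  size_t offset = 2, count = 3;

  size_t pos = 0;
  // Skip `offset` rows up front, as LimitExecutor does on its first Next() call.
  for (size_t i = 0; i < offset && pos < source.size(); ++i) pos++;

  // Then emit at most `count` rows; prints "12 13 14".
  for (size_t step = 0; step < count && pos < source.size(); ++step) {
    std::printf("%d ", source[pos++]);
  }
}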
+ * + */ + +#pragma once + +#include + +#include "search/ir_plan.h" +#include "search/plan_executor.h" + +namespace kqir { + +// this operator is only for executor testing/debugging purposes +struct Mock : PlanOperator { + std::vector rows; + + explicit Mock(std::vector rows) : rows(std::move(rows)) {} + + std::string Dump() const override { return "mock"; } + std::string_view Name() const override { return "Mock"; } + + std::unique_ptr Clone() const override { return std::make_unique(rows); } +}; + +struct MockExecutor : ExecutorNode { + Mock *mock; + decltype(mock->rows)::iterator iter; + + MockExecutor(ExecutorContext *ctx, Mock *mock) : ExecutorNode(ctx), mock(mock), iter(mock->rows.begin()) {} + + StatusOr Next() override { + if (iter == mock->rows.end()) { + return end; + } + + return *(iter++); + } +}; + +} // namespace kqir diff --git a/src/search/executors/noop_executor.h b/src/search/executors/noop_executor.h new file mode 100644 index 00000000000..1e3685cac50 --- /dev/null +++ b/src/search/executors/noop_executor.h @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +#pragma once + +#include "search/plan_executor.h" + +namespace kqir { + +struct NoopExecutor : ExecutorNode { + Noop *noop; + + NoopExecutor(ExecutorContext *ctx, Noop *noop) : ExecutorNode(ctx), noop(noop) {} + + StatusOr Next() override { return end; } +}; + +} // namespace kqir diff --git a/src/search/executors/numeric_field_scan_executor.h b/src/search/executors/numeric_field_scan_executor.h new file mode 100644 index 00000000000..1609f433e44 --- /dev/null +++ b/src/search/executors/numeric_field_scan_executor.h @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
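The Mock operator above exists so executors can be unit-tested without storage: a test wraps fixture rows in a Mock, then pulls from the executor until the end sentinel. A minimal standalone analogue of that pattern, with hypothetical names:

#include <cassert>
#include <vector>

// A tiny stand-in for MockExecutor: replays fixture rows, then signals the end.
struct MockSource {
  std::vector<int> rows;
  size_t pos = 0;
  // Returns false once exhausted, mirroring the `end` sentinel in ExecutorNode.
  bool Next(int *out) {
    if (pos == rows.size()) return false;
    *out = rows[pos++];
    return true;
  }
};

int main() {
  MockSource mock{{1, 2, 3}};
  int row = 0;
  std::vector<int> got;
  while (mock.Next(&row)) got.push_back(row);
  assert((got == std::vector<int>{1, 2, 3}));
}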
+ * + */ + +#pragma once + +#include + +#include "db_util.h" +#include "encoding.h" +#include "search/plan_executor.h" +#include "search/search_encoding.h" +#include "storage/redis_db.h" +#include "storage/redis_metadata.h" +#include "storage/storage.h" + +namespace kqir { + +struct NumericFieldScanExecutor : ExecutorNode { + NumericFieldScan *scan; + redis::LatestSnapShot ss; + util::UniqueIterator iter{nullptr}; + + IndexInfo *index; + std::string ns_key; + + NumericFieldScanExecutor(ExecutorContext *ctx, NumericFieldScan *scan) + : ExecutorNode(ctx), scan(scan), ss(ctx->storage), index(scan->field->info->index) { + ns_key = ComposeNamespaceKey(index->ns, index->name, ctx->storage->IsSlotIdEncoded()); + } + + std::string IndexKey(double num) { + return InternalKey(ns_key, redis::ConstructNumericFieldSubkey(scan->field->name, num, {}), index->metadata.version, + ctx->storage->IsSlotIdEncoded()) + .Encode(); + } + + bool InRangeDecode(Slice key, Slice field, double num, double *curr, Slice *user_key) { + auto ikey = InternalKey(key, ctx->storage->IsSlotIdEncoded()); + if (ikey.GetVersion() != index->metadata.version) return false; + auto subkey = ikey.GetSubKey(); + + uint8_t flag = 0; + if (!GetFixed8(&subkey, &flag)) return false; + if (flag != (uint8_t)redis::SearchSubkeyType::NUMERIC_FIELD) return false; + + Slice value; + if (!GetSizedString(&subkey, &value)) return false; + if (value != field) return false; + + if (!GetDouble(&subkey, curr)) return false; + + if (!GetSizedString(&subkey, user_key)) return false; + + return true; + } + + StatusOr Next() override { + if (!iter) { + rocksdb::ReadOptions read_options = ctx->storage->DefaultScanOptions(); + read_options.snapshot = ss.GetSnapShot(); + + iter = + util::UniqueIterator(ctx->storage, read_options, ctx->storage->GetCFHandle(engine::kSearchColumnFamilyName)); + if (scan->order == SortByClause::ASC) { + iter->Seek(IndexKey(scan->range.l)); + } else { + iter->SeekForPrev(IndexKey(IntervalSet::PrevNum(scan->range.r))); + } + } + + if (!iter->Valid()) { + return end; + } + + double curr = 0; + Slice user_key; + if (!InRangeDecode(iter->key(), scan->field->name, scan->range.r, &curr, &user_key)) { + return end; + } + + if (scan->order == SortByClause::ASC ? curr >= scan->range.r : curr < scan->range.l) { + return end; + } + + auto key_str = user_key.ToString(); + + if (scan->order == SortByClause::ASC) { + iter->Next(); + } else { + iter->Prev(); + } + return RowType{key_str, {{scan->field->info, std::to_string(curr)}}, scan->field->info->index}; + } +}; + +} // namespace kqir diff --git a/src/search/executors/projection_executor.h b/src/search/executors/projection_executor.h new file mode 100644 index 00000000000..fe167334500 --- /dev/null +++ b/src/search/executors/projection_executor.h @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +#pragma once + +#include + +#include "search/plan_executor.h" + +namespace kqir { + +struct ProjectionExecutor : ExecutorNode { + Projection *proj; + + ProjectionExecutor(ExecutorContext *ctx, Projection *proj) : ExecutorNode(ctx), proj(proj) {} + + StatusOr Next() override { + auto v = GET_OR_RET(ctx->Get(proj->source)->Next()); + + if (std::holds_alternative(v)) return end; + + auto &row = std::get(v); + if (proj->select->fields.empty()) { + for (const auto &field : row.index->fields) { + GET_OR_RET(ctx->Retrieve(row, &field.second)); + } + } else { + std::map res; + + for (const auto &field : proj->select->fields) { + auto r = GET_OR_RET(ctx->Retrieve(row, field->info)); + res.emplace(field->info, std::move(r)); + } + + return RowType{row.key, res, row.index}; + } + + return v; + } +}; + +} // namespace kqir diff --git a/src/search/executors/sort_executor.h b/src/search/executors/sort_executor.h new file mode 100644 index 00000000000..ed4b205db57 --- /dev/null +++ b/src/search/executors/sort_executor.h @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +#pragma once + +#include "search/plan_executor.h" + +namespace kqir { + +struct SortExecutor : ExecutorNode { + Sort *sort; + + SortExecutor(ExecutorContext *ctx, Sort *sort) : ExecutorNode(ctx), sort(sort) {} + + StatusOr Next() override { + // most of the sort operator will be eliminated via the optimizer passes, + // so currently we don't support this operator since external sort is a little complicated + return {Status::NotSupported, "sort operator is currently not supported"}; + } +}; + +} // namespace kqir diff --git a/src/search/executors/tag_field_scan_executor.h b/src/search/executors/tag_field_scan_executor.h new file mode 100644 index 00000000000..a3781c11a4f --- /dev/null +++ b/src/search/executors/tag_field_scan_executor.h @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +#pragma once + +#include + +#include "db_util.h" +#include "encoding.h" +#include "search/plan_executor.h" +#include "search/search_encoding.h" +#include "storage/redis_db.h" +#include "storage/redis_metadata.h" +#include "storage/storage.h" + +namespace kqir { + +struct TagFieldScanExecutor : ExecutorNode { + TagFieldScan *scan; + redis::LatestSnapShot ss; + util::UniqueIterator iter{nullptr}; + + IndexInfo *index; + std::string ns_key; + std::string index_key; + + TagFieldScanExecutor(ExecutorContext *ctx, TagFieldScan *scan) + : ExecutorNode(ctx), scan(scan), ss(ctx->storage), index(scan->field->info->index) { + ns_key = ComposeNamespaceKey(index->ns, index->name, ctx->storage->IsSlotIdEncoded()); + index_key = InternalKey(ns_key, redis::ConstructTagFieldSubkey(scan->field->name, scan->tag, {}), + index->metadata.version, ctx->storage->IsSlotIdEncoded()) + .Encode(); + } + + bool InRangeDecode(Slice key, Slice field, Slice *user_key) { + auto ikey = InternalKey(key, ctx->storage->IsSlotIdEncoded()); + if (ikey.GetVersion() != index->metadata.version) return false; + auto subkey = ikey.GetSubKey(); + + uint8_t flag = 0; + if (!GetFixed8(&subkey, &flag)) return false; + if (flag != (uint8_t)redis::SearchSubkeyType::TAG_FIELD) return false; + + Slice value; + if (!GetSizedString(&subkey, &value)) return false; + if (value != field) return false; + + Slice tag; + if (!GetSizedString(&subkey, &tag)) return false; + if (tag != scan->tag) return false; + + if (!GetSizedString(&subkey, user_key)) return false; + + return true; + } + + StatusOr Next() override { + if (!iter) { + rocksdb::ReadOptions read_options = ctx->storage->DefaultScanOptions(); + read_options.snapshot = ss.GetSnapShot(); + + iter = + util::UniqueIterator(ctx->storage, read_options, ctx->storage->GetCFHandle(engine::kSearchColumnFamilyName)); + iter->Seek(index_key); + } + + if (!iter->Valid()) { + return end; + } + + Slice user_key; + if (!InRangeDecode(iter->key(), scan->field->name, &user_key)) { + return end; + } + + auto key_str = user_key.ToString(); + + iter->Next(); + return RowType{key_str, {}, scan->field->info->index}; + } +}; + +} // namespace kqir diff --git a/src/search/executors/topn_sort_executor.h b/src/search/executors/topn_sort_executor.h new file mode 100644 index 00000000000..741a968928b --- /dev/null +++ b/src/search/executors/topn_sort_executor.h @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
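Both field-scan executors above unpack index subkeys in InRangeDecode via the length-prefixed helpers added to src/common/encoding.h earlier in this diff. A self-contained round-trip sketch of that layout; note this simplification writes the 32-bit length in native byte order, whereas the real PutFixed32 has its own defined encoding:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>

// Append a 32-bit length prefix followed by the bytes, like PutSizedString.
void PutSized(std::string *dst, const std::string &value) {
  uint32_t size = static_cast<uint32_t>(value.size());
  dst->append(reinterpret_cast<const char *>(&size), sizeof(size));
  dst->append(value);
}

// Read a length prefix, then consume that many bytes, like GetSizedString.
bool GetSized(std::string *input, std::string *value) {
  uint32_t size = 0;
  if (input->size() < sizeof(size)) return false;
  std::memcpy(&size, input->data(), sizeof(size));
  input->erase(0, sizeof(size));
  if (input->size() < size) return false;
  *value = input->substr(0, size);
  input->erase(0, size);
  return true;
}

int main() {
  std::string buf;
  PutSized(&buf, "field");
  PutSized(&buf, "tag");
  std::string a, b;
  assert(GetSized(&buf, &a) && a == "field");
  assert(GetSized(&buf, &b) && b == "tag");
}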
+ * + */ + +#pragma once + +#include +#include + +#include "parse_util.h" +#include "search/plan_executor.h" + +namespace kqir { + +struct TopNSortExecutor : ExecutorNode { + TopNSort *sort; + + struct ComparedRow { + RowType row; + double val; + + ComparedRow(RowType row, double val) : row(std::move(row)), val(val) {} + + friend bool operator<(const ComparedRow &l, const ComparedRow &r) { return l.val < r.val; } + }; + + std::vector rows; + decltype(rows)::iterator rows_iter; + bool initialized = false; + + TopNSortExecutor(ExecutorContext *ctx, TopNSort *sort) : ExecutorNode(ctx), sort(sort) {} + + StatusOr Next() override { + if (!initialized) { + auto total = sort->limit->offset + sort->limit->count; + if (total == 0) return end; + + auto v = GET_OR_RET(ctx->Get(sort->op)->Next()); + + while (!std::holds_alternative(v)) { + auto &row = std::get(v); + + auto get_order = [this](RowType &row) -> StatusOr { + auto order_str = GET_OR_RET(ctx->Retrieve(row, sort->order->field->info)); + auto order = GET_OR_RET(ParseFloat(order_str)); + return order; + }; + + if (rows.size() == total) { + std::make_heap(rows.begin(), rows.end()); + } + + if (rows.size() < total) { + auto order = GET_OR_RET(get_order(row)); + rows.emplace_back(row, order); + } else { + auto order = GET_OR_RET(get_order(row)); + + if (order < rows[0].val) { + std::pop_heap(rows.begin(), rows.end()); + rows.back() = ComparedRow{row, order}; + std::push_heap(rows.begin(), rows.end()); + } + } + + v = GET_OR_RET(ctx->Get(sort->op)->Next()); + } + + if (rows.size() <= sort->limit->offset) { + return end; + } + + std::sort(rows.begin(), rows.end()); + rows_iter = rows.begin() + static_cast(sort->limit->offset); + initialized = true; + } + + if (rows_iter == rows.end()) { + return end; + } + + auto res = rows_iter->row; + rows_iter++; + return res; + } +}; + +} // namespace kqir diff --git a/src/search/index_info.h b/src/search/index_info.h index 5b0cb7077d7..1751549d690 100644 --- a/src/search/index_info.h +++ b/src/search/index_info.h @@ -54,6 +54,7 @@ struct IndexInfo { SearchMetadata metadata; FieldMap fields; redis::SearchPrefixesMetadata prefixes; + std::string ns; IndexInfo(std::string name, SearchMetadata metadata) : name(std::move(name)), metadata(std::move(metadata)) {} @@ -64,6 +65,6 @@ struct IndexInfo { } }; -using IndexMap = std::map; +using IndexMap = std::map>; } // namespace kqir diff --git a/src/search/indexer.cc b/src/search/indexer.cc index 3e7bbf1fb46..4a4a949cc54 100644 --- a/src/search/indexer.cc +++ b/src/search/indexer.cc @@ -66,7 +66,7 @@ rocksdb::Status FieldValueRetriever::Retrieve(std::string_view field, std::strin return hash.storage_->Get(read_options, sub_key, output); } else if (std::holds_alternative(db)) { auto &value = std::get(db); - auto s = value.Get(field); + auto s = value.Get(field.front() == '$' ? 
field : fmt::format("$.{}", field)); if (!s.IsOK()) return rocksdb::Status::Corruption(s.Msg()); if (s->value.size() != 1) return rocksdb::Status::NotFound("json value specified by the field (json path) should exist and be unique"); @@ -231,7 +231,7 @@ Status IndexUpdater::Update(const FieldValues &original, std::string_view key, c void GlobalIndexer::Add(IndexUpdater updater) { updater.indexer = this; - for (const auto &prefix : updater.info->prefixes.prefixes) { + for (const auto &prefix : updater.info->prefixes) { prefix_map.insert(prefix, updater); } } diff --git a/src/search/interval.h b/src/search/interval.h index 5ce90a45282..efe462b4074 100644 --- a/src/search/interval.h +++ b/src/search/interval.h @@ -128,11 +128,32 @@ struct IntervalSet { } friend IntervalSet operator&(const IntervalSet &l, const IntervalSet &r) { - if (l.IsEmpty() || r.IsEmpty()) { - return IntervalSet(); + IntervalSet result; + + if (l.intervals.empty() || r.intervals.empty()) { + return result; + } + + auto it_l = l.intervals.begin(); + auto it_r = r.intervals.begin(); + + while (it_l != l.intervals.end() && it_r != r.intervals.end()) { + // Find overlap between current intervals + double start = std::max(it_l->first, it_r->first); + double end = std::min(it_l->second, it_r->second); + + if (start <= end) { + result.intervals.emplace_back(start, end); + } + + if (it_l->second < it_r->second) { + ++it_l; + } else { + ++it_r; + } } - return ~(~l | ~r); + return result; } friend IntervalSet operator|(const IntervalSet &l, const IntervalSet &r) { diff --git a/src/search/ir.h b/src/search/ir.h index b841c0fa64d..be235e0dbc6 100644 --- a/src/search/ir.h +++ b/src/search/ir.h @@ -76,6 +76,14 @@ struct Node { if (casted) original.release(); return std::unique_ptr(casted); } + + template + static std::vector> List(std::unique_ptr... args) { + std::vector> result; + result.reserve(sizeof...(Args)); + (result.push_back(std::move(args)), ...); + return result; + } }; struct Ref : Node {}; @@ -379,6 +387,7 @@ struct IndexRef : Ref { const IndexInfo *info = nullptr; explicit IndexRef(std::string name) : name(std::move(name)) {} + explicit IndexRef(std::string name, const IndexInfo *info) : name(std::move(name)), info(info) {} std::string_view Name() const override { return "IndexRef"; } std::string Dump() const override { return name; } diff --git a/src/search/ir_sema_checker.h b/src/search/ir_sema_checker.h index d8982e5c6eb..170e646fb09 100644 --- a/src/search/ir_sema_checker.h +++ b/src/search/ir_sema_checker.h @@ -42,7 +42,7 @@ struct SemaChecker { if (auto v = dynamic_cast(node)) { auto index_name = v->index->name; if (auto iter = index_map.find(index_name); iter != index_map.end()) { - current_index = &iter->second; + current_index = iter->second.get(); v->index->info = current_index; GET_OR_RET(Check(v->select.get())); diff --git a/src/search/plan_executor.cc b/src/search/plan_executor.cc new file mode 100644 index 00000000000..7de9f5188bd --- /dev/null +++ b/src/search/plan_executor.cc @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +#include "plan_executor.h" + +#include + +#include "search/executors/filter_executor.h" +#include "search/executors/full_index_scan_executor.h" +#include "search/executors/limit_executor.h" +#include "search/executors/merge_executor.h" +#include "search/executors/mock_executor.h" +#include "search/executors/noop_executor.h" +#include "search/executors/numeric_field_scan_executor.h" +#include "search/executors/projection_executor.h" +#include "search/executors/sort_executor.h" +#include "search/executors/tag_field_scan_executor.h" +#include "search/executors/topn_sort_executor.h" +#include "search/indexer.h" +#include "search/ir_plan.h" + +namespace kqir { + +namespace details { + +struct ExecutorContextVisitor { + ExecutorContext *ctx; + + void Transform(PlanOperator *op) { + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + if (auto v = dynamic_cast(op)) { + return Visit(v); + } + + CHECK(false) << "unreachable"; + } + + void Visit(Limit *op) { + ctx->nodes[op] = std::make_unique(ctx, op); + Transform(op->op.get()); + } + + void Visit(Sort *op) { + ctx->nodes[op] = std::make_unique(ctx, op); + Transform(op->op.get()); + } + + void Visit(Noop *op) { ctx->nodes[op] = std::make_unique(ctx, op); } + + void Visit(Merge *op) { + ctx->nodes[op] = std::make_unique(ctx, op); + for (const auto &child : op->ops) Transform(child.get()); + } + + void Visit(Filter *op) { + ctx->nodes[op] = std::make_unique(ctx, op); + Transform(op->source.get()); + } + + void Visit(Projection *op) { + ctx->nodes[op] = std::make_unique(ctx, op); + Transform(op->source.get()); + } + + void Visit(TopNSort *op) { + ctx->nodes[op] = std::make_unique(ctx, op); + Transform(op->op.get()); + } + + void Visit(FullIndexScan *op) { ctx->nodes[op] = std::make_unique(ctx, op); } + + void Visit(NumericFieldScan *op) { ctx->nodes[op] = std::make_unique(ctx, op); } + + void Visit(TagFieldScan *op) { ctx->nodes[op] = std::make_unique(ctx, op); } + + void Visit(Mock *op) { ctx->nodes[op] = std::make_unique(ctx, op); } +}; + +} // namespace details + +ExecutorContext::ExecutorContext(PlanOperator *op) : root(op) { + details::ExecutorContextVisitor visitor{this}; + visitor.Transform(root); +} + +ExecutorContext::ExecutorContext(PlanOperator *op, engine::Storage *storage) : root(op), storage(storage) { + details::ExecutorContextVisitor visitor{this}; + visitor.Transform(root); +} + +auto ExecutorContext::Retrieve(RowType &row, const FieldInfo *field) -> StatusOr { // NOLINT + if (auto iter = row.fields.find(field); iter != row.fields.end()) { + return iter->second; + } + + 
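+  // The field value is not cached on this row yet: build a FieldValueRetriever for the
+  // index's on-disk data type, fetch the raw value by field name, and memoize it in
+  // row.fields for subsequent lookups.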
auto retriever = GET_OR_RET( + redis::FieldValueRetriever::Create(field->index->metadata.on_data_type, row.key, storage, field->index->ns)); + + std::string result; + auto s = retriever.Retrieve(field->name, &result); + if (!s.ok()) return {Status::NotOK, s.ToString()}; + + row.fields.emplace(field, result); + return result; +} + +} // namespace kqir diff --git a/src/search/plan_executor.h b/src/search/plan_executor.h new file mode 100644 index 00000000000..82d8e73e6c0 --- /dev/null +++ b/src/search/plan_executor.h @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +#pragma once + +#include + +#include "ir_plan.h" +#include "search/index_info.h" +#include "storage/storage.h" +#include "string_util.h" + +namespace kqir { + +struct ExecutorContext; + +struct ExecutorNode { + using KeyType = std::string; + using ValueType = std::string; + struct RowType { + KeyType key; + std::map fields; + const IndexInfo *index; + + bool operator==(const RowType &another) const { + return key == another.key && fields == another.fields && index == another.index; + } + + bool operator!=(const RowType &another) const { return !(*this == another); } + + // for debug purpose + friend std::ostream &operator<<(std::ostream &os, const RowType &row) { + if (row.index) { + os << row.key << "@" << row.index->name; + } else { + os << row.key; + } + return os << " {" << util::StringJoin(row.fields, [](const auto &v) { return v.first->name + ": " + v.second; }) + << "}"; + } + }; + + static constexpr inline const struct End { + } end{}; + friend constexpr bool operator==(End, End) noexcept { return true; } + friend constexpr bool operator!=(End, End) noexcept { return false; } + + using Result = std::variant; + + ExecutorContext *ctx; + explicit ExecutorNode(ExecutorContext *ctx) : ctx(ctx) {} + + virtual StatusOr Next() = 0; + virtual ~ExecutorNode() = default; +}; + +struct ExecutorContext { + std::map> nodes; + PlanOperator *root; + engine::Storage *storage; + + using Result = ExecutorNode::Result; + using RowType = ExecutorNode::RowType; + using KeyType = ExecutorNode::KeyType; + using ValueType = ExecutorNode::ValueType; + + explicit ExecutorContext(PlanOperator *op); + explicit ExecutorContext(PlanOperator *op, engine::Storage *storage); + + ExecutorNode *Get(PlanOperator *op) { + if (auto iter = nodes.find(op); iter != nodes.end()) { + return iter->second.get(); + } + + return nullptr; + } + + ExecutorNode *Get(const std::unique_ptr &op) { return Get(op.get()); } + + StatusOr Next() { return Get(root)->Next(); } + StatusOr Retrieve(RowType &row, const FieldInfo *field); +}; + +} // namespace kqir diff --git a/src/search/search_encoding.h b/src/search/search_encoding.h index 14bf2923911..32f244ca237 100644 --- 
a/src/search/search_encoding.h +++ b/src/search/search_encoding.h @@ -45,6 +45,16 @@ inline std::string ConstructSearchPrefixesSubkey() { return {(char)SearchSubkeyT struct SearchPrefixesMetadata { std::vector prefixes; + static inline const std::string all[] = {""}; + + auto begin() const { // NOLINT + return prefixes.empty() ? std::begin(all) : prefixes.data(); + } + + auto end() const { // NOLINT + return prefixes.empty() ? std::end(all) : prefixes.data() + prefixes.size(); + } + void Encode(std::string *dst) const { for (const auto &prefix : prefixes) { PutFixed32(dst, prefix.size()); @@ -131,22 +141,17 @@ struct SearchNumericFieldMetadata : SearchSortableFieldMetadata {}; inline std::string ConstructTagFieldSubkey(std::string_view field_name, std::string_view tag, std::string_view key) { std::string res = {(char)SearchSubkeyType::TAG_FIELD}; - PutFixed32(&res, field_name.size()); - res.append(field_name); - PutFixed32(&res, tag.size()); - res.append(tag); - PutFixed32(&res, key.size()); - res.append(key); + PutSizedString(&res, field_name); + PutSizedString(&res, tag); + PutSizedString(&res, key); return res; } inline std::string ConstructNumericFieldSubkey(std::string_view field_name, double number, std::string_view key) { std::string res = {(char)SearchSubkeyType::NUMERIC_FIELD}; - PutFixed32(&res, field_name.size()); - res.append(field_name); + PutSizedString(&res, field_name); PutDouble(&res, number); - PutFixed32(&res, key.size()); - res.append(key); + PutSizedString(&res, key); return res; } diff --git a/src/server/server.cc b/src/server/server.cc index e482aefb561..7c50a00803c 100644 --- a/src/server/server.cc +++ b/src/server/server.cc @@ -52,7 +52,7 @@ #include "worker.h" Server::Server(engine::Storage *storage, Config *config) - : storage(storage), start_time_(util::GetTimeStamp()), config_(config), namespace_(storage) { + : storage(storage), start_time_secs_(util::GetTimeStamp()), config_(config), namespace_(storage) { // init commands stats here to prevent concurrent insert, and cause core auto commands = redis::CommandTable::GetOriginal(); for (const auto &iter : *commands) { @@ -179,7 +179,7 @@ Status Server::Start() { compaction_checker_thread_ = GET_OR_RET(util::CreateThread("compact-check", [this] { uint64_t counter = 0; - time_t last_compact_date = 0; + int64_t last_compact_date = 0; CompactionChecker compaction_checker{this->storage}; while (!stop_) { @@ -192,11 +192,9 @@ Status Server::Start() { if (!is_loading_ && ++counter % 600 == 0 // check every minute && config_->compaction_checker_range.Enabled()) { - auto now = static_cast(util::GetTimeStamp()); - std::tm local_time{}; - localtime_r(&now, &local_time); - if (local_time.tm_hour >= config_->compaction_checker_range.start && - local_time.tm_hour <= config_->compaction_checker_range.stop) { + auto now_hours = util::GetTimeStamp(); + if (now_hours >= config_->compaction_checker_range.start && + now_hours <= config_->compaction_checker_range.stop) { std::vector cf_names = {engine::kMetadataColumnFamilyName, engine::kSubkeyColumnFamilyName, engine::kZSetScoreColumnFamilyName, engine::kStreamColumnFamilyName}; for (const auto &cf_name : cf_names) { @@ -204,8 +202,8 @@ Status Server::Start() { } } // compact once per day - if (now != 0 && last_compact_date != now / 86400) { - last_compact_date = now / 86400; + if (now_hours != 0 && last_compact_date != now_hours / 24) { + last_compact_date = now_hours / 24; compaction_checker.CompactPropagateAndPubSubFiles(); } } @@ -344,9 +342,9 @@ void 
Server::CleanupExitedSlaves() { void Server::FeedMonitorConns(redis::Connection *conn, const std::vector &tokens) { if (monitor_clients_ <= 0) return; - auto now = util::GetTimeStampUS(); + auto now_us = util::GetTimeStampUS(); std::string output = - fmt::format("{}.{} [{} {}]", now / 1000000, now % 1000000, conn->GetNamespace(), conn->GetAddr()); + fmt::format("{}.{} [{} {}]", now_us / 1000000, now_us % 1000000, conn->GetNamespace(), conn->GetAddr()); for (const auto &token : tokens) { output += " \""; output += util::EscapeString(token); @@ -674,7 +672,7 @@ void Server::OnEntryAddedToStream(const std::string &ns, const std::string &key, } } -void Server::updateCachedTime() { unix_time.store(util::GetTimeStamp()); } +void Server::updateCachedTime() { unix_time_secs.store(util::GetTimeStamp()); } int Server::IncrClientNum() { total_clients_.fetch_add(1, std::memory_order_relaxed); @@ -787,13 +785,14 @@ void Server::cron() { // No replica uses this checkpoint, we can remove it. if (counter != 0 && counter % 100 == 0) { - time_t create_time = storage->GetCheckpointCreateTime(); - time_t access_time = storage->GetCheckpointAccessTime(); + int64_t create_time_secs = storage->GetCheckpointCreateTimeSecs(); + int64_t access_time_secs = storage->GetCheckpointAccessTimeSecs(); if (storage->ExistCheckpoint()) { // TODO(shooterit): support to config the alive time of checkpoint - auto now = static_cast(util::GetTimeStamp()); - if ((GetFetchFileThreadNum() == 0 && now - access_time > 30) || (now - create_time > 24 * 60 * 60)) { + int64_t now_secs = util::GetTimeStamp(); + if ((GetFetchFileThreadNum() == 0 && now_secs - access_time_secs > 30) || + (now_secs - create_time_secs > 24 * 60 * 60)) { auto s = rocksdb::DestroyDB(config_->checkpoint_dir, rocksdb::Options()); if (!s.ok()) { LOG(WARNING) << "[server] Fail to clean checkpoint, error: " << s.ToString(); @@ -963,9 +962,9 @@ void Server::GetServerInfo(std::string *info) { string_stream << "arch_bits:" << sizeof(void *) * 8 << "\r\n"; string_stream << "process_id:" << getpid() << "\r\n"; string_stream << "tcp_port:" << config_->port << "\r\n"; - int64_t now = util::GetTimeStamp(); - string_stream << "uptime_in_seconds:" << now - start_time_ << "\r\n"; - string_stream << "uptime_in_days:" << (now - start_time_) / 86400 << "\r\n"; + int64_t now_secs = util::GetTimeStamp(); + string_stream << "uptime_in_seconds:" << now_secs - start_time_secs_ << "\r\n"; + string_stream << "uptime_in_days:" << (now_secs - start_time_secs_) / 86400 << "\r\n"; *info = string_stream.str(); } @@ -1000,14 +999,14 @@ void Server::GetReplicationInfo(std::string *info) { string_stream << "# Replication\r\n"; string_stream << "role:" << (IsSlave() ? "slave" : "master") << "\r\n"; if (IsSlave()) { - time_t now = util::GetTimeStamp(); + int64_t now_secs = util::GetTimeStamp(); string_stream << "master_host:" << master_host_ << "\r\n"; string_stream << "master_port:" << master_port_ << "\r\n"; ReplState state = GetReplicationState(); string_stream << "master_link_status:" << (state == kReplConnected ? "up" : "down") << "\r\n"; string_stream << "master_sync_unrecoverable_error:" << (state == kReplError ? 
"yes" : "no") << "\r\n"; string_stream << "master_sync_in_progress:" << (state == kReplFetchMeta || state == kReplFetchSST) << "\r\n"; - string_stream << "master_last_io_seconds_ago:" << now - replication_thread_->LastIOTime() << "\r\n"; + string_stream << "master_last_io_seconds_ago:" << now_secs - replication_thread_->LastIOTimeSecs() << "\r\n"; string_stream << "slave_repl_offset:" << storage->LatestSeqNumber() << "\r\n"; string_stream << "slave_priority:" << config_->slave_priority << "\r\n"; } @@ -1091,15 +1090,15 @@ void Server::SetLastRandomKeyCursor(const std::string &cursor) { } int64_t Server::GetCachedUnixTime() { - if (unix_time.load() == 0) { + if (unix_time_secs.load() == 0) { updateCachedTime(); } - return unix_time.load(); + return unix_time_secs.load(); } int64_t Server::GetLastBgsaveTime() { std::lock_guard lg(db_job_mu_); - return last_bgsave_time_ == -1 ? start_time_ : last_bgsave_time_; + return last_bgsave_timestamp_secs_ == -1 ? start_time_secs_ : last_bgsave_timestamp_secs_; } void Server::GetStatsInfo(std::string *info) { @@ -1141,7 +1140,7 @@ void Server::GetCommandsStatsInfo(std::string *info) { auto latency = cmd_stat.second.latency.load(); string_stream << "cmdstat_" << cmd_stat.first << ":calls=" << calls << ",usec=" << latency - << ",usec_per_call=" << ((calls == 0) ? 0 : static_cast(latency / calls)) << "\r\n"; + << ",usec_per_call=" << static_cast(latency / calls) << "\r\n"; } *info = string_stream.str(); @@ -1195,9 +1194,10 @@ void Server::GetInfo(const std::string &ns, const std::string §ion, std::str std::lock_guard lg(db_job_mu_); string_stream << "bgsave_in_progress:" << (is_bgsave_in_progress_ ? 1 : 0) << "\r\n"; - string_stream << "last_bgsave_time:" << (last_bgsave_time_ == -1 ? start_time_ : last_bgsave_time_) << "\r\n"; + string_stream << "last_bgsave_time:" + << (last_bgsave_timestamp_secs_ == -1 ? start_time_secs_ : last_bgsave_timestamp_secs_) << "\r\n"; string_stream << "last_bgsave_status:" << last_bgsave_status_ << "\r\n"; - string_stream << "last_bgsave_time_sec:" << last_bgsave_time_sec_ << "\r\n"; + string_stream << "last_bgsave_time_sec:" << last_bgsave_duration_secs_ << "\r\n"; } if (all || section == "stats") { @@ -1249,8 +1249,9 @@ void Server::GetInfo(const std::string &ns, const std::string §ion, std::str KeyNumStats stats; GetLatestKeyNumStats(ns, &stats); - time_t last_scan_time = GetLastScanTime(ns); - tm last_scan_tm{}; + // FIXME(mwish): output still requires std::tm. + auto last_scan_time = static_cast(GetLastScanTime(ns)); + std::tm last_scan_tm{}; localtime_r(&last_scan_time, &last_scan_tm); if (section_cnt++) string_stream << "\r\n"; @@ -1393,15 +1394,15 @@ Status Server::AsyncBgSaveDB() { is_bgsave_in_progress_ = true; return task_runner_.TryPublish([this] { - auto start_bgsave_time = util::GetTimeStamp(); + auto start_bgsave_time_secs = util::GetTimeStamp(); Status s = storage->CreateBackup(); - auto stop_bgsave_time = util::GetTimeStamp(); + auto stop_bgsave_time_secs = util::GetTimeStamp(); std::lock_guard lg(db_job_mu_); is_bgsave_in_progress_ = false; - last_bgsave_time_ = start_bgsave_time; + last_bgsave_timestamp_secs_ = start_bgsave_time_secs; last_bgsave_status_ = s.IsOK() ? 
"ok" : "err"; - last_bgsave_time_sec_ = stop_bgsave_time - start_bgsave_time; + last_bgsave_duration_secs_ = stop_bgsave_time_secs - start_bgsave_time_secs; }); } @@ -1436,7 +1437,7 @@ Status Server::AsyncScanDBSize(const std::string &ns) { std::lock_guard lg(db_job_mu_); db_scan_infos_[ns].key_num_stats = stats; - db_scan_infos_[ns].last_scan_time = util::GetTimeStamp(); + db_scan_infos_[ns].last_scan_time_secs = util::GetTimeStamp(); db_scan_infos_[ns].is_scanning = false; }); } @@ -1529,10 +1530,10 @@ void Server::GetLatestKeyNumStats(const std::string &ns, KeyNumStats *stats) { } } -time_t Server::GetLastScanTime(const std::string &ns) { +int64_t Server::GetLastScanTime(const std::string &ns) const { auto iter = db_scan_infos_.find(ns); if (iter != db_scan_infos_.end()) { - return iter->second.last_scan_time; + return iter->second.last_scan_time_secs; } return 0; } diff --git a/src/server/server.h b/src/server/server.h index a0f0477b276..ad967c77f40 100644 --- a/src/server/server.h +++ b/src/server/server.h @@ -56,7 +56,8 @@ constexpr const char *REDIS_VERSION = "4.0.0"; struct DBScanInfo { - time_t last_scan_time = 0; + // Last scan system clock in seconds + int64_t last_scan_time_secs = 0; KeyNumStats key_num_stats; bool is_scanning = false; }; @@ -249,7 +250,7 @@ class Server { Status AsyncPurgeOldBackups(uint32_t num_backups_to_keep, uint32_t backup_max_keep_hours); Status AsyncScanDBSize(const std::string &ns); void GetLatestKeyNumStats(const std::string &ns, KeyNumStats *stats); - time_t GetLastScanTime(const std::string &ns); + int64_t GetLastScanTime(const std::string &ns) const; std::string GenerateCursorFromKeyName(const std::string &key_name, CursorType cursor_type, const char *prefix = ""); std::string GetKeyNameFromCursor(const std::string &cursor, CursorType cursor_type); @@ -294,7 +295,7 @@ class Server { Stats stats; engine::Storage *storage; std::unique_ptr cluster; - static inline std::atomic unix_time = 0; + static inline std::atomic unix_time_secs = 0; std::unique_ptr slot_migrator; std::unique_ptr slot_import; @@ -325,7 +326,7 @@ class Server { std::atomic stop_ = false; std::atomic is_loading_ = false; - int64_t start_time_; + int64_t start_time_secs_; std::mutex slaveof_mu_; std::string master_host_; uint32_t master_port_ = 0; @@ -355,9 +356,9 @@ class Server { std::mutex db_job_mu_; bool db_compacting_ = false; bool is_bgsave_in_progress_ = false; - int64_t last_bgsave_time_ = -1; + int64_t last_bgsave_timestamp_secs_ = -1; std::string last_bgsave_status_ = "ok"; - int64_t last_bgsave_time_sec_ = -1; + int64_t last_bgsave_duration_secs_ = -1; std::map db_scan_infos_; diff --git a/src/stats/stats.cc b/src/stats/stats.cc index 115fc4d9e13..ae18638b221 100644 --- a/src/stats/stats.cc +++ b/src/stats/stats.cc @@ -29,7 +29,7 @@ Stats::Stats() { for (int i = 0; i < STATS_METRIC_COUNT; i++) { InstMetric im; - im.last_sample_time = 0; + im.last_sample_time_ms = 0; im.last_sample_count = 0; im.idx = 0; for (uint64_t &sample : im.samples) { @@ -93,15 +93,15 @@ void Stats::IncrLatency(uint64_t latency, const std::string &command_name) { } void Stats::TrackInstantaneousMetric(int metric, uint64_t current_reading) { - uint64_t curr_time = util::GetTimeStampMS(); + uint64_t curr_time_ms = util::GetTimeStampMS(); std::unique_lock lock(inst_metrics_mutex); - uint64_t t = curr_time - inst_metrics[metric].last_sample_time; + uint64_t t = curr_time_ms - inst_metrics[metric].last_sample_time_ms; uint64_t ops = current_reading - inst_metrics[metric].last_sample_count; uint64_t 
ops_sec = t > 0 ? (ops * 1000 / t) : 0; inst_metrics[metric].samples[inst_metrics[metric].idx] = ops_sec; inst_metrics[metric].idx++; inst_metrics[metric].idx %= STATS_METRIC_SAMPLES; - inst_metrics[metric].last_sample_time = curr_time; + inst_metrics[metric].last_sample_time_ms = curr_time_ms; inst_metrics[metric].last_sample_count = current_reading; } diff --git a/src/stats/stats.h b/src/stats/stats.h index 88ab2108b09..6fdba09a194 100644 --- a/src/stats/stats.h +++ b/src/stats/stats.h @@ -49,8 +49,8 @@ struct CommandStat { }; struct InstMetric { - uint64_t last_sample_time; // Timestamp of the last sample in ms - uint64_t last_sample_count; // Count in the last sample + uint64_t last_sample_time_ms; // Timestamp of the last sample in ms + uint64_t last_sample_count; // Count in the last sample uint64_t samples[STATS_METRIC_SAMPLES]; int idx; }; diff --git a/src/storage/rdb.cc b/src/storage/rdb.cc index 8c5f6f11976..f513caca698 100644 --- a/src/storage/rdb.cc +++ b/src/storage/rdb.cc @@ -459,11 +459,11 @@ Status RDB::saveRdbObject(int type, const std::string &key, const RedisObjValue if (type == RDBTypeString) { const auto &value = std::get(obj); redis::String string_db(storage_, ns_); - uint64_t expire = 0; + uint64_t expire_ms = 0; if (ttl_ms > 0) { - expire = ttl_ms + util::GetTimeStampMS(); + expire_ms = ttl_ms + util::GetTimeStampMS(); } - db_status = string_db.SetEX(key, value, expire); + db_status = string_db.SetEX(key, value, expire_ms); } else if (type == RDBTypeSet || type == RDBTypeSetIntSet || type == RDBTypeSetListPack) { const auto &members = std::get>(obj); redis::Set set_db(storage_, ns_); @@ -567,21 +567,20 @@ Status RDB::LoadRdb(uint32_t db_index, bool overwrite_exist_key) { return {Status::NotOK, fmt::format("Can't handle RDB format version {}", rdb_ver)}; } - uint64_t expire_time = 0; + uint64_t expire_time_ms = 0; int64_t expire_keys = 0; int64_t load_keys = 0; int64_t empty_keys_skipped = 0; - auto now = util::GetTimeStampMS(); + auto now_ms = util::GetTimeStampMS(); uint32_t db_id = 0; uint64_t skip_exist_keys = 0; while (true) { auto type = GET_OR_RET(LogWhenError(loadRdbType())); if (type == RDBOpcodeExpireTime) { - expire_time = static_cast(GET_OR_RET(LogWhenError(loadExpiredTimeSeconds()))); - expire_time *= 1000; + expire_time_ms = static_cast(GET_OR_RET(LogWhenError(loadExpiredTimeSeconds()))) * 1000; continue; } else if (type == RDBOpcodeExpireTimeMs) { - expire_time = GET_OR_RET(LogWhenError(loadExpiredTimeMilliseconds(rdb_ver))); + expire_time_ms = GET_OR_RET(LogWhenError(loadExpiredTimeMilliseconds(rdb_ver))); continue; } else if (type == RDBOpcodeFreq) { // LFU frequency: not use in kvrocks GET_OR_RET(LogWhenError(stream_->ReadByte())); // discard the value @@ -637,8 +636,8 @@ Status RDB::LoadRdb(uint32_t db_index, bool overwrite_exist_key) { LOG(WARNING) << "skipping empty key: " << key; } continue; - } else if (expire_time != 0 && - expire_time < now) { // in redis this used to feed this deletion to any connected replicas + } else if (expire_time_ms != 0 && + expire_time_ms < now_ms) { // in redis this used to feed this deletion to any connected replicas expire_keys++; continue; } @@ -655,7 +654,7 @@ Status RDB::LoadRdb(uint32_t db_index, bool overwrite_exist_key) { } } - auto ret = saveRdbObject(type, key, value, expire_time); + auto ret = saveRdbObject(type, key, value, expire_time_ms); if (!ret.IsOK()) { LOG(WARNING) << "save rdb object key " << key << " failed: " << ret.Msg(); } else { @@ -730,7 +729,7 @@ Status RDB::SaveObjectType(const RedisType 
type) { } else if (type == kRedisHash) { robj_type = RDBTypeHash; } else if (type == kRedisList) { - robj_type = RDBTypeListQuickList2; + robj_type = RDBTypeListQuickList; } else if (type == kRedisSet) { robj_type = RDBTypeSet; } else if (type == kRedisZSet) { @@ -892,12 +891,7 @@ Status RDB::SaveListObject(const std::vector &elems) { } for (const auto &elem : elems) { - status = RdbSaveLen(1 /*plain container mode */); - if (!status.IsOK()) { - return {Status::RedisExecErr, status.Msg()}; - } - - status = SaveStringObject(elem); + auto status = rdbSaveZipListObject(elem); if (!status.IsOK()) { return {Status::RedisExecErr, status.Msg()}; } @@ -1005,3 +999,35 @@ Status RDB::rdbSaveBinaryDoubleValue(double val) { memrev64ifbe(&val); return stream_->Write((const char *)(&val), sizeof(val)); } + +Status RDB::rdbSaveZipListObject(const std::string &elem) { + // calc total ziplist size + uint prevlen = 0; + const size_t ziplist_size = zlHeaderSize + zlEndSize + elem.length() + + ZipList::ZipStorePrevEntryLength(nullptr, 0, prevlen) + + ZipList::ZipStoreEntryEncoding(nullptr, 0, elem.length()); + auto zl_string = std::string(ziplist_size, '\0'); + auto zl_ptr = reinterpret_cast(&zl_string[0]); + + // set ziplist header + ZipList::SetZipListBytes(zl_ptr, ziplist_size, (static_cast(ziplist_size))); + ZipList::SetZipListTailOffset(zl_ptr, ziplist_size, intrev32ifbe(zlHeaderSize)); + + // set ziplist entry + auto pos = ZipList::GetZipListEntryHead(zl_ptr, ziplist_size); + pos += ZipList::ZipStorePrevEntryLength(pos, ziplist_size, prevlen); + pos += ZipList::ZipStoreEntryEncoding(pos, ziplist_size, elem.length()); + assert(pos + elem.length() <= zl_ptr + ziplist_size); + memcpy(pos, elem.c_str(), elem.length()); + + // set ziplist end + ZipList::SetZipListLength(zl_ptr, ziplist_size, 1); + zl_ptr[ziplist_size - 1] = zlEnd; + + auto status = SaveStringObject(zl_string); + if (!status.IsOK()) { + return {Status::RedisExecErr, status.Msg()}; + } + + return Status::OK(); +} diff --git a/src/storage/rdb.h b/src/storage/rdb.h index 5d7f78f3740..3a08df84a40 100644 --- a/src/storage/rdb.h +++ b/src/storage/rdb.h @@ -153,4 +153,5 @@ class RDB { static bool isEmptyRedisObject(const RedisObjValue &value); static int rdbEncodeInteger(long long value, unsigned char *enc); Status rdbSaveBinaryDoubleValue(double val); + Status rdbSaveZipListObject(const std::string &elem); }; diff --git a/src/storage/rdb_ziplist.cc b/src/storage/rdb_ziplist.cc index 772226eaa47..b51dc8ddadc 100644 --- a/src/storage/rdb_ziplist.cc +++ b/src/storage/rdb_ziplist.cc @@ -20,11 +20,9 @@ #include "rdb_ziplist.h" -#include "vendor/endianconv.h" +#include -constexpr const int zlHeaderSize = 10; -constexpr const uint8_t ZipListBigLen = 0xFE; -constexpr const uint8_t zlEnd = 0xFF; +#include "vendor/endianconv.h" constexpr const uint8_t ZIP_STR_MASK = 0xC0; constexpr const uint8_t ZIP_STR_06B = (0 << 6); @@ -52,7 +50,7 @@ StatusOr ZipList::Next() { std::string value; if ((encoding) < ZIP_STR_MASK) { // For integer type, needs to convert to uint8_t* to avoid signed extension - auto data = reinterpret_cast(input_.data()); + auto data = reinterpret_cast(input_.data()); if ((encoding) == ZIP_STR_06B) { len_bytes = 1; len = data[pos_] & 0x3F; @@ -91,7 +89,7 @@ StatusOr ZipList::Next() { } else if ((encoding) == ZIP_INT_24B) { GET_OR_RET(peekOK(3)); int32_t i32 = 0; - memcpy(reinterpret_cast(&i32) + 1, input_.data() + pos_, sizeof(int32_t) - 1); + memcpy(reinterpret_cast(&i32) + 1, input_.data() + pos_, sizeof(int32_t) - 1); memrev32ifbe(&i32); i32 
>>= 8; setPreEntryLen(4); // 3byte for encoding and 1byte for the prev entry length @@ -126,7 +124,7 @@ StatusOr ZipList::Next() { StatusOr> ZipList::Entries() { GET_OR_RET(peekOK(zlHeaderSize)); // ignore 8 bytes of total bytes and tail of zip list - auto zl_len = intrev16ifbe(*reinterpret_cast(input_.data() + 8)); + auto zl_len = intrev16ifbe(*reinterpret_cast(input_.data() + 8)); pos_ += zlHeaderSize; std::vector entries; @@ -152,3 +150,72 @@ Status ZipList::peekOK(size_t n) { } uint32_t ZipList::getEncodedLengthSize(uint32_t len) { return len < ZipListBigLen ? 1 : 5; } + +uint32_t ZipList::ZipStorePrevEntryLengthLarge(unsigned char *p, size_t zl_size, unsigned int len) { + uint32_t u32 = 0; + if (p != nullptr) { + p[0] = ZipListBigLen; + u32 = len; + assert(zl_size >= 1 + sizeof(uint32_t) + zlHeaderSize); + memcpy(p + 1, &u32, sizeof(u32)); + memrev32ifbe(p + 1); + } + return 1 + sizeof(uint32_t); +} + +uint32_t ZipList::ZipStorePrevEntryLength(unsigned char *p, size_t zl_size, unsigned int len) { + if (p == nullptr) { + return (len < ZipListBigLen) ? 1 : sizeof(uint32_t) + 1; + } + if (len < ZipListBigLen) { + p[0] = len; + return 1; + } + return ZipStorePrevEntryLengthLarge(p, zl_size, len); +} + +uint32_t ZipList::ZipStoreEntryEncoding(unsigned char *p, size_t zl_size, unsigned int rawlen) { + unsigned char len = 1, buf[5]; + + /* Although encoding is given it may not be set for strings, + * so we determine it here using the raw length. */ + if (rawlen <= 0x3f) { + if (!p) return len; + buf[0] = ZIP_STR_06B | rawlen; + } else if (rawlen <= 0x3fff) { + len += 1; + if (!p) return len; + buf[0] = ZIP_STR_14B | ((rawlen >> 8) & 0x3f); + buf[1] = rawlen & 0xff; + } else { + len += 4; + if (!p) return len; + buf[0] = ZIP_STR_32B; + buf[1] = (rawlen >> 24) & 0xff; + buf[2] = (rawlen >> 16) & 0xff; + buf[3] = (rawlen >> 8) & 0xff; + buf[4] = rawlen & 0xff; + } + assert(zl_size >= static_cast(zlHeaderSize) + len); + /* Store this length at p. 
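(p is guaranteed non-null here: every branch above returns early when p is null.)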
*/ + memcpy(p, buf, len); + return len; +} + +void ZipList::SetZipListBytes(unsigned char *zl, size_t zl_size, uint32_t value) { + assert(zl_size >= sizeof(uint32_t)); + memcpy(zl, &value, sizeof(uint32_t)); +} +void ZipList::SetZipListTailOffset(unsigned char *zl, size_t zl_size, uint32_t value) { + assert(zl_size >= sizeof(uint32_t) * 2); + memcpy(zl + sizeof(uint32_t), &value, sizeof(uint32_t)); +} +void ZipList::SetZipListLength(unsigned char *zl, size_t zl_size, uint16_t value) { + assert(zl_size >= sizeof(uint32_t) * 2 + sizeof(uint16_t)); + memcpy(zl + sizeof(uint32_t) * 2, &value, sizeof(uint16_t)); +} + +unsigned char *ZipList::GetZipListEntryHead(unsigned char *zl, size_t zl_size) { + assert(zl_size >= zlHeaderSize); + return ((zl) + zlHeaderSize); +} diff --git a/src/storage/rdb_ziplist.h b/src/storage/rdb_ziplist.h index e9d05fde716..8f0d99c6693 100644 --- a/src/storage/rdb_ziplist.h +++ b/src/storage/rdb_ziplist.h @@ -25,6 +25,11 @@ #include "common/status.h" +constexpr const int zlHeaderSize = 10; +constexpr const int zlEndSize = 1; +constexpr const uint8_t ZipListBigLen = 0xFE; +constexpr const uint8_t zlEnd = 0xFF; + class ZipList { public: explicit ZipList(std::string_view input) : input_(input){}; @@ -32,6 +37,13 @@ class ZipList { StatusOr Next(); StatusOr> Entries(); + static uint32_t ZipStorePrevEntryLengthLarge(unsigned char *p, size_t zl_size, unsigned int len); + static uint32_t ZipStorePrevEntryLength(unsigned char *p, size_t zl_size, unsigned int len); + static uint32_t ZipStoreEntryEncoding(unsigned char *p, size_t zl_size, unsigned int rawlen); + static void SetZipListBytes(unsigned char *zl, size_t zl_size, uint32_t value); + static void SetZipListTailOffset(unsigned char *zl, size_t zl_size, uint32_t value); + static void SetZipListLength(unsigned char *zl, size_t zl_size, uint16_t value); + static unsigned char *GetZipListEntryHead(unsigned char *zl, size_t zl_size); private: std::string_view input_; diff --git a/src/storage/redis_db.cc b/src/storage/redis_db.cc index 3ff1fa0a374..13f9bd2f5c0 100644 --- a/src/storage/redis_db.cc +++ b/src/storage/redis_db.cc @@ -35,6 +35,11 @@ #include "storage/redis_metadata.h" #include "storage/storage.h" #include "time_util.h" +#include "types/redis_hash.h" +#include "types/redis_list.h" +#include "types/redis_set.h" +#include "types/redis_string.h" +#include "types/redis_zset.h" namespace redis { @@ -768,4 +773,211 @@ rocksdb::Status Database::Copy(const std::string &key, const std::string &new_ke return storage_->Write(storage_->DefaultWriteOptions(), batch->GetWriteBatch()); } +std::optional Database::lookupKeyByPattern(const std::string &pattern, const std::string &subst) { + if (pattern == "#") { + return subst; + } + + auto match_pos = pattern.find('*'); + if (match_pos == std::string::npos) { + return std::nullopt; + } + + // hash field + std::string field; + auto arrow_pos = pattern.find("->", match_pos + 1); + if (arrow_pos != std::string::npos && arrow_pos + 2 < pattern.size()) { + field = pattern.substr(arrow_pos + 2); + } + + std::string key = pattern.substr(0, match_pos + 1); + key.replace(match_pos, 1, subst); + + std::string value; + RedisType type = RedisType::kRedisNone; + if (!field.empty()) { + auto hash_db = redis::Hash(storage_, namespace_); + if (auto s = hash_db.Type(key, &type); !s.ok() || type != RedisType::kRedisHash) { + return std::nullopt; + } + + if (auto s = hash_db.Get(key, field, &value); !s.ok()) { + return std::nullopt; + } + } else { + auto string_db = redis::String(storage_, 
namespace_); + if (auto s = string_db.Type(key, &type); !s.ok() || type != RedisType::kRedisString) { + return std::nullopt; + } + if (auto s = string_db.Get(key, &value); !s.ok()) { + return std::nullopt; + } + } + return value; +} + +rocksdb::Status Database::Sort(RedisType type, const std::string &key, const SortArgument &args, + std::vector> *elems, SortResult *res) { + // Obtain the length of the object to sort. + const std::string ns_key = AppendNamespacePrefix(key); + Metadata metadata(type, false); + auto s = GetMetadata(GetOptions{}, {type}, ns_key, &metadata); + if (!s.ok()) return s; + + if (metadata.size > SORT_LENGTH_LIMIT) { + *res = SortResult::LIMIT_EXCEEDED; + return rocksdb::Status::OK(); + } + auto vectorlen = static_cast(metadata.size); + + // Adjust the offset and count of the limit + int offset = args.offset >= vectorlen ? 0 : std::clamp(args.offset, 0, vectorlen - 1); + int count = args.offset >= vectorlen ? 0 : std::clamp(args.count, -1, vectorlen - offset); + if (count == -1) count = vectorlen - offset; + + // Get the elements that need to be sorted + std::vector str_vec; + if (count != 0) { + if (type == RedisType::kRedisList) { + auto list_db = redis::List(storage_, namespace_); + + if (args.dontsort) { + if (args.desc) { + s = list_db.Range(key, -count - offset, -1 - offset, &str_vec); + if (!s.ok()) return s; + std::reverse(str_vec.begin(), str_vec.end()); + } else { + s = list_db.Range(key, offset, offset + count - 1, &str_vec); + if (!s.ok()) return s; + } + } else { + s = list_db.Range(key, 0, -1, &str_vec); + if (!s.ok()) return s; + } + } else if (type == RedisType::kRedisSet) { + auto set_db = redis::Set(storage_, namespace_); + s = set_db.Members(key, &str_vec); + if (!s.ok()) return s; + + if (args.dontsort) { + str_vec = std::vector(std::make_move_iterator(str_vec.begin() + offset), + std::make_move_iterator(str_vec.begin() + offset + count)); + } + } else if (type == RedisType::kRedisZSet) { + auto zset_db = redis::ZSet(storage_, namespace_); + std::vector member_scores; + + if (args.dontsort) { + RangeRankSpec spec; + spec.start = offset; + spec.stop = offset + count - 1; + spec.reversed = args.desc; + s = zset_db.RangeByRank(key, spec, &member_scores, nullptr); + if (!s.ok()) return s; + + for (auto &member_score : member_scores) { + str_vec.emplace_back(std::move(member_score.member)); + } + } else { + s = zset_db.GetAllMemberScores(key, &member_scores); + if (!s.ok()) return s; + + for (auto &member_score : member_scores) { + str_vec.emplace_back(std::move(member_score.member)); + } + } + } else { + *res = SortResult::UNKNOWN_TYPE; + return s; + } + } + + std::vector sort_vec(str_vec.size()); + for (size_t i = 0; i < str_vec.size(); ++i) { + sort_vec[i].obj = str_vec[i]; + } + + // Sort by BY, ALPHA, ASC/DESC + if (!args.dontsort) { + for (size_t i = 0; i < sort_vec.size(); ++i) { + std::string byval; + if (!args.sortby.empty()) { + auto lookup = lookupKeyByPattern(args.sortby, str_vec[i]); + if (!lookup.has_value()) continue; + byval = std::move(lookup.value()); + } else { + byval = str_vec[i]; + } + + if (args.alpha && !args.sortby.empty()) { + sort_vec[i].v = byval; + } else if (!args.alpha && !byval.empty()) { + auto double_byval = ParseFloat(byval); + if (!double_byval) { + *res = SortResult::DOUBLE_CONVERT_ERROR; + return rocksdb::Status::OK(); + } + sort_vec[i].v = *double_byval; + } + } + + std::sort(sort_vec.begin(), sort_vec.end(), [&args](const RedisSortObject &a, const RedisSortObject &b) { + return RedisSortObject::SortCompare(a, b, 
args); + }); + + // Gets the element specified by Limit + if (offset != 0 || count != vectorlen) { + sort_vec = std::vector(std::make_move_iterator(sort_vec.begin() + offset), + std::make_move_iterator(sort_vec.begin() + offset + count)); + } + } + + // Perform storage + for (auto &elem : sort_vec) { + if (args.getpatterns.empty()) { + elems->emplace_back(elem.obj); + } + for (const std::string &pattern : args.getpatterns) { + std::optional val = lookupKeyByPattern(pattern, elem.obj); + if (val.has_value()) { + elems->emplace_back(val.value()); + } else { + elems->emplace_back(std::nullopt); + } + } + } + + if (!args.storekey.empty()) { + std::vector store_elems; + store_elems.reserve(elems->size()); + for (const auto &e : *elems) { + store_elems.emplace_back(e.value_or("")); + } + redis::List list_db(storage_, namespace_); + s = list_db.Trim(args.storekey, -1, 0); + if (!s.ok()) return s; + uint64_t new_size = 0; + s = list_db.Push(args.storekey, std::vector(store_elems.cbegin(), store_elems.cend()), false, &new_size); + if (!s.ok()) return s; + } + + return rocksdb::Status::OK(); +} + +bool RedisSortObject::SortCompare(const RedisSortObject &a, const RedisSortObject &b, const SortArgument &args) { + if (!args.alpha) { + double score_a = std::get(a.v); + double score_b = std::get(b.v); + return !args.desc ? score_a < score_b : score_a > score_b; + } else { + if (!args.sortby.empty()) { + std::string cmp_a = std::get(a.v); + std::string cmp_b = std::get(b.v); + return !args.desc ? cmp_a < cmp_b : cmp_a > cmp_b; + } else { + return !args.desc ? a.obj < b.obj : a.obj > b.obj; + } + } +} + } // namespace redis diff --git a/src/storage/redis_db.h b/src/storage/redis_db.h index 73a5a6545c0..84579b107fd 100644 --- a/src/storage/redis_db.h +++ b/src/storage/redis_db.h @@ -21,15 +21,52 @@ #pragma once #include +#include #include #include +#include #include #include "redis_metadata.h" +#include "server/redis_reply.h" #include "storage.h" namespace redis { +/// SORT_LENGTH_LIMIT limits the number of elements to be sorted +/// to avoid using too much memory and causing system crashes. +/// TODO: Expect to expand or eliminate SORT_LENGTH_LIMIT +/// through better mechanisms such as memory restriction logic. +constexpr uint64_t SORT_LENGTH_LIMIT = 512; + +struct SortArgument { + std::string sortby; // BY + bool dontsort = false; // DONT SORT + int offset = 0; // LIMIT OFFSET + int count = -1; // LIMIT COUNT + std::vector getpatterns; // GET + bool desc = false; // ASC/DESC + bool alpha = false; // ALPHA + std::string storekey; // STORE +}; + +struct RedisSortObject { + std::string obj; + std::variant v; + + /// SortCompare is a helper function that enables `RedisSortObject` to be sorted based on `SortArgument`. + /// + /// It can assist in implementing the third parameter `Compare comp` required by `std::sort` + /// + /// \param args The basis used to compare two RedisSortObjects. + /// If `args.alpha` is false, `RedisSortObject.v` will be taken as double for comparison + /// If `args.alpha` is true and `args.sortby` is not empty, `RedisSortObject.v` will be taken as string for comparison + /// If `args.alpha` is true and `args.sortby` is empty, the comparison is by `RedisSortObject.obj`. 
+  ///
+  /// \return If `desc` is false, returns true when `a < b`, otherwise returns true when `a > b`
+  static bool SortCompare(const RedisSortObject &a, const RedisSortObject &b, const SortArgument &args);
+};
+
 /// Database is a wrapper of underlying storage engine, it provides
 /// some common operations for redis commands.
 class Database {
@@ -107,6 +144,17 @@ class Database {
   enum class CopyResult { KEY_NOT_EXIST, KEY_ALREADY_EXIST, DONE };
   [[nodiscard]] rocksdb::Status Copy(const std::string &key, const std::string &new_key, bool nx, bool delete_old,
                                      CopyResult *res);
+  enum class SortResult { UNKNOWN_TYPE, DOUBLE_CONVERT_ERROR, LIMIT_EXCEEDED, DONE };
+  /// Sort sorts keys of the specified type according to SortArgument
+  ///
+  /// \param type is the type of the sort key, which must be LIST, SET or ZSET
+  /// \param key is the key to be sorted
+  /// \param args provides the parameters to sort by
+  /// \param elems contains the sorted results
+  /// \param res represents the sorted result type.
+  /// When the returned status is not ok, `res` should not be checked; otherwise, check whether `res` is `DONE`
+  [[nodiscard]] rocksdb::Status Sort(RedisType type, const std::string &key, const SortArgument &args,
+                                     std::vector<std::optional<std::string>> *elems, SortResult *res);
 
  protected:
   engine::Storage *storage_;
@@ -119,6 +167,21 @@ class Database {
   // Already internal keys
   [[nodiscard]] rocksdb::Status existsInternal(const std::vector<std::string> &keys, int *ret);
   [[nodiscard]] rocksdb::Status typeInternal(const Slice &key, RedisType *type);
+
+  /// lookupKeyByPattern is a helper function of `Sort` to support `GET` and `BY` fields.
+  ///
+  /// \param pattern can be the value of a `BY` or `GET` field
+  /// \param subst is used to replace the "*" or "#" matched in the pattern string.
+  /// \return Returns the value associated to the key with a name obtained using the following rules:
+  ///   1) The first occurrence of '*' in 'pattern' is substituted with 'subst'.
+  ///   2) If 'pattern' matches the "->" string, everything on the right of
+  ///      the arrow is treated as the name of a hash field, and the part on the
+  ///      left as the key name containing a hash. The value of the specified
+  ///      field is returned.
+  ///   3) If 'pattern' equals "#", the function simply returns 'subst' itself so
+  ///      that the SORT command can be used like: SORT key GET # to retrieve
+  ///      the Set/List elements directly.
+  std::optional<std::string> lookupKeyByPattern(const std::string &pattern, const std::string &subst);
 };
 
 class LatestSnapShot {
  public:
diff --git a/src/storage/storage.cc b/src/storage/storage.cc
index 1759d2e7a8c..b25ae8a8c67 100644
--- a/src/storage/storage.cc
+++ b/src/storage/storage.cc
@@ -67,7 +67,7 @@ constexpr double kRocksdbLRUBlockCacheHighPriPoolRatio = 0.75;
 constexpr double kRocksdbLRURowCacheHighPriPoolRatio = 0.5;
 
 // used in creating rocksdb::HyperClockCache, set`estimated_entry_charge` to 0 means let rocksdb dynamically and
-// automacally adjust the table size for the cache.
+// automatically adjust the table size for the cache.
constexpr size_t kRockdbHCCAutoAdjustCharge = 0; const int64_t kIORateLimitMaxMb = 1024000; @@ -75,7 +75,7 @@ const int64_t kIORateLimitMaxMb = 1024000; using rocksdb::Slice; Storage::Storage(Config *config) - : backup_creating_time_(util::GetTimeStamp()), + : backup_creating_time_secs_(util::GetTimeStamp()), env_(rocksdb::Env::Default()), config_(config), lock_mgr_(16), @@ -421,8 +421,8 @@ Status Storage::CreateBackup(uint64_t *sequence_number) { return {Status::NotOK, s.ToString()}; } - // 'backup_mu_' can guarantee 'backup_creating_time_' is thread-safe - backup_creating_time_ = static_cast(util::GetTimeStamp()); + // 'backup_mu_' can guarantee 'backup_creating_time_secs_' is thread-safe + backup_creating_time_secs_ = util::GetTimeStamp(); LOG(INFO) << "[storage] Success to create new backup"; return Status::OK(); @@ -546,7 +546,7 @@ void Storage::EmptyDB() { } void Storage::PurgeOldBackups(uint32_t num_backups_to_keep, uint32_t backup_max_keep_hours) { - time_t now = util::GetTimeStamp(); + auto now_secs = util::GetTimeStamp(); std::lock_guard lg(config_->backup_mu); std::string task_backup_dir = config_->backup_dir; @@ -555,13 +555,14 @@ void Storage::PurgeOldBackups(uint32_t num_backups_to_keep, uint32_t backup_max_ if (!s.ok()) return; // No backup is needed to keep or the backup is expired, we will clean it. - bool backup_expired = (backup_max_keep_hours != 0 && backup_creating_time_ + backup_max_keep_hours * 3600 < now); + bool backup_expired = + (backup_max_keep_hours != 0 && backup_creating_time_secs_ + backup_max_keep_hours * 3600 < now_secs); if (num_backups_to_keep == 0 || backup_expired) { s = rocksdb::DestroyDB(task_backup_dir, rocksdb::Options()); if (s.ok()) { - LOG(INFO) << "[storage] Succeeded cleaning old backup that was created at " << backup_creating_time_; + LOG(INFO) << "[storage] Succeeded cleaning old backup that was created at " << backup_creating_time_secs_; } else { - LOG(INFO) << "[storage] Failed cleaning old backup that was created at " << backup_creating_time_ + LOG(INFO) << "[storage] Failed cleaning old backup that was created at " << backup_creating_time_secs_ << ". Error: " << s.ToString(); } } @@ -975,9 +976,9 @@ Status Storage::ReplDataManager::GetFullReplDataInfo(Storage *storage, std::stri uint64_t checkpoint_latest_seq = 0; s = checkpoint->CreateCheckpoint(data_files_dir, storage->config_->rocks_db.write_buffer_size * MiB, &checkpoint_latest_seq); - auto now = static_cast(util::GetTimeStamp()); - storage->checkpoint_info_.create_time = now; - storage->checkpoint_info_.access_time = now; + auto now_secs = util::GetTimeStamp(); + storage->checkpoint_info_.create_time_secs = now_secs; + storage->checkpoint_info_.access_time_secs = now_secs; storage->checkpoint_info_.latest_seq = checkpoint_latest_seq; if (!s.ok()) { LOG(WARNING) << "[storage] Failed to create checkpoint (snapshot). Error: " << s.ToString(); @@ -987,12 +988,12 @@ Status Storage::ReplDataManager::GetFullReplDataInfo(Storage *storage, std::stri LOG(INFO) << "[storage] Create checkpoint successfully"; } else { // Replicas can share checkpoint to replication if the checkpoint existing time is less than a half of WAL TTL. 
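(The sharing window is clamped to between 10 and 60 minutes just below.)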
-    int64_t can_shared_time = storage->config_->rocks_db.wal_ttl_seconds / 2;
-    if (can_shared_time > 60 * 60) can_shared_time = 60 * 60;
-    if (can_shared_time < 10 * 60) can_shared_time = 10 * 60;
+    int64_t can_shared_time_secs = storage->config_->rocks_db.wal_ttl_seconds / 2;
+    if (can_shared_time_secs > 60 * 60) can_shared_time_secs = 60 * 60;
+    if (can_shared_time_secs < 10 * 60) can_shared_time_secs = 10 * 60;
 
-    auto now = static_cast<time_t>(util::GetTimeStamp());
-    if (now - storage->GetCheckpointCreateTime() > can_shared_time) {
+    auto now_secs = util::GetTimeStamp();
+    if (now_secs - storage->GetCheckpointCreateTimeSecs() > can_shared_time_secs) {
       LOG(WARNING) << "[storage] Can't use current checkpoint, waiting next checkpoint";
       return {Status::NotOK, "Can't use current checkpoint, waiting for next checkpoint"};
     }
diff --git a/src/storage/storage.h b/src/storage/storage.h
index 208499b58fc..09151baad22 100644
--- a/src/storage/storage.h
+++ b/src/storage/storage.h
@@ -215,8 +215,10 @@ class Storage {
     static int OpenDataFile(Storage *storage, const std::string &rel_file, uint64_t *file_size);
     static Status CleanInvalidFiles(Storage *storage, const std::string &dir, std::vector<std::string> valid_files);
     struct CheckpointInfo {
-      std::atomic<time_t> create_time = 0;
-      std::atomic<time_t> access_time = 0;
+      // System clock time when the checkpoint was created.
+      std::atomic<int64_t> create_time_secs = 0;
+      // System clock time when the checkpoint was last accessed.
+      std::atomic<int64_t> access_time_secs = 0;
       uint64_t latest_seq = 0;
     };
 
@@ -238,9 +240,9 @@
   bool ExistCheckpoint();
   bool ExistSyncCheckpoint();
 
-  time_t GetCheckpointCreateTime() const { return checkpoint_info_.create_time; }
-  void SetCheckpointAccessTime(time_t t) { checkpoint_info_.access_time = t; }
-  time_t GetCheckpointAccessTime() const { return checkpoint_info_.access_time; }
+  int64_t GetCheckpointCreateTimeSecs() const { return checkpoint_info_.create_time_secs; }
+  void SetCheckpointAccessTimeSecs(int64_t t) { checkpoint_info_.access_time_secs = t; }
+  int64_t GetCheckpointAccessTimeSecs() const { return checkpoint_info_.access_time_secs; }
 
   void SetDBInRetryableIOError(bool yes_or_no) { db_in_retryable_io_error_ = yes_or_no; }
   bool IsDBInRetryableIOError() const { return db_in_retryable_io_error_; }
@@ -251,7 +253,8 @@
  private:
   std::unique_ptr<rocksdb::DB> db_ = nullptr;
   std::string replid_;
-  time_t backup_creating_time_;
+  // The system clock time when the backup was created.
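+  // Guarded by the backup mutex; see CreateBackup() and PurgeOldBackups().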
+ int64_t backup_creating_time_secs_; std::unique_ptr backup_ = nullptr; rocksdb::Env *env_; std::shared_ptr sst_file_manager_; diff --git a/src/types/redis_stream.cc b/src/types/redis_stream.cc index 1ce8b5f20bf..ba22b625d5c 100644 --- a/src/types/redis_stream.cc +++ b/src/types/redis_stream.cc @@ -237,8 +237,8 @@ std::string Stream::consumerNameFromInternalKey(rocksdb::Slice key) const { std::string Stream::encodeStreamConsumerMetadataValue(const StreamConsumerMetadata &consumer_metadata) { std::string dst; PutFixed64(&dst, consumer_metadata.pending_number); - PutFixed64(&dst, consumer_metadata.last_idle); - PutFixed64(&dst, consumer_metadata.last_active); + PutFixed64(&dst, consumer_metadata.last_idle_ms); + PutFixed64(&dst, consumer_metadata.last_active_ms); return dst; } @@ -246,8 +246,8 @@ StreamConsumerMetadata Stream::decodeStreamConsumerMetadataValue(const std::stri StreamConsumerMetadata consumer_metadata; rocksdb::Slice input(value); GetFixed64(&input, &consumer_metadata.pending_number); - GetFixed64(&input, &consumer_metadata.last_idle); - GetFixed64(&input, &consumer_metadata.last_active); + GetFixed64(&input, &consumer_metadata.last_idle_ms); + GetFixed64(&input, &consumer_metadata.last_active_ms); return consumer_metadata; } @@ -277,7 +277,7 @@ StreamEntryID Stream::groupAndEntryIdFromPelInternalKey(rocksdb::Slice key, std: std::string Stream::encodeStreamPelEntryValue(const StreamPelEntry &pel_entry) { std::string dst; - PutFixed64(&dst, pel_entry.last_delivery_time); + PutFixed64(&dst, pel_entry.last_delivery_time_ms); PutFixed64(&dst, pel_entry.last_delivery_count); PutFixed64(&dst, pel_entry.consumer_name.size()); dst += pel_entry.consumer_name; @@ -287,7 +287,7 @@ std::string Stream::encodeStreamPelEntryValue(const StreamPelEntry &pel_entry) { StreamPelEntry Stream::decodeStreamPelEntryValue(const std::string &value) { StreamPelEntry pel_entry; rocksdb::Slice input(value); - GetFixed64(&input, &pel_entry.last_delivery_time); + GetFixed64(&input, &pel_entry.last_delivery_time_ms); GetFixed64(&input, &pel_entry.last_delivery_count); uint64_t consumer_name_len = 0; GetFixed64(&input, &consumer_name_len); @@ -619,8 +619,8 @@ rocksdb::Status Stream::createConsumerWithoutLock(const Slice &stream_name, cons StreamConsumerMetadata consumer_metadata; auto now = util::GetTimeStampMS(); - consumer_metadata.last_idle = now; - consumer_metadata.last_active = now; + consumer_metadata.last_idle_ms = now; + consumer_metadata.last_active_ms = now; std::string consumer_key = internalKeyFromConsumerName(ns_key, metadata, group_name, consumer_name); std::string consumer_value = encodeStreamConsumerMetadataValue(consumer_metadata); std::string get_consumer_value; @@ -1285,9 +1285,9 @@ rocksdb::Status Stream::RangeWithPending(const Slice &stream_name, StreamRangeOp return s; } StreamConsumerMetadata consumer_metadata = decodeStreamConsumerMetadataValue(get_consumer_value); - auto now = util::GetTimeStampMS(); - consumer_metadata.last_idle = now; - consumer_metadata.last_active = now; + auto now_ms = util::GetTimeStampMS(); + consumer_metadata.last_idle_ms = now_ms; + consumer_metadata.last_active_ms = now_ms; if (latest) { options.start = consumergroup_metadata.last_delivered_id; @@ -1351,7 +1351,7 @@ rocksdb::Status Stream::RangeWithPending(const Slice &stream_name, StreamRangeOp } entries->emplace_back(entry_id.ToString(), std::move(values)); pel_entry.last_delivery_count += 1; - pel_entry.last_delivery_time = now; + pel_entry.last_delivery_time_ms = now_ms; batch->Put(stream_cf_handle_, 
iter->key(), encodeStreamPelEntryValue(pel_entry)); ++count; if (count >= options.count) break; diff --git a/src/types/redis_stream_base.h b/src/types/redis_stream_base.h index 0a527255450..7808e593e3e 100644 --- a/src/types/redis_stream_base.h +++ b/src/types/redis_stream_base.h @@ -183,8 +183,8 @@ struct StreamConsumerGroupMetadata { struct StreamConsumerMetadata { uint64_t pending_number = 0; - uint64_t last_idle; - uint64_t last_active; + uint64_t last_idle_ms; + uint64_t last_active_ms; }; enum class StreamSubkeyType { @@ -195,7 +195,7 @@ enum class StreamSubkeyType { }; struct StreamPelEntry { - uint64_t last_delivery_time; + uint64_t last_delivery_time_ms; uint64_t last_delivery_count; std::string consumer_name; }; diff --git a/src/types/redis_string.cc b/src/types/redis_string.cc index e0f0c99daa5..0f0d468874c 100644 --- a/src/types/redis_string.cc +++ b/src/types/redis_string.cc @@ -255,21 +255,21 @@ rocksdb::Status String::Set(const std::string &user_key, const std::string &valu return updateRawValue(ns_key, new_raw_value); } -rocksdb::Status String::SetEX(const std::string &user_key, const std::string &value, uint64_t expire) { +rocksdb::Status String::SetEX(const std::string &user_key, const std::string &value, uint64_t expire_ms) { std::optional ret; - return Set(user_key, value, {expire, StringSetType::NONE, /*get=*/false, /*keep_ttl=*/false}, ret); + return Set(user_key, value, {expire_ms, StringSetType::NONE, /*get=*/false, /*keep_ttl=*/false}, ret); } -rocksdb::Status String::SetNX(const std::string &user_key, const std::string &value, uint64_t expire, bool *flag) { +rocksdb::Status String::SetNX(const std::string &user_key, const std::string &value, uint64_t expire_ms, bool *flag) { std::optional ret; - auto s = Set(user_key, value, {expire, StringSetType::NX, /*get=*/false, /*keep_ttl=*/false}, ret); + auto s = Set(user_key, value, {expire_ms, StringSetType::NX, /*get=*/false, /*keep_ttl=*/false}, ret); *flag = ret.has_value(); return s; } -rocksdb::Status String::SetXX(const std::string &user_key, const std::string &value, uint64_t expire, bool *flag) { +rocksdb::Status String::SetXX(const std::string &user_key, const std::string &value, uint64_t expire_ms, bool *flag) { std::optional ret; - auto s = Set(user_key, value, {expire, StringSetType::XX, /*get=*/false, /*keep_ttl=*/false}, ret); + auto s = Set(user_key, value, {expire_ms, StringSetType::XX, /*get=*/false, /*keep_ttl=*/false}, ret); *flag = ret.has_value(); return s; } @@ -384,7 +384,7 @@ rocksdb::Status String::IncrByFloat(const std::string &user_key, double incremen return updateRawValue(ns_key, raw_value); } -rocksdb::Status String::MSet(const std::vector &pairs, uint64_t expire, bool lock) { +rocksdb::Status String::MSet(const std::vector &pairs, uint64_t expire_ms, bool lock) { // Data race, key string maybe overwrite by other key while didn't lock the keys here, // to improve the set performance std::optional guard; @@ -404,7 +404,7 @@ rocksdb::Status String::MSet(const std::vector &pairs, uint64_t expi for (const auto &pair : pairs) { std::string bytes; Metadata metadata(kRedisString, false); - metadata.expire = expire; + metadata.expire = expire_ms; metadata.Encode(&bytes); bytes.append(pair.value.data(), pair.value.size()); std::string ns_key = AppendNamespacePrefix(pair.key); @@ -413,7 +413,7 @@ rocksdb::Status String::MSet(const std::vector &pairs, uint64_t expi return storage_->Write(storage_->DefaultWriteOptions(), batch->GetWriteBatch()); } -rocksdb::Status String::MSetNX(const std::vector 
&pairs, uint64_t expire, bool *flag) { +rocksdb::Status String::MSetNX(const std::vector &pairs, uint64_t expire_ms, bool *flag) { *flag = false; int exists = 0; @@ -435,7 +435,7 @@ rocksdb::Status String::MSetNX(const std::vector &pairs, uint64_t ex return rocksdb::Status::OK(); } - rocksdb::Status s = MSet(pairs, /*expire=*/expire, /*lock=*/false); + rocksdb::Status s = MSet(pairs, /*expire_ms=*/expire_ms, /*lock=*/false); if (!s.ok()) return s; *flag = true; diff --git a/src/types/redis_string.h b/src/types/redis_string.h index faf30259cdc..34afb0bd95b 100644 --- a/src/types/redis_string.h +++ b/src/types/redis_string.h @@ -37,6 +37,7 @@ struct StringPair { enum class StringSetType { NONE, NX, XX }; struct StringSetArgs { + // Expire time in mill seconds. uint64_t expire; StringSetType type; bool get; @@ -85,24 +86,24 @@ class String : public Database { rocksdb::Status Set(const std::string &user_key, const std::string &value); rocksdb::Status Set(const std::string &user_key, const std::string &value, StringSetArgs args, std::optional &ret); - rocksdb::Status SetEX(const std::string &user_key, const std::string &value, uint64_t expire); - rocksdb::Status SetNX(const std::string &user_key, const std::string &value, uint64_t expire, bool *flag); - rocksdb::Status SetXX(const std::string &user_key, const std::string &value, uint64_t expire, bool *flag); + rocksdb::Status SetEX(const std::string &user_key, const std::string &value, uint64_t expire_ms); + rocksdb::Status SetNX(const std::string &user_key, const std::string &value, uint64_t expire_ms, bool *flag); + rocksdb::Status SetXX(const std::string &user_key, const std::string &value, uint64_t expire_ms, bool *flag); rocksdb::Status SetRange(const std::string &user_key, size_t offset, const std::string &value, uint64_t *new_size); rocksdb::Status IncrBy(const std::string &user_key, int64_t increment, int64_t *new_value); rocksdb::Status IncrByFloat(const std::string &user_key, double increment, double *new_value); std::vector MGet(const std::vector &keys, std::vector *values); - rocksdb::Status MSet(const std::vector &pairs, uint64_t expire, bool lock = true); - rocksdb::Status MSetNX(const std::vector &pairs, uint64_t expire, bool *flag); + rocksdb::Status MSet(const std::vector &pairs, uint64_t expire_ms, bool lock = true); + rocksdb::Status MSetNX(const std::vector &pairs, uint64_t expire_ms, bool *flag); rocksdb::Status CAS(const std::string &user_key, const std::string &old_value, const std::string &new_value, - uint64_t expire, int *flag); + uint64_t expire_ms, int *flag); rocksdb::Status CAD(const std::string &user_key, const std::string &value, int *flag); rocksdb::Status LCS(const std::string &user_key1, const std::string &user_key2, StringLCSArgs args, StringLCSResult *rst); private: rocksdb::Status getValue(const std::string &ns_key, std::string *value); - rocksdb::Status getValueAndExpire(const std::string &ns_key, std::string *value, uint64_t *expire); + rocksdb::Status getValueAndExpire(const std::string &ns_key, std::string *value, uint64_t *expire_ms); std::vector getValues(const std::vector &ns_keys, std::vector *values); rocksdb::Status getRawValue(const std::string &ns_key, std::string *raw_value); std::vector getRawValues(const std::vector &keys, std::vector *raw_values); diff --git a/tests/cppunit/indexer_test.cc b/tests/cppunit/indexer_test.cc index ae5a045ef56..ced039e05f0 100644 --- a/tests/cppunit/indexer_test.cc +++ b/tests/cppunit/indexer_test.cc @@ -39,26 +39,26 @@ struct IndexerTest : TestBase { 
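// Note: IndexMap now stores std::unique_ptr<IndexInfo>, so each updater below holds a stable raw pointer obtained from map.at(...).get().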
SearchMetadata hash_field_meta(false); hash_field_meta.on_data_type = SearchOnDataType::HASH; - kqir::IndexInfo hash_info("hashtest", hash_field_meta); - hash_info.Add(kqir::FieldInfo("x", std::make_unique())); - hash_info.Add(kqir::FieldInfo("y", std::make_unique())); - hash_info.prefixes.prefixes.emplace_back("idxtesthash"); + auto hash_info = std::make_unique("hashtest", hash_field_meta); + hash_info->Add(kqir::FieldInfo("x", std::make_unique())); + hash_info->Add(kqir::FieldInfo("y", std::make_unique())); + hash_info->prefixes.prefixes.emplace_back("idxtesthash"); map.emplace("hashtest", std::move(hash_info)); - redis::IndexUpdater hash_updater{&map.at("hashtest")}; + redis::IndexUpdater hash_updater{map.at("hashtest").get()}; SearchMetadata json_field_meta(false); json_field_meta.on_data_type = SearchOnDataType::JSON; - kqir::IndexInfo json_info("jsontest", json_field_meta); - json_info.Add(kqir::FieldInfo("$.x", std::make_unique())); - json_info.Add(kqir::FieldInfo("$.y", std::make_unique())); - json_info.prefixes.prefixes.emplace_back("idxtestjson"); + auto json_info = std::make_unique("jsontest", json_field_meta); + json_info->Add(kqir::FieldInfo("$.x", std::make_unique())); + json_info->Add(kqir::FieldInfo("$.y", std::make_unique())); + json_info->prefixes.prefixes.emplace_back("idxtestjson"); map.emplace("jsontest", std::move(json_info)); - redis::IndexUpdater json_updater{&map.at("jsontest")}; + redis::IndexUpdater json_updater{map.at("jsontest").get()}; indexer.Add(std::move(hash_updater)); indexer.Add(std::move(json_updater)); diff --git a/tests/cppunit/interval_test.cc b/tests/cppunit/interval_test.cc index 5090c4c8801..bffd5d630c2 100644 --- a/tests/cppunit/interval_test.cc +++ b/tests/cppunit/interval_test.cc @@ -48,6 +48,14 @@ TEST(IntervalSet, Simple) { (IntervalSet::DataType{{IntervalSet::minf, 1}, {4, IntervalSet::inf}})); ASSERT_EQ((IntervalSet(NumericCompareExpr::GET, 4) | IntervalSet(NumericCompareExpr::NE, 1)).intervals, (IntervalSet::DataType{{IntervalSet::minf, 1}, {IntervalSet::NextNum(1), IntervalSet::inf}})); + + ASSERT_TRUE((IntervalSet(Interval(1, 2)) & IntervalSet(Interval(3, 4))).IsEmpty()); + ASSERT_EQ((IntervalSet(Interval(1, 2)) & IntervalSet(Interval(2, 4))).intervals, (IntervalSet::DataType{{2, 2}})); + ASSERT_EQ((IntervalSet(Interval(1, 3)) & IntervalSet(Interval(2, 4))).intervals, (IntervalSet::DataType{{2, 3}})); + ASSERT_EQ((IntervalSet(Interval(3, 8)) & (IntervalSet(Interval(1, 4)) | IntervalSet(Interval(5, 7)))).intervals, + (IntervalSet::DataType{{3, 4}, {5, 7}})); + ASSERT_EQ((IntervalSet(Interval(3, 8)) & (IntervalSet(Interval(1, 4)) | IntervalSet(Interval(9, 11)))).intervals, + (IntervalSet::DataType{{3, 4}})); ASSERT_EQ((IntervalSet(NumericCompareExpr::GET, 1) & IntervalSet(NumericCompareExpr::LT, 4)).intervals, (IntervalSet::DataType{{1, 4}})); ASSERT_EQ((IntervalSet(NumericCompareExpr::GET, 1) & IntervalSet(NumericCompareExpr::NE, 4)).intervals, @@ -60,9 +68,21 @@ TEST(IntervalSet, Simple) { IntervalSet({2, 5}) | IntervalSet({7, 8})); ASSERT_EQ(~IntervalSet({2, 8}), IntervalSet({IntervalSet::minf, 2}) | IntervalSet({8, IntervalSet::inf})); - for (auto i = 0; i < 1000; ++i) { - auto gen = [] { return static_cast(rand()) / 100; }; - auto geni = [&gen] { return IntervalSet({gen(), gen()}); }; + for (auto i = 0; i < 2000; ++i) { + auto gen = [] { return static_cast(std::rand()) / 100; }; + auto geni = [&gen] { + auto r = std::rand() % 50; + if (r == 0) { + return IntervalSet(NumericCompareExpr::GET, gen()); + } else if (r == 1) { + return 
IntervalSet(NumericCompareExpr::LT, gen()); + } else if (r == 2) { + return IntervalSet(NumericCompareExpr::NE, gen()); + } else { + return IntervalSet({gen(), gen()}); + } + }; + auto l = geni(), r = geni(); for (int j = 0; j < i % 10; ++j) { l = l | geni(); diff --git a/tests/cppunit/ir_dot_dumper_test.cc b/tests/cppunit/ir_dot_dumper_test.cc index 66a588d9a5c..1615b3ca031 100644 --- a/tests/cppunit/ir_dot_dumper_test.cc +++ b/tests/cppunit/ir_dot_dumper_test.cc @@ -71,14 +71,14 @@ static IndexMap MakeIndexMap() { auto f4 = FieldInfo("n2", std::make_unique()); auto f5 = FieldInfo("n3", std::make_unique()); f5.metadata->noindex = true; - auto ia = IndexInfo("ia", SearchMetadata()); - ia.Add(std::move(f1)); - ia.Add(std::move(f2)); - ia.Add(std::move(f3)); - ia.Add(std::move(f4)); - ia.Add(std::move(f5)); - - auto& name = ia.name; + auto ia = std::make_unique("ia", SearchMetadata()); + ia->Add(std::move(f1)); + ia->Add(std::move(f2)); + ia->Add(std::move(f3)); + ia->Add(std::move(f4)); + ia->Add(std::move(f5)); + + auto& name = ia->name; IndexMap res; res.emplace(name, std::move(ia)); return res; diff --git a/tests/cppunit/ir_pass_test.cc b/tests/cppunit/ir_pass_test.cc index a02dc907395..6188bb49f44 100644 --- a/tests/cppunit/ir_pass_test.cc +++ b/tests/cppunit/ir_pass_test.cc @@ -176,14 +176,14 @@ static IndexMap MakeIndexMap() { auto f4 = FieldInfo("n2", std::make_unique()); auto f5 = FieldInfo("n3", std::make_unique()); f5.metadata->noindex = true; - auto ia = IndexInfo("ia", SearchMetadata()); - ia.Add(std::move(f1)); - ia.Add(std::move(f2)); - ia.Add(std::move(f3)); - ia.Add(std::move(f4)); - ia.Add(std::move(f5)); + auto ia = std::make_unique("ia", SearchMetadata()); + ia->Add(std::move(f1)); + ia->Add(std::move(f2)); + ia->Add(std::move(f3)); + ia->Add(std::move(f4)); + ia->Add(std::move(f5)); - auto& name = ia.name; + auto& name = ia->name; IndexMap res; res.emplace(name, std::move(ia)); return res; diff --git a/tests/cppunit/ir_sema_checker_test.cc b/tests/cppunit/ir_sema_checker_test.cc index 9068a38b7c5..678a0a0fa33 100644 --- a/tests/cppunit/ir_sema_checker_test.cc +++ b/tests/cppunit/ir_sema_checker_test.cc @@ -38,12 +38,12 @@ static IndexMap MakeIndexMap() { auto f1 = FieldInfo("f1", std::make_unique()); auto f2 = FieldInfo("f2", std::make_unique()); auto f3 = FieldInfo("f3", std::make_unique()); - auto ia = IndexInfo("ia", SearchMetadata()); - ia.Add(std::move(f1)); - ia.Add(std::move(f2)); - ia.Add(std::move(f3)); + auto ia = std::make_unique("ia", SearchMetadata()); + ia->Add(std::move(f1)); + ia->Add(std::move(f2)); + ia->Add(std::move(f3)); - auto& name = ia.name; + auto& name = ia->name; IndexMap res; res.emplace(name, std::move(ia)); return res; diff --git a/tests/cppunit/plan_executor_test.cc b/tests/cppunit/plan_executor_test.cc new file mode 100644 index 00000000000..0b225fc7a1d --- /dev/null +++ b/tests/cppunit/plan_executor_test.cc @@ -0,0 +1,416 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +#include "search/plan_executor.h" + +#include + +#include + +#include "config/config.h" +#include "search/executors/mock_executor.h" +#include "search/indexer.h" +#include "search/interval.h" +#include "search/ir.h" +#include "search/ir_plan.h" +#include "test_base.h" +#include "types/redis_json.h" + +using namespace kqir; + +static auto exe_end = ExecutorNode::Result(ExecutorNode::end); + +static IndexMap MakeIndexMap() { + auto f1 = FieldInfo("f1", std::make_unique()); + auto f2 = FieldInfo("f2", std::make_unique()); + auto f3 = FieldInfo("f3", std::make_unique()); + auto ia = std::make_unique("ia", SearchMetadata()); + ia->ns = "search_ns"; + ia->metadata.on_data_type = SearchOnDataType::JSON; + ia->prefixes.prefixes.emplace_back("test2:"); + ia->prefixes.prefixes.emplace_back("test4:"); + ia->Add(std::move(f1)); + ia->Add(std::move(f2)); + ia->Add(std::move(f3)); + + auto& name = ia->name; + IndexMap res; + res.emplace(name, std::move(ia)); + return res; +} + +static auto index_map = MakeIndexMap(); + +static auto NextRow(ExecutorContext& ctx) { + auto n = ctx.Next(); + EXPECT_EQ(n.Msg(), Status::ok_msg); + auto v = std::move(n).GetValue(); + EXPECT_EQ(v.index(), 1); + return std::get(std::move(v)); +} + +TEST(PlanExecutorTest, Mock) { + auto op = std::make_unique(std::vector{}); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + + op = std::make_unique(std::vector{{"a"}, {"b"}, {"c"}}); + + ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "a"); + ASSERT_EQ(NextRow(ctx).key, "b"); + ASSERT_EQ(NextRow(ctx).key, "c"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); +} + +static auto IndexI() -> const IndexInfo* { return index_map.at("ia").get(); } +static auto FieldI(const std::string& f) -> const FieldInfo* { return &index_map.at("ia")->fields.at(f); } + +TEST(PlanExecutorTest, TopNSort) { + std::vector data{ + {"a", {{FieldI("f3"), "4"}}, IndexI()}, {"b", {{FieldI("f3"), "2"}}, IndexI()}, + {"c", {{FieldI("f3"), "7"}}, IndexI()}, {"d", {{FieldI("f3"), "3"}}, IndexI()}, + {"e", {{FieldI("f3"), "1"}}, IndexI()}, {"f", {{FieldI("f3"), "6"}}, IndexI()}, + {"g", {{FieldI("f3"), "8"}}, IndexI()}, + }; + { + auto op = std::make_unique( + std::make_unique(data), + std::make_unique(SortByClause::ASC, std::make_unique("f3", FieldI("f3"))), + std::make_unique(0, 4)); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "e"); + ASSERT_EQ(NextRow(ctx).key, "b"); + ASSERT_EQ(NextRow(ctx).key, "d"); + ASSERT_EQ(NextRow(ctx).key, "a"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } + { + auto op = std::make_unique( + std::make_unique(data), + std::make_unique(SortByClause::ASC, std::make_unique("f3", FieldI("f3"))), + std::make_unique(1, 4)); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "b"); + ASSERT_EQ(NextRow(ctx).key, "d"); + ASSERT_EQ(NextRow(ctx).key, "a"); + ASSERT_EQ(NextRow(ctx).key, "f"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } +} + +TEST(PlanExecutorTest, Filter) { + std::vector data{ + {"a", {{FieldI("f3"), "4"}}, IndexI()}, {"b", {{FieldI("f3"), "2"}}, 
IndexI()}, + {"c", {{FieldI("f3"), "7"}}, IndexI()}, {"d", {{FieldI("f3"), "3"}}, IndexI()}, + {"e", {{FieldI("f3"), "1"}}, IndexI()}, {"f", {{FieldI("f3"), "6"}}, IndexI()}, + {"g", {{FieldI("f3"), "8"}}, IndexI()}, + }; + { + auto field = std::make_unique("f3", FieldI("f3")); + auto op = std::make_unique( + std::make_unique(data), + AndExpr::Create(Node::List( + std::make_unique(NumericCompareExpr::GT, field->CloneAs(), + std::make_unique(2)), + std::make_unique(NumericCompareExpr::LET, field->CloneAs(), + std::make_unique(6))))); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "a"); + ASSERT_EQ(NextRow(ctx).key, "d"); + ASSERT_EQ(NextRow(ctx).key, "f"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } + { + auto field = std::make_unique("f3", FieldI("f3")); + auto op = std::make_unique( + std::make_unique(data), + OrExpr::Create(Node::List( + std::make_unique(NumericCompareExpr::GET, field->CloneAs(), + std::make_unique(6)), + std::make_unique(NumericCompareExpr::LT, field->CloneAs(), + std::make_unique(2))))); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "c"); + ASSERT_EQ(NextRow(ctx).key, "e"); + ASSERT_EQ(NextRow(ctx).key, "f"); + ASSERT_EQ(NextRow(ctx).key, "g"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } + + data = {{"a", {{FieldI("f1"), "cpp,java"}}, IndexI()}, {"b", {{FieldI("f1"), "python,cpp,c"}}, IndexI()}, + {"c", {{FieldI("f1"), "c,perl"}}, IndexI()}, {"d", {{FieldI("f1"), "rust,python"}}, IndexI()}, + {"e", {{FieldI("f1"), "java,kotlin"}}, IndexI()}, {"f", {{FieldI("f1"), "c,rust"}}, IndexI()}, + {"g", {{FieldI("f1"), "c,cpp,java"}}, IndexI()}}; + { + auto field = std::make_unique("f1", FieldI("f1")); + auto op = std::make_unique( + std::make_unique(data), + AndExpr::Create(Node::List( + std::make_unique(field->CloneAs(), std::make_unique("c")), + std::make_unique(field->CloneAs(), std::make_unique("cpp"))))); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "b"); + ASSERT_EQ(NextRow(ctx).key, "g"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } + { + auto field = std::make_unique("f1", FieldI("f1")); + auto op = std::make_unique( + std::make_unique(data), + OrExpr::Create(Node::List( + std::make_unique(field->CloneAs(), std::make_unique("rust")), + std::make_unique(field->CloneAs(), std::make_unique("perl"))))); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "c"); + ASSERT_EQ(NextRow(ctx).key, "d"); + ASSERT_EQ(NextRow(ctx).key, "f"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } +} + +TEST(PlanExecutorTest, Limit) { + std::vector data{ + {"a", {{FieldI("f3"), "4"}}, IndexI()}, {"b", {{FieldI("f3"), "2"}}, IndexI()}, + {"c", {{FieldI("f3"), "7"}}, IndexI()}, {"d", {{FieldI("f3"), "3"}}, IndexI()}, + {"e", {{FieldI("f3"), "1"}}, IndexI()}, {"f", {{FieldI("f3"), "6"}}, IndexI()}, + {"g", {{FieldI("f3"), "8"}}, IndexI()}, + }; + { + auto op = std::make_unique(std::make_unique(data), std::make_unique(1, 2)); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "b"); + ASSERT_EQ(NextRow(ctx).key, "c"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } + { + auto field = std::make_unique("f3", FieldI("f3")); + auto op = std::make_unique(std::make_unique(data), std::make_unique(0, 3)); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "a"); + ASSERT_EQ(NextRow(ctx).key, "b"); + ASSERT_EQ(NextRow(ctx).key, "c"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } +} + +TEST(PlanExecutorTest, Merge) { + std::vector data1{ + {"a", 
{{FieldI("f3"), "4"}}, IndexI()}, + {"b", {{FieldI("f3"), "2"}}, IndexI()}, + }; + std::vector data2{{"c", {{FieldI("f3"), "7"}}, IndexI()}, + {"d", {{FieldI("f3"), "3"}}, IndexI()}, + {"e", {{FieldI("f3"), "1"}}, IndexI()}}; + { + auto op = + std::make_unique(Node::List(std::make_unique(data1), std::make_unique(data2))); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "a"); + ASSERT_EQ(NextRow(ctx).key, "b"); + ASSERT_EQ(NextRow(ctx).key, "c"); + ASSERT_EQ(NextRow(ctx).key, "d"); + ASSERT_EQ(NextRow(ctx).key, "e"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } + { + auto op = std::make_unique( + Node::List(std::make_unique(decltype(data1){}), std::make_unique(data1))); + + auto ctx = ExecutorContext(op.get()); + ASSERT_EQ(NextRow(ctx).key, "a"); + ASSERT_EQ(NextRow(ctx).key, "b"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } +} + +class PlanExecutorTestC : public TestBase { + protected: + explicit PlanExecutorTestC() : json_(std::make_unique(storage_.get(), "search_ns")) {} + ~PlanExecutorTestC() override = default; + + void SetUp() override {} + void TearDown() override {} + + std::unique_ptr json_; +}; + +TEST_F(PlanExecutorTestC, FullIndexScan) { + json_->Set("test1:a", "$", "{}"); + json_->Set("test1:b", "$", "{}"); + json_->Set("test2:c", "$", "{\"f3\": 6}"); + json_->Set("test3:d", "$", "{}"); + json_->Set("test4:e", "$", "{\"f3\": 7}"); + json_->Set("test4:f", "$", "{\"f3\": 2}"); + json_->Set("test4:g", "$", "{\"f3\": 8}"); + json_->Set("test5:h", "$", "{}"); + json_->Set("test5:i", "$", "{}"); + json_->Set("test5:g", "$", "{}"); + + { + auto op = std::make_unique(std::make_unique("ia", IndexI())); + + auto ctx = ExecutorContext(op.get(), storage_.get()); + ASSERT_EQ(NextRow(ctx).key, "test2:c"); + ASSERT_EQ(NextRow(ctx).key, "test4:e"); + ASSERT_EQ(NextRow(ctx).key, "test4:f"); + ASSERT_EQ(NextRow(ctx).key, "test4:g"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } + + { + auto op = std::make_unique( + std::make_unique(std::make_unique("ia", IndexI())), + std::make_unique(NumericCompareExpr::GT, std::make_unique("f3", FieldI("f3")), + std::make_unique(3))); + + auto ctx = ExecutorContext(op.get(), storage_.get()); + ASSERT_EQ(NextRow(ctx).key, "test2:c"); + ASSERT_EQ(NextRow(ctx).key, "test4:e"); + ASSERT_EQ(NextRow(ctx).key, "test4:g"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } +} + +struct ScopedUpdate { + redis::GlobalIndexer::RecordResult rr; + std::string_view key; + std::string ns; + + static auto Create(redis::GlobalIndexer& indexer, std::string_view key, const std::string& ns) { + auto s = indexer.Record(key, ns); + EXPECT_EQ(s.Msg(), Status::ok_msg); + return *s; + } + + ScopedUpdate(redis::GlobalIndexer& indexer, std::string_view key, const std::string& ns) + : rr(Create(indexer, key, ns)), key(key), ns(ns) {} + + ScopedUpdate(const ScopedUpdate&) = delete; + ScopedUpdate(ScopedUpdate&&) = delete; + ScopedUpdate& operator=(const ScopedUpdate&) = delete; + ScopedUpdate& operator=(ScopedUpdate&&) = delete; + + ~ScopedUpdate() { + auto s = redis::GlobalIndexer::Update(rr, key, ns); + EXPECT_EQ(s.Msg(), Status::ok_msg); + } +}; + +std::vector> ScopedUpdates(redis::GlobalIndexer& indexer, + const std::vector& keys, + const std::string& ns) { + std::vector> sus; + + sus.reserve(keys.size()); + for (auto key : keys) { + sus.emplace_back(std::make_unique(indexer, key, ns)); + } + + return sus; +} + +TEST_F(PlanExecutorTestC, NumericFieldScan) { + redis::GlobalIndexer indexer(storage_.get()); + indexer.Add(redis::IndexUpdater(IndexI())); + 
+ { + auto updates = ScopedUpdates(indexer, {"test2:a", "test2:b", "test2:c", "test2:d", "test2:e", "test2:f", "test2:g"}, + "search_ns"); + json_->Set("test2:a", "$", "{\"f2\": 6}"); + json_->Set("test2:b", "$", "{\"f2\": 3}"); + json_->Set("test2:c", "$", "{\"f2\": 8}"); + json_->Set("test2:d", "$", "{\"f2\": 14}"); + json_->Set("test2:e", "$", "{\"f2\": 1}"); + json_->Set("test2:f", "$", "{\"f2\": 3}"); + json_->Set("test2:g", "$", "{\"f2\": 9}"); + } + + { + auto op = std::make_unique(std::make_unique("f2", FieldI("f2")), Interval(3, 9), + SortByClause::ASC); + + auto ctx = ExecutorContext(op.get(), storage_.get()); + ASSERT_EQ(NextRow(ctx).key, "test2:b"); + ASSERT_EQ(NextRow(ctx).key, "test2:f"); + ASSERT_EQ(NextRow(ctx).key, "test2:a"); + ASSERT_EQ(NextRow(ctx).key, "test2:c"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } + + { + auto op = std::make_unique(std::make_unique("f2", FieldI("f2")), Interval(3, 9), + SortByClause::DESC); + + auto ctx = ExecutorContext(op.get(), storage_.get()); + ASSERT_EQ(NextRow(ctx).key, "test2:c"); + ASSERT_EQ(NextRow(ctx).key, "test2:a"); + ASSERT_EQ(NextRow(ctx).key, "test2:f"); + ASSERT_EQ(NextRow(ctx).key, "test2:b"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } +} + +TEST_F(PlanExecutorTestC, TagFieldScan) { + redis::GlobalIndexer indexer(storage_.get()); + indexer.Add(redis::IndexUpdater(IndexI())); + + { + auto updates = ScopedUpdates(indexer, {"test2:a", "test2:b", "test2:c", "test2:d", "test2:e", "test2:f", "test2:g"}, + "search_ns"); + json_->Set("test2:a", "$", "{\"f1\": \"c,cpp,java\"}"); + json_->Set("test2:b", "$", "{\"f1\": \"python,c\"}"); + json_->Set("test2:c", "$", "{\"f1\": \"java,scala\"}"); + json_->Set("test2:d", "$", "{\"f1\": \"rust,python,perl\"}"); + json_->Set("test2:e", "$", "{\"f1\": \"python,cpp\"}"); + json_->Set("test2:f", "$", "{\"f1\": \"c,cpp\"}"); + json_->Set("test2:g", "$", "{\"f1\": \"cpp,rust\"}"); + } + + { + auto op = std::make_unique(std::make_unique("f1", FieldI("f1")), "cpp"); + + auto ctx = ExecutorContext(op.get(), storage_.get()); + ASSERT_EQ(NextRow(ctx).key, "test2:a"); + ASSERT_EQ(NextRow(ctx).key, "test2:e"); + ASSERT_EQ(NextRow(ctx).key, "test2:f"); + ASSERT_EQ(NextRow(ctx).key, "test2:g"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } + + { + auto op = std::make_unique(std::make_unique("f1", FieldI("f1")), "python"); + + auto ctx = ExecutorContext(op.get(), storage_.get()); + ASSERT_EQ(NextRow(ctx).key, "test2:b"); + ASSERT_EQ(NextRow(ctx).key, "test2:d"); + ASSERT_EQ(NextRow(ctx).key, "test2:e"); + ASSERT_EQ(ctx.Next().GetValue(), exe_end); + } +} \ No newline at end of file diff --git a/tests/gocase/unit/dump/dump_test.go b/tests/gocase/unit/dump/dump_test.go index faeee9a9536..bca9300d1ea 100644 --- a/tests/gocase/unit/dump/dump_test.go +++ b/tests/gocase/unit/dump/dump_test.go @@ -113,10 +113,21 @@ func TestDump_List(t *testing.T) { require.NoError(t, rdb.RPush(ctx, key, elements).Err()) serialized, err := rdb.Dump(ctx, key).Result() require.NoError(t, err) + require.Equal(t, "\x0e\x03\x15\x15\x00\x00\x00\n\x00\x00\x00\x01\x00\x00\bkvrocks1\xff\x15\x15\x00\x00\x00\n\x00\x00\x00\x01\x00\x00\bkvrocks2\xff\x15\x15\x00\x00\x00\n\x00\x00\x00\x01\x00\x00\bkvrocks3\xff\x06\x00u\xc7\x19h\x1da\xd0\xd8", serialized) restoredKey := fmt.Sprintf("restore_%s", key) require.NoError(t, rdb.RestoreReplace(ctx, restoredKey, 0, serialized).Err()) require.EqualValues(t, elements, rdb.LRange(ctx, restoredKey, 0, -1).Val()) + + // test special cases: a single char, a space, an empty string, and a random-length string + elements = []string{"A", " ", "", util.RandString(0,
4000, util.Alpha)} + require.NoError(t, rdb.Del(ctx, key).Err()) + require.NoError(t, rdb.RPush(ctx, key, elements).Err()) + serialized, err = rdb.Dump(ctx, key).Result() + require.NoError(t, err) + + require.NoError(t, rdb.RestoreReplace(ctx, restoredKey, 0, serialized).Err()) + require.EqualValues(t, elements, rdb.LRange(ctx, restoredKey, 0, -1).Val()) } func TestDump_Set(t *testing.T) { diff --git a/tests/gocase/unit/sort/sort_test.go b/tests/gocase/unit/sort/sort_test.go new file mode 100644 index 00000000000..6715ed783a0 --- /dev/null +++ b/tests/gocase/unit/sort/sort_test.go @@ -0,0 +1,881 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package sort + +import ( + "context" + "fmt" + "testing" + + "github.com/redis/go-redis/v9" + + "github.com/apache/kvrocks/tests/gocase/util" + "github.com/stretchr/testify/require" +) + +func TestSortParser(t *testing.T) { + srv := util.StartServer(t, map[string]string{}) + defer srv.Close() + + ctx := context.Background() + rdb := srv.NewClient() + defer func() { require.NoError(t, rdb.Close()) }() + + t.Run("SORT Parser", func(t *testing.T) { + rdb.RPush(ctx, "bad-case-key", 5, 4, 3, 2, 1) + + _, err := rdb.Do(ctx, "Sort").Result() + require.EqualError(t, err, "ERR wrong number of arguments") + + _, err = rdb.Do(ctx, "Sort", "bad-case-key", "BadArg").Result() + require.EqualError(t, err, "ERR syntax error") + + _, err = rdb.Do(ctx, "Sort", "bad-case-key", "LIMIT").Result() + require.EqualError(t, err, "ERR no more item to parse") + + _, err = rdb.Do(ctx, "Sort", "bad-case-key", "LIMIT", 1).Result() + require.EqualError(t, err, "ERR no more item to parse") + + _, err = rdb.Do(ctx, "Sort", "bad-case-key", "LIMIT", 1, "not-number").Result() + require.EqualError(t, err, "ERR not started as an integer") + + _, err = rdb.Do(ctx, "Sort", "bad-case-key", "STORE").Result() + require.EqualError(t, err, "ERR no more item to parse") + + rdb.MSet(ctx, "rank_1", 1, "rank_2", 2, "rank_3", 3, "rank_4", 4, "rank_5", 5) + _, err = rdb.Do(ctx, "Sort", "bad-case-key", "BY", "dontsort", "BY", "rank_*").Result() + require.EqualError(t, err, "ERR don't use multiple BY parameters") + + _, err = rdb.Do(ctx, "Sort_RO", "bad-case-key", "STORE", "store_ro_key").Result() + require.EqualError(t, err, "ERR SORT_RO is read-only and does not support the STORE parameter") + }) +} + +func TestSortLengthLimit(t *testing.T) { + srv := util.StartServer(t, map[string]string{}) + defer srv.Close() + + ctx := context.Background() + rdb := srv.NewClient() + defer func() { require.NoError(t, rdb.Close()) }() + + t.Run("SORT Length Limit", func(t *testing.T) { + for i := 0; i <= 512; i++ { + rdb.LPush(ctx, "many-list-elems-key", i) + } + _, err := rdb.Sort(ctx, "many-list-elems-key", &redis.Sort{}).Result() + require.EqualError(t,
err, "The number of elements to be sorted exceeds SORT_LENGTH_LIMIT = 512") + + for i := 0; i <= 512; i++ { + rdb.SAdd(ctx, "many-set-elems-key", i) + } + _, err = rdb.Sort(ctx, "many-set-elems-key", &redis.Sort{}).Result() + require.EqualError(t, err, "The number of elements to be sorted exceeds SORT_LENGTH_LIMIT = 512") + + for i := 0; i <= 512; i++ { + rdb.ZAdd(ctx, "many-zset-elems-key", redis.Z{Score: float64(i), Member: fmt.Sprintf("%d", i)}) + } + _, err = rdb.Sort(ctx, "many-zset-elems-key", &redis.Sort{}).Result() + require.EqualError(t, err, "The number of elements to be sorted exceeds SORT_LENGTH_LIMIT = 512") + }) +} + +func TestListSort(t *testing.T) { + srv := util.StartServer(t, map[string]string{}) + defer srv.Close() + + ctx := context.Background() + rdb := srv.NewClient() + defer func() { require.NoError(t, rdb.Close()) }() + + t.Run("SORT Basic", func(t *testing.T) { + rdb.LPush(ctx, "today_cost", 30, 1.5, 10, 8) + + sortResult, err := rdb.Sort(ctx, "today_cost", &redis.Sort{}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1.5", "8", "10", "30"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "today_cost", &redis.Sort{Order: "ASC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1.5", "8", "10", "30"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "today_cost", &redis.Sort{Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"30", "10", "8", "1.5"}, sortResult) + + sortResult, err = rdb.SortRO(ctx, "today_cost", &redis.Sort{Order: "ASC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1.5", "8", "10", "30"}, sortResult) + + sortResult, err = rdb.SortRO(ctx, "today_cost", &redis.Sort{Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"30", "10", "8", "1.5"}, sortResult) + }) + + t.Run("SORT ALPHA", func(t *testing.T) { + rdb.LPush(ctx, "website", "www.reddit.com", "www.slashdot.com", "www.infoq.com") + + sortResult, err := rdb.Sort(ctx, "website", &redis.Sort{Alpha: true}).Result() + require.NoError(t, err) + require.Equal(t, []string{"www.infoq.com", "www.reddit.com", "www.slashdot.com"}, sortResult) + + _, err = rdb.Sort(ctx, "website", &redis.Sort{Alpha: false}).Result() + require.EqualError(t, err, "One or more scores can't be converted into double") + }) + + t.Run("SORT LIMIT", func(t *testing.T) { + rdb.RPush(ctx, "rank", 1, 3, 5, 7, 9, 2, 4, 6, 8, 10) + + sortResult, err := rdb.Sort(ctx, "rank", &redis.Sort{Offset: 0, Count: 5}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 0, Count: 5, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"10", "9", "8", "7", "6"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -1, Count: 0}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 10, Count: 0}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 10, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 11, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -1, Count: 1}).Result() + require.NoError(t, err) + 
require.Equal(t, []string{"1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -2, Count: 2}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -1, Count: 11}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -2, Count: -1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -2, Count: -2}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + }) + + t.Run("SORT BY + GET", func(t *testing.T) { + rdb.LPush(ctx, "uid", 1, 2, 3, 4) + rdb.MSet(ctx, "user_name_1", "admin", "user_name_2", "jack", "user_name_3", "peter", "user_name_4", "mary") + rdb.MSet(ctx, "user_level_1", 9999, "user_level_2", 10, "user_level_3", 25, "user_level_4", 70) + + sortResult, err := rdb.Sort(ctx, "uid", &redis.Sort{By: "user_level_*"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3", "4", "1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{Get: []string{"user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"admin", "jack", "peter", "mary"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "user_level_*", Get: []string{"user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"jack", "peter", "mary", "admin"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{Get: []string{"user_level_*", "user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"9999", "admin", "10", "jack", "25", "peter", "70", "mary"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{Get: []string{"#", "user_level_*", "user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "9999", "admin", "2", "10", "jack", "3", "25", "peter", "4", "70", "mary"}, sortResult) + + // not sorted + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"4", "3", "2", "1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 0, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: 2}).Result() + require.NoError(t, err) + require.Equal(t, []string{"3", "2"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 0}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: -1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"3", "2", "1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", 
&redis.Sort{By: "not-exists-key", Offset: 0, Count: 1, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: 2, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 1, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 0, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: -1, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3", "4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Get: []string{"#", "user_level_*", "user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"4", "70", "mary", "3", "25", "peter", "2", "10", "jack", "1", "9999", "admin"}, sortResult) + + // pattern with hash tag + rdb.HMSet(ctx, "user_info_1", "name", "admin", "level", 9999) + rdb.HMSet(ctx, "user_info_2", "name", "jack", "level", 10) + rdb.HMSet(ctx, "user_info_3", "name", "peter", "level", 25) + rdb.HMSet(ctx, "user_info_4", "name", "mary", "level", 70) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "user_info_*->level"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3", "4", "1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "user_info_*->level", Get: []string{"user_info_*->name"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"jack", "peter", "mary", "admin"}, sortResult) + + // get/by empty and nil + rdb.LPush(ctx, "uid_empty_nil", 4, 5, 6) + rdb.MSet(ctx, "user_name_5", "tom", "user_level_5", -1) + + getResult, err := rdb.Do(ctx, "Sort", "uid_empty_nil", "Get", "user_name_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"mary", "tom", nil}, getResult) + byResult, err := rdb.Do(ctx, "Sort", "uid_empty_nil", "By", "user_level_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"5", "6", "4"}, byResult) + + rdb.MSet(ctx, "user_name_6", "", "user_level_6", "") + + getResult, err = rdb.Do(ctx, "Sort", "uid_empty_nil", "Get", "user_name_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"mary", "tom", ""}, getResult) + + byResult, err = rdb.Do(ctx, "Sort", "uid_empty_nil", "By", "user_level_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"5", "6", "4"}, byResult) + }) + + t.Run("SORT STORE", func(t *testing.T) { + rdb.RPush(ctx, "numbers", 1, 3, 5, 7, 9, 2, 4, 6, 8, 10) + + storedLen, err := rdb.Do(ctx, "Sort", "numbers", "STORE", "sorted-numbers").Result() + require.NoError(t, err) + require.Equal(t, int64(10), storedLen) + + sortResult, err := rdb.LRange(ctx, "sorted-numbers", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + + rdb.LPush(ctx, "no-force-alpha-sort-key", 123, 3, 21) + storedLen, err = rdb.Do(ctx, "Sort", "no-force-alpha-sort-key", "BY", "not-exists-key", "STORE", "no-alpha-sorted").Result() + require.NoError(t, err) + require.Equal(t, int64(3), storedLen) + + sortResult, err = rdb.LRange(ctx, 
"no-alpha-sorted", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"21", "3", "123"}, sortResult) + + // get empty and nil + rdb.LPush(ctx, "uid_get_empty_nil", 4, 5, 6) + rdb.MSet(ctx, "user_name_4", "mary", "user_level_4", 70, "user_name_5", "tom", "user_level_5", -1) + + storedLen, err = rdb.Do(ctx, "Sort", "uid_get_empty_nil", "Get", "user_name_*", "Store", "get_empty_nil_store").Result() + require.NoError(t, err) + require.Equal(t, int64(3), storedLen) + + sortResult, err = rdb.LRange(ctx, "get_empty_nil_store", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"mary", "tom", ""}, sortResult) + + rdb.MSet(ctx, "user_name_6", "", "user_level_6", "") + storedLen, err = rdb.Do(ctx, "Sort", "uid_get_empty_nil", "Get", "user_name_*", "Store", "get_empty_nil_store").Result() + require.NoError(t, err) + require.Equal(t, int64(3), storedLen) + + sortResult, err = rdb.LRange(ctx, "get_empty_nil_store", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"mary", "tom", ""}, sortResult) + }) +} + +func TestSetSort(t *testing.T) { + srv := util.StartServer(t, map[string]string{}) + defer srv.Close() + + ctx := context.Background() + rdb := srv.NewClient() + defer func() { require.NoError(t, rdb.Close()) }() + + t.Run("SORT Basic", func(t *testing.T) { + rdb.SAdd(ctx, "today_cost", 30, 1.5, 10, 8) + + sortResult, err := rdb.Sort(ctx, "today_cost", &redis.Sort{}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1.5", "8", "10", "30"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "today_cost", &redis.Sort{Order: "ASC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1.5", "8", "10", "30"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "today_cost", &redis.Sort{Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"30", "10", "8", "1.5"}, sortResult) + + sortResult, err = rdb.SortRO(ctx, "today_cost", &redis.Sort{Order: "ASC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1.5", "8", "10", "30"}, sortResult) + + sortResult, err = rdb.SortRO(ctx, "today_cost", &redis.Sort{Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"30", "10", "8", "1.5"}, sortResult) + }) + + t.Run("SORT ALPHA", func(t *testing.T) { + rdb.SAdd(ctx, "website", "www.reddit.com", "www.slashdot.com", "www.infoq.com") + + sortResult, err := rdb.Sort(ctx, "website", &redis.Sort{Alpha: true}).Result() + require.NoError(t, err) + require.Equal(t, []string{"www.infoq.com", "www.reddit.com", "www.slashdot.com"}, sortResult) + + _, err = rdb.Sort(ctx, "website", &redis.Sort{Alpha: false}).Result() + require.EqualError(t, err, "One or more scores can't be converted into double") + }) + + t.Run("SORT LIMIT", func(t *testing.T) { + rdb.SAdd(ctx, "rank", 1, 3, 5, 7, 9, 2, 4, 6, 8, 10) + + sortResult, err := rdb.Sort(ctx, "rank", &redis.Sort{Offset: 0, Count: 5}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 0, Count: 5, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"10", "9", "8", "7", "6"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -1, Count: 0}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 10, Count: 0}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, 
err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 10, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 11, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -1, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -2, Count: 2}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -1, Count: 11}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -2, Count: -1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -2, Count: -2}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + }) + + t.Run("SORT BY + GET", func(t *testing.T) { + rdb.SAdd(ctx, "uid", 4, 3, 2, 1) + rdb.MSet(ctx, "user_name_1", "admin", "user_name_2", "jack", "user_name_3", "peter", "user_name_4", "mary") + rdb.MSet(ctx, "user_level_1", 9999, "user_level_2", 10, "user_level_3", 25, "user_level_4", 70) + + sortResult, err := rdb.Sort(ctx, "uid", &redis.Sort{By: "user_level_*"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3", "4", "1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{Get: []string{"user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"admin", "jack", "peter", "mary"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "user_level_*", Get: []string{"user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"jack", "peter", "mary", "admin"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{Get: []string{"user_level_*", "user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"9999", "admin", "10", "jack", "25", "peter", "70", "mary"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{Get: []string{"#", "user_level_*", "user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "9999", "admin", "2", "10", "jack", "3", "25", "peter", "4", "70", "mary"}, sortResult) + + // not sorted + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 0, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: 2}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 0}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err 
= rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: -1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3", "4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 0, Count: 1, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: 2, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 1, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 0, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: -1, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3", "4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Get: []string{"#", "user_level_*", "user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "9999", "admin", "2", "10", "jack", "3", "25", "peter", "4", "70", "mary"}, sortResult) + + // pattern with hash tag + rdb.HMSet(ctx, "user_info_1", "name", "admin", "level", 9999) + rdb.HMSet(ctx, "user_info_2", "name", "jack", "level", 10) + rdb.HMSet(ctx, "user_info_3", "name", "peter", "level", 25) + rdb.HMSet(ctx, "user_info_4", "name", "mary", "level", 70) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "user_info_*->level"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3", "4", "1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "user_info_*->level", Get: []string{"user_info_*->name"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"jack", "peter", "mary", "admin"}, sortResult) + + // get/by empty and nil + rdb.SAdd(ctx, "uid_empty_nil", 4, 5, 6) + rdb.MSet(ctx, "user_name_5", "tom", "user_level_5", -1) + + getResult, err := rdb.Do(ctx, "Sort", "uid_empty_nil", "Get", "user_name_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"mary", "tom", nil}, getResult) + byResult, err := rdb.Do(ctx, "Sort", "uid_empty_nil", "By", "user_level_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"5", "6", "4"}, byResult) + + rdb.MSet(ctx, "user_name_6", "", "user_level_6", "") + + getResult, err = rdb.Do(ctx, "Sort", "uid_empty_nil", "Get", "user_name_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"mary", "tom", ""}, getResult) + + byResult, err = rdb.Do(ctx, "Sort", "uid_empty_nil", "By", "user_level_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"5", "6", "4"}, byResult) + + }) + + t.Run("SORT STORE", func(t *testing.T) { + rdb.SAdd(ctx, "numbers", 1, 3, 5, 7, 9, 2, 4, 6, 8, 10) + + storedLen, err := rdb.Do(ctx, "Sort", "numbers", "STORE", "sorted-numbers").Result() + require.NoError(t, err) + require.Equal(t, int64(10), storedLen) + + sortResult, err := rdb.LRange(ctx, "sorted-numbers", 0, -1).Result() + 
require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + + rdb.SAdd(ctx, "force-alpha-sort-key", 123, 3, 21) + storedLen, err = rdb.Do(ctx, "Sort", "force-alpha-sort-key", "BY", "not-exists-key", "STORE", "alpha-sorted").Result() + require.NoError(t, err) + require.Equal(t, int64(3), storedLen) + + sortResult, err = rdb.LRange(ctx, "alpha-sorted", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"123", "21", "3"}, sortResult) + + // get empty and nil + rdb.SAdd(ctx, "uid_get_empty_nil", 4, 5, 6) + rdb.MSet(ctx, "user_name_4", "mary", "user_level_4", 70, "user_name_5", "tom", "user_level_5", -1) + + storedLen, err = rdb.Do(ctx, "Sort", "uid_get_empty_nil", "Get", "user_name_*", "Store", "get_empty_nil_store").Result() + require.NoError(t, err) + require.Equal(t, int64(3), storedLen) + + sortResult, err = rdb.LRange(ctx, "get_empty_nil_store", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"mary", "tom", ""}, sortResult) + + rdb.MSet(ctx, "user_name_6", "", "user_level_6", "") + storedLen, err = rdb.Do(ctx, "Sort", "uid_get_empty_nil", "Get", "user_name_*", "Store", "get_empty_nil_store").Result() + require.NoError(t, err) + require.Equal(t, int64(3), storedLen) + + sortResult, err = rdb.LRange(ctx, "get_empty_nil_store", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"mary", "tom", ""}, sortResult) + }) +} + +func TestZSetSort(t *testing.T) { + srv := util.StartServer(t, map[string]string{}) + defer srv.Close() + + ctx := context.Background() + rdb := srv.NewClient() + defer func() { require.NoError(t, rdb.Close()) }() + + t.Run("SORT Basic", func(t *testing.T) { + rdb.ZAdd(ctx, "today_cost", redis.Z{Score: 30, Member: "1"}, redis.Z{Score: 1.5, Member: "2"}, redis.Z{Score: 10, Member: "3"}, redis.Z{Score: 8, Member: "4"}) + + sortResult, err := rdb.Sort(ctx, "today_cost", &redis.Sort{}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "today_cost", &redis.Sort{Order: "ASC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "today_cost", &redis.Sort{Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"4", "3", "2", "1"}, sortResult) + + sortResult, err = rdb.SortRO(ctx, "today_cost", &redis.Sort{Order: "ASC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4"}, sortResult) + + sortResult, err = rdb.SortRO(ctx, "today_cost", &redis.Sort{Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"4", "3", "2", "1"}, sortResult) + }) + + t.Run("SORT ALPHA", func(t *testing.T) { + rdb.ZAdd(ctx, "website", redis.Z{Score: 1, Member: "www.reddit.com"}, redis.Z{Score: 2, Member: "www.slashdot.com"}, redis.Z{Score: 3, Member: "www.infoq.com"}) + + sortResult, err := rdb.Sort(ctx, "website", &redis.Sort{Alpha: true}).Result() + require.NoError(t, err) + require.Equal(t, []string{"www.infoq.com", "www.reddit.com", "www.slashdot.com"}, sortResult) + + _, err = rdb.Sort(ctx, "website", &redis.Sort{Alpha: false}).Result() + require.EqualError(t, err, "One or more scores can't be converted into double") + }) + + t.Run("SORT LIMIT", func(t *testing.T) { + rdb.ZAdd(ctx, "rank", + redis.Z{Score: 1, Member: "1"}, + redis.Z{Score: 2, Member: "3"}, + redis.Z{Score: 3, Member: "5"}, + redis.Z{Score: 4, Member: "7"}, + redis.Z{Score: 5, Member: 
"9"}, + redis.Z{Score: 6, Member: "2"}, + redis.Z{Score: 7, Member: "4"}, + redis.Z{Score: 8, Member: "6"}, + redis.Z{Score: 9, Member: "8"}, + redis.Z{Score: 10, Member: "10"}, + ) + + sortResult, err := rdb.Sort(ctx, "rank", &redis.Sort{Offset: 0, Count: 5}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 0, Count: 5, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"10", "9", "8", "7", "6"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -1, Count: 0}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 10, Count: 0}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 10, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: 11, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -1, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -2, Count: 2}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -1, Count: 11}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -2, Count: -1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "rank", &redis.Sort{Offset: -2, Count: -2}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + }) + + t.Run("SORT BY + GET", func(t *testing.T) { + rdb.ZAdd(ctx, "uid", + redis.Z{Score: 1, Member: "4"}, + redis.Z{Score: 2, Member: "3"}, + redis.Z{Score: 3, Member: "2"}, + redis.Z{Score: 4, Member: "1"}) + + rdb.MSet(ctx, "user_name_1", "admin", "user_name_2", "jack", "user_name_3", "peter", "user_name_4", "mary") + rdb.MSet(ctx, "user_level_1", 9999, "user_level_2", 10, "user_level_3", 25, "user_level_4", 70) + + sortResult, err := rdb.Sort(ctx, "uid", &redis.Sort{By: "user_level_*"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3", "4", "1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{Get: []string{"user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"admin", "jack", "peter", "mary"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "user_level_*", Get: []string{"user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"jack", "peter", "mary", "admin"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{Get: []string{"user_level_*", "user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"9999", "admin", "10", "jack", "25", "peter", "70", "mary"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{Get: []string{"#", "user_level_*", "user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", 
"9999", "admin", "2", "10", "jack", "3", "25", "peter", "4", "70", "mary"}, sortResult) + + // not sorted + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"4", "3", "2", "1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 0, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: 2}).Result() + require.NoError(t, err) + require.Equal(t, []string{"3", "2"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 1}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 0}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: -1}).Result() + require.NoError(t, err) + require.Equal(t, []string{"3", "2", "1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 0, Count: 1, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: 2, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 1, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 4, Count: 0, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Offset: 1, Count: -1, Order: "DESC"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3", "4"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "not-exists-key", Get: []string{"#", "user_level_*", "user_name_*"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"4", "70", "mary", "3", "25", "peter", "2", "10", "jack", "1", "9999", "admin"}, sortResult) + + // pattern with hash tag + rdb.HMSet(ctx, "user_info_1", "name", "admin", "level", 9999) + rdb.HMSet(ctx, "user_info_2", "name", "jack", "level", 10) + rdb.HMSet(ctx, "user_info_3", "name", "peter", "level", 25) + rdb.HMSet(ctx, "user_info_4", "name", "mary", "level", 70) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "user_info_*->level"}).Result() + require.NoError(t, err) + require.Equal(t, []string{"2", "3", "4", "1"}, sortResult) + + sortResult, err = rdb.Sort(ctx, "uid", &redis.Sort{By: "user_info_*->level", Get: []string{"user_info_*->name"}}).Result() + require.NoError(t, err) + require.Equal(t, []string{"jack", "peter", "mary", "admin"}, sortResult) + + // get/by empty and nil + rdb.ZAdd(ctx, "uid_empty_nil", + redis.Z{Score: 4, Member: "6"}, + redis.Z{Score: 5, Member: "5"}, + redis.Z{Score: 6, Member: "4"}) + rdb.MSet(ctx, "user_name_5", "tom", "user_level_5", -1) + + 
getResult, err := rdb.Do(ctx, "Sort", "uid_empty_nil", "Get", "user_name_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"mary", "tom", nil}, getResult) + byResult, err := rdb.Do(ctx, "Sort", "uid_empty_nil", "By", "user_level_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"5", "6", "4"}, byResult) + + rdb.MSet(ctx, "user_name_6", "", "user_level_6", "") + + getResult, err = rdb.Do(ctx, "Sort", "uid_empty_nil", "Get", "user_name_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"mary", "tom", ""}, getResult) + + byResult, err = rdb.Do(ctx, "Sort", "uid_empty_nil", "By", "user_level_*").Slice() + require.NoError(t, err) + require.Equal(t, []interface{}{"5", "6", "4"}, byResult) + }) + + t.Run("SORT STORE", func(t *testing.T) { + rdb.ZAdd(ctx, "numbers", + redis.Z{Score: 1, Member: "1"}, + redis.Z{Score: 2, Member: "3"}, + redis.Z{Score: 3, Member: "5"}, + redis.Z{Score: 4, Member: "7"}, + redis.Z{Score: 5, Member: "9"}, + redis.Z{Score: 6, Member: "2"}, + redis.Z{Score: 7, Member: "4"}, + redis.Z{Score: 8, Member: "6"}, + redis.Z{Score: 9, Member: "8"}, + redis.Z{Score: 10, Member: "10"}, + ) + + storedLen, err := rdb.Do(ctx, "Sort", "numbers", "STORE", "sorted-numbers").Result() + require.NoError(t, err) + require.Equal(t, int64(10), storedLen) + + sortResult, err := rdb.LRange(ctx, "sorted-numbers", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, sortResult) + + rdb.ZAdd(ctx, "no-force-alpha-sort-key", + redis.Z{Score: 1, Member: "123"}, + redis.Z{Score: 2, Member: "3"}, + redis.Z{Score: 3, Member: "21"}, + ) + + storedLen, err = rdb.Do(ctx, "Sort", "no-force-alpha-sort-key", "BY", "not-exists-key", "STORE", "no-alpha-sorted").Result() + require.NoError(t, err) + require.Equal(t, int64(3), storedLen) + + sortResult, err = rdb.LRange(ctx, "no-alpha-sorted", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"123", "3", "21"}, sortResult) + + // get empty and nil + rdb.ZAdd(ctx, "uid_get_empty_nil", + redis.Z{Score: 4, Member: "6"}, + redis.Z{Score: 5, Member: "5"}, + redis.Z{Score: 6, Member: "4"}) + rdb.MSet(ctx, "user_name_4", "mary", "user_level_4", 70, "user_name_5", "tom", "user_level_5", -1) + + storedLen, err = rdb.Do(ctx, "Sort", "uid_get_empty_nil", "Get", "user_name_*", "Store", "get_empty_nil_store").Result() + require.NoError(t, err) + require.Equal(t, int64(3), storedLen) + + sortResult, err = rdb.LRange(ctx, "get_empty_nil_store", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"mary", "tom", ""}, sortResult) + + rdb.MSet(ctx, "user_name_6", "", "user_level_6", "") + storedLen, err = rdb.Do(ctx, "Sort", "uid_get_empty_nil", "Get", "user_name_*", "Store", "get_empty_nil_store").Result() + require.NoError(t, err) + require.Equal(t, int64(3), storedLen) + + sortResult, err = rdb.LRange(ctx, "get_empty_nil_store", 0, -1).Result() + require.NoError(t, err) + require.Equal(t, []string{"mary", "tom", ""}, sortResult) + }) +} diff --git a/tests/gocase/unit/type/json/json_test.go b/tests/gocase/unit/type/json/json_test.go index 51785d29844..e30635d4eb6 100644 --- a/tests/gocase/unit/type/json/json_test.go +++ b/tests/gocase/unit/type/json/json_test.go @@ -180,6 +180,7 @@ func TestJson(t *testing.T) { result2 = append(result2, int64(3), int64(5), interface{}(nil)) require.NoError(t, rdb.Do(ctx, "JSON.SET", "a", "$", `{"a":"foo", "nested": {"a": "hello"}, "nested2": {"a": 31}}`).Err()) require.Equal(t, 
rdb.Do(ctx, "JSON.STRLEN", "a", "$..a").Val(), result2) + require.ErrorIs(t, rdb.Do(ctx, "JSON.STRLEN", "not_exists", "$").Err(), redis.Nil) }) t.Run("Merge basics", func(t *testing.T) {