diff --git a/configure.ac b/configure.ac index 65f323a76..27da277c5 100644 --- a/configure.ac +++ b/configure.ac @@ -16,7 +16,7 @@ AC_SUBST(MAJOR_VERSION, niova_core_major) AC_SUBST(MINOR_VERSION, niova_core_minor) AC_SUBST(VERSION_SUFFIX, niova_core_suffix) -AC_SUBST([AM_CFLAGS], ["-g -O2 -Wall"]) +AC_SUBST([AM_CFLAGS], ["-g -O2 -Wall -Wextra"]) AC_SUBST([AM_CPPFLAGS], ["-Isrc/include -Isrc/contrib/include"]) AC_SUBST([AM_LDFLAGS], [" "]) diff --git a/src/buffer.c b/src/buffer.c index 4b8c6f306..c7b8fe3d0 100644 --- a/src/buffer.c +++ b/src/buffer.c @@ -143,8 +143,8 @@ buffer_set_navail_locked(const struct buffer_set *bs) NIOVA_ASSERT(bs->bs_num_allocated >= 0); NIOVA_ASSERT(bs->bs_num_pndg_alloc >= 0); - NIOVA_ASSERT(bs->bs_num_bufs >= (bs->bs_num_allocated + - bs->bs_num_pndg_alloc)); + NIOVA_ASSERT((ssize_t)bs->bs_num_bufs >= + (bs->bs_num_allocated + bs->bs_num_pndg_alloc)); return bs->bs_num_bufs - (bs->bs_num_allocated + bs->bs_num_pndg_alloc); } @@ -215,7 +215,7 @@ buffer_set_allocate_item_locked(struct buffer_set *bs) bs->bs_num_allocated++; bi->bi_allocated = true; - if (bs->bs_num_allocated > bs->bs_max_allocated) + if (bs->bs_num_allocated > (ssize_t)bs->bs_max_allocated) bs->bs_max_allocated = bs->bs_num_allocated; return bi; @@ -262,7 +262,7 @@ buffer_set_release_pending_alloc(struct buffer_set *bs, const size_t nitems) BS_LOCK(bs); - if (nitems > bs->bs_num_pndg_alloc) + if ((ssize_t)nitems > bs->bs_num_pndg_alloc) { BS_UNLOCK(bs); diff --git a/src/config_token.c b/src/config_token.c index 56ac58953..061205d2a 100644 --- a/src/config_token.c +++ b/src/config_token.c @@ -255,7 +255,7 @@ conf_token_set_parse_match_token(const char *input_buf, size_t input_buf_size, continue; // Check len prior to strncmp() - if (ct->ct_name_len + 1 > input_buf_size) + if ((ct->ct_name_len + 1U) > input_buf_size) return NULL; // The token string be immediately followed by a tab or space. 
@@ -275,7 +275,7 @@ conf_token_value_check_and_clear_ws(const struct conf_token *ct, if (!ct || !ctsp || !ctsp->ctsp_value_buf || !ctsp->ctsp_value_buf_size) return -EINVAL; - const ssize_t original_value_str_len = + const size_t original_value_str_len = strnlen(ctsp->ctsp_value_buf, ctsp->ctsp_value_buf_size); if (original_value_str_len == ctsp->ctsp_value_buf_size) @@ -288,7 +288,7 @@ conf_token_value_check_and_clear_ws(const struct conf_token *ct, ctsp->ctsp_value_buf_size); // Recheck the string len. - const ssize_t new_value_str_len = + const size_t new_value_str_len = strnlen(ctsp->ctsp_value_buf, ctsp->ctsp_value_buf_size); NIOVA_ASSERT(new_value_str_len <= original_value_str_len); diff --git a/src/ctl_interface.c b/src/ctl_interface.c index 7c1cbb844..1dfe897af 100644 --- a/src/ctl_interface.c +++ b/src/ctl_interface.c @@ -233,6 +233,8 @@ lctli_epoll_mgr_cb(const struct epoll_handle *eph, uint32_t events) { NIOVA_ASSERT(eph); + (void)events; + struct ctl_interface *lctli = eph->eph_arg; if (eph->eph_fd != lctli->lctli_inotify_fd) diff --git a/src/ctl_interface_cmd.c b/src/ctl_interface_cmd.c index 5d6199a4a..c7338bb75 100644 --- a/src/ctl_interface_cmd.c +++ b/src/ctl_interface_cmd.c @@ -200,7 +200,7 @@ ctlic_request_done(struct ctlic_request *cr) } #define CTLIC_OUTPUT_TMP_FILE(tmp_str, file_name) \ - const size_t CTLIC_OUTPUT_TMP_FILE_file_name_len = \ + const ssize_t CTLIC_OUTPUT_TMP_FILE_file_name_len = \ NUM_HEX_CHARS(pid_t) + strnlen((file_name), PATH_MAX); \ if (CTLIC_OUTPUT_TMP_FILE_file_name_len >= PATH_MAX - 2) \ return -ENAMETOOLONG; \ @@ -690,7 +690,7 @@ ctlic_scan_registry_cb_output_writer(struct ctlic_iterator *citer) struct ctlic_request *cr = citer->citer_cr; const bool open_stanza = citer->citer_open_stanza; const struct lreg_value *lv = &citer->citer_lv; - const size_t tab_depth = citer->citer_tab_depth; + const ssize_t tab_depth = citer->citer_tab_depth; const size_t sibling_number = citer->citer_sibling_num; const size_t 
starting_byte_cnt = citer->citer_starting_byte_cnt; const char *value_string = ctlic_citer_2_value_string(citer); @@ -1161,9 +1161,9 @@ ctlic_scan_registry_cb(struct lreg_node *lrn, void *arg, const int depth); static bool ctlic_scan_registry_cb_CT_ID_WHERE(struct lreg_node *lrn, struct ctlic_iterator *parent_citer, - const int depth) + const unsigned int depth) { - NIOVA_ASSERT(parent_citer && parent_citer->citer_cr && depth >= 0); + NIOVA_ASSERT(parent_citer && parent_citer->citer_cr && depth > 0); struct ctlic_request *cr = parent_citer->citer_cr; struct ctlic_matched_token *cmt = ctlic_get_current_matched_token(cr); @@ -1233,9 +1233,9 @@ ctlic_scan_registry_cb_CT_ID_WHERE(struct lreg_node *lrn, static bool ctlic_scan_registry_cb_CT_ID_GET(struct lreg_node *lrn, struct ctlic_iterator *parent_citer, - const int depth) + const unsigned int depth) { - NIOVA_ASSERT(parent_citer && parent_citer->citer_cr && depth >= 0); + NIOVA_ASSERT(parent_citer && parent_citer->citer_cr && depth > 0); struct ctlic_request *cr = parent_citer->citer_cr; struct ctlic_matched_token *cmt = ctlic_get_current_matched_token(cr); diff --git a/src/ctl_svc.c b/src/ctl_svc.c index 2a032166d..811b04a82 100644 --- a/src/ctl_svc.c +++ b/src/ctl_svc.c @@ -329,6 +329,7 @@ ctl_svc_node_token_hndlr_HOSTNAME(struct ctl_svc_node *csn, const struct conf_token *ct, const char *val_buf, size_t val_buf_sz) { + (void)ct; if (csn->csn_type == CTL_SVC_NODE_TYPE_RAFT) return -EINVAL; @@ -376,6 +377,8 @@ ctl_svc_node_token_hndlr_UUID(struct ctl_svc_node *csn, const struct conf_token *ct, const char *val_buf, size_t val_buf_sz) { + (void)ct; + if (val_buf_sz > UUID_STR_LEN) return -ENAMETOOLONG; @@ -423,6 +426,8 @@ ctl_svc_node_token_hndlr_PEER(struct ctl_svc_node *csn, const struct conf_token *ct, const char *val_buf, size_t val_buf_sz) { + (void)ct; + if (val_buf_sz > UUID_STR_LEN) return -ENAMETOOLONG; @@ -439,6 +444,8 @@ ctl_svc_node_token_hndlr_STORE(struct ctl_svc_node *csn, const struct conf_token *ct, 
const char *val_buf, size_t val_buf_sz) { + (void)ct; + if (val_buf_sz > PATH_MAX) return -ENAMETOOLONG; @@ -465,6 +472,8 @@ ctl_svc_node_token_hndlr_IPADDR(struct ctl_svc_node *csn, const struct conf_token *ct, const char *val_buf, size_t val_buf_sz) { + (void)ct; + if (val_buf_sz >= IPV4_STRLEN) // IPV4_STRLEN includes NULL terminator return -ENAMETOOLONG; @@ -679,7 +688,7 @@ ctl_svc_read_and_prep_conf_file(int ctl_svc_dir_fd, const char *input_file, return read_rc; } - const size_t null_cnt_end_of_buf = + const ssize_t null_cnt_end_of_buf = niova_count_nulls_from_end_of_buffer(file_buf, read_rc); NIOVA_ASSERT(read_rc >= null_cnt_end_of_buf); @@ -693,7 +702,8 @@ ctl_svc_read_and_prep_conf_file(int ctl_svc_dir_fd, const char *input_file, } // Workaround for files which do not have a newline at the end. - if (read_rc && file_buf[read_rc - 1] != '\n' && read_rc < file_buf_sz) + if (read_rc && file_buf[read_rc - 1] != '\n' && + read_rc < (ssize_t)file_buf_sz) { file_buf[read_rc] = '\n'; read_rc += 1; diff --git a/src/epoll_mgr.c b/src/epoll_mgr.c index 495cabdff..93962433c 100644 --- a/src/epoll_mgr.c +++ b/src/epoll_mgr.c @@ -21,12 +21,14 @@ REGISTRY_ENTRY_FILE_GENERATE; typedef int epoll_mgr_thread_ctx_int_t; -static size_t epollMgrNumEvents = EPOLL_MGR_DEF_EVENTS; +static long long epollMgrNumEvents = EPOLL_MGR_DEF_EVENTS; static pthread_mutex_t epollMgrInstallLock = PTHREAD_MUTEX_INITIALIZER; static void epoll_mgr_wake_cb(const struct epoll_handle *eph, uint32_t evs) { + (void)evs; + uint64_t eventcnt; int rc = read(eph->eph_fd, &eventcnt, sizeof(eventcnt)); SIMPLE_LOG_MSG(LL_TRACE, "read(): rc=%d evcnt=%lu", rc, eventcnt); diff --git a/src/ev_pipe.c b/src/ev_pipe.c index d9ae80e64..316e65adb 100644 --- a/src/ev_pipe.c +++ b/src/ev_pipe.c @@ -34,7 +34,7 @@ ev_pipe_notify(struct ev_pipe *evp) int rc = 0; - int64_t old_write_cnt = niova_atomic_read(&evp->evp_writer_cnt); + uint64_t old_write_cnt = niova_atomic_read(&evp->evp_writer_cnt); if (old_write_cnt < 
evp->evp_reader_cnt) { diff --git a/src/fault_inject.c b/src/fault_inject.c index 4e95d42f7..f0989a016 100644 --- a/src/fault_inject.c +++ b/src/fault_inject.c @@ -284,7 +284,7 @@ fault_inject_set_lreg_install(struct fault_injection_set *fis) NIOVA_ASSERT(fis); SIMPLE_LOG_MSG(LL_DEBUG, "size=%zu", fis->finj_set_size); - for (int i = 0; i < fis->finj_set_size; i++) + for (size_t i = 0; i < fis->finj_set_size; i++) { struct lreg_node *lrn = &fis->finj_set[i].flti_lrn; @@ -336,7 +336,7 @@ fault_inject_set_install(struct fault_injection *finj_set, size_t set_size, { struct fault_injection_set *fis = &faultInjectionSets[idx]; - for (int i = 0; i < fis->finj_set_size; i++) + for (size_t i = 0; i < fis->finj_set_size; i++) { struct lreg_node *lrn = &fis->finj_set[i].flti_lrn; diff --git a/src/file_util.c b/src/file_util.c index 344f772f8..b005c800f 100644 --- a/src/file_util.c +++ b/src/file_util.c @@ -55,7 +55,7 @@ file_util_open_and_read(int dirfd, const char *file_name, char *output_buf, else if (!S_ISREG(stb.st_mode)) return -ENOTSUP; - else if (!proc_file && stb.st_size > output_size) + else if (!proc_file && (size_t)stb.st_size > output_size) return -E2BIG; else if (!proc_file && !stb.st_size) // nothing to read @@ -67,8 +67,9 @@ file_util_open_and_read(int dirfd, const char *file_name, char *output_buf, bool close_fd = ret_fd ? false : true; - ssize_t io_rc = niova_io_read(fd, output_buf, - proc_file ? output_size : stb.st_size); + ssize_t io_rc = + niova_io_read(fd, output_buf, + proc_file ? 
output_size : (size_t)stb.st_size); if (io_rc < 0) { io_rc = -errno; @@ -80,7 +81,7 @@ file_util_open_and_read(int dirfd, const char *file_name, char *output_buf, close_fd = true; } else if ((!proc_file && io_rc != stb.st_size) || - (proc_file && io_rc == output_size)) + (proc_file && io_rc == (ssize_t)output_size)) { io_rc = -EMSGSIZE; close_fd = true; @@ -90,7 +91,7 @@ file_util_open_and_read(int dirfd, const char *file_name, char *output_buf, *ret_fd = fd; } - if (io_rc > 0 && io_rc < output_size) + if (io_rc > 0 && io_rc < (ssize_t)output_size) output_buf[io_rc] = '\0'; // terminate the output buffer if (close_fd) diff --git a/src/include/binary_hist.h b/src/include/binary_hist.h index ab185efd4..0b6fcfcf1 100644 --- a/src/include/binary_hist.h +++ b/src/include/binary_hist.h @@ -16,9 +16,9 @@ struct binary_hist { - const int bh_start_bit; - const int bh_num_buckets; - size_t bh_values[BIN_HIST_BUCKETS_MAX]; + const unsigned int bh_start_bit; + const unsigned int bh_num_buckets; + size_t bh_values[BIN_HIST_BUCKETS_MAX]; }; static inline int @@ -42,11 +42,14 @@ binary_hist_init(struct binary_hist *bh, int start_bit, int num_buckets) static inline int binary_hist_size(const struct binary_hist *bh) { + if (!bh) + return -EINVAL; + return bh->bh_num_buckets; } static inline long long -binary_hist_get_cnt(const struct binary_hist *bh, int pos) +binary_hist_get_cnt(const struct binary_hist *bh, unsigned int pos) { if (!bh || pos >= bh->bh_num_buckets || pos >= BIN_HIST_BUCKETS_MAX) return -EINVAL; @@ -107,7 +110,7 @@ binary_hist_incorporate_val_multi(struct binary_hist *bh, } static inline long long -binary_hist_lower_bucket_range(const struct binary_hist *bh, int pos) +binary_hist_lower_bucket_range(const struct binary_hist *bh, unsigned int pos) { if (!bh || pos >= bh->bh_num_buckets) return -EINVAL; @@ -117,13 +120,13 @@ binary_hist_lower_bucket_range(const struct binary_hist *bh, int pos) } static inline long long -binary_hist_upper_bucket_range(const struct 
binary_hist *bh, int pos) +binary_hist_upper_bucket_range(const struct binary_hist *bh, unsigned int pos) { if (!bh || pos >= bh->bh_num_buckets) return -EINVAL; return pos == bh->bh_num_buckets - 1 ? - -1 : (unsigned long long)((1ULL << (pos + bh->bh_start_bit)) - 1); + -1LL : (long long)((1ULL << (pos + bh->bh_start_bit)) - 1); } static inline void @@ -142,7 +145,7 @@ binary_hist_print(const struct binary_hist *bh, size_t num_hist, fprintf(stdout, "\thist-%zu = {", i); const struct binary_hist *b = &bh[i]; - for (int j = 0; j < b->bh_num_buckets; j++) + for (unsigned int j = 0; j < b->bh_num_buckets; j++) { if (binary_hist_get_cnt(b, j)) { diff --git a/src/include/buffer.h b/src/include/buffer.h index a1a26e3b1..cfaabd2b9 100644 --- a/src/include/buffer.h +++ b/src/include/buffer.h @@ -51,7 +51,7 @@ SLIST_HEAD(buffer_user_slist, buffer_item); struct buffer_set { char bs_name[BUFFER_SET_NAME_MAX + 1]; - ssize_t bs_num_bufs; + size_t bs_num_bufs; ssize_t bs_num_allocated; ssize_t bs_num_user_cached; ssize_t bs_num_pndg_alloc; @@ -79,7 +79,7 @@ buffer_item_touch(struct buffer_item *bi) char *x = (char *)bi->bi_iov.iov_base; - for (off_t off = 0; off < bi->bi_iov.iov_len; off += buffer_page_size()) + for (size_t off = 0; off < bi->bi_iov.iov_len; off += buffer_page_size()) x[off] = 0xff; } diff --git a/src/include/ctl_svc.h b/src/include/ctl_svc.h index a0989b490..2f93e0eed 100644 --- a/src/include/ctl_svc.h +++ b/src/include/ctl_svc.h @@ -138,14 +138,14 @@ ctl_svc_node_raft_2_num_members(const struct ctl_svc_node *csn) csn->csn_raft.csnr_num_members : RAFT_PEER_ANY; } -static inline const uint16_t +static inline uint16_t ctl_svc_node_peer_2_port(const struct ctl_svc_node *csn) { return (csn && ctl_svc_node_is_peer(csn)) ? csn->csn_peer.csnp_port : 0; } -static inline const uint16_t +static inline uint16_t ctl_svc_node_peer_2_client_port(const struct ctl_svc_node *csn) { return (csn && ctl_svc_node_is_peer(csn)) ? 
diff --git a/src/include/pumice_db_net.h b/src/include/pumice_db_net.h index b61e9e9b5..9bc35d0a8 100644 --- a/src/include/pumice_db_net.h +++ b/src/include/pumice_db_net.h @@ -162,6 +162,7 @@ pmdb_request_options_init(pmdb_request_opts_t *pmdb_req, int use_user_buffer, size_t get_buffer_size, int timeout_sec) { + (void)use_user_buffer; pmdb_req->pro_non_blocking = non_blocking; pmdb_req->pro_get_response = get_response; pmdb_req->pro_stat = obj_stat; @@ -225,6 +226,6 @@ pmdb_direct_msg_init(struct pmdb_msg *msg, pmdb_obj_id_t *obj, if (op == pmdb_op_write) msg->pmdbrm_write_seqno = sequence_num; } - + #endif diff --git a/src/include/raft.h b/src/include/raft.h index 7e5dedd7e..a107a632a 100644 --- a/src/include/raft.h +++ b/src/include/raft.h @@ -57,6 +57,9 @@ #define RAFT_HEARTBEAT_FREQ_PER_ELECTION 10 +#define RAFT_ENTRY_IDX_ANY -1LL +#define RAFT_TERM_ANY -1LL + #define RAFT_MIN_APPEND_ENTRY_IDX -1 #define RAFT_INSTANCE_2_SELF_UUID(ri) \ @@ -216,7 +219,7 @@ struct raft_recovery_handle int64_t rrh_peer_chkpt_idx; ssize_t rrh_chkpt_size; ssize_t rrh_remaining; - ssize_t rrh_completed; + size_t rrh_completed; char rrh_rate_bytes_per_sec[BW_RATE_LEN + 1]; struct timespec rrh_start; bool rrh_from_recovery_marker; diff --git a/src/include/raft_net.h b/src/include/raft_net.h index d64640383..c26781643 100644 --- a/src/include/raft_net.h +++ b/src/include/raft_net.h @@ -48,7 +48,7 @@ typedef bool raft_net_timerfd_cb_ctx_bool_t; typedef void raft_net_init_cb_ctx_t; typedef uint64_t raft_net_request_tag_t; -#define RAFT_NET_TAG_NONE 0UL +#define RAFT_NET_TAG_NONE 0L struct raft_client_rpc_msg; struct raft_net_client_request_handle; @@ -381,8 +381,9 @@ do { \ (rcm)->rcrm_sys_error, (rcm)->rcrm_app_error, \ ##__VA_ARGS__); \ break; \ - case RAFT_CLIENT_RPC_MSG_TYPE_PING: /* fall through */ \ + case RAFT_CLIENT_RPC_MSG_TYPE_PING: \ uuid_unparse((rcm)->rcrm_dest_id, __uuid_str); \ + /* fall through */ \ case RAFT_CLIENT_RPC_MSG_TYPE_PING_REPLY: \ LOG_MSG(log_level, \ "CLI-%s 
%s id=%lx err=%hd:%hd "fmt, \ @@ -760,10 +761,10 @@ raft_net_client_user_id_to_string(const struct raft_net_client_user_id *rncui, int rc = snprintf(out_string, out_string_len, RAFT_NET_CLIENT_USER_ID_FMT, RAFT_NET_CLIENT_USER_ID_FMT_ARGS(rncui, uuid_str, 0)); - if (rc > out_string_len - 1) - return -ENOSPC; + if (rc < 0) + return rc; - return (rc > out_string_len - 1) ? -ENOSPC : 0; + return ((size_t)rc > (out_string_len - 1)) ? -ENOSPC : 0; } #define raft_net_client_user_id_unparse raft_net_client_user_id_to_string diff --git a/src/include/registry.h b/src/include/registry.h index ccef612c1..ec8daedd7 100644 --- a/src/include/registry.h +++ b/src/include/registry.h @@ -546,6 +546,7 @@ lreg_node_object_init(struct lreg_node *, enum lreg_user_types, bool); lreg_root_cb##name(enum lreg_node_cb_ops op, struct lreg_node *lrn, \ struct lreg_value *lreg_val) \ { \ + (void)lrn; \ switch (op) \ { \ case LREG_NODE_CB_OP_GET_NODE_INFO: \ @@ -607,6 +608,7 @@ lreg_node_object_init(struct lreg_node *, enum lreg_user_types, bool); struct lreg_value *lreg_val) \ { \ int rc = 0; \ + (void)lrn; \ switch (op) \ { \ case LREG_NODE_CB_OP_GET_NODE_INFO: \ @@ -638,7 +640,8 @@ lreg_node_object_init(struct lreg_node *, enum lreg_user_types, bool); struct lreg_node *lrn, \ struct lreg_value *lreg_val) \ { \ - switch (op) \ + (void)lrn; \ + switch (op) \ { \ case LREG_NODE_CB_OP_GET_NODE_INFO: \ if (!lreg_val) \ @@ -742,7 +745,7 @@ do { \ } while (0) static inline void -lreg_node_set_reverse_varray(struct lreg_node *lrn, int x) +lreg_node_set_reverse_varray(struct lreg_node *lrn) { if (lrn) lrn->lrn_reverse_varray = 1; diff --git a/src/include/tcp.h b/src/include/tcp.h index adec8413d..e6dab97b7 100644 --- a/src/include/tcp.h +++ b/src/include/tcp.h @@ -93,7 +93,7 @@ tcp_socket_handle_accept(int fd, struct tcp_socket_handle *tsh); int tcp_socket_connect(struct tcp_socket_handle *tsh); -ssize_t +size_t tcp_get_max_size(); int diff --git a/src/include/udp.h b/src/include/udp.h index 
8e2863502..6c9b9fb59 100644 --- a/src/include/udp.h +++ b/src/include/udp.h @@ -77,6 +77,6 @@ ssize_t udp_socket_send(const struct udp_socket_handle *ush, const struct iovec *iov, const size_t iovlen, const struct sockaddr_in *to); -ssize_t +size_t udp_get_max_size(); #endif diff --git a/src/include/util.h b/src/include/util.h index 9acc08e62..031120299 100644 --- a/src/include/util.h +++ b/src/include/util.h @@ -104,12 +104,12 @@ niova_string_find_next_instance_of_char(const char *string, char char_to_find, if (!string || !max_len) return (ssize_t)-EINVAL; - ssize_t pos; - for (pos = 0; pos < (ssize_t)max_len; pos++) + size_t pos; + for (pos = 0; pos < max_len; pos++) if (string[pos] == char_to_find) break; - return pos < max_len ? pos : (ssize_t)-ENOENT; + return pos < max_len ? (ssize_t)pos : (ssize_t)-ENOENT; } static inline ssize_t @@ -148,11 +148,11 @@ niova_clear_whitespace_from_end_of_string(char *string, const size_t max_len) } } -static inline size_t +static inline ssize_t niova_count_nulls_from_end_of_buffer(const char *buf, const size_t len) { if (!buf || !len) - return 0; + return -EINVAL; ssize_t cnt = 0; for (ssize_t pos = len - 1; pos > 0; pos--) diff --git a/src/io.c b/src/io.c index bcf652255..141574fb5 100644 --- a/src/io.c +++ b/src/io.c @@ -292,7 +292,7 @@ niova_io_copy_from_iovs(char *dest, const size_t dest_size, if (!dest || !dest_size || !src_iovs || !num_iovs) return -EINVAL; - ssize_t cnt = 0; + size_t cnt = 0; for (size_t i = 0; i < num_iovs && cnt < dest_size; i++) { @@ -353,7 +353,7 @@ niova_io_iovs_advance(struct iovec *iovs, size_t niovs, return 0; } - ssize_t idx; + size_t idx; for (idx = 0; idx < niovs && bytes_already_consumed; idx++) { bytes_already_consumed -= iovs[idx].iov_len; diff --git a/src/pumice_db.c b/src/pumice_db.c index 28e57b654..c9d7398a4 100644 --- a/src/pumice_db.c +++ b/src/pumice_db.c @@ -369,7 +369,7 @@ pmdb_obj_to_reply(const struct pmdb_object *obj, struct pmdb_msg *reply, // if either term value is -1 then 
write_pending is false; reply->pmdbrm_write_pending = (obj->pmdb_obj_pending_term == current_raft_term && - current_raft_term != ID_ANY_64bit) ? 1 : 0; + current_raft_term != RAFT_TERM_ANY) ? 1 : 0; } /** @@ -415,7 +415,7 @@ pmdb_prep_raft_entry_write_obj(struct pmdb_object *obj, int64_t current_term) * apply context. Otherwise, when called in write context, the object's * pending-term must be less than the current-term. */ - if (current_term != ID_ANY_64bit) + if (current_term != RAFT_TERM_ANY) NIOVA_ASSERT(obj->pmdb_obj_pending_term < current_term); obj->pmdb_obj_pending_term = current_term; @@ -519,6 +519,8 @@ static void pmdb_cowr_sub_app_put(struct pmdb_cowr_sub_app *sa, const char *caller_func, const int caller_lineno) { + (void)caller_func; + (void)caller_lineno; SIMPLE_LOG_MSG(LL_DEBUG, "%s:%d", caller_func, caller_lineno); RT_PUT(pmdb_cowr_sub_app_tree, &pmdb_cowr_sub_apps, sa); } @@ -565,6 +567,8 @@ pmdb_cowr_sub_app_add(const struct raft_net_client_user_id *rncui, const char *caller_func, const int caller_lineno) { NIOVA_ASSERT(rncui); + (void)caller_func; + (void)caller_lineno; struct pmdb_cowr_sub_app cowr = {0}; raft_net_client_user_id_copy(&cowr.pcwsa_rncui, rncui); @@ -750,6 +754,9 @@ pmdb_range_read_req_add(const uint64_t seq_number, const char *caller_func, const int caller_lineno) { // If there are any stale entries in range read req RB tree, release them all. + (void)caller_func; + (void)caller_lineno; + if (pmdb_range_read_tree_term != current_term) pmdb_range_read_req_release_all(); @@ -933,7 +940,7 @@ pmdb_sm_handler_client_write(struct raft_net_client_request_handle *rncr) * that write did not yet (or ever) commit. 
*/ if (pmdb_req->pmdbrm_write_seqno <= obj.pmdb_obj_commit_seqno && - obj.pmdb_obj_commit_seqno != ID_ANY_64bit) + obj.pmdb_obj_commit_seqno != RAFT_ENTRY_IDX_ANY) { raft_client_net_request_handle_error_set(rncr, -EALREADY, 0, 0); } @@ -1469,9 +1476,7 @@ PmdbGetRoptionsWithSnapshot(const uint64_t seq_number, // Check if snapshot with the given seq_number is already created. - if (seq_number >= 0) - prrq = pmdb_range_read_req_lookup(seq_number, __func__, __LINE__); - + prrq = pmdb_range_read_req_lookup(seq_number, __func__, __LINE__); if (!prrq) { // Get the latest sequence number and create snapshot against it. diff --git a/src/pumice_db_client.c b/src/pumice_db_client.c index 3f76bdf0f..cced58b72 100644 --- a/src/pumice_db_client.c +++ b/src/pumice_db_client.c @@ -154,7 +154,8 @@ pmdb_client_request_cb(void *arg, ssize_t status, void *reply_buff) const struct pmdb_msg *reply = &pcreq->pcreq_msg_reply; // Incorrect size is translated to be a system error. - if (status != (sizeof(struct pmdb_msg) + reply->pmdbrm_data_size)) + if (status != + (ssize_t)((sizeof(struct pmdb_msg) + reply->pmdbrm_data_size))) status = -EMSGSIZE; else if (reply->pmdbrm_err) diff --git a/src/raft_client.c b/src/raft_client.c index 66bde8c77..0455e05c8 100644 --- a/src/raft_client.c +++ b/src/raft_client.c @@ -461,10 +461,13 @@ raft_client_sub_app_destruct(struct raft_client_sub_app *destroy, void *arg) struct iovec *recv_iovs = &rcrh->rcrh_iovs[rcrh->rcrh_send_niovs]; if (rcrh->rcrh_async_cb) - rcrh->rcrh_async_cb(rcrh->rcrh_arg, - rcrh->rcrh_error ? 
err : - rcrh->rcrh_reply_used_size, - recv_iovs[1].iov_base); + { + ssize_t ret_err = rcrh->rcrh_reply_used_size; + if (rcrh->rcrh_error) + ret_err = err; + + rcrh->rcrh_async_cb(rcrh->rcrh_arg, ret_err, recv_iovs[1].iov_base); + } if (rcrh->rcrh_blocking) { @@ -528,6 +531,7 @@ raft_client_sub_app_done(struct raft_client_instance *rci, const bool wakeup, const int error) { NIOVA_ASSERT(rci && sa); + (void)wakeup; DBG_RAFT_CLIENT_SUB_APP((error ? LL_NOTIFY : LL_DEBUG), sa, "%s:%d err=%s", @@ -1039,9 +1043,10 @@ raft_client_check_pending_requests(struct raft_client_instance *rci) continue; } - const long long queued_ms = - timespec_2_msec(&now) - - timespec_2_msec(&sa->rcsa_rh.rcrh_submitted); + unsigned long long queued_ms = + MAX(0LL, + (long long)(timespec_2_msec(&now) - + timespec_2_msec(&sa->rcsa_rh.rcrh_submitted))); DBG_RAFT_CLIENT_SUB_APP( LL_DEBUG, sa, @@ -1049,7 +1054,7 @@ raft_client_check_pending_requests(struct raft_client_instance *rci) queued_ms, timespec_2_msec(&sa->rcsa_rh.rcrh_timeout), sa->rcsa_rh.rcrh_arg, sa->rcsa_rh.rcrh_rpc_request.rcrm_user_tag); - if (queued_ms > timespec_2_msec(&sa->rcsa_rh.rcrh_timeout) || + if ((queued_ms > timespec_2_msec(&sa->rcsa_rh.rcrh_timeout)) || FAULT_INJECT(async_raft_client_request_expire)) { // Detect and stash expired requests @@ -1719,11 +1724,13 @@ raft_client_reply_try_complete(struct raft_client_instance *rci, struct iovec *recv_iovs = &rcrh->rcrh_iovs[rcrh->rcrh_send_niovs]; ssize_t rrc = niova_io_copy_to_iovs(rcrm->rcrm_data, rcrm->rcrm_data_size, - recv_iovs, rcrh->rcrh_recv_niovs); - NIOVA_ASSERT(rrc == - MIN(rcrm->rcrm_data_size, - niova_io_iovs_total_size_get( - recv_iovs, rcrh->rcrh_recv_niovs))); + recv_iovs, rcrh->rcrh_recv_niovs); + NIOVA_ASSERT(rrc >= 0); + NIOVA_ASSERT( + (size_t)rrc == + MIN(rcrm->rcrm_data_size, + niova_io_iovs_total_size_get( + recv_iovs, rcrh->rcrh_recv_niovs))); SIMPLE_LOG_MSG(LL_DEBUG, "Copied the contents"); rcrh->rcrh_reply_used_size = (size_t)rrc; @@ -1791,10 +1798,17 
@@ raft_client_recv_handler(struct raft_instance *ri, const char *recv_buffer, ssize_t recv_bytes, const struct sockaddr_in *from) { if (!ri || !ri->ri_csn_leader || !recv_buffer || !recv_bytes || !from || - recv_bytes > raft_net_max_rpc_size(ri->ri_store_type) || FAULT_INJECT(raft_client_recv_handler_bypass)) return; + if (recv_bytes < (ssize_t)sizeof(struct raft_client_rpc_msg) || + (size_t)recv_bytes > raft_net_max_rpc_size(ri->ri_store_type)) + { + LOG_MSG(LL_NOTIFY, "invalid msg size (%zd) from %s:%u", + recv_bytes, inet_ntoa(from->sin_addr), ntohs(from->sin_port)); + return; + } + struct raft_client_instance *rci = raft_client_raft_instance_to_client_instance(ri); @@ -2009,6 +2023,8 @@ raft_client_evp_cb(const struct epoll_handle *eph, uint32_t events) FUNC_ENTRY(LL_DEBUG); + (void)events; + struct raft_instance *ri = eph->eph_arg; struct ev_pipe *evp = raft_net_evp_get(ri, RAFT_EVP_CLIENT); @@ -2085,10 +2101,16 @@ raft_client_instance_hist_lreg_multi_facet_handler( struct raft_instance_hist_stats *rihs, struct lreg_value *lv) { - if (!lv || - lv->lrv_value_idx_in >= binary_hist_size(&rihs->rihs_bh)) + if (!lv || !rihs) return -EINVAL; + int hsz = binary_hist_size(&rihs->rihs_bh); + if (hsz < 0) + return hsz; + + if (lv->lrv_value_idx_in >= (unsigned int)hsz) + return -ERANGE; + else if (op == LREG_NODE_CB_OP_WRITE_VAL) return -EPERM; @@ -2303,8 +2325,8 @@ raft_client_sub_app_req_history_lreg_cb(enum lreg_node_cb_ops op, size_t idx = vd->lvd_index; - const int64_t cnt = niova_atomic_read(&rh->rcsarh_cnt); - int64_t oldest_entry = cnt > rh->rcsarh_size ? (cnt % rh->rcsarh_size) : 0; + const uint64_t cnt = niova_atomic_read(&rh->rcsarh_cnt); + uint64_t oldest_entry = cnt > rh->rcsarh_size ? 
(cnt % rh->rcsarh_size) : 0; idx = (idx + oldest_entry) % rh->rcsarh_size; @@ -2346,7 +2368,7 @@ static size_t raft_client_sub_app_req_history_size( const struct raft_client_sub_app_req_history *rh) { - const int64_t cnt = niova_atomic_read(&rh->rcsarh_cnt); + const uint64_t cnt = niova_atomic_read(&rh->rcsarh_cnt); return cnt > rh->rcsarh_size ? rh->rcsarh_size : cnt; } @@ -2530,10 +2552,15 @@ raft_client_instance_lreg_init(struct raft_client_instance *rci, FATAL_IF((rc), "lreg_node_install(): %s", strerror(-rc)); + NIOVA_ASSERT( + (LREG_USER_TYPE_HISTOGRAM__MAX - LREG_USER_TYPE_HISTOGRAM__MIN) >= + RAFT_INSTANCE_HIST_MAX); + for (enum raft_instance_hist_types i = RAFT_INSTANCE_HIST_MIN; i < RAFT_INSTANCE_HIST_MAX; i++) { - lreg_node_init(&ri->ri_rihs[i].rihs_lrn, i, + enum lreg_user_types x = i + LREG_USER_TYPE_HISTOGRAM__MIN; + lreg_node_init(&ri->ri_rihs[i].rihs_lrn, x, raft_client_instance_hist_lreg_cb, (void *)&ri->ri_rihs[i], LREG_INIT_OPT_IGNORE_NUM_VAL_ZERO); diff --git a/src/raft_net.c b/src/raft_net.c index e0b586731..da3c53475 100644 --- a/src/raft_net.c +++ b/src/raft_net.c @@ -1501,6 +1501,9 @@ raft_net_client_msg_bulk_size_cb(struct tcp_mgr_connection *tmc, struct raft_client_rpc_msg *msg, struct raft_instance *ri) { + (void)tmc; + (void)ri; + return msg->rcrm_data_size; } @@ -1509,6 +1512,9 @@ raft_net_peer_msg_bulk_size_cb(struct tcp_mgr_connection *tmc, struct raft_rpc_msg *msg, struct raft_instance *ri) { + (void)tmc; + (void)ri; + return msg->rrm_type != RAFT_RPC_MSG_TYPE_APPEND_ENTRIES_REQUEST ? 0 : msg->rrm_append_entries_request.raerqm_entries_sz; } @@ -1857,20 +1863,25 @@ raft_net_send_msg(struct raft_instance *ri, struct ctl_svc_node *csn, // communication from and to client should be through tcp. 
if (raft_instance_is_client(ri) || sock_src == RAFT_UDP_LISTEN_CLIENT) size_rc = raft_net_send_tcp(ri, csn, iov, niovs); + else if (msg_size <= udp_get_max_size()) size_rc = raft_net_send_udp(ri, csn, iov, niovs, sock_src); + else if (!raft_net_tcp_disabled() && msg_size <= tcp_get_max_size()) size_rc = raft_net_send_tcp(ri, csn, iov, niovs); + else size_rc = -E2BIG; } SIMPLE_LOG_MSG(LL_DEBUG, "raft_net_send_msg(): size_rc=%ld msg_size=%zu", size_rc, msg_size); - if (size_rc == msg_size) - raft_net_update_last_comm_time(ri, csn->csn_uuid, true); - return size_rc == msg_size ? 0 : size_rc; + if (size_rc != (ssize_t)msg_size) // return the error + return size_rc; + + raft_net_update_last_comm_time(ri, csn->csn_uuid, true); + return 0; } int @@ -2158,6 +2169,7 @@ raft_net_timerfd_settime(struct raft_instance *ri, unsigned long long msecs) raft_net_timerfd_cb_ctx_t raft_net_timerfd_cb(const struct epoll_handle *eph, uint32_t events) { + (void)events; struct raft_instance *ri = eph->eph_arg; ssize_t rc = niova_io_fd_drain(ri->ri_timer_fd, NULL); @@ -2196,6 +2208,7 @@ raft_net_udp_identify_socket(const struct raft_instance *ri, const int fd) static raft_net_cb_ctx_t raft_net_udp_cb(const struct epoll_handle *eph, uint32_t events) { + (void)events; SIMPLE_FUNC_ENTRY(LL_TRACE); static char sink_buf[NIOVA_MAX_UDP_SIZE]; diff --git a/src/raft_server.c b/src/raft_server.c index 6de17df10..7e88988df 100644 --- a/src/raft_server.c +++ b/src/raft_server.c @@ -258,16 +258,17 @@ raft_instance_lreg_multi_facet_cb(enum lreg_node_cb_ops op, raft_instance_is_leader(ri) ? timespec_2_float(&ri->ri_leader.rls_leader_accumulated) : (float)0.0); - break; + break; case RAFT_LREG_QUORUM_CNT: lreg_value_fill_signed(lv, "quorum-cnt", (raft_instance_is_leader(ri) ? ri->ri_leader.rls_quorum_ok_cnt : -1)); break; case RAFT_LREG_HEARTBEAT_MSEC: - lreg_value_fill_signed(lv, "heartbeat-freq-msec", - (raft_instance_is_leader(ri) ? 
- raft_heartbeat_timeout_msec(ri) : -1)); + lreg_value_fill_signed( + lv, "heartbeat-freq-msec", + (raft_instance_is_leader(ri) ? + (long long)raft_heartbeat_timeout_msec(ri) : -1)); break; case RAFT_LREG_CLIENT_REQUESTS: lreg_value_fill_string( @@ -346,8 +347,8 @@ raft_instance_lreg_multi_facet_cb(enum lreg_node_cb_ops op, lreg_value_fill_signed( lv, "coalesce-space-remaining", (raft_instance_is_leader(ri) && ri->ri_coalesced_wr != NULL) ? - (RAFT_ENTRY_MAX_DATA_SIZE(ri) - - ri->ri_coalesced_wr->rcwi_total_size) : -1LL); + (long long)((RAFT_ENTRY_MAX_DATA_SIZE(ri) - + ri->ri_coalesced_wr->rcwi_total_size)) : -1LL); break; case RAFT_LREG_HIST_COMMIT_LAT: lreg_value_fill_histogram( @@ -432,6 +433,7 @@ raft_instance_lreg_multi_facet_cb(enum lreg_node_cb_ops op, rc = -EPERM; break; } + break; default: rc = -EOPNOTSUPP; @@ -673,7 +675,7 @@ raft_server_wr_entries_get_total_size(const uint32_t *entry_sizes, if (!num_entries) return 0; - for (int i = 0; i < num_entries; i++) + for (uint32_t i = 0; i < num_entries; i++) total_size += entry_sizes[i]; return total_size; @@ -1010,7 +1012,7 @@ raft_server_read_entry_register_idx(struct raft_instance *ri, niova_mutex_lock(&ri->ri_compaction_mutex); // only one read at a time - NIOVA_ASSERT(ri->ri_pending_read_idx == ID_ANY_64bit); + NIOVA_ASSERT(ri->ri_pending_read_idx == RAFT_ENTRY_IDX_ANY); rc = raft_server_entry_has_been_compacted(ri, entry_idx, NULL); if (!rc) @@ -1036,7 +1038,7 @@ raft_server_read_entry_unregister_idx(struct raft_instance *ri, niova_mutex_lock(&ri->ri_compaction_mutex); NIOVA_ASSERT(ri->ri_pending_read_idx == entry_idx); - ri->ri_pending_read_idx = ID_ANY_64bit; + ri->ri_pending_read_idx = RAFT_ENTRY_IDX_ANY; niova_mutex_unlock(&ri->ri_compaction_mutex); } @@ -1053,7 +1055,7 @@ raft_server_compaction_try_increase_lowest_idx( NIOVA_ASSERT(new_lowest_idx > niova_atomic_read(&ri->ri_lowest_idx)); // A read is currently operating in the compaction region - if (ri->ri_pending_read_idx != ID_ANY_64bit && + if 
(ri->ri_pending_read_idx != RAFT_ENTRY_IDX_ANY && ri->ri_pending_read_idx < new_lowest_idx) rc = -EAGAIN; else @@ -1111,7 +1113,7 @@ raft_server_entry_read_by_store_common(struct raft_instance *ri, { // entry read errors are fatal DBG_RAFT_ENTRY_FATAL_IF( - (rrc != raft_server_entry_to_total_size(re)), reh, + (rrc != (ssize_t)raft_server_entry_to_total_size(re)), reh, "invalid read size rrc=%zd, expected %zu: %s", rrc, raft_server_entry_to_total_size(re), strerror((int)-rrc)); } @@ -1555,7 +1557,7 @@ raft_server_entries_scan(struct raft_instance *ri) */ if (starting_entry > 0) { -#define LOG_INITIAL_SCAN_SZ 1000UL +#define LOG_INITIAL_SCAN_SZ 1000L int rc = raft_server_entries_scan_internal( ri, lowest_idx, MIN((lowest_idx + LOG_INITIAL_SCAN_SZ), entry_max_idx)); @@ -2598,7 +2600,7 @@ raft_server_leader_init_append_entry_msg(struct raft_instance *ri, raerq->raerqm_prev_log_index = rfi->rfi_next_idx - 1; // Copy the rls_prev_idx_term[] if it was refreshed above. - raerq->raerqm_prev_log_term = rc ? -1ULL : rfi->rfi_prev_idx_term; + raerq->raerqm_prev_log_term = rc ? -1LL : rfi->rfi_prev_idx_term; // If error, return -ESTALE to signify that the peer needs bulk recovery return rc ? -ESTALE : 0; @@ -3023,7 +3025,7 @@ raft_server_append_entry_request_bounds_check( /* Sanity check the leader's request. If the leader's lowest index is * higher than the pli then don't proceed with this msg. */ - if (raerq->raerqm_prev_log_index != ID_ANY_64bit && + if (raerq->raerqm_prev_log_index != RAFT_ENTRY_IDX_ANY && raerq->raerqm_prev_log_index < raerq->raerqm_lowest_index) { DBG_RAFT_MSG(LL_WARN, rrm, "pli < leader-lowest-idx"); @@ -3232,11 +3234,11 @@ raft_server_process_append_entries_request_prep_reply( * set our next-idx to '0'. */ rae_reply->raerpm_newly_initialized_peer = - current_idx == ID_ANY_64bit ? 1 : 0; + current_idx == RAFT_ENTRY_IDX_ANY ? 
1 : 0; // Issue #27 - send synced-log-index in non_matching_prev_term case too rae_reply->raerpm_synced_log_index = - (!rc || non_matching_prev_term) ? current_idx : ID_ANY_64bit; + (!rc || non_matching_prev_term) ? current_idx : RAFT_ENTRY_IDX_ANY; raft_server_set_uuids_in_rpc_msg(ri, reply); @@ -3609,7 +3611,7 @@ raft_server_leader_can_advance_commit_idx(struct raft_instance *ri, */ return (committed_raft_idx >= rls->rls_initial_term_idx && committed_raft_idx > ri->ri_commit_idx) ? - committed_raft_idx : ID_ANY_64bit; + committed_raft_idx : RAFT_ENTRY_IDX_ANY; } /** @@ -3633,7 +3635,7 @@ raft_server_leader_try_advance_commit_idx(struct raft_instance *ri) const int64_t committed_raft_idx = raft_server_leader_can_advance_commit_idx(ri, false); - if (committed_raft_idx != ID_ANY_64bit) + if (committed_raft_idx != RAFT_ENTRY_IDX_ANY) raft_server_advance_commit_idx(ri, committed_raft_idx); } @@ -3642,7 +3644,7 @@ raft_server_leader_try_advance_commit_idx_from_sync_thread( struct raft_instance *ri) { if (ri && (raft_server_leader_can_advance_commit_idx(ri, true) != - ID_ANY_64bit)) + RAFT_ENTRY_IDX_ANY)) RAFT_NET_EVP_NOTIFY_NO_FAIL(ri, RAFT_EVP_ASYNC_COMMIT_IDX_ADV); } @@ -3943,7 +3945,7 @@ raft_server_peer_recv_handler(struct raft_instance *ri, const struct raft_rpc_msg *rrm = (const struct raft_rpc_msg *)recv_buffer; - size_t expected_msg_size = sizeof(struct raft_rpc_msg); + ssize_t expected_msg_size = sizeof(struct raft_rpc_msg); if (rrm->rrm_type == RAFT_RPC_MSG_TYPE_APPEND_ENTRIES_REQUEST) expected_msg_size += rrm->rrm_append_entries_request.raerqm_entries_sz; @@ -4017,7 +4019,7 @@ raft_leader_majority_followers_comm_window(const struct raft_instance *ri, num_acked_within_window, num_raft_peers / 2 + 1, num_raft_peers); - return (num_acked_within_window >= (num_raft_peers / 2 + 1)) ? + return (num_acked_within_window >= (size_t)((num_raft_peers / 2 + 1))) ? 
true : false; } @@ -4286,6 +4288,8 @@ raft_server_net_client_request_init( NIOVA_ASSERT(ri && rncr && (rncr->rncr_is_direct_req || (reply_buf && reply_buf_size >= sizeof(struct raft_client_rpc_msg)))); + (void)from; + if (type == RAFT_NET_CLIENT_REQ_TYPE_NONE) FATAL_IF((!rpc_request || commit_data), "invalid argument: rpc_request may only be specified"); @@ -4310,7 +4314,7 @@ raft_server_net_client_request_init( { //memset the reply_buf to make sure garbage values are not used from it. memset(reply_buf, 0, sizeof(struct raft_client_rpc_msg)); - + rncr->rncr_reply = (struct raft_client_rpc_msg *)reply_buf; } @@ -4386,6 +4390,8 @@ raft_server_write_coalesce_entry(struct raft_instance *ri, const char *data, ri->ri_coalesced_wr->rcwi_nentries < RAFT_ENTRY_NUM_ENTRIES && ri->ri_coalesced_wr->rcwi_total_size < RAFT_ENTRY_MAX_DATA_SIZE(ri)); + (void)opts; + // Buffer should have space to accomodate this request. FATAL_IF((len + ri->ri_coalesced_wr->rcwi_total_size) > RAFT_ENTRY_MAX_DATA_SIZE(ri), @@ -4653,7 +4659,7 @@ raft_server_client_recv_handler(struct raft_instance *ri, NIOVA_ASSERT(ri && from); if (!recv_buffer || !recv_bytes || !ri->ri_server_sm_request_cb || - recv_bytes < sizeof(struct raft_client_rpc_msg)) + recv_bytes < (ssize_t)sizeof(struct raft_client_rpc_msg)) { LOG_MSG(LL_NOTIFY, "sanity check fail, buf %p bytes %ld cb %p", recv_buffer, recv_bytes, ri->ri_server_sm_request_cb); @@ -5126,6 +5132,7 @@ static raft_server_epoll_remote_sender_t raft_server_remote_send_evp_cb(const struct epoll_handle *eph, uint32_t events) { NIOVA_ASSERT(eph); + (void)events; FUNC_ENTRY(LL_DEBUG); @@ -5147,6 +5154,7 @@ static raft_server_epoll_sm_apply_t raft_server_sm_apply_evp_cb(const struct epoll_handle *eph, uint32_t events) { NIOVA_ASSERT(eph); + (void)events; FUNC_ENTRY(LL_DEBUG); @@ -5170,6 +5178,7 @@ raft_server_commit_idx_adv_evp_cb(const struct epoll_handle *eph, { NIOVA_ASSERT(eph); FUNC_ENTRY(LL_DEBUG); + (void)events; struct raft_instance *ri = eph->eph_arg; 
struct ev_pipe *evp = raft_net_evp_get(ri, RAFT_EVP_ASYNC_COMMIT_IDX_ADV); @@ -5414,9 +5423,11 @@ raft_server_instance_hist_lreg_multi_facet_handler( struct raft_instance_hist_stats *rihs, struct lreg_value *lv) { - if (!lv || - lv->lrv_value_idx_in >= binary_hist_size(&rihs->rihs_bh) || - op != LREG_NODE_CB_OP_READ_VAL) + if (!lv || !rihs || op != LREG_NODE_CB_OP_READ_VAL) + return; + + int hsz = binary_hist_size(&rihs->rihs_bh); + if (hsz < 0 || lv->lrv_value_idx_in >= (unsigned int)hsz) return; snprintf(lv->lrv_key_string, LREG_VALUE_STRING_MAX, "%lld", @@ -5482,11 +5493,16 @@ raft_server_instance_lreg_init(struct raft_instance *ri) lreg_node_init(&ri->ri_lreg, LREG_USER_TYPE_RAFT, raft_instance_lreg_cb, ri, LREG_INIT_OPT_INLINED_CHILDREN); + NIOVA_ASSERT( + (LREG_USER_TYPE_HISTOGRAM__MAX - LREG_USER_TYPE_HISTOGRAM__MIN) >= + RAFT_INSTANCE_HIST_MAX); + // Install the inlined objects into the parent for (enum raft_instance_hist_types i = RAFT_INSTANCE_HIST_MIN; i < RAFT_INSTANCE_HIST_MAX; i++) { - lreg_node_init(&ri->ri_rihs[i].rihs_lrn, i, + enum lreg_user_types x = i + LREG_USER_TYPE_HISTOGRAM__MIN; + lreg_node_init(&ri->ri_rihs[i].rihs_lrn, x, raft_server_instance_hist_lreg_cb, (void *)&ri->ri_rihs[i], (LREG_INIT_OPT_IGNORE_NUM_VAL_ZERO | @@ -5725,7 +5741,7 @@ raft_server_reap_log(struct raft_instance *ri, ssize_t num_keep_entries) DBG_RAFT_INSTANCE((reaped ? LL_NOTIFY : LL_DEBUG), ri, "num-keep-entries=%zd reap-idx=%ld reap=%s", num_keep_entries, - new_lowest_idx > lowest_idx ? new_lowest_idx : -1UL, + new_lowest_idx > lowest_idx ? new_lowest_idx : -1L, reaped ? 
"true" : "false"); } @@ -6366,6 +6382,8 @@ raft_server_enq_direct_raft_req_from_leader(char *req_buf, int64_t data_size) if (FAULT_INJECT(raft_leader_ignore_direct_req)) return -EAGAIN; + (void)data_size; + struct raft_client_rpc_msg *rcm = (struct raft_client_rpc_msg *)req_buf; struct raft_instance *ri = raft_net_get_instance(); diff --git a/src/raft_server_backend_posix.c b/src/raft_server_backend_posix.c index 0e85e2e9b..6b5ce327a 100644 --- a/src/raft_server_backend_posix.c +++ b/src/raft_server_backend_posix.c @@ -100,7 +100,8 @@ rsbr_get_num_log_headers(const struct raft_instance *ri) static inline bool rsbr_phys_idx_is_log_header(const struct raft_instance *ri, size_t phys_idx) { - return phys_idx < rsbr_get_num_log_headers(ri) ? true : false; + return (raft_entry_idx_t)phys_idx < rsbr_get_num_log_headers(ri) ? + true : false; } #if defined(__clang__) #pragma clang diagnostic pop @@ -182,7 +183,7 @@ rsbp_entry_write(struct raft_instance *ri, const struct raft_entry *re, rrc < 0 ? strerror(-rrc) : "Success", rrc, expected_size, offset); - NIOVA_ASSERT(rrc == expected_size); + NIOVA_ASSERT(rrc == (ssize_t)expected_size); } static ssize_t @@ -282,7 +283,7 @@ rsbp_header_load(struct raft_instance *ri) if (!ri) return -EINVAL; - const size_t num_headers = rsbr_get_num_log_headers(ri); + const raft_entry_idx_t num_headers = rsbr_get_num_log_headers(ri); NIOVA_ASSERT(num_headers > 0); struct raft_log_header most_recent_rlh = {0}; @@ -390,7 +391,8 @@ rsbp_header_write(struct raft_instance *ri) raft_server_entry_to_total_size(re), rsbr_raft_entry_to_phys_offset(ri, re)); - int rc = (write_sz == raft_server_entry_to_total_size(re)) ? 0 : -EIO; + int rc = (write_sz == (ssize_t)raft_server_entry_to_total_size(re)) ? 
+ 0 : -EIO; int sync_rc = rsbp_sync(ri); diff --git a/src/raft_server_backend_rocksdb.c b/src/raft_server_backend_rocksdb.c index 7c4ff9aad..f6bf05794 100644 --- a/src/raft_server_backend_rocksdb.c +++ b/src/raft_server_backend_rocksdb.c @@ -625,7 +625,8 @@ rsbr_entry_header_write_recovery_scrub(struct raft_instance *ri, rocksdb_writebatch_clear(rir->rir_writebatch); size_t entry_header_key_len = 0; - DECL_AND_FMT_STRING_RET_LEN(entry_header_key, RAFT_ROCKSDB_KEY_LEN_MAX, + DECL_AND_FMT_STRING_RET_LEN(entry_header_key, + (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&entry_header_key_len, RAFT_ENTRY_HEADER_KEY_PRINTF, entry_idx); @@ -660,7 +661,8 @@ rsbr_entry_write(struct raft_instance *ri, const struct raft_entry *re, * 2) raft entry KV */ size_t entry_header_key_len = 0; - DECL_AND_FMT_STRING_RET_LEN(entry_header_key, RAFT_ROCKSDB_KEY_LEN_MAX, + DECL_AND_FMT_STRING_RET_LEN(entry_header_key, + (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&entry_header_key_len, RAFT_ENTRY_HEADER_KEY_PRINTF, entry_idx); @@ -669,7 +671,8 @@ rsbr_entry_write(struct raft_instance *ri, const struct raft_entry *re, sizeof(struct raft_entry_header)); size_t entry_key_len = 0; - DECL_AND_FMT_STRING_RET_LEN(entry_key, RAFT_ROCKSDB_KEY_LEN_MAX, + DECL_AND_FMT_STRING_RET_LEN(entry_key, + (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&entry_key_len, RAFT_ENTRY_KEY_PRINTF, entry_idx); @@ -763,7 +766,8 @@ rsbr_entry_header_read(struct raft_instance *ri, struct raft_entry_header *reh) struct raft_instance_rocks_db *rir = rsbr_ri_to_rirdb(ri); size_t entry_header_key_len = 0; - DECL_AND_FMT_STRING_RET_LEN(entry_header_key, RAFT_ROCKSDB_KEY_LEN_MAX, + DECL_AND_FMT_STRING_RET_LEN(entry_header_key, + (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&entry_header_key_len, RAFT_ENTRY_HEADER_KEY_PRINTF, reh->reh_index); @@ -792,7 +796,7 @@ rsbr_entry_read(struct raft_instance *ri, struct raft_entry *re) struct raft_instance_rocks_db *rir = rsbr_ri_to_rirdb(ri); size_t entry_key_len = 0; - 
DECL_AND_FMT_STRING_RET_LEN(entry_key, RAFT_ROCKSDB_KEY_LEN_MAX, + DECL_AND_FMT_STRING_RET_LEN(entry_key, (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&entry_key_len, RAFT_ENTRY_KEY_PRINTF, re->re_header.reh_index); @@ -805,7 +809,8 @@ rsbr_entry_read(struct raft_instance *ri, struct raft_entry *re) entry_key, strerror(rc)); return rc < 0 ? rc : - re->re_header.reh_data_size + sizeof(struct raft_entry_header); + (ssize_t)(re->re_header.reh_data_size + + sizeof(struct raft_entry_header)); //Xxx this is wonky } @@ -818,7 +823,7 @@ rsbr_header_load(struct raft_instance *ri) struct raft_instance_rocks_db *rir = rsbr_ri_to_rirdb(ri); size_t header_key_len = 0; - DECL_AND_FMT_STRING_RET_LEN(header_key, RAFT_ROCKSDB_KEY_LEN_MAX, + DECL_AND_FMT_STRING_RET_LEN(header_key, (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&header_key_len, RAFT_LOG_HEADER_FMT, ri->ri_raft_uuid_str, @@ -849,7 +854,7 @@ rsbr_header_write(struct raft_instance *ri) rocksdb_writebatch_clear(rir->rir_writebatch); size_t key_len; - DECL_AND_FMT_STRING_RET_LEN(header_key, RAFT_ROCKSDB_KEY_LEN_MAX, + DECL_AND_FMT_STRING_RET_LEN(header_key, (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&key_len, RAFT_LOG_HEADER_FMT, ri->ri_raft_uuid_str, ri->ri_this_peer_uuid_str); @@ -897,7 +902,7 @@ rsbr_init_header(struct raft_instance *ri) rocksdb_writebatch_clear(rir->rir_writebatch); size_t key_len; - DECL_AND_FMT_STRING_RET_LEN(last_key, RAFT_ROCKSDB_KEY_LEN_MAX, + DECL_AND_FMT_STRING_RET_LEN(last_key, (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&key_len, RAFT_LOG_LASTENTRY_FMT, ri->ri_raft_uuid_str, ri->ri_this_peer_uuid_str); @@ -1092,7 +1097,7 @@ rsbr_num_entries_calc(struct raft_instance *ri) SIMPLE_LOG_MSG(LL_NOTIFY, "last-entry-index=%zd", last_entry_idx + 1); - return last_entry_idx >= 0UL ? last_entry_idx + 1 : last_entry_idx; + return last_entry_idx >= 0L ? 
last_entry_idx + 1 : last_entry_idx; } static void @@ -1108,7 +1113,7 @@ rsbr_log_truncate(struct raft_instance *ri, const raft_entry_idx_t entry_idx) rocksdb_writebatch_clear(rir->rir_writebatch); size_t entry_header_key_len = 0; - DECL_AND_FMT_STRING_RET_LEN(entry_header_key, RAFT_ROCKSDB_KEY_LEN_MAX, + DECL_AND_FMT_STRING_RET_LEN(entry_header_key, (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&entry_header_key_len, RAFT_ENTRY_KEY_PRINTF, entry_idx); @@ -1149,12 +1154,13 @@ rsbr_log_reap(struct raft_instance *ri, const raft_entry_idx_t entry_idx) size_t start_entry_key_len = 0; DECL_AND_FMT_STRING_RET_LEN(start_entry_key, - RAFT_ROCKSDB_KEY_LEN_MAX, + (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&start_entry_key_len, RAFT_ENTRY_KEY_PRINTF, (raft_entry_idx_t)0); size_t end_entry_key_len = 0; - DECL_AND_FMT_STRING_RET_LEN(end_entry_key, RAFT_ROCKSDB_KEY_LEN_MAX, + DECL_AND_FMT_STRING_RET_LEN(end_entry_key, + (ssize_t)RAFT_ROCKSDB_KEY_LEN_MAX, (ssize_t *)&end_entry_key_len, RAFT_ENTRY_KEY_PRINTF, entry_idx); @@ -1231,7 +1237,7 @@ rsbr_recovery_rsync_path_build(const char *base, const uuid_t peer_id, base, ribSubDirs[RIR_SUBDIR_CHKPT_PEERS], CHKPT_RESTORE_PATH_FMT_ARGS(db_uuid, peer_uuid)); - return rc >= len ? -ENAMETOOLONG : 0; + return rc >= (ssize_t)len ? -ENAMETOOLONG : 0; } static int @@ -1250,7 +1256,7 @@ rsbr_recovery_inprogress_path_build(const char *base, base, RECOVERY_MARKER_NAME, CHKPT_RESTORE_PATH_FMT_ARGS(db_uuid, peer_uuid)); - return rc >= len ? -ENAMETOOLONG : 0; + return rc >= (ssize_t)len ? -ENAMETOOLONG : 0; } static int @@ -1273,7 +1279,7 @@ rsbr_checkpoint_path_build(const char *base, const uuid_t peer_id, initial ? ".in-progress_" : "", CHKPT_PATH_FMT_ARGS(db_uuid, peer_uuid, sync_idx)); - return rc > len ? -ENAMETOOLONG : 0; + return rc > (ssize_t)len ? 
-ENAMETOOLONG : 0; } static int @@ -1677,7 +1683,7 @@ rsbr_bulk_recover_build_remote_path(const struct raft_recovery_handle *rrh, rc = snprintf(remote_path, len - 1, "%s:%s", ctl_svc_node_peer_2_ipaddr(csn), remote_relative_path); - if (rc >= len) + if (rc >= (ssize_t)len) { LOG_MSG(LL_ERROR, "snprintf() overrun (rc=%d)", rc); rc = -ENAMETOOLONG; @@ -2587,7 +2593,7 @@ rsbr_db_open_internal(const struct raft_instance *ri, if (cft && cft->rsrcfe_num_cf) { NIOVA_ASSERT(cft->rsrcfe_num_cf <= RAFT_ROCKSDB_MAX_CF); - for (int i = 0; i < cft->rsrcfe_num_cf; i++) + for (size_t i = 0; i < cft->rsrcfe_num_cf; i++) cft_opts[i] = rir->rir_options; } // Set prefix extractor for range queries diff --git a/src/registry.c b/src/registry.c index bc28035ac..ae61463c7 100644 --- a/src/registry.c +++ b/src/registry.c @@ -75,21 +75,15 @@ lreg_node_walk_vnode(const struct lreg_node *parent, lrn_walk_cb_t lrn_wcb, { NIOVA_ASSERT(parent->lrn_vnode_child); - unsigned int max_idx = parent->lrn_lvd.lvd_num_entries; + unsigned int cnt = parent->lrn_lvd.lvd_num_entries; - if (parent->lrn_reverse_varray) + for (unsigned int i = 0; i < cnt; i++) { - for (unsigned int i = max_idx - 1; i >= 0; i--) - if (!lreg_node_vnode_entry_exec(parent, lrn_wcb, cb_arg, i, depth, - user_type)) - break; - } - else - { - for (unsigned int i = 0; i < max_idx; i++) - if (!lreg_node_vnode_entry_exec(parent, lrn_wcb, cb_arg, i, depth, - user_type)) - break; + unsigned int idx = parent->lrn_reverse_varray ? 
(cnt - 1 - i) : i; + + if (!lreg_node_vnode_entry_exec(parent, lrn_wcb, cb_arg, idx, depth, + user_type)) + break; } } @@ -262,28 +256,19 @@ lreg_node_recurse_from_parent(struct lreg_node *parent, struct lreg_node varray_child = *parent; lreg_value_vnode_data_to_lreg_node(&lrv, &varray_child); - if (parent->lrn_reverse_varray) - { - for (unsigned int j = - lrv.get.lrv_varray_out.lvvd_num_keys_out - 1; - j >= 0; j--) - { - varray_child.lrn_lvd.lvd_index = j; - lreg_node_recurse_from_parent(&varray_child, lrn_rcb, - depth + 1); - } - } - else + unsigned int cnt = lrv.get.lrv_varray_out.lvvd_num_keys_out; + + for (unsigned int j = 0; j < cnt; j++) { - for (unsigned int j = 0; - j < lrv.get.lrv_varray_out.lvvd_num_keys_out; j++) - { - varray_child.lrn_lvd.lvd_index = j; - lreg_node_recurse_from_parent(&varray_child, lrn_rcb, - depth + 1); - } + unsigned int idx = + parent->lrn_reverse_varray ? (cnt - 1 - j) : j; + + varray_child.lrn_lvd.lvd_index = idx; + lreg_node_recurse_from_parent(&varray_child, lrn_rcb, + depth + 1); } } + lrn_rcb(&lrv, depth, i, true); if (i < num_keys - 1) SIMPLE_LOG_MSG(LL_WARN, "%d:%d %*s %c", depth, 0, indent, "", ','); @@ -789,6 +774,8 @@ lreg_util_thread_cb(const struct epoll_handle *eph, uint32_t events) { FUNC_ENTRY(LL_DEBUG); + + (void)events; + if (eph->eph_fd != lRegEventFD) { LOG_MSG(LL_ERROR, "invalid fd=%d, expected %d", diff --git a/src/tcp.c b/src/tcp.c index 8556e3687..eb6c0fc36 100644 --- a/src/tcp.c +++ b/src/tcp.c @@ -11,7 +11,7 @@ REGISTRY_ENTRY_FILE_GENERATE; -static ssize_t maxTcpSize = NIOVA_MAX_TCP_SIZE; +static size_t maxTcpSize = NIOVA_MAX_TCP_SIZE; static int tcpDefaultPort = NIOVA_DEFAULT_TCP_PORT; @@ -21,20 +21,20 @@ tcp_get_default_port(void) return tcpDefaultPort; } -ssize_t +size_t tcp_get_max_size() { return maxTcpSize; } void -tcp_set_max_size(ssize_t new_size) +tcp_set_max_size(size_t new_size) { maxTcpSize = new_size; } bool -tcp_iov_size_ok(ssize_t sz) +tcp_iov_size_ok(size_t sz) { return sz <= maxTcpSize; } @@
-260,6 +260,8 @@ tcp_socket_recv_all(const struct tcp_socket_handle *tsh, { SIMPLE_FUNC_ENTRY(LL_TRACE); + (void)from; + ssize_t total_bytes = 0; for (int i = 0; i < max_attempts && iov->iov_len > 0; i++) diff --git a/src/tcp_mgr.c b/src/tcp_mgr.c index 840c92e08..71e285f6b 100644 --- a/src/tcp_mgr.c +++ b/src/tcp_mgr.c @@ -28,7 +28,7 @@ tcp_mgr_credits_set(niova_atomic32_t *credits, uint32_t cnt) static void * tcp_mgr_credits_malloc(niova_atomic32_t *credits, size_t sz) { - uint32_t new_credits = niova_atomic_dec(credits); + int32_t new_credits = niova_atomic_dec(credits); SIMPLE_LOG_MSG(LL_TRACE, "malloc sz %lu, new cred %u", sz, new_credits); if (new_credits < 0) { @@ -36,7 +36,6 @@ tcp_mgr_credits_malloc(niova_atomic32_t *credits, size_t sz) return NULL; } - void *buf = niova_malloc_can_fail(sz); if (!buf) return NULL; @@ -231,7 +230,7 @@ tcp_mgr_setup(struct tcp_mgr_instance *tmi, void *data, if (tmi->tmi_nworkers == 0) return rc; - for (int i = 0; i < tmi->tmi_nworkers; i++) + for (size_t i = 0; i < tmi->tmi_nworkers; i++) thread_creator_wait_until_ctl_loop_reached(&tmi->tmi_workers[i]); } @@ -549,7 +548,7 @@ tcp_mgr_bulk_progress_recv(struct tcp_mgr_connection *tmc) else if (recv_bytes < 0) return recv_bytes; - NIOVA_ASSERT(recv_bytes <= tmc->tmc_bulk_remain); + NIOVA_ASSERT((size_t)recv_bytes <= tmc->tmc_bulk_remain); tmc->tmc_bulk_offset += recv_bytes; tmc->tmc_bulk_remain -= recv_bytes; @@ -578,7 +577,7 @@ tcp_mgr_bulk_prepare_and_recv(struct tcp_mgr_connection *tmc, size_t bulk_size, int bytes_avail; ioctl(tmc->tmc_tsh.tsh_socket, FIONREAD, &bytes_avail); - if (bytes_avail >= bulk_size) + if (bytes_avail >= (ssize_t)bulk_size) buf = niova_malloc_can_fail(buf_size); if (!buf) @@ -610,7 +609,7 @@ tcp_mgr_new_msg_handler(struct tcp_mgr_connection *tmc) SIMPLE_FUNC_ENTRY(LL_TRACE); struct tcp_mgr_instance *tmi = tmc->tmc_tmi; - size_t header_size = tmc->tmc_header_size; + ssize_t header_size = tmc->tmc_header_size; NIOVA_ASSERT(tmi->tmi_recv_cb && 
tmi->tmi_bulk_size_cb && header_size && header_size <= TCP_MGR_MAX_HDR_SIZE); @@ -805,6 +804,8 @@ tcp_mgr_handshake_cb(const struct epoll_handle *eph, uint32_t events) { SIMPLE_FUNC_ENTRY(LL_TRACE); NIOVA_ASSERT(eph && eph->eph_arg); + + (void)events; struct tcp_mgr_connection *tmc = eph->eph_arg; struct tcp_mgr_instance *tmi = tmc->tmc_tmi; @@ -875,6 +876,8 @@ tcp_mgr_listen_cb(const struct epoll_handle *eph, uint32_t events) SIMPLE_FUNC_ENTRY(LL_TRACE); NIOVA_ASSERT(eph && eph->eph_arg); + (void)events; + struct tcp_mgr_instance *tmi = eph->eph_arg; int rc = tcp_mgr_accept(tmi, eph->eph_fd); diff --git a/src/thread.c b/src/thread.c index 147e6e354..d77b6fbf2 100644 --- a/src/thread.c +++ b/src/thread.c @@ -24,6 +24,7 @@ __thread const struct thread_ctl *thrCtl; static void thr_ctl_basic_sighandler(int signum) { + (void)signum; struct thread_ctl *tc = (struct thread_ctl *)thrCtl; if (tc) diff --git a/src/udp.c b/src/udp.c index 2c28cd72c..b36fa7751 100644 --- a/src/udp.c +++ b/src/udp.c @@ -14,7 +14,7 @@ REGISTRY_ENTRY_FILE_GENERATE; static int udpDefaultPort = NIOVA_DEFAULT_UDP_PORT; -static ssize_t maxUdpSize = NIOVA_MAX_UDP_SIZE; +static size_t maxUdpSize = NIOVA_MAX_UDP_SIZE; int udp_get_default_port(void) @@ -22,14 +22,14 @@ udp_get_default_port(void) return udpDefaultPort; } -ssize_t +size_t udp_get_max_size() { return maxUdpSize; } void -udp_set_max_size(ssize_t new_size) +udp_set_max_size(size_t new_size) { maxUdpSize = new_size; } @@ -228,7 +228,7 @@ udp_socket_send(const struct udp_socket_handle *ush, const struct iovec *iov, else if (iovlen > IO_MAX_IOVS) return -E2BIG; - const ssize_t total_size = niova_io_iovs_total_size_get(iov, iovlen); + const size_t total_size = niova_io_iovs_total_size_get(iov, iovlen); if (!total_size || !udp_iov_size_ok(total_size)) return -EMSGSIZE; @@ -275,5 +275,5 @@ udp_socket_send(const struct udp_socket_handle *ush, const struct iovec *iov, inet_ntoa(to->sin_addr), ntohs(to->sin_port), total_sent, total_size); - return rc 
? rc : total_sent; + return rc ? rc : (ssize_t)total_sent; } diff --git a/src/util_thread.c b/src/util_thread.c index ac5b4364f..e4a90f6ac 100644 --- a/src/util_thread.c +++ b/src/util_thread.c @@ -64,7 +64,7 @@ util_thread_remove_event_src(struct epoll_handle *eph) pthread_mutex_lock(&utilThreadMutex); - for (int i = 0; i < utilThreadNumEpollHandles; i++) + for (size_t i = 0; i < utilThreadNumEpollHandles; i++) { if (eph == &utilThreadEpollHandles[i]) { diff --git a/test/binary-hist-test.c b/test/binary-hist-test.c index a39eaaf9a..1cc4f03d5 100644 --- a/test/binary-hist-test.c +++ b/test/binary-hist-test.c @@ -41,14 +41,14 @@ dump_hist(const struct binary_hist *bh) } static void -fill_bh(int start_bit, int num_buckets) +fill_bh(unsigned int start_bit, unsigned int num_buckets) { struct binary_hist bh; NIOVA_ASSERT(!binary_hist_init(&bh, start_bit, num_buckets)); NIOVA_ASSERT(bh.bh_start_bit == start_bit); NIOVA_ASSERT(bh.bh_num_buckets == num_buckets); - NIOVA_ASSERT(binary_hist_size(&bh) == num_buckets); + NIOVA_ASSERT(binary_hist_size(&bh) == (int)num_buckets); for (int i = 0; i < binary_hist_size(&bh); i++) { diff --git a/test/bitmap-test.c b/test/bitmap-test.c index c495dfc6d..0458a243d 100644 --- a/test/bitmap-test.c +++ b/test/bitmap-test.c @@ -202,7 +202,7 @@ niova_bitmap_merge_test(void) rc = niova_bitmap_exclusive(&x, &y); FATAL_IF(rc, "niova_bitmap_exclusive(): %s", strerror(-rc)); - for (int i = 0; i < size; i++) + for (size_t i = 0; i < size; i++) { x.nb_map[i] = 0xa55555555555555aULL; y.nb_map[i] = 0x5aaaaaaaaaaaaaa5ULL; @@ -254,7 +254,7 @@ niova_bitmap_bulk_unset_test(void) rc = niova_bitmap_shared(&y, &x); FATAL_IF(rc != -ENOENT, "niova_bitmap_shared(): %s", strerror(-rc)); - for (int i = 0; i < size; i++) + for (size_t i = 0; i < size; i++) { x.nb_map[i] = -1ULL; y.nb_map[i] = 0x5555555555555555ULL; @@ -272,7 +272,7 @@ niova_bitmap_bulk_unset_test(void) rc = niova_bitmap_bulk_unset(&x, &y); FATAL_IF(rc, "niova_bitmap_unset(): %s", strerror(-rc)); 
- for (int i = 0; i < size; i++) + for (size_t i = 0; i < size; i++) { niova_bitmap_init(&y); @@ -424,7 +424,7 @@ niova_bitmap_iterate_test(void) NIOVA_ASSERT(rc == 0); NIOVA_ASSERT(xarg.cnt == 0 && xarg.total_bits == 0); - for (int i = 0; i < size; i++) + for (size_t i = 0; i < size; i++) x_map[i] = -1ULL; rc = niova_bitmap_iterate(&x, niova_bitmap_iterate_cb, &xarg); @@ -435,7 +435,7 @@ niova_bitmap_iterate_test(void) xarg.cnt = 0; xarg.total_bits = 0; - for (int i = 0; i < size; i++) + for (size_t i = 0; i < size; i++) x_map[i] = 0x1010101010101010; rc = niova_bitmap_iterate(&x, niova_bitmap_iterate_cb, &xarg); @@ -449,7 +449,7 @@ niova_bitmap_iterate_test(void) xarg.cnt = 0; xarg.total_bits = 0; - for (int i = 0; i < size; i++) + for (size_t i = 0; i < size; i++) x_map[i] = 0x7070707070707070; rc = niova_bitmap_iterate(&x, niova_bitmap_iterate_cb, &xarg); diff --git a/test/buffer-test.c b/test/buffer-test.c index 4303caed9..ce810e10e 100644 --- a/test/buffer-test.c +++ b/test/buffer-test.c @@ -63,7 +63,7 @@ buffer_test(bool serialize, bool lreg) struct buffer_item *bi_array[n - 1]; - for (int i = 0; i < (n - 1); i++) + for (unsigned int i = 0; i < (n - 1); i++) { bi_array[i] = buffer_set_allocate_item_from_pending(&bs); NIOVA_ASSERT(bi_array[i]); @@ -74,7 +74,7 @@ buffer_test(bool serialize, bool lreg) rc = buffer_set_destroy(&bs); NIOVA_ASSERT(rc == -EBUSY); - for (int i = 0; i < (n - 1); i++) + for (unsigned int i = 0; i < (n - 1); i++) buffer_set_release_item(bi_array[i]); rc = buffer_set_destroy(&bs); diff --git a/test/common-test.c b/test/common-test.c index 9ade030c7..158032c14 100644 --- a/test/common-test.c +++ b/test/common-test.c @@ -13,7 +13,7 @@ highest_set_bit_pos_from_val_test(void) { NIOVA_ASSERT(highest_set_bit_pos_from_val(128) == 8); - for (int i = 0; i < TYPE_SZ_BITS(unsigned long long); i++) + for (unsigned int i = 0; i < TYPE_SZ_BITS(unsigned long long); i++) { const unsigned long long val = 1ULL << i; NIOVA_ASSERT(i + 1 == 
highest_set_bit_pos_from_val(val)); @@ -32,7 +32,7 @@ highest_set_bit_pos_from_val_test(void) static void highest_power_of_two_test(void) { - for (int i = 0; i < TYPE_SZ_BITS(unsigned long long); i++) + for (unsigned int i = 0; i < TYPE_SZ_BITS(unsigned long long); i++) { const unsigned long long val = 1ULL << i; diff --git a/test/epoll-mgr-test.c b/test/epoll-mgr-test.c index 47fe188ec..846f3fa16 100644 --- a/test/epoll-mgr-test.c +++ b/test/epoll-mgr-test.c @@ -52,6 +52,7 @@ static void foo_cb(const struct epoll_handle *eph, uint32_t events) { (void)eph; + (void)events; return; } @@ -84,6 +85,8 @@ epoll_mgr_thread_test_cb(const struct epoll_handle *eph, uint32_t events) { NIOVA_ASSERT(eph && eph->eph_arg); + (void)events; + struct epm_test_handle *eth = (struct epm_test_handle *)eph->eph_arg; // Sanity check @@ -484,6 +487,8 @@ epm_ctx_test_ref_cb(void *arg, enum epoll_handle_ref_op op) static void epoll_mgr_context_event_cb(const struct epoll_handle *_eph, uint32_t _ev) { + (void)_eph; + (void)_ev; FATAL_MSG("unexpected event received"); } diff --git a/test/ev-pipe-test.c b/test/ev-pipe-test.c index d8d444ff4..8f13edaba 100644 --- a/test/ev-pipe-test.c +++ b/test/ev-pipe-test.c @@ -48,6 +48,7 @@ ev_pipe_test_cb(const struct epoll_handle *eph, uint32_t events) EV_PIPE_RESET(evp); + (void)events; struct item *item = NULL, *tmp = NULL; size_t num_completed = 0; diff --git a/test/pumice-reference-client.c b/test/pumice-reference-client.c index 734385d51..46f976c2c 100644 --- a/test/pumice-reference-client.c +++ b/test/pumice-reference-client.c @@ -148,7 +148,7 @@ static util_thread_ctx_reg_int_t pmdbtc_test_apps_varray_lreg_cb(enum lreg_node_cb_ops op, struct lreg_node *lrn, struct lreg_value *lv) { - if (!lv) + if (!lv || pmdbtcNumApps < 0) return -EINVAL; lv->get.lrv_num_keys_out = PMDB_TEST_APP_LREG_APP__MAX; @@ -156,7 +156,7 @@ pmdbtc_test_apps_varray_lreg_cb(enum lreg_node_cb_ops op, NIOVA_ASSERT(lrn->lrn_vnode_child); struct lreg_vnode_data *vd = 
&lrn->lrn_lvd; - if (vd->lvd_index >= pmdbtcNumApps) + if (vd->lvd_index >= (unsigned int)pmdbtcNumApps) return -ERANGE; const struct pmdbtc_app *papp = &pmdbtcApps[vd->lvd_index]; @@ -247,7 +247,7 @@ pmdbtc_request_history_varray_lreg_cb(enum lreg_node_cb_ops op, struct lreg_node *lrn, struct lreg_value *lv) { - if (!lv) + if (!lv || pmdbtcHistoryCnt < 0) return -EINVAL; lv->get.lrv_num_keys_out = PMDB_TEST_REQ_LREG_APP__MAX; @@ -255,7 +255,7 @@ pmdbtc_request_history_varray_lreg_cb(enum lreg_node_cb_ops op, NIOVA_ASSERT(lrn->lrn_vnode_child); struct lreg_vnode_data *vd = &lrn->lrn_lvd; - if (vd->lvd_index >= pmdbtcHistoryCnt) + if (vd->lvd_index >= (unsigned int)pmdbtcHistoryCnt) return -ERANGE; struct pmdbtc_request_history *req_hist = &pmdbtcReqHistory[vd->lvd_index]; @@ -752,7 +752,7 @@ pmdbtc_write_prep(struct pmdbtc_request *preq) if (preq->preq_pmdb_seqno != papp->papp_rtv.rtv_seqno) pmdbtc_app_rtv_fast_forward(papp, preq->preq_pmdb_seqno); - for (int i = 0; i < preq->preq_rtdb.rtdb_num_values; i++) + for (uint32_t i = 0; i < preq->preq_rtdb.rtdb_num_values; i++) { pmdbtc_app_rtv_increment(papp); preq->preq_rtv[i] = papp->papp_last_rtv_request; diff --git a/test/queue-test.c b/test/queue-test.c index b1162214f..ccca1d54a 100644 --- a/test/queue-test.c +++ b/test/queue-test.c @@ -33,7 +33,7 @@ circleq_splice_tail_test(void) // Basic head insert operation - int i; + unsigned int i; for (i = 0; i < NUM_CES; i++) { ce[i].ce_value = i; @@ -45,7 +45,7 @@ circleq_splice_tail_test(void) } // Foreach in both directions - int j = NUM_CES; + unsigned int j = NUM_CES; CIRCLEQ_FOREACH(tmp, &qx, ce_lentry) NIOVA_ASSERT(tmp->ce_value == --j); diff --git a/test/raft-net-test.c b/test/raft-net-test.c index adc613778..f63f3b3ac 100644 --- a/test/raft-net-test.c +++ b/test/raft-net-test.c @@ -46,7 +46,7 @@ vote_sort(void) rc = raft_server_get_majority_entry_idx(mix2, ARRAY_SIZE(mix2), &idx); NIOVA_ASSERT(!rc && idx == 127); - for (int i = 0; i < ARRAY_SIZE(mix2); i++) + for 
(unsigned int i = 0; i < ARRAY_SIZE(mix2); i++) { switch (i) { diff --git a/test/raft-reference-client.c b/test/raft-reference-client.c index 963da8480..351c489b6 100644 --- a/test/raft-reference-client.c +++ b/test/raft-reference-client.c @@ -162,7 +162,7 @@ rsc_leader_is_viable(void) return rRTI.rtti_leader_is_viable; } -static uint64_t +static int64_t rsc_get_committed_seqno(void) { return rRTI.rrti_committed.rtv_seqno; @@ -323,7 +323,7 @@ rsc_init_global_raft_test_info(const struct raft_test_values *rtv) * force approach was taken after some failed experiments with * setstate_r(). */ - for (uint64_t i = 1; i <= rtv->rtv_seqno; i++) + for (int64_t i = 1; i <= rtv->rtv_seqno; i++) { uint32_t val = rsc_random_get(&randData); xor_all_values ^= val; @@ -399,7 +399,7 @@ rsc_commit_seqno_validate(const struct raft_test_values *rtv, rsc_random_init(&rand_data, rand_state_buf); - for (uint64_t i = 1; i <= rtv->rtv_seqno; i++) + for (int64_t i = 1; i <= rtv->rtv_seqno; i++) { uint32_t tmp = rsc_random_get(&rand_data); locally_generated_seq ^= tmp; @@ -730,7 +730,7 @@ rsc_recv_handler(struct raft_instance *ri, const char *recv_buffer, { SIMPLE_FUNC_ENTRY(LL_NOTIFY); if (!ri || !ri->ri_csn_leader || !recv_buffer || !recv_bytes || !from || - recv_bytes > raft_net_max_rpc_size(ri->ri_store_type)) + recv_bytes > (ssize_t)raft_net_max_rpc_size(ri->ri_store_type)) return; const struct raft_client_rpc_msg *rcrm = @@ -1105,8 +1105,14 @@ raft_client_test_instance_hist_lreg_multi_facet_handler( struct raft_instance_hist_stats *rihs, struct lreg_value *lv) { - if (!lv || - lv->lrv_value_idx_in >= binary_hist_size(&rihs->rihs_bh)) + if (!lv || !rihs) + return -EINVAL; + + int hsz = binary_hist_size(&rihs->rihs_bh); + if (hsz < 0) + return hsz; + + if (lv->lrv_value_idx_in >= (uint32_t)hsz) return -EINVAL; else if (op == LREG_NODE_CB_OP_WRITE_VAL) @@ -1362,10 +1368,15 @@ raft_client_test_lreg_init(struct raft_instance *ri) if (rc) return rc; + NIOVA_ASSERT( + 
(LREG_USER_TYPE_HISTOGRAM__MAX - LREG_USER_TYPE_HISTOGRAM__MIN) >= + RAFT_INSTANCE_HIST_MAX); + for (enum raft_instance_hist_types i = RAFT_INSTANCE_HIST_MIN; i < RAFT_INSTANCE_HIST_MAX; i++) { - lreg_node_init(&ri->ri_rihs[i].rihs_lrn, i, + enum lreg_user_types x = i + LREG_USER_TYPE_HISTOGRAM__MIN; + lreg_node_init(&ri->ri_rihs[i].rihs_lrn, x, raft_client_test_instance_hist_lreg_cb, (void *)&ri->ri_rihs[i], LREG_INIT_OPT_IGNORE_NUM_VAL_ZERO); diff --git a/test/raft_test.h b/test/raft_test.h index a0d83b4a1..830fdd1e5 100644 --- a/test/raft_test.h +++ b/test/raft_test.h @@ -29,7 +29,7 @@ */ struct raft_test_values { - uint64_t rtv_seqno; + int64_t rtv_seqno; union { uint64_t rtv_request_value; @@ -106,9 +106,9 @@ do { \ __uuid_str, raft_test_data_op_2_string((rtdb)->rtdb_op), \ (rtdb)->rtdb_num_values, \ ((rtdb)->rtdb_num_values > 0 ? \ - (rtdb)->rtdb_values[0].rtv_seqno : -1), \ + (long)(rtdb)->rtdb_values[0].rtv_seqno : -1L), \ ((rtdb)->rtdb_num_values > 0 ? \ - (rtdb)->rtdb_values[0].rtv_request_value : -1), \ + (long)(rtdb)->rtdb_values[0].rtv_request_value : -1L), \ ##__VA_ARGS__); \ } \ } while (0) diff --git a/test/tcp-test.c b/test/tcp-test.c index c00db0b33..e8147577e 100644 --- a/test/tcp-test.c +++ b/test/tcp-test.c @@ -16,7 +16,7 @@ static niova_atomic64_t numPingPongsDone; #define DEFAULT_RUN_TIME 10; #define DEFAULT_TCP_SIZE 1024*1024; -static size_t tcpSize = DEFAULT_TCP_SIZE; +static ssize_t tcpSize = DEFAULT_TCP_SIZE; static size_t runTime = DEFAULT_RUN_TIME; /** diff --git a/test/udp-test.c b/test/udp-test.c index 9f42ec109..fa238bc7d 100644 --- a/test/udp-test.c +++ b/test/udp-test.c @@ -13,7 +13,7 @@ static niova_atomic64_t numPingPongsDone; #define OPTS "ht:s:" -static size_t udpSize = 5000; +static ssize_t udpSize = 5000; static size_t runTime = 1; /** diff --git a/test/work-dispatch-test.c b/test/work-dispatch-test.c index 92965d344..c0b3b6412 100644 --- a/test/work-dispatch-test.c +++ b/test/work-dispatch-test.c @@ -17,7 +17,7 @@ #include 
#include -#define NUM_SECONDS_TO_RUN_TEST 2ULL +#define NUM_SECONDS_TO_RUN_TEST 2L pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; pthread_cond_t cond = PTHREAD_COND_INITIALIZER;