diff --git a/include/aws/testing/aws_test_harness.h b/include/aws/testing/aws_test_harness.h
index 2f743be54..f9bf631e4 100644
--- a/include/aws/testing/aws_test_harness.h
+++ b/include/aws/testing/aws_test_harness.h
@@ -474,10 +474,11 @@ static inline int s_aws_run_test_case(struct aws_test_harness *harness) {
          * but aws_mem_tracer_dump() needs a valid logger to be active */
         aws_logger_set(&err_logger);

-        const size_t leaked_bytes = aws_mem_tracer_count(allocator);
+        const size_t leaked_allocations = aws_mem_tracer_count(allocator);
+        const size_t leaked_bytes = aws_mem_tracer_bytes(allocator);
         if (leaked_bytes) {
             aws_mem_tracer_dump(allocator);
-            PRINT_FAIL_INTERNAL0("Test leaked memory: %zu bytes", leaked_bytes);
+            PRINT_FAIL_INTERNAL0("Test leaked memory: %zu bytes %zu allocations", leaked_bytes, leaked_allocations);
             goto fail;
         }

diff --git a/source/memtrace.c b/source/memtrace.c
index eea342ad1..9c5bff60c 100644
--- a/source/memtrace.c
+++ b/source/memtrace.c
@@ -438,11 +438,18 @@ static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) {
 static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) {
     struct alloc_tracer *tracer = allocator->impl;
     void *new_ptr = old_ptr;
-    if (aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size)) {
-        return NULL;
-    }

+    /*
+     * Careful with the ordering of state cleanup here.
+     * The tracer keeps a hash table (alloc ptr as key) of meta info about each allocation.
+     * To avoid race conditions, the state update during realloc needs to be done in the
+     * following order:
+     * - remove meta info (other threads can't reuse that key, because the ptr is still valid)
+     * - realloc (can't fail, ptr might remain the same)
+     * - add meta info for the reallocated memory
+     */
     s_alloc_tracer_untrack(tracer, old_ptr);
+    aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size);
     s_alloc_tracer_track(tracer, new_ptr, new_size);

     return new_ptr;
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index f8d9a8d44..f2b580dfa 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -317,6 +317,8 @@ add_test_case(sba_threaded_allocs_and_frees)
 add_test_case(sba_threaded_reallocs)
 add_test_case(sba_churn)
 add_test_case(sba_metrics)
+add_test_case(default_threaded_reallocs)
+add_test_case(default_threaded_allocs_and_frees)
 add_test_case(test_memtrace_none)
 add_test_case(test_memtrace_count)
diff --git a/tests/alloc_test.c b/tests/alloc_test.c
index 0891e89ec..66ff95999 100644
--- a/tests/alloc_test.c
+++ b/tests/alloc_test.c
@@ -126,39 +126,42 @@ static int s_sba_random_reallocs(struct aws_allocator *allocator, void *ctx) {
 }
 AWS_TEST_CASE(sba_random_reallocs, s_sba_random_reallocs)

-struct sba_thread_test_data {
-    struct aws_allocator *sba;
+struct allocator_thread_test_data {
+    struct aws_allocator *test_allocator;
     uint32_t thread_idx;
 };

-static void s_sba_threaded_alloc_worker(void *user_data) {
-    struct aws_allocator *sba = ((struct sba_thread_test_data *)user_data)->sba;
+static void s_threaded_alloc_worker(void *user_data) {
+    struct aws_allocator *test_allocator = ((struct allocator_thread_test_data *)user_data)->test_allocator;

     void *allocs[NUM_TEST_ALLOCS];
     for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) {
         size_t size = aws_max_size(rand() % 512, 1);
-        void *alloc = aws_mem_acquire(sba, size);
+        void *alloc = aws_mem_acquire(test_allocator, size);
         AWS_FATAL_ASSERT(alloc);
         allocs[count] = alloc;
     }

     for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) {
         void *alloc = allocs[count];
-        aws_mem_release(sba, alloc);
+        aws_mem_release(test_allocator, alloc);
     }
 }

-static void s_sba_thread_test(struct aws_allocator *allocator, void (*thread_fn)(void *), struct aws_allocator *sba) {
+static void s_thread_test(
+    struct aws_allocator *allocator,
+    void (*thread_fn)(void *),
+    struct aws_allocator *test_allocator) {
     const struct aws_thread_options *thread_options = aws_default_thread_options();

     struct aws_thread threads[NUM_TEST_THREADS];
-    struct sba_thread_test_data thread_data[NUM_TEST_THREADS];
+    struct allocator_thread_test_data thread_data[NUM_TEST_THREADS];
     AWS_ZERO_ARRAY(threads);
     AWS_ZERO_ARRAY(thread_data);

     for (size_t thread_idx = 0; thread_idx < AWS_ARRAY_SIZE(threads); ++thread_idx) {
         struct aws_thread *thread = &threads[thread_idx];
         aws_thread_init(thread, allocator);
-        struct sba_thread_test_data *data = &thread_data[thread_idx];
-        data->sba = sba;
+        struct allocator_thread_test_data *data = &thread_data[thread_idx];
+        data->test_allocator = test_allocator;
         data->thread_idx = (uint32_t)thread_idx;
         aws_thread_launch(thread, thread_fn, data, thread_options);
     }
@@ -175,7 +178,7 @@ static int s_sba_threaded_allocs_and_frees(struct aws_allocator *allocator, void

     struct aws_allocator *sba = aws_small_block_allocator_new(allocator, true);

-    s_sba_thread_test(allocator, s_sba_threaded_alloc_worker, sba);
+    s_thread_test(allocator, s_threaded_alloc_worker, sba);

     aws_small_block_allocator_destroy(sba);

@@ -183,9 +186,9 @@ static int s_sba_threaded_allocs_and_frees(struct aws_allocator *allocator, void
 }
 AWS_TEST_CASE(sba_threaded_allocs_and_frees, s_sba_threaded_allocs_and_frees)

-static void s_sba_threaded_realloc_worker(void *user_data) {
-    struct sba_thread_test_data *thread_data = user_data;
-    struct aws_allocator *sba = thread_data->sba;
+static void s_threaded_realloc_worker(void *user_data) {
+    struct allocator_thread_test_data *thread_data = user_data;
+    struct aws_allocator *test_allocator = thread_data->test_allocator;
     void *alloc = NULL;
     size_t size = 0;
     for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) {
@@ -194,7 +197,7 @@ static void s_sba_threaded_realloc_worker(void *user_data) {
         if (old_size) {
             AWS_FATAL_ASSERT(0 == memcmp(alloc, &thread_data->thread_idx, 1));
         }
-        AWS_FATAL_ASSERT(0 == aws_mem_realloc(sba, &alloc, old_size, size));
+        AWS_FATAL_ASSERT(0 == aws_mem_realloc(test_allocator, &alloc, old_size, size));
         /* If there was a value, make sure it's still there */
         if (old_size && size) {
             AWS_FATAL_ASSERT(0 == memcmp(alloc, &thread_data->thread_idx, 1));
@@ -203,7 +206,7 @@ static void s_sba_threaded_realloc_worker(void *user_data) {
             memset(alloc, (int)thread_data->thread_idx, size);
         }
     }
-    AWS_FATAL_ASSERT(0 == aws_mem_realloc(sba, &alloc, size, 0));
+    AWS_FATAL_ASSERT(0 == aws_mem_realloc(test_allocator, &alloc, size, 0));
 }

 static int s_sba_threaded_reallocs(struct aws_allocator *allocator, void *ctx) {
@@ -212,7 +215,7 @@ static int s_sba_threaded_reallocs(struct aws_allocator *allocator, void *ctx) {

     struct aws_allocator *sba = aws_small_block_allocator_new(allocator, true);

-    s_sba_thread_test(allocator, s_sba_threaded_realloc_worker, sba);
+    s_thread_test(allocator, s_threaded_realloc_worker, sba);

     aws_small_block_allocator_destroy(sba);

@@ -313,10 +316,33 @@ static int s_sba_metrics_test(struct aws_allocator *allocator, void *ctx) {

     ASSERT_INT_EQUALS(0, aws_small_block_allocator_bytes_active(sba));

-    /* after freeing everything, we should have reliniquished all but one page in each bin */
+    /* after freeing everything, we should have relinquished all but one page in each bin */
     ASSERT_INT_EQUALS(5 * aws_small_block_allocator_page_size(sba), aws_small_block_allocator_bytes_reserved(sba));

     aws_small_block_allocator_destroy(sba);
     return 0;
 }
 AWS_TEST_CASE(sba_metrics, s_sba_metrics_test)
+
+/*
+ * Default allocator tests.
+ */
+static int s_default_threaded_reallocs(struct aws_allocator *allocator, void *ctx) {
+    (void)ctx;
+    srand(15);
+
+    s_thread_test(allocator, s_threaded_realloc_worker, allocator);
+
+    return 0;
+}
+AWS_TEST_CASE(default_threaded_reallocs, s_default_threaded_reallocs)
+
+static int s_default_threaded_allocs_and_frees(struct aws_allocator *allocator, void *ctx) {
+    (void)ctx;
+    srand(99);
+
+    s_thread_test(allocator, s_threaded_alloc_worker, allocator);
+
+    return 0;
+}
+AWS_TEST_CASE(default_threaded_allocs_and_frees, s_default_threaded_allocs_and_frees)
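
The aws_test_harness.h hunk now reports two separate metrics: aws_mem_tracer_count() gives the number of outstanding allocations and aws_mem_tracer_bytes() gives the bytes they still hold. A minimal sketch of the same leak check outside the harness follows; it is illustration only, and the aws_mem_tracer_new() call assumes the four-argument signature (wrapped allocator, deprecated/unused slot, trace level, frames per stack) declared in aws/common/allocator.h.

#include <aws/common/allocator.h>

#include <stdio.h>

int main(void) {
    /* Wrap the default allocator in a tracing allocator; AWS_MEMTRACE_BYTES tracks
     * allocation counts and byte totals without capturing stacks. */
    struct aws_allocator *tracer =
        aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_BYTES, 0);

    /* Deliberately "leak" one allocation through the tracing wrapper. */
    void *leaked = aws_mem_acquire(tracer, 64);
    (void)leaked;

    /* The same two metrics the updated harness prints on failure. */
    size_t leaked_allocations = aws_mem_tracer_count(tracer);
    size_t leaked_bytes = aws_mem_tracer_bytes(tracer);
    if (leaked_allocations) {
        printf("leaked %zu bytes across %zu allocations\n", leaked_bytes, leaked_allocations);
        aws_mem_tracer_dump(tracer); /* only prints when a logger is active */
    }

    aws_mem_tracer_destroy(tracer);
    return 0;
}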
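The comment added in the memtrace.c hunk carries the core reasoning: under the old order, the underlying realloc ran first, so old_ptr could be freed and handed to another thread before its hash-table entry was removed, and that thread's track() would collide with the stale entry keyed by old_ptr. Below is a single-threaded sketch of the fixed ordering, illustration only: table_put()/table_remove() are hypothetical stand-ins for s_alloc_tracer_track()/s_alloc_tracer_untrack(), not the library code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the tracer's pointer-keyed hash table updates. */
static void table_put(void *key, size_t size) {
    printf("track   %p (%zu bytes)\n", key, size);
}

static void table_remove(void *key) {
    printf("untrack %p\n", key);
}

static void *tracked_realloc(void *old_ptr, size_t old_size, size_t new_size) {
    (void)old_size;

    /* 1) Drop the entry while old_ptr is still a live allocation: no other thread
     *    can be handed this address yet, so the key cannot be re-inserted under us. */
    table_remove(old_ptr);

    /* 2) Reallocate. With the previous order (realloc first), old_ptr could already
     *    be freed and recycled to another thread, whose own track() would then race
     *    with the stale entry still keyed by old_ptr. */
    void *new_ptr = realloc(old_ptr, new_size);

    /* 3) Publish the entry for the new (possibly unchanged) address. */
    table_put(new_ptr, new_size);
    return new_ptr;
}

int main(void) {
    void *p = malloc(16);
    table_put(p, 16);

    p = tracked_realloc(p, 16, 64);

    table_remove(p); /* mirror of release: untrack first, then free */
    free(p);
    return 0;
}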