Skip to content

Commit

Permalink
compiler-rt: inline GetMetadata() function
Browse files Browse the repository at this point in the history
Inlining GetMetadata() slightly improves performance, as shown by the benchmark timings below (lower is better).

Before:
test-arraylist : 118.22
test-avl-tree : 60.77
test-binary-heap : 28.25
test-binomial-heap : 25.02
test-bloom-filter : 6.19
test-compare-functions : 4.00
test-hash-functions : 7.78
test-hash-table : 14.92
test-list : 7.50
test-queue : 20.15
test-rb-tree : 43.92
test-set : 14.48
test-slist : 6.69
test-sortedarray : 7.24
test-trie : 5.74

After:
test-arraylist : 101.16
test-avl-tree : 51.40
test-binary-heap : 27.44
test-binomial-heap : 21.08
test-bloom-filter : 6.69
test-compare-functions : 4.39
test-hash-functions : 7.28
test-hash-table : 13.15
test-list : 4.47
test-queue : 16.12
test-rb-tree : 37.14
test-set : 12.15
test-slist : 4.08
test-sortedarray : 7.11
test-trie : 4.66

Signed-off-by: Hyeonggon Yoo <[email protected]>
  • Loading branch information
hygoni committed Apr 14, 2024
1 parent b0185cc commit 3335ed3
Show file tree
Hide file tree
Showing 2 changed files with 33 additions and 35 deletions.
33 changes: 33 additions & 0 deletions compiler-rt/lib/plsan/plsan.h
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,39 @@ void InitializeLocalVariableTLS();
void InitializeMetadataTable();
void DeleteLocalVariableTLS();

// Minimum granularity for mmap() of user memory and of metadata pages
// (64 KiB each).  constexpr (rather than const) makes the compile-time
// nature explicit for the shift/mask arithmetic in GetMetadata().
constexpr uptr kUserMapSize = 1 << 16;
constexpr uptr kMetaMapSize = 1 << 16;
// Size of one per-chunk Metadata record.
constexpr uptr kMetadataSize = sizeof(struct Metadata);
// Page-indexed lookup table; defined in plsan_allocator.cpp.
extern uptr *metadata_table;

// Map an arbitrary address to its Metadata record.
//
// Returns nullptr when the address is outside the tracked 48-bit address
// space or when no heap mapping covers its page.  For addresses in the
// primary allocator region the table entry packs the metadata base address
// (kUserMapSize-aligned, high bits) together with the object size (low
// bits); otherwise the entry is the Metadata pointer itself.
//
// NOTE(review): assumes metadata_table has been set up (see
// InitializeMetadataTable()) before the first lookup — confirm no caller
// can race initialization.
inline struct Metadata *GetMetadata(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  // __builtin_ctzl matches the width of uptr; page size is a power of two.
  uptr page_shift = __builtin_ctzl(GetPageSizeCached());
  uptr page_idx = addr >> page_shift;
  // One table entry per page across a 48-bit virtual address space.
  uptr table_size = static_cast<uptr>(1) << (48 - page_shift);
  if (page_idx >= table_size)
    return nullptr;

  uptr entry = metadata_table[page_idx];
  // If there's no entry, it's not on heap.
  if (!entry)
    return nullptr;

  if (kAllocatorSpace <= addr && addr < kAllocatorEnd) {
    // Primary allocator chunk: unpack metadata base and object size, then
    // index the per-chunk Metadata records stored descending below metabase.
    uptr metabase = entry & ~(kUserMapSize - 1);
    __sanitizer::u32 object_size = entry & (kUserMapSize - 1);
    // XXX: integer division is costly
    __sanitizer::u32 chunk_idx =
        (addr % ((object_size / kMetadataSize) * kUserMapSize)) / object_size;
    return reinterpret_cast<Metadata *>(metabase -
                                        (1 + chunk_idx) * kMetadataSize);
  }

  // Secondary (large) allocation: entry is the Metadata pointer directly.
  return reinterpret_cast<Metadata *>(entry);
}

} // namespace __plsan

#define ENSURE_PLSAN_INITED() \
Expand Down
35 changes: 0 additions & 35 deletions compiler-rt/lib/plsan/plsan_allocator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,6 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end) {
*end = *begin + sizeof(AllocatorCache);
}

// Thin forwarding wrapper; the actual table lookup is implemented in
// __plsan_metadata_lookup().
Metadata *GetMetadata(const void *p) { return __plsan_metadata_lookup(p); }

void IncRefCount(Metadata *metadata) {
if (!metadata)
return;
Expand Down Expand Up @@ -283,40 +281,7 @@ static void *Reallocate(const StackTrace *stack, void *p, uptr new_size,
return new_p;
}

// Minimum granularity for mmap() of user memory and metadata pages (64 KiB),
// and the size of one per-chunk Metadata record.
const uptr kUserMapSize = 1 << 16;
const uptr kMetaMapSize = 1 << 16;
const uptr kMetadataSize = sizeof(struct Metadata);

// Map an arbitrary address to its Metadata record, or nullptr when the
// address is outside the tracked 48-bit address space or no heap mapping
// covers its page.
struct Metadata *__plsan_metadata_lookup(const void *p) {
uptr addr = reinterpret_cast<uptr>(p);
// Page size is a power of two, so ctz yields the page shift.
uptr page_shift = __builtin_ctz(GetPageSizeCached());
uptr page_idx = addr >> page_shift;
// One table entry per page across a 48-bit virtual address space.
uptr table_size = 1LL << (48 - page_shift);
if (page_idx >= table_size)
return nullptr;

uptr entry = *reinterpret_cast<uptr *>(metadata_table + page_idx);
// If there's no entry, it's not on heap
if (!entry)
return nullptr;

if (kAllocatorSpace <= addr && addr < kAllocatorEnd) {
// Primary allocator chunk: the entry packs the kUserMapSize-aligned
// metadata base address (high bits) with the object size (low bits);
// Metadata records are stored descending below metabase.
uptr metabase = entry & ~(kUserMapSize - 1);
__sanitizer::u32 object_size = entry & (kUserMapSize - 1);
// XXX: integer division is costly
__sanitizer::u32 chunk_idx =
(addr % ((object_size / kMetadataSize) * kUserMapSize)) / object_size;
struct Metadata *m = reinterpret_cast<Metadata *>(
metabase - (1 + chunk_idx) * kMetadataSize);
return m;
}

// Secondary (large) allocation: the entry is the Metadata pointer itself.
return reinterpret_cast<Metadata *>(entry);
}

// Page-indexed metadata lookup table; entries are populated on page
// allocation (see comment below) and read by the metadata lookup path.
uptr *metadata_table;

/*
* Metabase is stored in the metadata table when new page is allocated,
* not when an object is allocated or freed.
Expand Down

0 comments on commit 3335ed3

Please sign in to comment.