//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }
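
  // Pops the most recently cached chunk of size class class_id, refilling
  // the cache from the shared allocator when it is empty. Returns nullptr
  // if the refill fails (out of memory).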
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }
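
  // Pushes chunk p of size class class_id onto the cache, converting it to
  // the allocator's compact pointer representation. When the cache is full,
  // half of it is drained back to the shared allocator first.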
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }
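
  // Returns all cached chunks of every size class back to the shared
  // allocator (used by Destroy).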
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;
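
  // Lazily initializes max_count and class_size for every size class on the
  // first call; a nonzero max_count marks the cache as already initialized.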
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }
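
  // Requests max_count / 2 chunks of size class class_id from the shared
  // allocator in a single call; returns false if the allocator fails.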
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }
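
  // Returns the last `count` cached chunks of size class class_id back to
  // the shared allocator.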
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }
  // Returns a TransferBatch suitable for class_id.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }
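
  // Pops a pointer of size class class_id from the cache, prefetching the
  // next cached chunk. Refills the cache from a TransferBatch when empty;
  // returns nullptr if the refill fails.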
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }
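
  // Pushes pointer p of size class class_id onto the cache. When the cache
  // is full, half of it is packed into a TransferBatch and returned to the
  // shared allocator first.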
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }
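
  // Returns all cached chunks of every size class back to the shared
  // allocator (used by Destroy).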
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;
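
  // Lazily computes max_count, class_size and batch_class_id for every size
  // class on the first call; a nonzero max_count marks the cache as already
  // initialized.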
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
                batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }
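
  // Obtains a TransferBatch for class_id from the shared allocator, copies
  // its chunks into the cache and destroys the batch. Returns false if the
  // allocator cannot provide a batch.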
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }
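
  // Packs up to max_count / 2 cached chunks of size class class_id into a
  // TransferBatch and returns it to the shared allocator. Dies if a batch
  // cannot be allocated for the transfer.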
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};