//=-- lsan_common.h -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_COMMON_H
#define LSAN_COMMON_H
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
// LeakSanitizer relies on some Glibc internals (e.g. the TLS machinery) on
// Linux. LSan also works poorly on 32-bit architectures, because the "small"
// (4-byte) pointer size leads to a high false-negative ratio on large leaks.
// We still want to support it on some 32-bit arches (e.g. x86), see
// https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust the TLS machinery for
// the new architecture inside the sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
    (SANITIZER_WORDSIZE == 64) &&                               \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
     defined(__powerpc64__) || defined(__s390x__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && \
    (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
#define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && \
    SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif
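// Illustrative guard (sketch, not itself part of this header): code in the
// parent tool that should only run when leak checking is possible is
// typically wrapped as
//   #if CAN_SANITIZE_LEAKS
//     __lsan::DoLeakCheck();
//   #endif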
namespace __sanitizer {
class FlagParser;
class ThreadRegistry;
struct DTLS;
}  // namespace __sanitizer
namespace __lsan {
// Chunk tags.
enum ChunkTag {
  kDirectlyLeaked = 0,  // default
  kIndirectlyLeaked = 1,
  kReachable = 2,
  kIgnored = 3
};
const u32 kInvalidTid = (u32) -1;
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG
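// Each LSAN_FLAG(Type, Name, DefaultValue, Description) entry in
// lsan_flags.inc expands to a plain "Type Name;" member here. For example
// (sketch; the exact wording of the entry lives in lsan_flags.inc), an entry
// like
//   LSAN_FLAG(bool, use_unaligned, false, "...")
// becomes the member "bool use_unaligned;" used by pointer_alignment() below.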
  void SetDefaults();
  uptr pointer_alignment() const {
    return use_unaligned ? 1 : sizeof(uptr);
  }
};
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);
struct Leak {
  u32 id;
  uptr hit_count;
  uptr total_size;
  u32 stack_trace_id;
  bool is_directly_leaked;
  bool is_suppressed;
};

struct LeakedObject {
  u32 leak_id;
  uptr addr;
  uptr size;
};
// Aggregates leaks by stack trace prefix.
class LeakReport {
 public:
  LeakReport() {}
  void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
                      ChunkTag tag);
  void ReportTopLeaks(uptr max_leaks);
  void PrintSummary();
  void ApplySuppressions();
  uptr UnsuppressedLeakCount();

 private:
  void PrintReportForLeak(uptr index);
  void PrintLeakedObjectsForLeak(uptr index);

  u32 next_id_ = 0;
  InternalMmapVector<Leak> leaks_;
  InternalMmapVector<LeakedObject> leaked_objects_;
};
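// Typical sequence (sketch of how the leak checker drives this class): the
// stop-the-world callback calls AddLeakedChunk() once per leaked allocation;
// the reporting code then calls ApplySuppressions(), checks
// UnsuppressedLeakCount(), and finally prints via ReportTopLeaks() and/or
// PrintSummary().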
typedef InternalMmapVector<uptr> Frontier;
// Platform-specific functions.
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
struct RootRegion {
  uptr begin;
  uptr size;
};

// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
// this Frontier vector before the StopTheWorldCallback actually runs.
// This is used when the OS has a unified callback API for suspending
// threads and enumerating roots.
struct CheckForLeaksParam {
  Frontier frontier;
  LeakReport leak_report;
  bool success = false;
};
InternalMmapVector<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                    uptr region_begin, uptr region_end, bool is_readable);
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam *argument);
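// Sketch of the intended flow (summarizing the comments above): the caller
// prepares a CheckForLeaksParam and hands it to LockStuffAndStopTheWorld()
// together with a StopTheWorldCallback; either that callback or, on platforms
// with a unified suspend/enumerate API, the lock step itself feeds roots into
// the param's frontier using the Scan* helpers below, and records the outcome
// in the param's success field.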
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};
// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
bool DisabledInThisThread();
// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted function.
struct ScopedInterceptorDisabler {
  ScopedInterceptorDisabler() { DisableInThisThread(); }
  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
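// Illustrative use inside an interceptor (sketch; INTERCEPTOR/REAL come from
// the interception layer, and some_function is a hypothetical name):
//   INTERCEPTOR(void *, some_function, uptr size) {
//     ScopedInterceptorDisabler disabler;  // allocations made below are
//                                          // ignored by LSan
//     return REAL(some_function)(size);
//   }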
// According to the Itanium C++ ABI, the array cookie is a single word
// containing the size of the allocated array.
static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                           uptr addr) {
  return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg) == 0;
}

// According to the ARM C++ ABI, the array cookie consists of two words:
// struct array_cookie {
//   std::size_t element_size;  // element_size != 0
//   std::size_t element_count;
// };
static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                       uptr addr) {
  return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
}
// Special case for "new T[0]" where T is a type with a destructor.
// new T[0] allocates a cookie (one or two words) holding the array size (0),
// and the pointer returned to the program points to the end of the allocated
// chunk. The actual cookie layout varies between platforms according to their
// C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
                                        uptr addr) {
#if defined(__arm__)
  return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
#else
  return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
#endif
}
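// Illustrative case (sketch): after "T *p = new T[0]" the only live pointer
// to the allocation is p == chunk_beg + chunk_size, i.e. one past the cookie,
// so a plain "addr inside [chunk_beg, chunk_beg + chunk_size)" test would
// miss it. The parent tool's PointsIntoChunk() can use the predicate above to
// still attribute such a pointer to the chunk and avoid a false leak report.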
// The following must be implemented in the parent tool.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
ThreadRegistry *GetThreadRegistryLocked();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);
// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();
// Return true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();
// Run platform-specific leak handlers.
void HandleLeaks();
// Wrapper for chunk metadata operations.
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  bool allocated() const;
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  uptr requested_size() const;
  u32 stack_trace_id() const;

 private:
  void *metadata_;
};
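// Illustrative traversal (sketch; MarkAllocatedChunksCb is a hypothetical
// name): a ForEachChunkCallback typically recovers the user-visible address
// and wraps it in LsanMetadata to inspect or update the tag, e.g.
//   static void MarkAllocatedChunksCb(uptr chunk, void *arg) {
//     chunk = GetUserBegin(chunk);
//     LsanMetadata m(chunk);
//     if (m.allocated() && m.tag() != kIgnored) m.set_tag(kReachable);
//   }
//   ...
//   ForEachChunk(MarkAllocatedChunksCb, nullptr);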
} // namespace __lsan
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();
} // extern "C"
#endif // LSAN_COMMON_H