common.cpp
3.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
//===-- common.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "gwp_asan/common.h"
#include "gwp_asan/stack_trace_compressor.h"
#include <assert.h>
using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;
namespace gwp_asan {
// Maps an Error code to the human-readable string used in crash reports.
// Traps if E holds a value outside the Error enumeration.
const char *ErrorToString(const Error &E) {
  const char *Description = nullptr;
  switch (E) {
  case Error::UNKNOWN:
    Description = "Unknown";
    break;
  case Error::USE_AFTER_FREE:
    Description = "Use After Free";
    break;
  case Error::DOUBLE_FREE:
    Description = "Double Free";
    break;
  case Error::INVALID_FREE:
    Description = "Invalid (Wild) Free";
    break;
  case Error::BUFFER_OVERFLOW:
    Description = "Buffer Overflow";
    break;
  case Error::BUFFER_UNDERFLOW:
    Description = "Buffer Underflow";
    break;
  }
  if (Description)
    return Description;
  // Every enumerator is handled above; falling through means E was corrupt.
  __builtin_trap();
}
// Out-of-line definitions for the static constexpr members so they can be
// ODR-used (e.g. passed by reference) under pre-C++17 language rules.
constexpr size_t AllocationMetadata::kStackFrameStorageBytes;
constexpr size_t AllocationMetadata::kMaxTraceLengthToCollect;
// Records the bookkeeping for a fresh allocation: its address, size, and the
// allocating thread. Stale deallocation state from any previous use of this
// metadata slot is cleared so a report never mixes old and new information.
void AllocationMetadata::RecordAllocation(uintptr_t AllocAddr,
                                          size_t AllocSize) {
  IsDeallocated = false;
  Addr = AllocAddr;
  Size = AllocSize;
  AllocationTrace.ThreadID = getThreadID();
  // Reset the deallocation trace; it is only filled in by
  // RecordDeallocation()/RecordBacktrace() when this slot is freed.
  DeallocationTrace.ThreadID = kInvalidThreadID;
  DeallocationTrace.TraceSize = 0;
}
// Marks this slot's allocation as freed and remembers which thread freed it.
void AllocationMetadata::RecordDeallocation() {
  DeallocationTrace.ThreadID = getThreadID();
  IsDeallocated = true;
}
// Collects a backtrace via the user-supplied callback and stores it in
// compressed form in CompressedTrace. TraceSize ends up 0 when no callback
// is configured.
void AllocationMetadata::CallSiteInfo::RecordBacktrace(
    options::Backtrace_t Backtrace) {
  TraceSize = 0;
  if (!Backtrace)
    return;
  uintptr_t RawFrames[kMaxTraceLengthToCollect];
  size_t NumFrames = Backtrace(RawFrames, kMaxTraceLengthToCollect);
  // The callback reports how many frames were *available*, which may exceed
  // the capacity we handed it; clamp to what was actually written before
  // packing.
  if (NumFrames > kMaxTraceLengthToCollect)
    NumFrames = kMaxTraceLengthToCollect;
  TraceSize = compression::pack(RawFrames, NumFrames, CompressedTrace,
                                AllocationMetadata::kStackFrameStorageBytes);
}
size_t AllocatorState::maximumAllocationSize() const {
  // Each slot is exactly one page, so a single page is the largest request
  // the pool can satisfy.
  return PageSize;
}
// Returns the base address of slot N. The pool alternates guard pages and
// slots — [guard][slot 0][guard][slot 1]... — so slot N starts one guard
// page past N full (slot + guard) strides.
uintptr_t AllocatorState::slotToAddr(size_t N) const {
  const uintptr_t SlotStride = maximumAllocationSize() + PageSize;
  return GuardedPagePool + PageSize + N * SlotStride;
}
// Reports whether Ptr falls on one of the pool's guard pages. Ptr must lie
// inside the pool (checked by the assert).
bool AllocatorState::isGuardPage(uintptr_t Ptr) const {
  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
  const size_t PageIndex = (Ptr - GuardedPagePool) / PageSize;
  // Each stride is one guard page followed by the pages of a slot; guard
  // pages sit at index 0 of every stride.
  const size_t PagesPerStride = maximumAllocationSize() / PageSize + 1;
  return PageIndex % PagesPerStride == 0;
}
// Maps an address inside the pool to the index of the slot whose stride
// contains it. Each stride is (maximumAllocationSize() + PageSize) bytes,
// i.e. a slot plus its leading guard page.
static size_t addrToSlot(const AllocatorState *State, uintptr_t Ptr) {
  const size_t BytesPerStride =
      State->maximumAllocationSize() + State->PageSize;
  return (Ptr - State->GuardedPagePool) / BytesPerStride;
}
// Returns the index of the slot nearest to Ptr. Used when a fault lands on
// a guard page (or just outside the pool) and we need the slot whose
// metadata most plausibly explains the access. The boundary clamps below
// must run before isGuardPage(), whose assert requires an in-pool pointer.
size_t AllocatorState::getNearestSlot(uintptr_t Ptr) const {
  // At or before the end of the leading guard page: clamp to slot 0.
  if (Ptr <= GuardedPagePool + PageSize)
    return 0;
  // Past the start of the trailing guard page: clamp to the last slot.
  if (Ptr > GuardedPagePoolEnd - PageSize)
    return MaxSimultaneousAllocations - 1;
  // An address inside a slot maps to that slot directly.
  if (!isGuardPage(Ptr))
    return addrToSlot(this, Ptr);
  // On an interior guard page: the lower half of the page attributes the
  // access to the preceding slot, the upper half to the following one.
  if (Ptr % PageSize <= PageSize / 2)
    return addrToSlot(this, Ptr - PageSize); // Round down.
  return addrToSlot(this, Ptr + PageSize); // Round up.
}
} // namespace gwp_asan