//===- X86_64.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"
using namespace llvm::MachO;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

namespace {

struct X86_64 : TargetInfo {
  X86_64();

  uint64_t getImplicitAddend(MemoryBufferRef, const section_64 &,
                             const relocation_info &) const override;
  void relocateOne(uint8_t *loc, const Reloc &, uint64_t val) const override;

  void writeStub(uint8_t *buf, const macho::Symbol &) const override;
  void writeStubHelperHeader(uint8_t *buf) const override;
  void writeStubHelperEntry(uint8_t *buf, const DylibSymbol &,
                            uint64_t entryAddr) const override;

  void prepareSymbolRelocation(lld::macho::Symbol *, const InputSection *,
                               const Reloc &) override;
  uint64_t resolveSymbolVA(uint8_t *buf, const lld::macho::Symbol &,
                           uint8_t type) const override;
};

} // namespace
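
// Helpers for diagnosing malformed relocation records in input object files.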
static std::string getErrorLocation(MemoryBufferRef mb, const section_64 &sec,
                                    const relocation_info &rel) {
  return ("invalid relocation at offset " + std::to_string(rel.r_address) +
          " of " + sec.segname + "," + sec.sectname + " in " +
          mb.getBufferIdentifier())
      .str();
}

static void validateLength(MemoryBufferRef mb, const section_64 &sec,
                           const relocation_info &rel,
                           const std::vector<uint8_t> &validLengths) {
  if (std::find(validLengths.begin(), validLengths.end(), rel.r_length) !=
      validLengths.end())
    return;

  std::string msg = getErrorLocation(mb, sec, rel) + ": relocations of type " +
                    std::to_string(rel.r_type) + " must have r_length of ";
  bool first = true;
  for (uint8_t length : validLengths) {
    if (!first)
      msg += " or ";
    first = false;
    msg += std::to_string(length);
  }
  fatal(msg);
}
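
// Mach-O relocations store their addend inline, at the location the
// relocation applies to. Read it back with the width given by r_length, after
// validating that the record's type, r_pcrel, and r_length fields are
// mutually consistent.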
uint64_t X86_64::getImplicitAddend(MemoryBufferRef mb, const section_64 &sec,
                                   const relocation_info &rel) const {
  auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
  const uint8_t *loc = buf + sec.offset + rel.r_address;

  if (isThreadLocalVariables(sec.flags) && rel.r_type != X86_64_RELOC_UNSIGNED)
    error("relocations in thread-local variable sections must be "
          "X86_64_RELOC_UNSIGNED");

  switch (rel.r_type) {
  case X86_64_RELOC_BRANCH:
    // XXX: ld64 also supports r_length = 0 here but I'm not sure when such a
    // relocation will actually be generated.
    validateLength(mb, sec, rel, {2});
    break;
  case X86_64_RELOC_SIGNED:
  case X86_64_RELOC_SIGNED_1:
  case X86_64_RELOC_SIGNED_2:
  case X86_64_RELOC_SIGNED_4:
  case X86_64_RELOC_GOT_LOAD:
  case X86_64_RELOC_GOT:
  case X86_64_RELOC_TLV:
    if (!rel.r_pcrel)
      fatal(getErrorLocation(mb, sec, rel) + ": relocations of type " +
            std::to_string(rel.r_type) + " must be pcrel");
    validateLength(mb, sec, rel, {2});
    break;
  case X86_64_RELOC_UNSIGNED:
    if (rel.r_pcrel)
      fatal(getErrorLocation(mb, sec, rel) + ": relocations of type " +
            std::to_string(rel.r_type) + " must not be pcrel");
    validateLength(mb, sec, rel, {2, 3});
    break;
  default:
    error("TODO: Unhandled relocation type " + std::to_string(rel.r_type));
    return 0;
  }

  switch (rel.r_length) {
  case 0:
    return *loc;
  case 1:
    return read16le(loc);
  case 2:
    return read32le(loc);
  case 3:
    return read64le(loc);
  default:
    llvm_unreachable("invalid r_length");
  }
}
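
// Apply a relocation's computed value at the fixup location, using the store
// width selected by r_length.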
void X86_64::relocateOne(uint8_t *loc, const Reloc &r, uint64_t val) const {
  switch (r.type) {
  case X86_64_RELOC_BRANCH:
  case X86_64_RELOC_SIGNED:
  case X86_64_RELOC_SIGNED_1:
  case X86_64_RELOC_SIGNED_2:
  case X86_64_RELOC_SIGNED_4:
  case X86_64_RELOC_GOT_LOAD:
  case X86_64_RELOC_GOT:
  case X86_64_RELOC_TLV:
    // These types are only used for pc-relative relocations, so offset by 4
    // since the RIP has advanced by 4 at this point. This is only valid when
    // r_length = 2, which is enforced by validateLength().
    val -= 4;
    break;
  case X86_64_RELOC_UNSIGNED:
    break;
  default:
    llvm_unreachable(
        "getImplicitAddend should have flagged all unhandled relocation types");
  }

  switch (r.length) {
  case 0:
    *loc = val;
    break;
  case 1:
    write16le(loc, val);
    break;
  case 2:
    write32le(loc, val);
    break;
  case 3:
    write64le(loc, val);
    break;
  default:
    llvm_unreachable("invalid r_length");
  }
}

// The following methods emit a number of assembly sequences with RIP-relative
// addressing. Note that RIP-relative addressing on X86-64 has the RIP pointing
// to the next instruction, not the current instruction, so we always have to
// account for the current instruction's size when calculating offsets.
// writeRipRelative helps with that.
//
// bufAddr:  The virtual address corresponding to buf[0].
// bufOff:   The offset within buf of the next instruction.
// destAddr: The destination address that the current instruction references.
static void writeRipRelative(uint8_t *buf, uint64_t bufAddr, uint64_t bufOff,
                             uint64_t destAddr) {
  uint64_t rip = bufAddr + bufOff;
  // For the instructions we care about, the RIP-relative address is always
  // stored in the last 4 bytes of the instruction.
  write32le(buf + bufOff - 4, destAddr - rip);
}
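
// For example, writeRipRelative(buf, 0x1000, 6, 0x2000) stores the 32-bit
// displacement 0x2000 - (0x1000 + 6) = 0xffa at buf + 2, i.e. in the last 4
// bytes of a 6-byte instruction placed at address 0x1000.

// Each stub jumps through its entry in the lazy symbol pointer section. That
// pointer initially targets the symbol's stub helper entry (below), so the
// first call goes through dyld's lazy binder, which then overwrites the
// pointer with the symbol's real address.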
static constexpr uint8_t stub[] = {
    0xff, 0x25, 0, 0, 0, 0, // jmpq *__la_symbol_ptr(%rip)
};

void X86_64::writeStub(uint8_t *buf, const macho::Symbol &sym) const {
  memcpy(buf, stub, 2); // just copy the two nonzero bytes
  uint64_t stubAddr = in.stubs->addr + sym.stubsIndex * sizeof(stub);
  writeRipRelative(buf, stubAddr, sizeof(stub),
                   in.lazyPointers->addr + sym.stubsIndex * WordSize);
}
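
// The stub helper header is shared by all lazily-bound symbols. It pushes the
// address of the ImageLoader cache and jumps to dyld_stub_binder via the GOT;
// dyld_stub_binder consumes that pointer together with the per-symbol bind
// offset pushed by the entry below.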
static constexpr uint8_t stubHelperHeader[] = {
    0x4c, 0x8d, 0x1d, 0, 0, 0, 0, // 0x0: leaq ImageLoaderCache(%rip), %r11
    0x41, 0x53,                   // 0x7: pushq %r11
    0xff, 0x25, 0, 0, 0, 0,       // 0x9: jmpq *dyld_stub_binder@GOT(%rip)
    0x90,                         // 0xf: nop
};
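
// Each entry pushes the offset of its symbol's record in the lazy binding
// info, then jumps to the shared header above.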
static constexpr uint8_t stubHelperEntry[] = {
    0x68, 0, 0, 0, 0, // 0x0: pushq <bind offset>
    0xe9, 0, 0, 0, 0, // 0x5: jmp <__stub_helper>
};

void X86_64::writeStubHelperHeader(uint8_t *buf) const {
  memcpy(buf, stubHelperHeader, sizeof(stubHelperHeader));
  writeRipRelative(buf, in.stubHelper->addr, 7, in.imageLoaderCache->getVA());
  writeRipRelative(buf, in.stubHelper->addr, 0xf,
                   in.got->addr +
                       in.stubHelper->stubBinder->gotIndex * WordSize);
}

void X86_64::writeStubHelperEntry(uint8_t *buf, const DylibSymbol &sym,
                                  uint64_t entryAddr) const {
  memcpy(buf, stubHelperEntry, sizeof(stubHelperEntry));
  write32le(buf + 1, sym.lazyBindOffset);
  writeRipRelative(buf, entryAddr, sizeof(stubHelperEntry),
                   in.stubHelper->addr);
}
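
// Called while scanning relocations: create whatever GOT, TLV-pointer, stub,
// or binding entries the relocation will need, before addresses are assigned.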
void X86_64::prepareSymbolRelocation(lld::macho::Symbol *sym,
                                     const InputSection *isec, const Reloc &r) {
  switch (r.type) {
  case X86_64_RELOC_GOT_LOAD: {
    if (needsBinding(sym))
      in.got->addEntry(sym);

    if (sym->isTlv())
      error("found GOT relocation referencing thread-local variable in " +
            toString(isec));
    break;
  }
  case X86_64_RELOC_GOT: {
    in.got->addEntry(sym);

    if (sym->isTlv())
      error("found GOT relocation referencing thread-local variable in " +
            toString(isec));
    break;
  }
  case X86_64_RELOC_BRANCH: {
    prepareBranchTarget(sym);
    break;
  }
  case X86_64_RELOC_UNSIGNED: {
    if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (r.length != 3) {
        error("X86_64_RELOC_UNSIGNED referencing the dynamic symbol " +
              dysym->getName() + " must have r_length = 3");
        return;
      }
    }
    addNonLazyBindingEntries(sym, isec, r.offset, r.addend);
    break;
  }
  case X86_64_RELOC_SIGNED:
  case X86_64_RELOC_SIGNED_1:
  case X86_64_RELOC_SIGNED_2:
  case X86_64_RELOC_SIGNED_4:
    // TODO: warn if they refer to a weak global
    break;
  case X86_64_RELOC_TLV: {
    if (needsBinding(sym))
      in.tlvPointers->addEntry(sym);

    if (!sym->isTlv())
      error(
          "found X86_64_RELOC_TLV referencing a non-thread-local variable in " +
          toString(isec));
    break;
  }
  case X86_64_RELOC_SUBTRACTOR:
    fatal("TODO: handle relocation type " + std::to_string(r.type));
    break;
  default:
    llvm_unreachable("unexpected relocation type");
  }
}
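
// Return the address a fixup should point at: the symbol's GOT, TLV-pointer,
// or stub entry when one exists, and the symbol's own virtual address
// otherwise. For GOT_LOAD and TLV relocations that need no indirection, the
// movq opcode (0x8b, two bytes before the fixup) is rewritten to leaq (0x8d)
// so the instruction computes the address directly.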
uint64_t X86_64::resolveSymbolVA(uint8_t *buf, const lld::macho::Symbol &sym,
                                 uint8_t type) const {
  switch (type) {
  case X86_64_RELOC_GOT_LOAD: {
    if (!sym.isInGot()) {
      if (buf[-2] != 0x8b)
        error("X86_64_RELOC_GOT_LOAD must be used with movq instructions");
      buf[-2] = 0x8d;
      return sym.getVA();
    }
    LLVM_FALLTHROUGH;
  }
  case X86_64_RELOC_GOT:
    return in.got->addr + sym.gotIndex * WordSize;
  case X86_64_RELOC_BRANCH: {
    if (sym.isInStubs())
      return in.stubs->addr + sym.stubsIndex * sizeof(stub);
    return sym.getVA();
  }
  case X86_64_RELOC_UNSIGNED:
  case X86_64_RELOC_SIGNED:
  case X86_64_RELOC_SIGNED_1:
  case X86_64_RELOC_SIGNED_2:
  case X86_64_RELOC_SIGNED_4:
    return sym.getVA();
  case X86_64_RELOC_TLV: {
    if (sym.isInGot())
      return in.tlvPointers->addr + sym.gotIndex * WordSize;

    // Convert the movq to a leaq.
    assert(isa<Defined>(&sym));
    if (buf[-2] != 0x8b)
      error("X86_64_RELOC_TLV must be used with movq instructions");
    buf[-2] = 0x8d;
    return sym.getVA();
  }
  case X86_64_RELOC_SUBTRACTOR:
    fatal("TODO: handle relocation type " + std::to_string(type));
  default:
    llvm_unreachable("Unexpected relocation type");
  }
}

X86_64::X86_64() {
  cpuType = CPU_TYPE_X86_64;
  cpuSubtype = CPU_SUBTYPE_X86_64_ALL;

  stubSize = sizeof(stub);
  stubHelperHeaderSize = sizeof(stubHelperHeader);
  stubHelperEntrySize = sizeof(stubHelperEntry);
}

TargetInfo *macho::createX86_64TargetInfo() {
  static X86_64 t;
  return &t;
}