AArch64ConditionOptimizer.cpp
//=- AArch64ConditionOptimizer.cpp - Remove useless comparisons for AArch64 -=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to make consecutive compares of values use the same operands
// to allow the CSE pass to remove duplicated instructions. For this it analyzes
// branches and adjusts comparisons with immediate values by converting:
// * GE -> GT
// * GT -> GE
// * LT -> LE
// * LE -> LT
// and adjusting immediate values appropriately. It basically corrects two
// immediate values towards each other to make them equal.
//
// Consider the following example in C:
//
// if ((a < 5 && ...) || (a > 5 && ...)) {
// ~~~~~ ~~~~~
// ^ ^
// x y
//
// Here both the "x" and "y" expressions compare "a" with "5". When "x"
// evaluates to "false", "y" can just check the flags set by the first
// comparison. However, as a result of the canonicalization employed by
// SelectionDAGBuilder::visitSwitchCase, DAGCombine, and other target-specific
// code, the assembly ends up in a form that is not CSE-friendly:
//
// ...
// cmp w8, #4
// b.gt .LBB0_3
// ...
// .LBB0_3:
// cmp w8, #6
// b.lt .LBB0_6
// ...
//
// Same assembly after the pass:
//
// ...
// cmp w8, #5
// b.ge .LBB0_3
// ...
// .LBB0_3:
// cmp w8, #5 // <-- CSE pass removes this instruction
// b.le .LBB0_6
// ...
//
// Currently only SUBS and ADDS followed by b.?? are supported.
//
// TODO: maybe handle TBNZ/TBZ the same way as CMP when they are used instead
//       of CMP for "a < 0"
// TODO: handle other conditional instructions (e.g. CSET)
// TODO: allow the second branch to be anything if it doesn't require adjusting
//
//===----------------------------------------------------------------------===//
#include "AArch64.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "aarch64-condopt"

STATISTIC(NumConditionsAdjusted, "Number of conditions adjusted");

namespace {
class AArch64ConditionOptimizer : public MachineFunctionPass {
const TargetInstrInfo *TII;
MachineDominatorTree *DomTree;
const MachineRegisterInfo *MRI;

public:
  // Stores the immediate, the compare instruction opcode, and the branch
  // condition (in this order) of the adjusted comparison.
using CmpInfo = std::tuple<int, unsigned, AArch64CC::CondCode>;
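  // For example, {5, AArch64::SUBSWri, AArch64CC::GE} describes the pair
  // "cmp w8, #5" + "b.ge" from the header example.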
static char ID;
AArch64ConditionOptimizer() : MachineFunctionPass(ID) {
initializeAArch64ConditionOptimizerPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
MachineInstr *findSuitableCompare(MachineBasicBlock *MBB);
CmpInfo adjustCmp(MachineInstr *CmpMI, AArch64CC::CondCode Cmp);
void modifyCmp(MachineInstr *CmpMI, const CmpInfo &Info);
bool adjustTo(MachineInstr *CmpMI, AArch64CC::CondCode Cmp, MachineInstr *To,
int ToImm);
bool runOnMachineFunction(MachineFunction &MF) override;
StringRef getPassName() const override {
return "AArch64 Condition Optimizer";
}
};

} // end anonymous namespace

char AArch64ConditionOptimizer::ID = 0;

INITIALIZE_PASS_BEGIN(AArch64ConditionOptimizer, "aarch64-condopt",
                      "AArch64 CondOpt Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(AArch64ConditionOptimizer, "aarch64-condopt",
                    "AArch64 CondOpt Pass", false, false)

FunctionPass *llvm::createAArch64ConditionOptimizerPass() {
return new AArch64ConditionOptimizer();
}

void AArch64ConditionOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
}

// Finds the compare instruction that controls the supported kind of branch.
// Returns the instruction, or nullptr if it fails or detects an unsupported
// instruction.
MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
MachineBasicBlock *MBB) {
MachineBasicBlock::iterator I = MBB->getFirstTerminator();
if (I == MBB->end())
return nullptr;
if (I->getOpcode() != AArch64::Bcc)
return nullptr;
// Since we may modify cmp of this MBB, make sure NZCV does not live out.
for (auto SuccBB : MBB->successors())
if (SuccBB->isLiveIn(AArch64::NZCV))
return nullptr;
// Now find the instruction controlling the terminator.
for (MachineBasicBlock::iterator B = MBB->begin(); I != B;) {
--I;
assert(!I->isTerminator() && "Spurious terminator");
// Check if there is any use of NZCV between CMP and Bcc.
if (I->readsRegister(AArch64::NZCV))
return nullptr;
switch (I->getOpcode()) {
// cmp is an alias for subs with a dead destination register.
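    // (e.g. "cmp w8, #4" is "subs wzr, w8, #4")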
case AArch64::SUBSWri:
case AArch64::SUBSXri:
// cmn is an alias for adds with a dead destination register.
case AArch64::ADDSWri:
case AArch64::ADDSXri: {
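      // The compare immediate is a 12-bit value, optionally shifted left by 12
      // (operand 3). Bail out below if the effective value is near the top of
      // that range, since adjusting it by one could make it unencodable.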
unsigned ShiftAmt = AArch64_AM::getShiftValue(I->getOperand(3).getImm());
if (!I->getOperand(2).isImm()) {
LLVM_DEBUG(dbgs() << "Immediate of cmp is symbolic, " << *I << '\n');
return nullptr;
} else if (I->getOperand(2).getImm() << ShiftAmt >= 0xfff) {
LLVM_DEBUG(dbgs() << "Immediate of cmp may be out of range, " << *I
<< '\n');
return nullptr;
} else if (!MRI->use_empty(I->getOperand(0).getReg())) {
LLVM_DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n');
return nullptr;
}
return &*I;
}
    // Prevent false-positive cases like:
// cmp w19, #0
// cinc w0, w19, gt
// ...
// fcmp d8, #0.0
// b.gt .LBB0_5
case AArch64::FCMPDri:
case AArch64::FCMPSri:
case AArch64::FCMPESri:
case AArch64::FCMPEDri:
case AArch64::SUBSWrr:
case AArch64::SUBSXrr:
case AArch64::ADDSWrr:
case AArch64::ADDSXrr:
case AArch64::FCMPSrr:
case AArch64::FCMPDrr:
case AArch64::FCMPESrr:
case AArch64::FCMPEDrr:
// Skip comparison instructions without immediate operands.
return nullptr;
}
}
LLVM_DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB)
<< '\n');
return nullptr;
}

// Changes the opcode adds <-> subs, preserving the register operand width.
static int getComplementOpc(int Opc) {
switch (Opc) {
case AArch64::ADDSWri: return AArch64::SUBSWri;
case AArch64::ADDSXri: return AArch64::SUBSXri;
case AArch64::SUBSWri: return AArch64::ADDSWri;
case AArch64::SUBSXri: return AArch64::ADDSXri;
default:
llvm_unreachable("Unexpected opcode");
}
}

// Changes the form of the comparison, inclusive <-> exclusive.
static AArch64CC::CondCode getAdjustedCmp(AArch64CC::CondCode Cmp) {
switch (Cmp) {
case AArch64CC::GT: return AArch64CC::GE;
case AArch64CC::GE: return AArch64CC::GT;
case AArch64CC::LT: return AArch64CC::LE;
case AArch64CC::LE: return AArch64CC::LT;
default:
llvm_unreachable("Unexpected condition code");
}
}

// Transforms GT -> GE, GE -> GT, LT -> LE, LE -> LT by adjusting the compare
// immediate and the condition code accordingly.
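// For example, {SUBSWri #4, GT} ("cmp w8, #4" + "b.gt", i.e. a > 4) becomes
// {SUBSWri #5, GE} ("cmp w8, #5" + "b.ge", i.e. a >= 5), matching the example
// in the file header.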
AArch64ConditionOptimizer::CmpInfo AArch64ConditionOptimizer::adjustCmp(
MachineInstr *CmpMI, AArch64CC::CondCode Cmp) {
unsigned Opc = CmpMI->getOpcode();
  // CMN (compare with negative immediate) is an alias of ADDS (as
  // "operand - negative" == "operand + positive").
bool Negative = (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri);
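  // Adjusting the value being compared against by +1 for GT and by -1 for LT
  // keeps the predicate equivalent: "a > 4" is the same as "a >= 5", and
  // "a < 6" is the same as "a <= 5".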
int Correction = (Cmp == AArch64CC::GT) ? 1 : -1;
// Negate Correction value for comparison with negative immediate (CMN).
if (Negative) {
Correction = -Correction;
}
const int OldImm = (int)CmpMI->getOperand(2).getImm();
const int NewImm = std::abs(OldImm + Correction);
// Handle +0 -> -1 and -0 -> +1 (CMN with 0 immediate) transitions by
// adjusting compare instruction opcode.
if (OldImm == 0 && ((Negative && Correction == 1) ||
(!Negative && Correction == -1))) {
Opc = getComplementOpc(Opc);
}
return CmpInfo(NewImm, Opc, getAdjustedCmp(Cmp));
}

// Applies changes to comparison instruction suggested by adjustCmp().
void AArch64ConditionOptimizer::modifyCmp(MachineInstr *CmpMI,
const CmpInfo &Info) {
int Imm;
unsigned Opc;
AArch64CC::CondCode Cmp;
std::tie(Imm, Opc, Cmp) = Info;
MachineBasicBlock *const MBB = CmpMI->getParent();
// Change immediate in comparison instruction (ADDS or SUBS).
BuildMI(*MBB, CmpMI, CmpMI->getDebugLoc(), TII->get(Opc))
.add(CmpMI->getOperand(0))
.add(CmpMI->getOperand(1))
.addImm(Imm)
.add(CmpMI->getOperand(3));
CmpMI->eraseFromParent();
// The fact that this comparison was picked ensures that it's related to the
// first terminator instruction.
MachineInstr &BrMI = *MBB->getFirstTerminator();
// Change condition in branch instruction.
BuildMI(*MBB, BrMI, BrMI.getDebugLoc(), TII->get(AArch64::Bcc))
.addImm(Cmp)
.add(BrMI.getOperand(1));
BrMI.eraseFromParent();
MBB->updateTerminator();
++NumConditionsAdjusted;
}

// Parse a condition code returned by analyzeBranch, and compute the CondCode
// corresponding to TBB.
// Returns true if parsing was successful, false otherwise.
static bool parseCond(ArrayRef<MachineOperand> Cond, AArch64CC::CondCode &CC) {
// A normal br.cond simply has the condition code.
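  // A -1 in Cond[0] indicates a compare-and-branch or test-and-branch form
  // (e.g. CBZ/TBZ), which this pass does not handle.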
if (Cond[0].getImm() != -1) {
assert(Cond.size() == 1 && "Unknown Cond array format");
CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
return true;
}
return false;
}

// Adjusts one cmp instruction to another one if the result of the adjustment
// will allow CSE. Returns true if the compare instruction was changed,
// false otherwise.
bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI,
AArch64CC::CondCode Cmp, MachineInstr *To, int ToImm)
{
CmpInfo Info = adjustCmp(CmpMI, Cmp);
if (std::get<0>(Info) == ToImm && std::get<1>(Info) == To->getOpcode()) {
modifyCmp(CmpMI, Info);
return true;
}
return false;
}

bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
DomTree = &getAnalysis<MachineDominatorTree>();
MRI = &MF.getRegInfo();
bool Changed = false;
// Visit blocks in dominator tree pre-order. The pre-order enables multiple
// cmp-conversions from the same head block.
// Note that updateDomTree() modifies the children of the DomTree node
// currently being visited. The df_iterator supports that; it doesn't look at
// child_begin() / child_end() until after a node has been visited.
for (MachineDomTreeNode *I : depth_first(DomTree)) {
MachineBasicBlock *HBB = I->getBlock();
SmallVector<MachineOperand, 4> HeadCond;
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
if (TII->analyzeBranch(*HBB, TBB, FBB, HeadCond)) {
continue;
}
// Equivalence check is to skip loops.
if (!TBB || TBB == HBB) {
continue;
}
SmallVector<MachineOperand, 4> TrueCond;
MachineBasicBlock *TBB_TBB = nullptr, *TBB_FBB = nullptr;
if (TII->analyzeBranch(*TBB, TBB_TBB, TBB_FBB, TrueCond)) {
continue;
}
MachineInstr *HeadCmpMI = findSuitableCompare(HBB);
if (!HeadCmpMI) {
continue;
}
MachineInstr *TrueCmpMI = findSuitableCompare(TBB);
if (!TrueCmpMI) {
continue;
}
AArch64CC::CondCode HeadCmp;
if (HeadCond.empty() || !parseCond(HeadCond, HeadCmp)) {
continue;
}
AArch64CC::CondCode TrueCmp;
if (TrueCond.empty() || !parseCond(TrueCond, TrueCmp)) {
continue;
}
const int HeadImm = (int)HeadCmpMI->getOperand(2).getImm();
const int TrueImm = (int)TrueCmpMI->getOperand(2).getImm();
LLVM_DEBUG(dbgs() << "Head branch:\n");
LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(HeadCmp)
<< '\n');
LLVM_DEBUG(dbgs() << "\timmediate: " << HeadImm << '\n');
LLVM_DEBUG(dbgs() << "True branch:\n");
LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(TrueCmp)
<< '\n');
LLVM_DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n');
if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::LT) ||
(HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::GT)) &&
std::abs(TrueImm - HeadImm) == 2) {
// This branch transforms machine instructions that correspond to
//
// 1) (a > {TrueImm} && ...) || (a < {HeadImm} && ...)
// 2) (a < {TrueImm} && ...) || (a > {HeadImm} && ...)
//
// into
//
// 1) (a >= {NewImm} && ...) || (a <= {NewImm} && ...)
// 2) (a <= {NewImm} && ...) || (a >= {NewImm} && ...)
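      //
      // For example (as in the file header), "cmp w8, #4; b.gt" and
      // "cmp w8, #6; b.lt" both become "cmp w8, #5" with "b.ge" / "b.le",
      // which lets CSE remove the second compare.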
CmpInfo HeadCmpInfo = adjustCmp(HeadCmpMI, HeadCmp);
CmpInfo TrueCmpInfo = adjustCmp(TrueCmpMI, TrueCmp);
if (std::get<0>(HeadCmpInfo) == std::get<0>(TrueCmpInfo) &&
std::get<1>(HeadCmpInfo) == std::get<1>(TrueCmpInfo)) {
modifyCmp(HeadCmpMI, HeadCmpInfo);
modifyCmp(TrueCmpMI, TrueCmpInfo);
Changed = true;
}
} else if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::GT) ||
(HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::LT)) &&
std::abs(TrueImm - HeadImm) == 1) {
// This branch transforms machine instructions that correspond to
//
// 1) (a > {TrueImm} && ...) || (a > {HeadImm} && ...)
// 2) (a < {TrueImm} && ...) || (a < {HeadImm} && ...)
//
// into
//
      // 1) (a >  {NewImm} && ...) || (a >= {NewImm} && ...)
      // 2) (a <  {NewImm} && ...) || (a <= {NewImm} && ...)
      //
      // where only one of the two compares is rewritten so both use the same
      // immediate. The GT -> GE transformation increases the immediate value,
      // so pick the compare with the smaller immediate to adjust; LT -> LE
      // decreases the immediate value, so invert the choice.
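      //
      // For example, "cmp w8, #3; b.gt" (a > 3) next to "cmp w8, #4; b.gt"
      // (a > 4) is rewritten so the first becomes "cmp w8, #4; b.ge"
      // (a >= 4), and both blocks then compare against #4.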
bool adjustHeadCond = (HeadImm < TrueImm);
if (HeadCmp == AArch64CC::LT) {
adjustHeadCond = !adjustHeadCond;
}
if (adjustHeadCond) {
Changed |= adjustTo(HeadCmpMI, HeadCmp, TrueCmpMI, TrueImm);
} else {
Changed |= adjustTo(TrueCmpMI, TrueCmp, HeadCmpMI, HeadImm);
}
}
// Other transformation cases almost never occur due to generation of < or >
// comparisons instead of <= and >=.
}
return Changed;
}