; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instcombine -S | FileCheck %s
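
; Given that %offset is known to be non-zero, an unsigned-underflow check on
;   %adjusted = sub i8 %base, %offset
; of the form  %adjusted u< %base  can be folded into a direct comparison of
; the sub's operands:  %base u>= %offset  (and likewise for the inverted and
; commuted forms below). The positive tests establish that %offset is
; non-zero via llvm.assume.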
declare void @llvm.assume(i1)
declare i8 @gen8()
declare void @use8(i8)
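
; %adjusted u< %base, with %offset assumed negative (hence non-zero):
; folds to %base u>= %offset.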
define i1 @t0(i8 %base, i8 %offset) {
; CHECK-LABEL: @t0(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ult i8 %adjusted, %base
  ret i1 %res
}
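
; Inverted predicate: %adjusted u>= %base folds to %base u< %offset.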
define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp uge i8 %adjusted, %base
  ret i1 %res
}
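
; Same fold with the final compare commuted: %base u> %adjusted. Here %base
; is an instruction (a call result), presumably so that this operand order
; survives icmp operand canonicalization and the commuted pattern is the one
; actually exercised.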
define i1 @t2(i8 %offset) {
; CHECK-LABEL: @t2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[BASE:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)
  %base = call i8 @gen8()
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ugt i8 %base, %adjusted
  ret i1 %res
}
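
; Commuted and inverted: %base u<= %adjusted folds to %base u< %offset.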
define i1 @t3(i8 %offset) {
; CHECK-LABEL: @t3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[BASE:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)
  %base = call i8 @gen8()
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ule i8 %base, %adjusted
  ret i1 %res
}

; Here we don't know that %offset is non-zero. Can't fold.
define i1 @n4_maybezero(i8 %base, i8 %offset) {
; CHECK-LABEL: @n4_maybezero(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ult i8 %adjusted, %base
  ret i1 %res
}

; We need to know that %offset is non-zero; knowing that %base is non-zero
; won't do. Can't fold.
define i1 @n5_wrongnonzero(i8 %base, i8 %offset) {
; CHECK-LABEL: @n5_wrongnonzero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp sgt i8 %base, 0
  call void @llvm.assume(i1 %cmp)
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ult i8 %adjusted, %base
  ret i1 %res
}