; uadd-with-overflow.ll
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
; Tests for the InstCombine fold:
;   uadd.with.overflow(add nuw X, C1), C2  -->  uadd.with.overflow(X, C1 + C2)
; which is legal only when the `nuw` flag is present and C1 + C2 itself does
; not wrap in the unsigned domain.
; Scalar and fixed-vector variants of the intrinsic used by the tests below.
declare { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)
declare { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8>, <2 x i8>)
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)
; Basic positive test: `nuw` guarantees x + 7 cannot wrap, and 7 + 13 = 20
; does not wrap either, so the two constants merge into a single intrinsic
; call with operand 20.
define { i32, i1 } @simple_fold(i32 %x) {
; CHECK-LABEL: @simple_fold(
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 20)
; CHECK-NEXT: ret { i32, i1 } [[TMP1]]
;
  %a = add nuw i32 %x, 7
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 13)
  ret { i32, i1 } %b
}
; Boundary positive test: 200 + 55 = 255 is exactly the i8 unsigned maximum,
; so the constant sum does not wrap and the fold fires (255 prints as -1 in
; signed i8 form in the checked output).
define { i8, i1 } @fold_on_constant_add_no_overflow(i8 %x) {
; CHECK-LABEL: @fold_on_constant_add_no_overflow(
; CHECK-NEXT: [[TMP1:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[X:%.*]], i8 -1)
; CHECK-NEXT: ret { i8, i1 } [[TMP1]]
;
  %a = add nuw i8 %x, 200
  %b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 55)
  ret { i8, i1 } %b
}
; Boundary negative test: 200 + 56 = 256 wraps in i8, so the constants must
; not be merged. Per the checked output, instcombine instead proves the
; overflow bit is always true: with `add nuw i8 %x, 200`, %x <= 55, so
; %a >= 200 and %a + 56 always exceeds 255.
define { i8, i1 } @no_fold_on_constant_add_overflow(i8 %x) {
; CHECK-LABEL: @no_fold_on_constant_add_overflow(
; CHECK-NEXT: [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 undef, i1 true }, i8 [[X:%.*]], 0
; CHECK-NEXT: ret { i8, i1 } [[TMP1]]
;
  %a = add nuw i8 %x, 200
  %b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 56)
  ret { i8, i1 } %b
}
; Vector negative test: the addend <199, 200> is not a splat, so the fold
; does not fire even though neither lane's constant sum (254, 255) would
; wrap. Per the checks, both instructions survive unchanged (199/200 print
; as -57/-56 in signed i8 form).
define { <2 x i8>, <2 x i1> } @no_fold_vector_no_overflow(<2 x i8> %x) {
; CHECK-LABEL: @no_fold_vector_no_overflow(
; CHECK-NEXT: [[A:%.*]] = add nuw <2 x i8> [[X:%.*]], <i8 -57, i8 -56>
; CHECK-NEXT: [[B:%.*]] = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> [[A]], <2 x i8> <i8 55, i8 55>)
; CHECK-NEXT: ret { <2 x i8>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i8> %x, <i8 199, i8 200>
  %b = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> %a, <2 x i8> <i8 55, i8 55>)
  ret { <2 x i8>, <2 x i1> } %b
}
; Vector negative test: non-splat addend <200, 201>, and additionally the
; second lane's constant sum (201 + 55 = 256) wraps in i8, so no fold.
; The checks confirm both instructions are left intact (200/201 print as
; -56/-55 in signed i8 form).
define { <2 x i8>, <2 x i1> } @no_fold_vector_overflow(<2 x i8> %x) {
; CHECK-LABEL: @no_fold_vector_overflow(
; CHECK-NEXT: [[A:%.*]] = add nuw <2 x i8> [[X:%.*]], <i8 -56, i8 -55>
; CHECK-NEXT: [[B:%.*]] = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> [[A]], <2 x i8> <i8 55, i8 55>)
; CHECK-NEXT: ret { <2 x i8>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i8> %x, <i8 200, i8 201>
  %b = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> %a, <2 x i8> <i8 55, i8 55>)
  ret { <2 x i8>, <2 x i1> } %b
}
; Vector positive test: both addends are splats (12 and 30), neither lane's
; sum wraps, so the constants merge into a single splat operand of 42.
define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant(<2 x i32> %x) {
; CHECK-LABEL: @fold_simple_splat_constant(
; CHECK-NEXT: [[TMP1:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 42, i32 42>)
; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[TMP1]]
;
  %a = add nuw <2 x i32> %x, <i32 12, i32 12>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}
; Negative test: an undef lane in the add's constant (<12, undef>) prevents
; the fold — the vector is not a true splat, and the checks show both
; instructions are preserved as-is.
define { <2 x i32>, <2 x i1> } @no_fold_splat_undef_constant(<2 x i32> %x) {
; CHECK-LABEL: @no_fold_splat_undef_constant(
; CHECK-NEXT: [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], <i32 12, i32 undef>
; CHECK-NEXT: [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, <i32 12, i32 undef>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}
; Negative test: the add's second operand is a variable (%y), not a
; constant, so there are no constants to merge — both instructions remain.
define { <2 x i32>, <2 x i1> } @no_fold_splat_not_constant(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @no_fold_splat_not_constant(
; CHECK-NEXT: [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, %y
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}
; Positive test: an extra `nsw` flag alongside `nuw` does not block the
; fold — `nuw` is the flag the transform requires, so 12 + 30 merge to 42.
define { i32, i1 } @fold_nuwnsw(i32 %x) {
; CHECK-LABEL: @fold_nuwnsw(
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT: ret { i32, i1 } [[TMP1]]
;
  %a = add nuw nsw i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}
; Negative test: `nsw` alone is not enough — without `nuw` the inner add
; may wrap in the unsigned domain, so merging the constants would be
; unsound and both instructions must survive.
define { i32, i1 } @no_fold_nsw(i32 %x) {
; CHECK-LABEL: @no_fold_nsw(
; CHECK-NEXT: [[A:%.*]] = add nsw i32 [[X:%.*]], 12
; CHECK-NEXT: [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT: ret { i32, i1 } [[B]]
;
  %a = add nsw i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}
; Negative test: a plain `add` (no wrap flags at all) must not be folded.
; The input also passes the intrinsic's operands commuted (constant first);
; the checked output shows the constant canonicalized to the second
; operand, but no constant merging.
define { i32, i1 } @no_fold_wrapped_add(i32 %x) {
; CHECK-LABEL: @no_fold_wrapped_add(
; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], 12
; CHECK-NEXT: [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT: ret { i32, i1 } [[B]]
;
  %a = add i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 30, i32 %a)
  ret { i32, i1 } %b
}