; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- | FileCheck %s
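; These tests check that masked, shifted index computations are folded into
; x86 scaled addressing modes rather than materialized with explicit shifts.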
define i32 @t1(i8* %X, i32 %i) {
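; The mask 1020 is 0xff << 2, so the shl+and is equivalent to zero-extending
; the low byte of %i, and the shift itself folds into the scale-4 addressing
; mode (%eax,%ecx,4).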
; CHECK-LABEL: t1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movzbl %cl, %ecx
; CHECK-NEXT: movl (%eax,%ecx,4), %eax
; CHECK-NEXT: retl
entry:
%tmp2 = shl i32 %i, 2
%tmp4 = and i32 %tmp2, 1020
%tmp7 = getelementptr i8, i8* %X, i32 %tmp4
%tmp78 = bitcast i8* %tmp7 to i32*
%tmp9 = load i32, i32* %tmp78
ret i32 %tmp9
}
define i32 @t2(i16* %X, i32 %i) {
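; The mask 131070 is 0xffff << 1. Combined with the implicit x2 scaling of the
; i16 gep, the index reduces to the zero-extended low 16 bits of %i with a
; scale of 4.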
; CHECK-LABEL: t2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movzwl %cx, %ecx
; CHECK-NEXT: movl (%eax,%ecx,4), %eax
; CHECK-NEXT: retl
entry:
%tmp2 = shl i32 %i, 1
%tmp4 = and i32 %tmp2, 131070
%tmp7 = getelementptr i16, i16* %X, i32 %tmp4
%tmp78 = bitcast i16* %tmp7 to i32*
%tmp9 = load i32, i32* %tmp78
ret i32 %tmp9
}
define i32 @t3(i16* %i.ptr, i32* %arr) {
; This case is tricky. The lshr followed by a gep will produce a lshr followed
; by an and to remove the low bits. This can be simplified by doing the lshr by
; a greater constant and using the addressing mode to scale the result back up;
; concretely, (%i.zext >> 9) & -4 is matched as (%i.zext >> 11) with a scale
; of 4. To make matters worse, because of the two-phase zext of %i and its
; reuse in the function, the DAG can get confused trying to re-use both values,
; which prevents easy analysis of the mask needed to match this pattern.
; CHECK-LABEL: t3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzwl (%eax), %eax
; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: shrl $11, %edx
; CHECK-NEXT: addl (%ecx,%edx,4), %eax
; CHECK-NEXT: retl
entry:
%i = load i16, i16* %i.ptr
%i.zext = zext i16 %i to i32
%index = lshr i32 %i.zext, 11
%val.ptr = getelementptr inbounds i32, i32* %arr, i32 %index
%val = load i32, i32* %val.ptr
%sum = add i32 %val, %i.zext
ret i32 %sum
}
define i32 @t4(i16* %i.ptr, i32* %arr) {
; A version of @t3 that has more zero extends and more re-use of intermediate
; values. This exercises slightly different bits of canonicalization.
; CHECK-LABEL: t4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movzwl (%eax), %eax
; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: shrl $11, %edx
; CHECK-NEXT: addl (%ecx,%edx,4), %eax
; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retl
entry:
%i = load i16, i16* %i.ptr
%i.zext = zext i16 %i to i32
%index = lshr i32 %i.zext, 11
%index.zext = zext i32 %index to i64
%val.ptr = getelementptr inbounds i32, i32* %arr, i64 %index.zext
%val = load i32, i32* %val.ptr
%sum.1 = add i32 %val, %i.zext
%sum.2 = add i32 %sum.1, %index
ret i32 %sum.2
}
define i8 @t5(i8* %X, i32 %i) {
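; Negative masks fold the same way: -56 is -14 << 2, so the and is hoisted
; above the shift (andl $-14) and the shift folds into the scale-4 addressing
; mode.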
; CHECK-LABEL: t5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: andl $-14, %ecx
; CHECK-NEXT: movb (%eax,%ecx,4), %al
; CHECK-NEXT: retl
entry:
%tmp2 = shl i32 %i, 2
%tmp4 = and i32 %tmp2, -56
%tmp7 = getelementptr i8, i8* %X, i32 %tmp4
%tmp9 = load i8, i8* %tmp7
ret i8 %tmp9
}
define i8 @t6(i8* %X, i32 %i) {
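; Same fold as @t5, with -1020 = -255 << 2. The hoisted mask -255 no longer
; fits in a sign-extended 8-bit immediate, so it is materialized into a
; register first.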
; CHECK-LABEL: t6:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl $-255, %ecx
; CHECK-NEXT: andl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movb (%eax,%ecx,4), %al
; CHECK-NEXT: retl
entry:
%tmp2 = shl i32 %i, 2
%tmp4 = and i32 %tmp2, -1020
%tmp7 = getelementptr i8, i8* %X, i32 %tmp4
%tmp9 = load i8, i8* %tmp7
ret i8 %tmp9
}