; partial-shift-shrink.ll
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
; Test the combine that reduces the width of a 64-bit shift to 32 bits when
; the result is truncated to 16 bits.
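; Narrowing is possible when every bit of the truncated result is taken from
; the low 32 bits of the wide source: for a constant shift amount c and a
; result width w, that means c + w <= 32. The cases below exercise the
; boundary, e.g. lshr by 16 truncated to i16 (16 + 16 = 32) only reads bits
; 16..31 and narrows, while a shift by 17 also needs bit 32 and keeps the
; 64-bit shift.
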
; GCN-LABEL: {{^}}trunc_srl_i64_16_to_i16:
; GCN: s_waitcnt
; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GCN-NEXT: s_setpc_b64
define i16 @trunc_srl_i64_16_to_i16(i64 %x) {
%shift = lshr i64 %x, 16
%trunc = trunc i64 %shift to i16
ret i16 %trunc
}
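
; A shift amount of 17 makes the i16 result read bits 17..32 of %x; bit 32 is
; in the high half, so the 64-bit shift is kept.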
; GCN-LABEL: {{^}}trunc_srl_i64_17_to_i16:
; GCN: s_waitcnt
; GCN-NEXT: v_lshrrev_b64 v[0:1], 17, v[0:1]
; GCN-NEXT: s_setpc_b64
define i16 @trunc_srl_i64_17_to_i16(i64 %x) {
%shift = lshr i64 %x, 17
%trunc = trunc i64 %shift to i16
ret i16 %trunc
}
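
; Sub-dword types: the i15 result of the shift by 15 only reads bits 15..29 of
; the i55 source, so a 32-bit shift is enough.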
; GCN-LABEL: {{^}}trunc_srl_i55_16_to_i15:
; GCN: s_waitcnt
; GCN-NEXT: v_lshrrev_b32_e32 v0, 15, v0
; GCN-NEXT: v_add_u16_e32 v0, 4, v0
; GCN-NEXT: s_setpc_b64
define i15 @trunc_srl_i55_16_to_i15(i55 %x) {
%shift = lshr i55 %x, 15
%trunc = trunc i55 %shift to i15
%add = add i15 %trunc, 4
ret i15 %add
}
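
; ashr narrows the same way as lshr here: truncation discards the shifted-in
; sign bits and bits 16..31 are all in the low half, so a 32-bit logical shift
; is emitted.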
; GCN-LABEL: {{^}}trunc_sra_i64_16_to_i16:
; GCN: s_waitcnt
; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GCN-NEXT: s_setpc_b64
define i16 @trunc_sra_i64_16_to_i16(i64 %x) {
%shift = ashr i64 %x, 16
%trunc = trunc i64 %shift to i16
ret i16 %trunc
}
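
; Only bits 17..32 are demanded, so the arithmetic shift becomes a logical one,
; but bit 32 is in the high half and the shift stays 64-bit.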
; GCN-LABEL: {{^}}trunc_sra_i64_17_to_i16:
; GCN: s_waitcnt
; GCN-NEXT: v_lshrrev_b64 v[0:1], 17, v[0:1]
; GCN-NEXT: s_setpc_b64
define i16 @trunc_sra_i64_17_to_i16(i64 %x) {
%shift = ashr i64 %x, 17
%trunc = trunc i64 %shift to i16
ret i16 %trunc
}
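
; Left shift by 16 leaves no demanded bits in the low 16 bits of the result, so
; the truncated value folds to the constant 0.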
; GCN-LABEL: {{^}}trunc_shl_i64_16_to_i16:
; GCN: s_waitcnt
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_setpc_b64
define i16 @trunc_shl_i64_16_to_i16(i64 %x) {
%shift = shl i64 %x, 16
%trunc = trunc i64 %shift to i16
ret i16 %trunc
}
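
; Same for a shift amount of 17: the low 16 bits of the result are known zero.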
; GCN-LABEL: {{^}}trunc_shl_i64_17_to_i16:
; GCN: s_waitcnt
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_setpc_b64
define i16 @trunc_shl_i64_17_to_i16(i64 %x) {
%shift = shl i64 %x, 17
%trunc = trunc i64 %shift to i16
ret i16 %trunc
}
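
; Vector case: each element is narrowed independently and the two i16 results
; are packed into the low and high halves of a single VGPR.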
; GCN-LABEL: {{^}}trunc_srl_v2i64_16_to_v2i16:
; GCN: s_waitcnt
; GCN-DAG: v_lshrrev_b32_e32 v0, 16, v0
; GCN-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0xffff0000
; GCN: v_and_or_b32 v0, v2, [[MASK]], v0
; GCN-NEXT: s_setpc_b64
define <2 x i16> @trunc_srl_v2i64_16_to_v2i16(<2 x i64> %x) {
%shift = lshr <2 x i64> %x, <i64 16, i64 16>
%trunc = trunc <2 x i64> %shift to <2 x i16>
ret <2 x i16> %trunc
}
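
; Uniform (SGPR) version: only the low dword of the i64 kernel argument is
; loaded and the shift narrows to a scalar 32-bit shift.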
; GCN-LABEL: {{^}}s_trunc_srl_i64_16_to_i16:
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: s_lshr_b32 [[VAL_SHIFT:s[0-9]+]], [[VAL]], 16
; GCN: s_or_b32 [[RESULT:s[0-9]+]], [[VAL_SHIFT]], 4
; GCN: v_mov_b32_e32 [[V_RESULT:v[0-9]+]], [[RESULT]]
; GCN: global_store_short v{{\[[0-9]+:[0-9]+\]}}, [[V_RESULT]]
define amdgpu_kernel void @s_trunc_srl_i64_16_to_i16(i64 %x) {
%shift = lshr i64 %x, 16
%trunc = trunc i64 %shift to i16
%add = or i16 %trunc, 4
store i16 %add, i16 addrspace(1)* undef
ret void
}
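
; Variable shift amount with a known maximum of 15: the highest demanded bit is
; 15 + 15 = 30, still within the low 32 bits, so a 32-bit shift is used.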
; GCN-LABEL: {{^}}trunc_srl_i64_var_mask15_to_i16:
; GCN: s_waitcnt
; GCN-NEXT: v_and_b32_e32 v1, 15, v2
; GCN-NEXT: v_lshrrev_b32_e32 v0, v1, v0
; GCN-NEXT: s_setpc_b64
define i16 @trunc_srl_i64_var_mask15_to_i16(i64 %x, i64 %amt) {
%amt.masked = and i64 %amt, 15
%shift = lshr i64 %x, %amt.masked
%trunc = trunc i64 %shift to i16
ret i16 %trunc
}
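
; With the amount masked to 16 the shift is not narrowed and the full 64-bit
; shift is used.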
; GCN-LABEL: {{^}}trunc_srl_i64_var_mask16_to_i16:
; GCN: s_waitcnt
; GCN-NEXT: v_and_b32_e32 v2, 16, v2
; GCN-NEXT: v_lshrrev_b64 v[0:1], v2, v[0:1]
; GCN-NEXT: s_setpc_b64
define i16 @trunc_srl_i64_var_mask16_to_i16(i64 %x, i64 %amt) {
%amt.masked = and i64 %amt, 16
%shift = lshr i64 %x, %amt.masked
%trunc = trunc i64 %shift to i16
ret i16 %trunc
}
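
; A shift amount of up to 31 can demand bits from the high half (bits 31..46
; for an amount of 31), so the 64-bit shift is required.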
; GCN-LABEL: {{^}}trunc_srl_i64_var_mask31_to_i16:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v2, 31, v2
; GCN-NEXT: v_lshrrev_b64 v[0:1], v2, v[0:1]
; GCN-NEXT: s_setpc_b64 s[30:31]
define i16 @trunc_srl_i64_var_mask31_to_i16(i64 %x, i64 %amt) {
%amt.masked = and i64 %amt, 31
%shift = lshr i64 %x, %amt.masked
%trunc = trunc i64 %shift to i16
ret i16 %trunc
}