; bool-legalization.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; End-to-end tests for scalar vs. vector boolean legalization strategies.
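; A boolean produced by truncating an SGPR value is legalized differently
; depending on where it is consumed: selects with VGPR operands mask the low
; bit and materialize a VCC mask (v_cmp_ne_u32 feeding v_cndmask), while
; uniform selects and branches mask the low bit and compare on SCC
; (s_cmp_lg_u32 feeding s_cselect_b32 or s_cbranch_scc0).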
define amdgpu_ps float @select_vgpr_sgpr_trunc_cond(i32 inreg %a, i32 %b, i32 %c) {
; GCN-LABEL: select_vgpr_sgpr_trunc_cond:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s0, 1, s0
; GCN-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GCN-NEXT: ; return to shader part epilog
%cc = trunc i32 %a to i1
%r = select i1 %cc, i32 %b, i32 %c
%r.f = bitcast i32 %r to float
ret float %r.f
}
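
; Same as above, but the condition is the AND of two truncated SGPR inputs;
; the i1 AND is performed as a 32-bit s_and_b32 before the low bit is masked
; and the VCC mask is produced.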
define amdgpu_ps float @select_vgpr_sgpr_trunc_and_cond(i32 inreg %a.0, i32 inreg %a.1, i32 %b, i32 %c) {
; GCN-LABEL: select_vgpr_sgpr_trunc_and_cond:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s0, s0, s1
; GCN-NEXT: s_and_b32 s0, 1, s0
; GCN-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GCN-NEXT: ; return to shader part epilog
%cc.0 = trunc i32 %a.0 to i1
%cc.1 = trunc i32 %a.1 to i1
%and = and i1 %cc.0, %cc.1
%r = select i1 %and, i32 %b, i32 %c
%r.f = bitcast i32 %r to float
ret float %r.f
}
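
; All-SGPR variant: with uniform select operands the condition stays scalar,
; using s_cmp_lg_u32 to set SCC and s_cselect_b32 instead of a VCC compare
; and v_cndmask.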
define amdgpu_ps i32 @select_sgpr_trunc_and_cond(i32 inreg %a.0, i32 inreg %a.1, i32 inreg %b, i32 inreg %c) {
; GCN-LABEL: select_sgpr_trunc_and_cond:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s0, s0, s1
; GCN-NEXT: s_and_b32 s0, s0, 1
; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_cselect_b32 s0, s2, s3
; GCN-NEXT: ; return to shader part epilog
%cc.0 = trunc i32 %a.0 to i1
%cc.1 = trunc i32 %a.1 to i1
%and = and i1 %cc.0, %cc.1
%r = select i1 %and, i32 %b, i32 %c
ret i32 %r
}
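
; Conditional branch on a truncated uniform value: the low bit is masked,
; compared against zero on SCC, and the branch uses s_cbranch_scc0.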
define amdgpu_kernel void @sgpr_trunc_brcond(i32 %cond) {
; GCN-LABEL: sgpr_trunc_brcond:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[0:1], 0x9
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_and_b32 s0, s0, 1
; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_cbranch_scc0 BB3_2
; GCN-NEXT: ; %bb.1: ; %bb0
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: flat_store_dword v[0:1], v0
; GCN-NEXT: BB3_2: ; %bb1
; GCN-NEXT: v_mov_b32_e32 v0, 1
; GCN-NEXT: flat_store_dword v[0:1], v0
entry:
%trunc = trunc i32 %cond to i1
br i1 %trunc, label %bb0, label %bb1
bb0:
store volatile i32 0, i32 addrspace(1)* undef
unreachable
bb1:
store volatile i32 1, i32 addrspace(1)* undef
unreachable
}
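
; Conditional branch on the AND of two truncated uniform values; the i1 AND
; is again widened to s_and_b32 before the SCC compare and branch.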
define amdgpu_kernel void @brcond_sgpr_trunc_and(i32 %cond0, i32 %cond1) {
; GCN-LABEL: brcond_sgpr_trunc_and:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_and_b32 s0, s0, s1
; GCN-NEXT: s_and_b32 s0, s0, 1
; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_cbranch_scc0 BB4_2
; GCN-NEXT: ; %bb.1: ; %bb0
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: flat_store_dword v[0:1], v0
; GCN-NEXT: BB4_2: ; %bb1
; GCN-NEXT: v_mov_b32_e32 v0, 1
; GCN-NEXT: flat_store_dword v[0:1], v0
entry:
%trunc0 = trunc i32 %cond0 to i1
%trunc1 = trunc i32 %cond1 to i1
%and = and i1 %trunc0, %trunc1
br i1 %and, label %bb0, label %bb1
bb0:
store volatile i32 0, i32 addrspace(1)* undef
unreachable
bb1:
store volatile i32 1, i32 addrspace(1)* undef
unreachable
}