# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -run-pass si-fold-operands,dead-mi-elimination %s -o - | FileCheck -check-prefix=GCN %s
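# These tests cover shrinking V_ADD_CO_U32_e64 to the e32 form when the carry
# output has uses. The e32 form writes its carry to $vcc, so a consumer that
# can't read $vcc directly needs a COPY inserted after the shrunk add.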
---
# The carry out is used by an instruction that can't be shrunk. The add is
# still shrunk to the e32 form, and the carry is copied out of $vcc.
name: shrink_scalar_imm_vgpr_v_add_i32_e64_other_carry_out_use
tracksRegLiveness: true
body: |
  bb.0:
    ; GCN-LABEL: name: shrink_scalar_imm_vgpr_v_add_i32_e64_other_carry_out_use
    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
    ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
    ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY killed $vcc
    ; GCN: S_ENDPGM 0, implicit [[COPY]]
    %0:sreg_32_xm0 = S_MOV_B32 12345
    %1:vgpr_32 = IMPLICIT_DEF
    %2:vgpr_32 = IMPLICIT_DEF
    %3:vgpr_32 = IMPLICIT_DEF
    %4:vgpr_32, %5:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
    S_ENDPGM 0, implicit %5
...
---
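# The scalar immediate feeds two adds and the first carry out is consumed.
# Per the checks below, neither add is shrunk in this case.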
name: shrink_scalar_imm_multi_use_with_used_carry
tracksRegLiveness: true
body: |
  bb.0:
    ; GCN-LABEL: name: shrink_scalar_imm_multi_use_with_used_carry
    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
    ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64 [[S_MOV_B32_]], [[DEF]], 0, implicit $exec
    ; GCN: [[V_ADD_CO_U32_e64_2:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_3:%[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64 [[S_MOV_B32_]], [[DEF1]], 0, implicit $exec
    ; GCN: S_ENDPGM 0, implicit [[V_ADD_CO_U32_e64_1]], implicit [[V_ADD_CO_U32_e64_2]]
    %0:sreg_32_xm0 = S_MOV_B32 12345
    %1:vgpr_32 = IMPLICIT_DEF
    %2:vgpr_32 = IMPLICIT_DEF
    %3:vgpr_32 = IMPLICIT_DEF
    %4:vgpr_32 = IMPLICIT_DEF
    %5:vgpr_32, %6:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
    %7:vgpr_32, %8:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %2, 0, implicit $exec
    S_ENDPGM 0, implicit %6, implicit %7
...
---
# TODO: Is it OK to leave the broken use around on the DBG_VALUE?
name: shrink_scalar_imm_vgpr_v_add_i32_e64_dbg_only_carry_out_use
tracksRegLiveness: true
body: |
  bb.0:
    ; GCN-LABEL: name: shrink_scalar_imm_vgpr_v_add_i32_e64_dbg_only_carry_out_use
    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
    ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
    ; GCN: DBG_VALUE %5:sreg_64_xexec, $noreg
    ; GCN: S_ENDPGM 0, implicit [[V_ADD_CO_U32_e32_]]
    %0:sreg_32_xm0 = S_MOV_B32 12345
    %1:vgpr_32 = IMPLICIT_DEF
    %2:vgpr_32 = IMPLICIT_DEF
    %3:vgpr_32 = IMPLICIT_DEF
    %4:vgpr_32, %5:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
    DBG_VALUE %5, $noreg
    S_ENDPGM 0, implicit %4
...
---
# Uses the carry out in the normal pattern: a V_ADDC_U32 consumes it through a
# COPY of $vcc after the shrink.
name: shrink_scalar_imm_vgpr_v_add_i32_e64_carry_out_use
tracksRegLiveness: true
body: |
  bb.0:
    ; GCN-LABEL: name: shrink_scalar_imm_vgpr_v_add_i32_e64_carry_out_use
    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
    ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
    ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY killed $vcc
    ; GCN: [[V_ADDC_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADDC_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADDC_U32_e64 [[DEF1]], [[DEF2]], [[COPY]], 0, implicit $exec
    ; GCN: S_ENDPGM 0, implicit [[V_ADDC_U32_e64_]]
    %0:sreg_32_xm0 = S_MOV_B32 12345
    %1:vgpr_32 = IMPLICIT_DEF
    %2:vgpr_32 = IMPLICIT_DEF
    %3:vgpr_32 = IMPLICIT_DEF
    %4:vgpr_32, %5:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
    %6:vgpr_32, %7:sreg_64_xexec = V_ADDC_U32_e64 %2, %3, %5, 0, implicit $exec
    S_ENDPGM 0, implicit %6
...