; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

; UNPREDICATED
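; A plain nxv4i8 store is promoted rather than split: the i8 data lives in 32-bit
; containers, so it is written back with a truncating st1b over .s elements under
; an all-true predicate.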
define void @store_promote_4i8(<vscale x 4 x i8> %data, <vscale x 4 x i8>* %a) {
; CHECK-LABEL: store_promote_4i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    st1b { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  store <vscale x 4 x i8> %data, <vscale x 4 x i8>* %a
  ret void
}

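; nxv16i16 is twice the width of the legal nxv8i16, so the store splits into two
; st1h stores, the high half addressed at [x0, #1, mul vl].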
define void @store_split_16i16(<vscale x 16 x i16> %data, <vscale x 16 x i16>* %a) {
; CHECK-LABEL: store_split_16i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    st1h { z1.h }, p0, [x0, #1, mul vl]
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  store <vscale x 16 x i16> %data, <vscale x 16 x i16>* %a
  ret void
}

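; nxv16i32 occupies four Z registers, so the store splits four ways at
; consecutive vector-length multiples.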
define void @store_split_16i32(<vscale x 16 x i32> %data, <vscale x 16 x i32>* %a) {
; CHECK-LABEL: store_split_16i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    st1w { z3.s }, p0, [x0, #3, mul vl]
; CHECK-NEXT:    st1w { z2.s }, p0, [x0, #2, mul vl]
; CHECK-NEXT:    st1w { z1.s }, p0, [x0, #1, mul vl]
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  store <vscale x 16 x i32> %data, <vscale x 16 x i32>* %a
  ret void
}

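; The widest case: nxv16i64 spans all of z0-z7 and splits into eight st1d stores.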
define void @store_split_16i64(<vscale x 16 x i64> %data, <vscale x 16 x i64>* %a) {
; CHECK-LABEL: store_split_16i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    st1d { z7.d }, p0, [x0, #7, mul vl]
; CHECK-NEXT:    st1d { z6.d }, p0, [x0, #6, mul vl]
; CHECK-NEXT:    st1d { z5.d }, p0, [x0, #5, mul vl]
; CHECK-NEXT:    st1d { z4.d }, p0, [x0, #4, mul vl]
; CHECK-NEXT:    st1d { z3.d }, p0, [x0, #3, mul vl]
; CHECK-NEXT:    st1d { z2.d }, p0, [x0, #2, mul vl]
; CHECK-NEXT:    st1d { z1.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  store <vscale x 16 x i64> %data, <vscale x 16 x i64>* %a
  ret void
}

; MASKED
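; nxv2i8 needs no splitting: the incoming nxv2i1 mask maps directly onto a
; truncating st1b over .d elements, so no predicate legalization code is emitted.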
define void @masked_store_promote_2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8> *%a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: masked_store_promote_2i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8> *%a, i32 1, <vscale x 2 x i1> %pg)
  ret void
}

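; An nxv32i1 mask already arrives in two predicate registers (p0/p1), so each
; data half pairs with its own predicate and no mask unpacking is needed.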
define void @masked_store_split_32i8(<vscale x 32 x i8> %data, <vscale x 32 x i8> *%a, <vscale x 32 x i1> %pg) {
; CHECK-LABEL: masked_store_split_32i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z1.b }, p1, [x0, #1, mul vl]
; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv32i8(<vscale x 32 x i8> %data, <vscale x 32 x i8> *%a, i32 1, <vscale x 32 x i1> %pg)
  ret void
}

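; For nxv32i16 each nxv16i1 predicate half must itself be widened to two nxv8i1
; masks, done by interleaving with an all-false predicate (pfalse + zip1/zip2).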
define void @masked_store_split_32i16(<vscale x 32 x i16> %data, <vscale x 32 x i16> *%a, <vscale x 32 x i1> %pg) {
; CHECK-LABEL: masked_store_split_32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    pfalse p2.b
; CHECK-NEXT:    zip2 p3.b, p1.b, p2.b
; CHECK-NEXT:    zip1 p1.b, p1.b, p2.b
; CHECK-NEXT:    st1h { z3.h }, p3, [x0, #3, mul vl]
; CHECK-NEXT:    zip2 p3.b, p0.b, p2.b
; CHECK-NEXT:    zip1 p0.b, p0.b, p2.b
; CHECK-NEXT:    st1h { z2.h }, p1, [x0, #2, mul vl]
; CHECK-NEXT:    st1h { z1.h }, p3, [x0, #1, mul vl]
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv32i16(<vscale x 32 x i16> %data, <vscale x 32 x i16> *%a, i32 1, <vscale x 32 x i1> %pg)
  ret void
}

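; The single nxv8i1 mask is unpacked once, via zip1/zip2 against pfalse, into the
; two nxv4i1 predicates needed by the split st1w stores.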
define void @masked_store_split_8i32(<vscale x 8 x i32> %data, <vscale x 8 x i32> *%a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: masked_store_split_8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    pfalse p1.b
; CHECK-NEXT:    zip2 p2.h, p0.h, p1.h
; CHECK-NEXT:    zip1 p0.h, p0.h, p1.h
; CHECK-NEXT:    st1w { z1.s }, p2, [x0, #1, mul vl]
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8i32(<vscale x 8 x i32> %data, <vscale x 8 x i32> *%a, i32 1, <vscale x 8 x i1> %pg)
  ret void
}

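; Two rounds of unpacking: the nxv8i1 mask is first split at .h granularity, then
; each half at .s granularity, yielding the four nxv2i1 predicates for the st1d stores.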
define void @masked_store_split_8i64(<vscale x 8 x i64> %data, <vscale x 8 x i64> *%a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: masked_store_split_8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    pfalse p1.b
; CHECK-NEXT:    zip2 p2.h, p0.h, p1.h
; CHECK-NEXT:    zip1 p0.h, p0.h, p1.h
; CHECK-NEXT:    zip2 p3.s, p2.s, p1.s
; CHECK-NEXT:    zip1 p2.s, p2.s, p1.s
; CHECK-NEXT:    st1d { z2.d }, p2, [x0, #2, mul vl]
; CHECK-NEXT:    zip2 p2.s, p0.s, p1.s
; CHECK-NEXT:    zip1 p0.s, p0.s, p1.s
; CHECK-NEXT:    st1d { z3.d }, p3, [x0, #3, mul vl]
; CHECK-NEXT:    st1d { z1.d }, p2, [x0, #1, mul vl]
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8i64(<vscale x 8 x i64> %data, <vscale x 8 x i64> *%a, i32 1, <vscale x 8 x i1> %pg)
  ret void
}

declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
declare void @llvm.masked.store.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>*, i32, <vscale x 32 x i1>)
declare void @llvm.masked.store.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>*, i32, <vscale x 32 x i1>)
declare void @llvm.masked.store.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, i32, <vscale x 8 x i1>)
declare void @llvm.masked.store.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, i32, <vscale x 8 x i1>)