promotion.mlir
// RUN: mlir-opt -allow-unregistered-dialect -pass-pipeline='gpu.module(gpu.func(test-gpu-memory-promotion))' -split-input-file %s | FileCheck %s
gpu.module @foo {
// Verify that the attribution was indeed introduced
// CHECK-LABEL: @memref3d
// CHECK-SAME: (%[[arg:.*]]: memref<5x4xf32>
// CHECK-SAME: workgroup(%[[promoted:.*]] : memref<5x4xf32, 3>)
gpu.func @memref3d(%arg0: memref<5x4xf32> {gpu.test_promote_workgroup}) kernel {
// Verify that loop bounds are emitted; the order does not matter.
// CHECK-DAG: %[[c1:.*]] = constant 1
// CHECK-DAG: %[[c4:.*]] = constant 4
// CHECK-DAG: %[[c5:.*]] = constant 5
// CHECK-DAG: %[[tx:.*]] = "gpu.thread_id"() {dimension = "x"}
// CHECK-DAG: %[[ty:.*]] = "gpu.thread_id"() {dimension = "y"}
// CHECK-DAG: %[[tz:.*]] = "gpu.thread_id"() {dimension = "z"}
// CHECK-DAG: %[[bdx:.*]] = "gpu.block_dim"() {dimension = "x"}
// CHECK-DAG: %[[bdy:.*]] = "gpu.block_dim"() {dimension = "y"}
// CHECK-DAG: %[[bdz:.*]] = "gpu.block_dim"() {dimension = "z"}
// Verify that loops for the copy are emitted. We only check the number of
// loops here since their bounds are produced by mapLoopToProcessorIds, which
// is tested separately.
// CHECK: scf.for %[[i0:.*]] =
// CHECK: scf.for %[[i1:.*]] =
// CHECK: scf.for %[[i2:.*]] =
// Verify that the copy is emitted and uses only the last two loops.
// CHECK: %[[v:.*]] = load %[[arg]][%[[i1]], %[[i2]]]
// CHECK: store %[[v]], %[[promoted]][%[[i1]], %[[i2]]]
// Verify that the use has been rewritten.
// CHECK: "use"(%[[promoted]]) : (memref<5x4xf32, 3>)
"use"(%arg0) : (memref<5x4xf32>) -> ()
// Verify that loops for the copy-back are emitted. We only check the number
// of loops here since their bounds are produced by mapLoopToProcessorIds,
// which is tested separately.
// CHECK: scf.for %[[i0:.*]] =
// CHECK: scf.for %[[i1:.*]] =
// CHECK: scf.for %[[i2:.*]] =
// Verify that the copy is emitted and uses only the last two loops.
// CHECK: %[[v:.*]] = load %[[promoted]][%[[i1]], %[[i2]]]
// CHECK: store %[[v]], %[[arg]][%[[i1]], %[[i2]]]
gpu.return
}
}
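// A rough sketch of the IR this test expects around the "use" in @memref3d,
// inferred from the CHECK lines above rather than taken from actual pass
// output (value names follow the FileCheck captures; loop bounds are elided
// because they come from mapLoopToProcessorIds):
//
//   scf.for %i0 = ... {
//     scf.for %i1 = ... {
//       scf.for %i2 = ... {
//         %v = load %arg[%i1, %i2] : memref<5x4xf32>
//         store %v, %promoted[%i1, %i2] : memref<5x4xf32, 3>
//       }
//     }
//   }
//   "use"(%promoted) : (memref<5x4xf32, 3>) -> ()
//   // ...mirrored loop nest copying %promoted back into %arg...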
// -----
gpu.module @foo {
// Verify that the attribution was indeed introduced
// CHECK-LABEL: @memref5d
// CHECK-SAME: (%[[arg:.*]]: memref<8x7x6x5x4xf32>
// CHECK-SAME: workgroup(%[[promoted:.*]] : memref<8x7x6x5x4xf32, 3>)
gpu.func @memref5d(%arg0: memref<8x7x6x5x4xf32> {gpu.test_promote_workgroup}) kernel {
// Verify that loop bounds are emitted; the order does not matter.
// CHECK-DAG: %[[c0:.*]] = constant 0
// CHECK-DAG: %[[c1:.*]] = constant 1
// CHECK-DAG: %[[c4:.*]] = constant 4
// CHECK-DAG: %[[c5:.*]] = constant 5
// CHECK-DAG: %[[c6:.*]] = constant 6
// CHECK-DAG: %[[c7:.*]] = constant 7
// CHECK-DAG: %[[c8:.*]] = constant 8
// CHECK-DAG: %[[tx:.*]] = "gpu.thread_id"() {dimension = "x"}
// CHECK-DAG: %[[ty:.*]] = "gpu.thread_id"() {dimension = "y"}
// CHECK-DAG: %[[tz:.*]] = "gpu.thread_id"() {dimension = "z"}
// CHECK-DAG: %[[bdx:.*]] = "gpu.block_dim"() {dimension = "x"}
// CHECK-DAG: %[[bdy:.*]] = "gpu.block_dim"() {dimension = "y"}
// CHECK-DAG: %[[bdz:.*]] = "gpu.block_dim"() {dimension = "z"}
// Verify that loops for the copy are emitted.
// CHECK: scf.for %[[i0:.*]] =
// CHECK: scf.for %[[i1:.*]] =
// CHECK: scf.for %[[i2:.*]] =
// CHECK: scf.for %[[i3:.*]] =
// CHECK: scf.for %[[i4:.*]] =
// Verify that the copy is emitted.
// CHECK: %[[v:.*]] = load %[[arg]][%[[i0]], %[[i1]], %[[i2]], %[[i3]], %[[i4]]]
// CHECK: store %[[v]], %[[promoted]][%[[i0]], %[[i1]], %[[i2]], %[[i3]], %[[i4]]]
// Verify that the use has been rewritten.
// CHECK: "use"(%[[promoted]]) : (memref<8x7x6x5x4xf32, 3>)
"use"(%arg0) : (memref<8x7x6x5x4xf32>) -> ()
// Verify that loops for the copy-back are emitted.
// CHECK: scf.for %[[i0:.*]] =
// CHECK: scf.for %[[i1:.*]] =
// CHECK: scf.for %[[i2:.*]] =
// CHECK: scf.for %[[i3:.*]] =
// CHECK: scf.for %[[i4:.*]] =
// Verify that the copy is emitted.
// CHECK: %[[v:.*]] = load %[[promoted]][%[[i0]], %[[i1]], %[[i2]], %[[i3]], %[[i4]]]
// CHECK: store %[[v]], %[[arg]][%[[i0]], %[[i1]], %[[i2]], %[[i3]], %[[i4]]]
gpu.return
}
}
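// The expected output here mirrors the @memref3d sketch above, except that
// the copy loop nest is five deep and the load/store pair indexes the buffer
// with all five induction variables (again inferred from the CHECK lines, not
// taken from actual pass output).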
// -----
gpu.module @foo {
// Check that the promoted buffer is appended to the existing workgroup
// attribution and that the private attribution is preserved.
// CHECK-LABEL: @insert
// CHECK-SAME: (%{{.*}}: memref<4xf32>
// CHECK-SAME: workgroup(%{{.*}}: memref<1x1xf64, 3>
// CHECK-SAME: %[[wg2:.*]] : memref<4xf32, 3>)
// CHECK-SAME: private(%{{.*}}: memref<1x1xi64, 5>)
gpu.func @insert(%arg0: memref<4xf32> {gpu.test_promote_workgroup})
workgroup(%arg1: memref<1x1xf64, 3>)
private(%arg2: memref<1x1xi64, 5>)
kernel {
// CHECK: "use"(%[[wg2]])
"use"(%arg0) : (memref<4xf32>) -> ()
gpu.return
}
}
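// A rough sketch of the @insert signature this test expects after promotion,
// inferred from the CHECK-SAME patterns above (argument names are
// placeholders): the promoted buffer is appended after the existing workgroup
// attribution, and the private attribution is left in place.
//
//   gpu.func @insert(%arg0: memref<4xf32>)
//       workgroup(%existing: memref<1x1xf64, 3>, %promoted: memref<4xf32, 3>)
//       private(%priv: memref<1x1xi64, 5>)
//       kernel {
//     ...
//   }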