test-vector-reductions-fp.mlir
3.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
// RUN: mlir-cpu-runner %s -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
// End-to-end test of all fp reduction intrinsics (not exhaustive unit tests).
module {
llvm.func @printNewline()
llvm.func @printF32(!llvm.float)
llvm.func @entry() {
// Setup (1,2,3,4).
%0 = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
%1 = llvm.mlir.constant(2.000000e+00 : f32) : !llvm.float
%2 = llvm.mlir.constant(3.000000e+00 : f32) : !llvm.float
%3 = llvm.mlir.constant(4.000000e+00 : f32) : !llvm.float
%4 = llvm.mlir.undef : !llvm.vec<4 x float>
%5 = llvm.mlir.constant(0 : index) : !llvm.i64
%6 = llvm.insertelement %0, %4[%5 : !llvm.i64] : !llvm.vec<4 x float>
%7 = llvm.shufflevector %6, %4 [0 : i32, 0 : i32, 0 : i32, 0 : i32]
: !llvm.vec<4 x float>, !llvm.vec<4 x float>
%8 = llvm.mlir.constant(1 : i64) : !llvm.i64
%9 = llvm.insertelement %1, %7[%8 : !llvm.i64] : !llvm.vec<4 x float>
%10 = llvm.mlir.constant(2 : i64) : !llvm.i64
%11 = llvm.insertelement %2, %9[%10 : !llvm.i64] : !llvm.vec<4 x float>
%12 = llvm.mlir.constant(3 : i64) : !llvm.i64
%v = llvm.insertelement %3, %11[%12 : !llvm.i64] : !llvm.vec<4 x float>
%max = "llvm.intr.experimental.vector.reduce.fmax"(%v)
: (!llvm.vec<4 x float>) -> !llvm.float
llvm.call @printF32(%max) : (!llvm.float) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 4
%min = "llvm.intr.experimental.vector.reduce.fmin"(%v)
: (!llvm.vec<4 x float>) -> !llvm.float
llvm.call @printF32(%min) : (!llvm.float) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 1
%add1 = "llvm.intr.experimental.vector.reduce.v2.fadd"(%0, %v)
: (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
llvm.call @printF32(%add1) : (!llvm.float) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 11
%add1r = "llvm.intr.experimental.vector.reduce.v2.fadd"(%0, %v)
{reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
llvm.call @printF32(%add1r) : (!llvm.float) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 11
%add2 = "llvm.intr.experimental.vector.reduce.v2.fadd"(%1, %v)
: (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
llvm.call @printF32(%add2) : (!llvm.float) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 12
%add2r = "llvm.intr.experimental.vector.reduce.v2.fadd"(%1, %v)
{reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
llvm.call @printF32(%add2r) : (!llvm.float) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 12
%mul1 = "llvm.intr.experimental.vector.reduce.v2.fmul"(%0, %v)
: (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
llvm.call @printF32(%mul1) : (!llvm.float) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 24
%mul1r = "llvm.intr.experimental.vector.reduce.v2.fmul"(%0, %v)
{reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
llvm.call @printF32(%mul1r) : (!llvm.float) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 24
%mul2 = "llvm.intr.experimental.vector.reduce.v2.fmul"(%1, %v)
: (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
llvm.call @printF32(%mul2) : (!llvm.float) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 48
%mul2r = "llvm.intr.experimental.vector.reduce.v2.fmul"(%1, %v)
{reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
llvm.call @printF32(%mul2r) : (!llvm.float) -> ()
llvm.call @printNewline() : () -> ()
// CHECK: 48
llvm.return
}
}