# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=aarch64 -run-pass=legalizer %s -o - | FileCheck %s
---
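# G_FADD on <2 x s64> is already legal on AArch64, so the legalizer should leave it unchanged.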
name:            test_fadd_v2s64
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_fadd_v2s64
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY]], [[COPY1]]
    ; CHECK: $q0 = COPY [[FADD]](<2 x s64>)
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<2 x s64>) = G_FADD %0, %1
    $q0 = COPY %2(<2 x s64>)

...
---
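# G_FDIV on <2 x s32> is already legal and should pass through unchanged.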
name:            test_fdiv_v2s32
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_fdiv_v2s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
    ; CHECK: [[FDIV:%[0-9]+]]:_(<2 x s32>) = G_FDIV [[COPY]], [[COPY1]]
    ; CHECK: $d0 = COPY [[FDIV]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %2:_(<2 x s32>) = G_FDIV %0, %1
    $d0 = COPY %2(<2 x s32>)

...
---
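# G_FSUB on <2 x s32> is already legal and should pass through unchanged.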
name:            test_fsub_v2s32
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_fsub_v2s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
    ; CHECK: [[FSUB:%[0-9]+]]:_(<2 x s32>) = G_FSUB [[COPY]], [[COPY1]]
    ; CHECK: $d0 = COPY [[FSUB]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %2:_(<2 x s32>) = G_FSUB %0, %1
    $d0 = COPY %2(<2 x s32>)

...
---
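# G_FNEG on <2 x s32> is already legal and should pass through unchanged.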
name:            test_fneg_v2s32
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_fneg_v2s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[FNEG:%[0-9]+]]:_(<2 x s32>) = G_FNEG [[COPY]]
    ; CHECK: $d0 = COPY [[FNEG]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = G_FNEG %0
    $d0 = COPY %1(<2 x s32>)

...
---
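# G_FMUL on <4 x s32> is already legal and should pass through unchanged.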
name:            test_fmul_v4s32
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_fmul_v4s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[COPY]], [[COPY1]]
    ; CHECK: $q0 = COPY [[FMUL]](<4 x s32>)
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %2:_(<4 x s32>) = G_FMUL %0, %1
    $q0 = COPY %2(<4 x s32>)

...
---
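# <4 x s64> does not fit in a single 128-bit Q register, so the G_FMUL is split into two
# <2 x s64> G_FMULs, whose results feed the two destination registers directly.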
name:            test_fmul_v4s64
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_fmul_v4s64
    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
    ; CHECK: [[FMUL:%[0-9]+]]:_(<2 x s64>) = G_FMUL [[DEF]], [[DEF]]
    ; CHECK: [[FMUL1:%[0-9]+]]:_(<2 x s64>) = G_FMUL [[DEF]], [[DEF]]
    ; CHECK: $q0 = COPY [[FMUL]](<2 x s64>)
    ; CHECK: $q1 = COPY [[FMUL1]](<2 x s64>)
    %0:_(<4 x s64>) = G_IMPLICIT_DEF
    %1:_(<4 x s64>) = G_IMPLICIT_DEF
    %2:_(<4 x s64>) = G_FMUL %0, %1
    %uv1:_(<2 x s64>), %uv2:_(<2 x s64>) = G_UNMERGE_VALUES %2
    $q0 = COPY %uv1(<2 x s64>)
    $q1 = COPY %uv2(<2 x s64>)

...
---
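# <8 x s32> is likewise split into two <4 x s32> G_FMULs; the wide G_IMPLICIT_DEFs are
# rebuilt from a scalar implicit def via G_BUILD_VECTOR before the multiplies.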
name:            test_fmul_v8s32
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: test_fmul_v8s32
    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
    ; CHECK: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
    ; CHECK: [[FMUL1:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
    ; CHECK: $q0 = COPY [[FMUL]](<4 x s32>)
    ; CHECK: $q1 = COPY [[FMUL1]](<4 x s32>)
    %0:_(<8 x s32>) = G_IMPLICIT_DEF
    %1:_(<8 x s32>) = G_IMPLICIT_DEF
    %2:_(<8 x s32>) = G_FMUL %0, %1
    %uv1:_(<4 x s32>), %uv2:_(<4 x s32>) = G_UNMERGE_VALUES %2
    $q0 = COPY %uv1(<4 x s32>)
    $q1 = COPY %uv2(<4 x s32>)

...