; arg-copy-elide-win64.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-windows-msvc -mattr=avx2 | FileCheck %s
; Make sure we don't try to copy elide these arguments since they will be
; passed indirectly.
define void @baz(<16 x double> %arg, <16 x double> %arg1) #0 {
; Each <16 x double> argument is 128 bytes — far too large for the Win64
; register convention — so (per the note above) both are passed indirectly:
; the caller materializes temporaries and hands @baz pointers to them.
; The CHECK lines (autogenerated; regenerate with update_llc_test_checks.py,
; do not hand-edit) verify the expected lowering:
;   - arg pieces 0-3 arrive as pointers in rcx/rdx/r8/r9 (loads from
;     (%rcx),(%rdx),(%r8),(%r9)), arg1 pieces as pointers in stack slots
;     288-312(%rbp);
;   - every piece is loaded through its pointer and stored into the local
;     allocas, i.e. a full copy is performed rather than eliding the copy
;     by aliasing the allocas with the caller's temporaries;
;   - xmm6/xmm7 (callee-saved under the Win64 ABI) are spilled/reloaded,
;     and vzeroupper is emitted before returning from AVX code.
; CHECK-LABEL: baz:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: subq $368, %rsp # imm = 0x170
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rbp
; CHECK-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: andq $-128, %rsp
; CHECK-NEXT: movq 288(%rbp), %rax
; CHECK-NEXT: vmovaps (%rax), %ymm0
; CHECK-NEXT: movq 296(%rbp), %rax
; CHECK-NEXT: vmovaps (%rax), %ymm1
; CHECK-NEXT: movq 304(%rbp), %rax
; CHECK-NEXT: vmovaps (%rax), %ymm2
; CHECK-NEXT: movq 312(%rbp), %rax
; CHECK-NEXT: vmovaps (%rax), %ymm3
; CHECK-NEXT: vmovaps (%rcx), %ymm4
; CHECK-NEXT: vmovaps (%rdx), %ymm5
; CHECK-NEXT: vmovaps (%r8), %ymm6
; CHECK-NEXT: vmovaps (%r9), %ymm7
; CHECK-NEXT: vmovaps %ymm7, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %ymm6, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %ymm5, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %ymm4, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %ymm3, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %ymm2, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; CHECK-NEXT: leaq 240(%rbp), %rsp
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
bb:
; Two 128-byte stack slots (128-byte-aligned per the andq $-128 above);
; storing each argument into its own alloca is what would tempt the
; copy-elision optimization this test guards against.
%tmp = alloca <16 x double>
%tmp2 = alloca <16 x double>
; Copy both indirect arguments onto the local stack, then return without
; using them — the stores alone must still produce real copies.
store <16 x double> %arg, <16 x double>* %tmp
store <16 x double> %arg1, <16 x double>* %tmp2
ret void
}
attributes #0 = { nounwind }