; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux -mattr=-avx | FileCheck %s -check-prefix=X64
; Win64 does not support byval yet.
; RUN: llc < %s -mtriple=i686-- -mattr=-avx | FileCheck %s -check-prefix=X32
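; This test passes a 132-byte struct byval to @f twice and checks that a full
; copy of the struct is materialized in the outgoing argument area before each
; call.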

%struct.s = type { i32, i32, i32, i32, i32, i32, i32, i32,
                   i32, i32, i32, i32, i32, i32, i32, i32,
                   i32, i32, i32, i32, i32, i32, i32, i32,
                   i32, i32, i32, i32, i32, i32, i32, i32,
                   i32 }

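; %struct.s is 33 x i32 = 132 bytes. On x86-64 the byval copy is emitted as a
; rep;movsq with count 16 (128 bytes) plus a 4-byte movl for the tail; on i686
; it is a single rep;movsl with count 33.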
define void @g(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) nounwind {
; X64-LABEL: g:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rbx
; X64-NEXT:    subq $288, %rsp # imm = 0x120
; X64-NEXT:    movl %edi, {{[0-9]+}}(%rsp)
; X64-NEXT:    movl %esi, {{[0-9]+}}(%rsp)
; X64-NEXT:    movl %edx, {{[0-9]+}}(%rsp)
; X64-NEXT:    movl %ecx, {{[0-9]+}}(%rsp)
; X64-NEXT:    movl %r8d, {{[0-9]+}}(%rsp)
; X64-NEXT:    movl %r9d, {{[0-9]+}}(%rsp)
; X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rbx
; X64-NEXT:    movl $16, %ecx
; X64-NEXT:    movq %rsp, %rdi
; X64-NEXT:    movq %rbx, %rsi
; X64-NEXT:    rep;movsq (%rsi), %es:(%rdi)
; X64-NEXT:    movl {{[0-9]+}}(%rsp), %eax
; X64-NEXT:    movl %eax, {{[0-9]+}}(%rsp)
; X64-NEXT:    callq f
; X64-NEXT:    movl $16, %ecx
; X64-NEXT:    movq %rsp, %rdi
; X64-NEXT:    movq %rbx, %rsi
; X64-NEXT:    rep;movsq (%rsi), %es:(%rdi)
; X64-NEXT:    movl {{[0-9]+}}(%rsp), %eax
; X64-NEXT:    movl %eax, {{[0-9]+}}(%rsp)
; X64-NEXT:    callq f
; X64-NEXT:    addq $288, %rsp # imm = 0x120
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X32-LABEL: g:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %ebp
; X32-NEXT:    movl %esp, %ebp
; X32-NEXT:    pushl %ebx
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    andl $-16, %esp
; X32-NEXT:    subl $288, %esp # imm = 0x120
; X32-NEXT:    movl 8(%ebp), %eax
; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X32-NEXT:    movl 12(%ebp), %eax
; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X32-NEXT:    movl 16(%ebp), %eax
; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X32-NEXT:    movl 20(%ebp), %eax
; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X32-NEXT:    movl 24(%ebp), %eax
; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X32-NEXT:    movl 28(%ebp), %eax
; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X32-NEXT:    leal {{[0-9]+}}(%esp), %ebx
; X32-NEXT:    movl $33, %ecx
; X32-NEXT:    movl %esp, %edi
; X32-NEXT:    movl %ebx, %esi
; X32-NEXT:    rep;movsl (%esi), %es:(%edi)
; X32-NEXT:    calll f
; X32-NEXT:    movl $33, %ecx
; X32-NEXT:    movl %esp, %edi
; X32-NEXT:    movl %ebx, %esi
; X32-NEXT:    rep;movsl (%esi), %es:(%edi)
; X32-NEXT:    calll f
; X32-NEXT:    leal -12(%ebp), %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    popl %ebx
; X32-NEXT:    popl %ebp
; X32-NEXT:    retl
entry:
        %d = alloca %struct.s, align 16
        %tmp = getelementptr %struct.s, %struct.s* %d, i32 0, i32 0
        store i32 %a1, i32* %tmp, align 16
        %tmp2 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 1
        store i32 %a2, i32* %tmp2, align 16
        %tmp4 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 2
        store i32 %a3, i32* %tmp4, align 16
        %tmp6 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 3
        store i32 %a4, i32* %tmp6, align 16
        %tmp8 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 4
        store i32 %a5, i32* %tmp8, align 16
        %tmp10 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 5
        store i32 %a6, i32* %tmp10, align 16
        call void @f( %struct.s* byval %d)
        call void @f( %struct.s* byval %d)
        ret void
}

declare void @f(%struct.s* byval)