var_arg.mir
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
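#
# This test runs the GlobalISel legalizer on a variadic MIPS32 (O32) function.
# It exercises the handling of G_VASTART and of the @llvm.va_copy intrinsic
# (imported as G_INTRINSIC_W_SIDE_EFFECTS): per the MIPS32 check lines below,
# the va_copy is expected to be replaced by a pointer-width G_LOAD from the
# source va_list slot followed by a G_STORE into the destination slot.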
--- |
  @.str = private unnamed_addr constant [11 x i8] c"string %s\0A\00", align 1

  declare void @llvm.va_start(i8*) #0

  declare void @llvm.va_copy(i8*, i8*) #0

  declare i32 @printf(i8*, ...)

  define void @testVaCopyArg(i8* %fmt, ...) {
  entry:
    %fmt.addr = alloca i8*, align 4
    %ap = alloca i8*, align 4
    %aq = alloca i8*, align 4
    %s = alloca i8*, align 4
    store i8* %fmt, i8** %fmt.addr, align 4
    %ap1 = bitcast i8** %ap to i8*
    call void @llvm.va_start(i8* %ap1)
    %0 = bitcast i8** %aq to i8*
    %1 = bitcast i8** %ap to i8*
    call void @llvm.va_copy(i8* %0, i8* %1)
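    ; The load/getelementptr/store sequence below is the usual front-end
    ; expansion of reading one pointer-sized argument from the %aq va_list
    ; (i.e. a va_arg of an i8*); the value is stored to %s and then printed.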
    %argp.cur = load i8*, i8** %aq, align 4
    %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
    store i8* %argp.next, i8** %aq, align 4
    %2 = bitcast i8* %argp.cur to i8**
    %3 = load i8*, i8** %2, align 4
    store i8* %3, i8** %s, align 4
    %4 = load i8*, i8** %s, align 4
    %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %4)
    ret void
  }
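
  ; Assumed definition for attribute group #0 referenced by the intrinsic
  ; declarations above.
  attributes #0 = { nounwind }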
...
---
name: testVaCopyArg
alignment: 4
tracksRegLiveness: true
liveins:
  - { reg: '$a0' }
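# The fixed stack objects below describe the O32 incoming-argument area:
# offsets 4, 8 and 12 are the save slots for the unnamed register arguments
# $a1-$a3, and the extra object at offset 4 appears to mark where the vararg
# area (and thus the va_list) begins.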
fixedStack:
  - { id: 0, offset: 12, size: 4, alignment: 4, isImmutable: true }
  - { id: 1, offset: 8, size: 4, alignment: 8, isImmutable: true }
  - { id: 2, offset: 4, size: 4, alignment: 4, isImmutable: true }
  - { id: 3, offset: 4, size: 4, alignment: 4, isImmutable: true }
stack:
  - { id: 0, name: fmt.addr, size: 4, alignment: 4 }
  - { id: 1, name: ap, size: 4, alignment: 4 }
  - { id: 2, name: aq, size: 4, alignment: 4 }
  - { id: 3, name: s, size: 4, alignment: 4 }
machineFunctionInfo: {}
body: |
  bb.1.entry:
    liveins: $a0, $a1, $a2, $a3

    ; MIPS32-LABEL: name: testVaCopyArg
    ; MIPS32: liveins: $a0, $a1, $a2, $a3
    ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
    ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
    ; MIPS32: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store 4 into %fixed-stack.1)
    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
    ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
    ; MIPS32: G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %fixed-stack.2)
    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
    ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
    ; MIPS32: G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store 4 into %fixed-stack.3)
    ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @.str
    ; MIPS32: [[COPY4:%[0-9]+]]:_(p0) = COPY [[GV]](p0)
    ; MIPS32: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.fmt.addr
    ; MIPS32: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.ap
    ; MIPS32: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.2.aq
    ; MIPS32: [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.3.s
    ; MIPS32: G_STORE [[COPY]](p0), [[FRAME_INDEX3]](p0) :: (store 4 into %ir.fmt.addr)
    ; MIPS32: G_VASTART [[FRAME_INDEX4]](p0) :: (store 4 into %ir.ap1, align 1)
    ; MIPS32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX4]](p0) :: (load 4)
    ; MIPS32: G_STORE [[LOAD]](p0), [[FRAME_INDEX5]](p0) :: (store 4)
    ; MIPS32: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX5]](p0) :: (load 4 from %ir.aq)
    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; MIPS32: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD1]], [[C]](s32)
    ; MIPS32: G_STORE [[GEP]](p0), [[FRAME_INDEX5]](p0) :: (store 4 into %ir.aq)
    ; MIPS32: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.2)
    ; MIPS32: G_STORE [[LOAD2]](p0), [[FRAME_INDEX6]](p0) :: (store 4 into %ir.s)
    ; MIPS32: [[LOAD3:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX6]](p0) :: (load 4 from %ir.s)
    ; MIPS32: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
    ; MIPS32: $a0 = COPY [[COPY4]](p0)
    ; MIPS32: $a1 = COPY [[LOAD3]](p0)
    ; MIPS32: JAL @printf, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $v0
    ; MIPS32: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
    ; MIPS32: RetRA
    %0:_(p0) = COPY $a0
    %1:_(s32) = COPY $a1
    %2:_(p0) = G_FRAME_INDEX %fixed-stack.2
    G_STORE %1(s32), %2(p0) :: (store 4 into %fixed-stack.2)
    %3:_(s32) = COPY $a2
    %4:_(p0) = G_FRAME_INDEX %fixed-stack.1
    G_STORE %3(s32), %4(p0) :: (store 4 into %fixed-stack.1)
    %5:_(s32) = COPY $a3
    %6:_(p0) = G_FRAME_INDEX %fixed-stack.0
    G_STORE %5(s32), %6(p0) :: (store 4 into %fixed-stack.0)
    %18:_(p0) = G_GLOBAL_VALUE @.str
    %17:_(p0) = COPY %18(p0)
    %7:_(p0) = G_FRAME_INDEX %stack.0.fmt.addr
    %8:_(p0) = G_FRAME_INDEX %stack.1.ap
    %9:_(p0) = G_FRAME_INDEX %stack.2.aq
    %10:_(p0) = G_FRAME_INDEX %stack.3.s
    G_STORE %0(p0), %7(p0) :: (store 4 into %ir.fmt.addr)
    G_VASTART %8(p0) :: (store 4 into %ir.ap1, align 1)
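    ; G_VASTART above initializes the %ir.ap slot with the start of the vararg
    ; area. The legalizer is expected to replace the va_copy intrinsic below
    ; with a G_LOAD from the %ir.ap slot and a G_STORE into the %ir.aq slot,
    ; as the MIPS32 check lines above show.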
    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), %9(p0), %8(p0)
    %11:_(p0) = G_LOAD %9(p0) :: (load 4 from %ir.aq)
    %12:_(s32) = G_CONSTANT i32 4
    %13:_(p0) = G_PTR_ADD %11, %12(s32)
    G_STORE %13(p0), %9(p0) :: (store 4 into %ir.aq)
    %14:_(p0) = G_LOAD %11(p0) :: (load 4 from %ir.2)
    G_STORE %14(p0), %10(p0) :: (store 4 into %ir.s)
    %15:_(p0) = G_LOAD %10(p0) :: (load 4 from %ir.s)
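    ; The printf call keeps the 16 bytes of outgoing argument space that O32
    ; reserves (ADJCALLSTACKDOWN/ADJCALLSTACKUP 16) and passes the format
    ; string in $a0 and the loaded vararg in $a1.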
    ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
    $a0 = COPY %17(p0)
    $a1 = COPY %15(p0)
    JAL @printf, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $v0
    ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
    RetRA
...