; RUN: llc -mtriple=arm64_32-apple-ios7.0 %s -filetype=obj -o - -disable-post-ra -frame-pointer=non-leaf | \
; RUN: llvm-objdump --private-headers - | \
; RUN: FileCheck %s --check-prefix=CHECK-MACHO
; RUN: llc -mtriple=arm64_32-apple-ios7.0 %s -o - -aarch64-enable-atomic-cfg-tidy=0 -disable-post-ra -frame-pointer=non-leaf | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-OPT
; RUN: llc -mtriple=arm64_32-apple-ios7.0 %s -o - -fast-isel -aarch64-enable-atomic-cfg-tidy=0 -disable-post-ra -frame-pointer=non-leaf | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FAST
; CHECK-MACHO: Mach header
; CHECK-MACHO: MH_MAGIC ARM64_32 V8
@var64 = global i64 zeroinitializer, align 8
@var32 = global i32 zeroinitializer, align 4
@var_got = external global i8
define i32* @test_global_addr() {
; CHECK-LABEL: test_global_addr:
; CHECK: adrp [[PAGE:x[0-9]+]], _var32@PAGE
; CHECK-OPT: add x0, [[PAGE]], _var32@PAGEOFF
; CHECK-FAST: add [[TMP:x[0-9]+]], [[PAGE]], _var32@PAGEOFF
; CHECK-FAST: and x0, [[TMP]], #0xffffffff
ret i32* @var32
}
; ADRP is necessarily 64-bit. The important point to check is that, however the
; result gets truncated to 32 bits, the truncation is free. There is no need to
; zero out the higher bits of the register.
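; (Illustrative reasoning: on arm64_32 every valid address fits in 32 bits, so
; the 64-bit value materialised by adrp/add already has its top 32 bits clear
; and no "and x0, x0, #0xffffffff" mask is needed.)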
define i64 @test_global_addr_extension() {
; CHECK-LABEL: test_global_addr_extension:
; CHECK: adrp [[PAGE:x[0-9]+]], _var32@PAGE
; CHECK: add x0, [[PAGE]], _var32@PAGEOFF
; CHECK-NOT: and
; CHECK: ret
ret i64 ptrtoint(i32* @var32 to i64)
}
define i32 @test_global_value() {
; CHECK-LABEL: test_global_value:
; CHECK: adrp x[[PAGE:[0-9]+]], _var32@PAGE
; CHECK: ldr w0, [x[[PAGE]], _var32@PAGEOFF]
%val = load i32, i32* @var32, align 4
ret i32 %val
}
; Because the addition may wrap, it is not safe to use "ldr w0, [xN, #32]" here.
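; (Illustrative numbers, not checked by the test: if @var32 sat at 0xfffffff0,
; the i32 addition wraps to 0x00000010, whereas "ldr w0, [xN, #32]" would
; access zext(0xfffffff0) + 32 = 0x100000010.)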
define i32 @test_unsafe_indexed_add() {
; CHECK-LABEL: test_unsafe_indexed_add:
; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
; CHECK: add w[[ADDR:[0-9]+]], w[[VAR32]], #32
; CHECK: ldr w0, [x[[ADDR]]]
%addr_int = ptrtoint i32* @var32 to i32
%addr_plus_32 = add i32 %addr_int, 32
%addr = inttoptr i32 %addr_plus_32 to i32*
%val = load i32, i32* %addr, align 4
ret i32 %val
}
; Since we've promised there is no unsigned overflow, @var32 must be at least
; 32 bytes below 2^32, and we can fold the offset into the load this time.
define i32 @test_safe_indexed_add() {
; CHECK-LABEL: test_safe_indexed_add:
; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
; CHECK: ldr w0, [x[[VAR32]], #32]
%addr_int = ptrtoint i32* @var32 to i64
%addr_plus_32 = add nuw i64 %addr_int, 32
%addr = inttoptr i64 %addr_plus_32 to i32*
%val = load i32, i32* %addr, align 4
ret i32 %val
}
define i32 @test_safe_indexed_or(i32 %in) {
; CHECK-LABEL: test_safe_indexed_or:
; CHECK: and [[TMP:w[0-9]+]], {{w[0-9]+}}, #0xfffffff0
; CHECK: orr w[[ADDR:[0-9]+]], [[TMP]], #0x4
; CHECK: ldr w0, [x[[ADDR]]]
%addr_int = and i32 %in, -16
%addr_plus_4 = or i32 %addr_int, 4
%addr = inttoptr i32 %addr_plus_4 to i32*
%val = load i32, i32* %addr, align 4
ret i32 %val
}
; Promising nsw is not sufficient because the addressing mode basically
; calculates "zext(base) + zext(offset)" and nsw only guarantees
; "sext(base) + sext(offset) == base + offset".
define i32 @test_unsafe_nsw_indexed_add() {
; CHECK-LABEL: test_unsafe_nsw_indexed_add:
; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
; CHECK: add w[[ADDR:[0-9]+]], w[[VAR32]], #32
; CHECK-NOT: ubfx
; CHECK: ldr w0, [x[[ADDR]]]
%addr_int = ptrtoint i32* @var32 to i32
%addr_plus_32 = add nsw i32 %addr_int, 32
%addr = inttoptr i32 %addr_plus_32 to i32*
%val = load i32, i32* %addr, align 4
ret i32 %val
}
; Because the addition may wrap, it is not safe to use "ldr w0, [xN, #32]" here.
define i32 @test_unsafe_unscaled_add() {
; CHECK-LABEL: test_unsafe_unscaled_add:
; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
; CHECK: add w[[ADDR:[0-9]+]], w[[VAR32]], #3
; CHECK: ldr w0, [x[[ADDR]]]
%addr_int = ptrtoint i32* @var32 to i32
%addr_plus_3 = add i32 %addr_int, 3
%addr = inttoptr i32 %addr_plus_3 to i32*
%val = load i32, i32* %addr, align 1
ret i32 %val
}
; Since we've promised there is no unsigned overflow, @var32 must be at least
; 3 bytes below 2^32, and we can fold the offset into the load this time.
define i32 @test_safe_unscaled_add() {
; CHECK-LABEL: test_safe_unscaled_add:
; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
; CHECK: ldur w0, [x[[VAR32]], #3]
%addr_int = ptrtoint i32* @var32 to i32
%addr_plus_3 = add nuw i32 %addr_int, 3
%addr = inttoptr i32 %addr_plus_3 to i32*
%val = load i32, i32* %addr, align 1
ret i32 %val
}
; Promising nsw is not sufficient because the addressing mode basically
; calculates "zext(base) + zext(offset)" and nsw only guarantees
; "sext(base) + sext(offset) == base + offset".
define i32 @test_unsafe_nsw_unscaled_add() {
; CHECK-LABEL: test_unsafe_nsw_unscaled_add:
; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
; CHECK: add w[[ADDR:[0-9]+]], w[[VAR32]], #3
; CHECK-NOT: ubfx
; CHECK: ldr w0, [x[[ADDR]]]
%addr_int = ptrtoint i32* @var32 to i32
%addr_plus_3 = add nsw i32 %addr_int, 3
%addr = inttoptr i32 %addr_plus_3 to i32*
%val = load i32, i32* %addr, align 1
ret i32 %val
}
; Because the addition may wrap, it is not safe to use "ldur w0, [xN, #-3]"
; here.
define i32 @test_unsafe_negative_unscaled_add() {
; CHECK-LABEL: test_unsafe_negative_unscaled_add:
; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
; CHECK: sub w[[ADDR:[0-9]+]], w[[VAR32]], #3
; CHECK: ldr w0, [x[[ADDR]]]
%addr_int = ptrtoint i32* @var32 to i32
%addr_minus_3 = add i32 %addr_int, -3
%addr = inttoptr i32 %addr_minus_3 to i32*
%val = load i32, i32* %addr, align 1
ret i32 %val
}
define i8* @test_got_addr() {
; CHECK-LABEL: test_got_addr:
; CHECK: adrp x[[PAGE:[0-9]+]], _var_got@GOTPAGE
; CHECK-OPT: ldr w0, [x[[PAGE]], _var_got@GOTPAGEOFF]
; CHECK-FAST: ldr w[[TMP:[0-9]+]], [x[[PAGE]], _var_got@GOTPAGEOFF]
; CHECK-FAST: and x0, x[[TMP]], #0xffffffff
ret i8* @var_got
}
define float @test_va_arg_f32(i8** %list) {
; CHECK-LABEL: test_va_arg_f32:
; CHECK: ldr w[[START:[0-9]+]], [x0]
; CHECK: add [[AFTER:w[0-9]+]], w[[START]], #8
; CHECK: str [[AFTER]], [x0]
; Floating point arguments get promoted to double as per C99.
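; (e.g. a hypothetical C caller "void f(char *, ...); f(p, 1.0f);" passes 1.0f
; as a double in an 8-byte slot, hence the d-register load and the fcvt back
; down to single precision.)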
; CHECK: ldr [[DBL:d[0-9]+]], [x[[START]]]
; CHECK: fcvt s0, [[DBL]]
%res = va_arg i8** %list, float
ret float %res
}
; The interesting point is that the slot is 4 bytes.
define i8 @test_va_arg_i8(i8** %list) {
; CHECK-LABEL: test_va_arg_i8:
; CHECK: ldr w[[START:[0-9]+]], [x0]
; CHECK: add [[AFTER:w[0-9]+]], w[[START]], #4
; CHECK: str [[AFTER]], [x0]
; i8 gets promoted to int (again, as per C99).
; CHECK: ldr w0, [x[[START]]]
%res = va_arg i8** %list, i8
ret i8 %res
}
; The interesting point is that the slot needs aligning (again, the minimum
; slot size is 4 bytes).
define i64 @test_va_arg_i64(i64** %list) {
; CHECK-LABEL: test_va_arg_i64:
; Update the list for the next user (the minimum slot size is 4 bytes, but the
; actual argument is 8 bytes, which had better be reflected!)
; CHECK: ldr w[[UNALIGNED_START:[0-9]+]], [x0]
; CHECK: add [[ALIGN_TMP:x[0-9]+]], x[[UNALIGNED_START]], #7
; CHECK: and x[[START:[0-9]+]], [[ALIGN_TMP]], #0x1fffffff8
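; ("add #7" followed by the "and" is the usual round-up-to-8 computation
; (p + 7) & ~7; the 33-bit mask 0x1fffffff8 preserves a carry out of bit 31.)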
; CHECK: add w[[AFTER:[0-9]+]], w[[START]], #8
; CHECK: str w[[AFTER]], [x0]
; CHECK: ldr x0, [x[[START]]]
%res = va_arg i64** %list, i64
ret i64 %res
}
declare void @bar(...)
define void @test_va_call(i8 %l, i8 %r, float %in, i8* %ptr) {
; CHECK-LABEL: test_va_call:
; CHECK: add [[SUM:w[0-9]+]], {{w[0-9]+}}, w1
; CHECK-DAG: str w2, [sp, #32]
; CHECK-DAG: str xzr, [sp, #24]
; CHECK-DAG: str s0, [sp, #16]
; CHECK-DAG: str xzr, [sp, #8]
; CHECK-DAG: str [[SUM]], [sp]
; Add them to ensure real promotion occurs.
%sum = add i8 %l, %r
call void(...) @bar(i8 %sum, i64 0, float %in, double 0.0, i8* %ptr)
ret void
}
declare i8* @llvm.frameaddress(i32)
define i8* @test_frameaddr() {
; CHECK-LABEL: test_frameaddr:
; CHECK-OPT: ldr x0, [x29]
; CHECK-FAST: ldr [[TMP:x[0-9]+]], [x29]
; CHECK-FAST: and x0, [[TMP]], #0xffffffff
%val = call i8* @llvm.frameaddress(i32 1)
ret i8* %val
}
declare i8* @llvm.returnaddress(i32)
define i8* @test_toplevel_returnaddr() {
; CHECK-LABEL: test_toplevel_returnaddr:
; CHECK-OPT: mov x0, x30
; CHECK-FAST: and x0, x30, #0xffffffff
%val = call i8* @llvm.returnaddress(i32 0)
ret i8* %val
}
define i8* @test_deep_returnaddr() {
; CHECK-LABEL: test_deep_returnaddr:
; CHECK: ldr x[[FRAME_REC:[0-9]+]], [x29]
; CHECK-OPT: ldr x30, [x[[FRAME_REC]], #8]
; CHECK-OPT: hint #7
; CHECK-OPT: mov x0, x30
; CHECK-FAST: ldr [[TMP:x[0-9]+]], [x[[FRAME_REC]], #8]
; CHECK-FAST: and x0, [[TMP]], #0xffffffff
%val = call i8* @llvm.returnaddress(i32 1)
ret i8* %val
}
define void @test_indirect_call(void()* %func) {
; CHECK-LABEL: test_indirect_call:
; CHECK: blr x0
call void() %func()
ret void
}
; Safe to use the unextended address here
define void @test_indirect_safe_call(i32* %weird_funcs) {
; CHECK-LABEL: test_indirect_safe_call:
; CHECK: add w[[ADDR32:[0-9]+]], w0, #4
; CHECK-OPT-NOT: ubfx
; CHECK: blr x[[ADDR32]]
%addr = getelementptr i32, i32* %weird_funcs, i32 1
%func = bitcast i32* %addr to void()*
call void() %func()
ret void
}
declare void @simple()
define void @test_simple_tail_call() {
; CHECK-LABEL: test_simple_tail_call:
; CHECK: b _simple
tail call void @simple()
ret void
}
define void @test_indirect_tail_call(void()* %func) {
; CHECK-LABEL: test_indirect_tail_call:
; CHECK: br x0
tail call void() %func()
ret void
}
; Safe to use the unextended address here
define void @test_indirect_safe_tail_call(i32* %weird_funcs) {
; CHECK-LABEL: test_indirect_safe_tail_call:
; CHECK: add w[[ADDR32:[0-9]+]], w0, #4
; CHECK-OPT-NOT: ubfx
; CHECK-OPT: br x[[ADDR32]]
%addr = getelementptr i32, i32* %weird_funcs, i32 1
%func = bitcast i32* %addr to void()*
tail call void() %func()
ret void
}
; For the "armv7k" slice, Clang will be emitting some small structs as [N x
; i32]. For ABI compatibility with arm64_32 these need to be passed in *X*
; registers (e.g. [2 x i32] would be packed into a single register).
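; (For instance [2 x i32] [i32 1, i32 2] travels as the single value
; x0 = 0x0000000200000001, exactly what test_ret_smallstruct below expects.)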
define i32 @test_in_smallstruct_low([3 x i32] %in) {
; CHECK-LABEL: test_in_smallstruct_low:
; CHECK: mov x0, x1
%val = extractvalue [3 x i32] %in, 2
ret i32 %val
}
define i32 @test_in_smallstruct_high([3 x i32] %in) {
; CHECK-LABEL: test_in_smallstruct_high:
; CHECK: lsr x0, x0, #32
%val = extractvalue [3 x i32] %in, 1
ret i32 %val
}
; The 64-bit DarwinPCS ABI has the quirk that structs on the stack are always
; 64-bit aligned. This must not happen for arm64_32 since otherwise va_arg will
; be incompatible with the armv7k ABI.
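; (Here the preceding i32 lands at [sp] and the [3 x i32] starts straight
; after it at [sp, #4], where 64-bit DarwinPCS would have rounded up to
; [sp, #8].)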
define i32 @test_in_smallstruct_stack([8 x i64], i32, [3 x i32] %in) {
; CHECK-LABEL: test_in_smallstruct_stack:
; CHECK: ldr w0, [sp, #4]
%val = extractvalue [3 x i32] %in, 0
ret i32 %val
}
define [2 x i32] @test_ret_smallstruct([3 x i32] %in) {
; CHECK-LABEL: test_ret_smallstruct:
; CHECK: mov x0, #1
; CHECK: movk x0, #2, lsl #32
ret [2 x i32] [i32 1, i32 2]
}
declare void @smallstruct_callee([4 x i32])
define void @test_call_smallstruct() {
; CHECK-LABEL: test_call_smallstruct:
; CHECK: mov x0, #1
; CHECK: movk x0, #2, lsl #32
; CHECK: mov x1, #3
; CHECK: movk x1, #4, lsl #32
; CHECK: bl _smallstruct_callee
call void @smallstruct_callee([4 x i32] [i32 1, i32 2, i32 3, i32 4])
ret void
}
declare void @smallstruct_callee_stack([8 x i64], i32, [2 x i32])
define void @test_call_smallstruct_stack() {
; CHECK-LABEL: test_call_smallstruct_stack:
; CHECK: mov [[VAL:x[0-9]+]], #1
; CHECK: movk [[VAL]], #2, lsl #32
; CHECK: stur [[VAL]], [sp, #4]
call void @smallstruct_callee_stack([8 x i64] undef, i32 undef, [2 x i32] [i32 1, i32 2])
ret void
}
declare [3 x i32] @returns_smallstruct()
define i32 @test_use_smallstruct_low() {
; CHECK-LABEL: test_use_smallstruct_low:
; CHECK: bl _returns_smallstruct
; CHECK: mov x0, x1
%struct = call [3 x i32] @returns_smallstruct()
%val = extractvalue [3 x i32] %struct, 2
ret i32 %val
}
define i32 @test_use_smallstruct_high() {
; CHECK-LABEL: test_use_smallstruct_high:
; CHECK: bl _returns_smallstruct
; CHECK: lsr x0, x0, #32
%struct = call [3 x i32] @returns_smallstruct()
%val = extractvalue [3 x i32] %struct, 1
ret i32 %val
}
; If a small struct can't be allocated to x0-x7, the remaining registers should
; be marked as unavailable and subsequent GPR arguments should also be on the
; stack. Obviously the struct itself should be passed entirely on the stack.
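; (Concretely: [7 x i64] occupies x0-x6, the 16-byte [4 x i32] no longer fits
; in the one remaining register, so it lives at [sp] with x7 left unused, and
; %in follows it at [sp, #16].)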
define i32 @test_smallstruct_padding([7 x i64], [4 x i32] %struct, i32 %in) {
; CHECK-LABEL: test_smallstruct_padding:
; CHECK-DAG: ldr [[IN:w[0-9]+]], [sp, #16]
; CHECK-DAG: ldr [[LHS:w[0-9]+]], [sp]
; CHECK: add w0, [[LHS]], [[IN]]
%lhs = extractvalue [4 x i32] %struct, 0
%sum = add i32 %lhs, %in
ret i32 %sum
}
declare void @take_small_smallstruct(i64, [1 x i32])
define void @test_small_smallstruct() {
; CHECK-LABEL: test_small_smallstruct:
; CHECK-DAG: mov w0, #1
; CHECK-DAG: mov w1, #2
; CHECK: bl _take_small_smallstruct
call void @take_small_smallstruct(i64 1, [1 x i32] [i32 2])
ret void
}
define void @test_bare_frameaddr(i8** %addr) {
; CHECK-LABEL: test_bare_frameaddr:
; CHECK: add x[[LOCAL:[0-9]+]], sp, #{{[0-9]+}}
; CHECK: str w[[LOCAL]],
%ptr = alloca i8
store i8* %ptr, i8** %addr, align 4
ret void
}
define void @test_sret_use([8 x i64]* sret %out) {
; CHECK-LABEL: test_sret_use:
; CHECK: str xzr, [x8]
%addr = getelementptr [8 x i64], [8 x i64]* %out, i32 0, i32 0
store i64 0, i64* %addr
ret void
}
define i64 @test_sret_call() {
; CHECK-LABEL: test_sret_call:
; CHECK: mov x8, sp
; CHECK: bl _test_sret_use
%arr = alloca [8 x i64]
call void @test_sret_use([8 x i64]* sret %arr)
%addr = getelementptr [8 x i64], [8 x i64]* %arr, i32 0, i32 0
%val = load i64, i64* %addr
ret i64 %val
}
define double @test_constpool() {
; CHECK-LABEL: test_constpool:
; CHECK: adrp x[[PAGE:[0-9]+]], [[POOL:lCPI[0-9]+_[0-9]+]]@PAGE
; CHECK: ldr d0, [x[[PAGE]], [[POOL]]@PAGEOFF]
ret double 1.0e-6
}
define i8* @test_blockaddress() {
; CHECK-LABEL: test_blockaddress:
; CHECK: [[BLOCK:Ltmp[0-9]+]]:
; CHECK: adrp [[PAGE:x[0-9]+]], [[BLOCK]]@PAGE
; CHECK: add x0, [[PAGE]], [[BLOCK]]@PAGEOFF
br label %dest
dest:
ret i8* blockaddress(@test_blockaddress, %dest)
}
define i8* @test_indirectbr(i8* %dest) {
; CHECK-LABEL: test_indirectbr:
; CHECK: br x0
indirectbr i8* %dest, [label %true, label %false]
true:
ret i8* blockaddress(@test_indirectbr, %true)
false:
ret i8* blockaddress(@test_indirectbr, %false)
}
; ISelDAGToDAG tries to fold an offset FI load (in this case var+4) into the
; actual load instruction. This needs to be done slightly carefully since we
; claim the FI in the process -- it doesn't need extending.
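; (Here the alloca sits at sp itself, so element 1 becomes the immediate form
; "ldr s0, [sp, #4]" with no extension of the frame index required.)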
define float @test_frameindex_offset_load() {
; CHECK-LABEL: test_frameindex_offset_load:
; CHECK: ldr s0, [sp, #4]
%arr = alloca float, i32 4, align 8
%addr = getelementptr inbounds float, float* %arr, i32 1
%val = load float, float* %addr, align 4
ret float %val
}
define void @test_unaligned_frameindex_offset_store() {
; CHECK-LABEL: test_unaligned_frameindex_offset_store:
; CHECK: mov x[[TMP:[0-9]+]], sp
; CHECK: orr w[[ADDR:[0-9]+]], w[[TMP]], #0x2
; CHECK: mov [[VAL:w[0-9]+]], #42
; CHECK: str [[VAL]], [x[[ADDR]]]
%arr = alloca [4 x i32]
%addr.int = ptrtoint [4 x i32]* %arr to i32
%addr.nextint = add nuw i32 %addr.int, 2
%addr.next = inttoptr i32 %addr.nextint to i32*
store i32 42, i32* %addr.next
ret void
}
define {i64, i64*} @test_pre_idx(i64* %addr) {
; CHECK-LABEL: test_pre_idx:
; CHECK: add w[[ADDR:[0-9]+]], w0, #8
; CHECK: ldr x0, [x[[ADDR]]]
%addr.int = ptrtoint i64* %addr to i32
%addr.next.int = add nuw i32 %addr.int, 8
%addr.next = inttoptr i32 %addr.next.int to i64*
%val = load i64, i64* %addr.next
%tmp = insertvalue {i64, i64*} undef, i64 %val, 0
%res = insertvalue {i64, i64*} %tmp, i64* %addr.next, 1
ret {i64, i64*} %res
}
; Forming a pre-indexed load is invalid here since the GEP needs to work when
; %addr wraps round to 0.
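; (Illustratively: if %addr were 0xfffffff8, the GEP must produce 0, but a
; 64-bit writeback would leave 0x100000000 in the register.)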
define {i64, i64*} @test_invalid_pre_idx(i64* %addr) {
; CHECK-LABEL: test_invalid_pre_idx:
; CHECK: add w1, w0, #8
; CHECK: ldr x0, [x1]
%addr.next = getelementptr i64, i64* %addr, i32 1
%val = load i64, i64* %addr.next
%tmp = insertvalue {i64, i64*} undef, i64 %val, 0
%res = insertvalue {i64, i64*} %tmp, i64* %addr.next, 1
ret {i64, i64*} %res
}
declare void @callee([8 x i32]*)
define void @test_stack_guard() ssp {
; CHECK-LABEL: test_stack_guard:
; CHECK: adrp x[[GUARD_GOTPAGE:[0-9]+]], ___stack_chk_guard@GOTPAGE
; CHECK: ldr w[[GUARD_ADDR:[0-9]+]], [x[[GUARD_GOTPAGE]], ___stack_chk_guard@GOTPAGEOFF]
; CHECK: ldr [[GUARD_VAL:w[0-9]+]], [x[[GUARD_ADDR]]]
; CHECK: stur [[GUARD_VAL]], [x29, #[[GUARD_OFFSET:-[0-9]+]]]
; CHECK: add x0, sp, #{{[0-9]+}}
; CHECK: bl _callee
; CHECK-OPT: adrp x[[GUARD_GOTPAGE:[0-9]+]], ___stack_chk_guard@GOTPAGE
; CHECK-OPT: ldr w[[GUARD_ADDR:[0-9]+]], [x[[GUARD_GOTPAGE]], ___stack_chk_guard@GOTPAGEOFF]
; CHECK-OPT: ldr [[GUARD_VAL:w[0-9]+]], [x[[GUARD_ADDR]]]
; CHECK-OPT: ldur [[NEW_VAL:w[0-9]+]], [x29, #[[GUARD_OFFSET]]]
; CHECK-OPT: cmp [[GUARD_VAL]], [[NEW_VAL]]
; CHECK-OPT: b.ne [[FAIL:LBB[0-9]+_[0-9]+]]
; CHECK-OPT: [[FAIL]]:
; CHECK-OPT-NEXT: bl ___stack_chk_fail
%arr = alloca [8 x i32]
call void @callee([8 x i32]* %arr)
ret void
}
declare i32 @__gxx_personality_v0(...)
declare void @eat_landingpad_args(i32, i8*, i32)
@_ZTI8Whatever = external global i8
define void @test_landingpad_marshalling() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
; CHECK-LABEL: test_landingpad_marshalling:
; CHECK-OPT: mov x2, x1
; CHECK-OPT: mov x1, x0
; CHECK: bl _eat_landingpad_args
invoke void @callee([8 x i32]* undef) to label %done unwind label %lpad
lpad: ; preds = %entry
%exc = landingpad { i8*, i32 }
catch i8* @_ZTI8Whatever
%pointer = extractvalue { i8*, i32 } %exc, 0
%selector = extractvalue { i8*, i32 } %exc, 1
call void @eat_landingpad_args(i32 undef, i8* %pointer, i32 %selector)
ret void
done:
ret void
}
define void @test_dynamic_stackalloc() {
; CHECK-LABEL: test_dynamic_stackalloc:
; CHECK: sub [[REG:x[0-9]+]], sp, #32
; CHECK: mov sp, [[REG]]
; CHECK-OPT-NOT: ubfx
; CHECK: bl _callee
br label %next
next:
%val = alloca [8 x i32]
call void @callee([8 x i32]* %val)
ret void
}
define void @test_asm_memory(i32* %base.addr) {
; CHECK-LABEL: test_asm_memory:
; CHECK: add w[[ADDR:[0-9]+]], w0, #4
; CHECK: str wzr, [x[[ADDR]]]
%addr = getelementptr i32, i32* %base.addr, i32 1
call void asm sideeffect "str wzr, $0", "*m"(i32* %addr)
ret void
}
define void @test_unsafe_asm_memory(i64 %val) {
; CHECK-LABEL: test_unsafe_asm_memory:
; CHECK: and x[[ADDR:[0-9]+]], x0, #0xffffffff
; CHECK: str wzr, [x[[ADDR]]]
%addr_int = trunc i64 %val to i32
%addr = inttoptr i32 %addr_int to i32*
call void asm sideeffect "str wzr, $0", "*m"(i32* %addr)
ret void
}
define [9 x i8*] @test_demoted_return(i8* %in) {
; CHECK-LABEL: test_demoted_return:
; CHECK: str w0, [x8, #32]
%res = insertvalue [9 x i8*] undef, i8* %in, 8
ret [9 x i8*] %res
}
define i8* @test_inttoptr(i64 %in) {
; CHECK-LABEL: test_inttoptr:
; CHECK: and x0, x0, #0xffffffff
%res = inttoptr i64 %in to i8*
ret i8* %res
}
declare i32 @llvm.get.dynamic.area.offset.i32()
define i32 @test_dynamic_area() {
; CHECK-LABEL: test_dynamic_area:
; CHECK: mov w0, wzr
%res = call i32 @llvm.get.dynamic.area.offset.i32()
ret i32 %res
}
define void @test_pointer_vec_store(<2 x i8*>* %addr) {
; CHECK-LABEL: test_pointer_vec_store:
; CHECK: str xzr, [x0]
; CHECK-NOT: str
; CHECK-NOT: stp
store <2 x i8*> zeroinitializer, <2 x i8*>* %addr, align 16
ret void
}
define <2 x i8*> @test_pointer_vec_load(<2 x i8*>* %addr) {
; CHECK-LABEL: test_pointer_vec_load:
; CHECK: ldr d[[TMP:[0-9]+]], [x0]
; CHECK: ushll.2d v0, v[[TMP]], #0
%val = load <2 x i8*>, <2 x i8*>* %addr, align 16
ret <2 x i8*> %val
}
define void @test_inline_asm_mem_pointer(i32* %in) {
; CHECK-LABEL: test_inline_asm_mem_pointer:
; CHECK: str w0,
tail call void asm sideeffect "ldr x0, $0", "rm"(i32* %in)
ret void
}
define void @test_struct_hi(i32 %hi) nounwind {
; CHECK-LABEL: test_struct_hi:
; CHECK: mov w[[IN:[0-9]+]], w0
; CHECK: bl _get_int
; CHECK-FAST-NEXT: mov w0, w0
; CHECK-NEXT: bfi x0, x[[IN]], #32, #32
; CHECK-NEXT: bl _take_pair
%val.64 = call i64 @get_int()
%val.32 = trunc i64 %val.64 to i32
%pair.0 = insertvalue [2 x i32] undef, i32 %val.32, 0
%pair.1 = insertvalue [2 x i32] %pair.0, i32 %hi, 1
call void @take_pair([2 x i32] %pair.1)
ret void
}
declare void @take_pair([2 x i32])
declare i64 @get_int()
define i1 @test_icmp_ptr(i8* %in) {
; CHECK-LABEL: test_icmp_ptr:
; CHECK: ubfx x0, x0, #31, #1
%res = icmp slt i8* %in, null
ret i1 %res
}
define void @test_multiple_icmp_ptr(i8* %l, i8* %r) {
; CHECK-LABEL: test_multiple_icmp_ptr:
; CHECK: tbnz w0, #31, [[FALSEBB:LBB[0-9]+_[0-9]+]]
; CHECK: tbnz w1, #31, [[FALSEBB]]
%tst1 = icmp sgt i8* %l, inttoptr (i32 -1 to i8*)
%tst2 = icmp sgt i8* %r, inttoptr (i32 -1 to i8*)
%tst = and i1 %tst1, %tst2
br i1 %tst, label %true, label %false
true:
call void(...) @bar()
ret void
false:
ret void
}
define { [18 x i8] }* @test_gep_nonpow2({ [18 x i8] }* %a0, i32 %a1) {
; CHECK-LABEL: test_gep_nonpow2:
; CHECK-OPT: mov w[[SIZE:[0-9]+]], #18
; CHECK-OPT-NEXT: smaddl x0, w1, w[[SIZE]], x0
; CHECK-OPT-NEXT: ret
; CHECK-FAST: mov w[[SIZE:[0-9]+]], #18
; CHECK-FAST-NEXT: smaddl [[TMP:x[0-9]+]], w1, w[[SIZE]], x0
; CHECK-FAST-NEXT: and x0, [[TMP]], #0xffffffff
; CHECK-FAST-NEXT: ret
%tmp0 = getelementptr inbounds { [18 x i8] }, { [18 x i8] }* %a0, i32 %a1
ret { [18 x i8] }* %tmp0
}
define void @test_bzero(i64 %in) {
; CHECK-LABEL: test_bzero:
; CHECK-DAG: lsr x1, x0, #32
; CHECK-DAG: and x0, x0, #0xffffffff
; CHECK: bl _bzero
%ptr.i32 = trunc i64 %in to i32
%size.64 = lshr i64 %in, 32
%size = trunc i64 %size.64 to i32
%ptr = inttoptr i32 %ptr.i32 to i8*
tail call void @llvm.memset.p0i8.i32(i8* align 4 %ptr, i8 0, i32 %size, i1 false)
ret void
}
declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)