; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-slow-unaligned-mem-16 | FileCheck %s --check-prefix=FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+slow-unaligned-mem-16 | FileCheck %s --check-prefix=SLOW
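;
; If codegen changes, the CHECK lines below can be regenerated by rerunning the
; script named in the NOTE above on this file, e.g. (path is illustrative):
;   utils/update_llc_test_checks.py llvm/test/CodeGen/X86/dag-merge-fast-accesses.ll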

; Verify that the DAGCombiner is creating unaligned 16-byte loads and stores
; if and only if those are fast.
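
; Two adjacent i64 zero stores should merge into a single unaligned 16-byte
; store (movups of a zeroed xmm register) when such accesses are fast, and
; remain two scalar 8-byte stores when they are slow.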
define void @merge_const_vec_store(i64* %ptr) {
; FAST-LABEL: merge_const_vec_store:
; FAST:       # %bb.0:
; FAST-NEXT:    xorps %xmm0, %xmm0
; FAST-NEXT:    movups %xmm0, (%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_const_vec_store:
; SLOW:       # %bb.0:
; SLOW-NEXT:    movq $0, (%rdi)
; SLOW-NEXT:    movq $0, 8(%rdi)
; SLOW-NEXT:    retq
  %idx0 = getelementptr i64, i64* %ptr, i64 0
  %idx1 = getelementptr i64, i64* %ptr, i64 1
  store i64 0, i64* %idx0, align 8
  store i64 0, i64* %idx1, align 8
  ret void
}
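
; The low two elements of the vector are stored to consecutive double slots:
; one 16-byte movups when unaligned 16-byte stores are fast, a movlps/movhps
; pair (two 8-byte halves) when they are slow.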
define void @merge_vec_element_store(<4 x double> %v, double* %ptr) {
; FAST-LABEL: merge_vec_element_store:
; FAST:       # %bb.0:
; FAST-NEXT:    movups %xmm0, (%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_vec_element_store:
; SLOW:       # %bb.0:
; SLOW-NEXT:    movlps %xmm0, (%rdi)
; SLOW-NEXT:    movhps %xmm0, 8(%rdi)
; SLOW-NEXT:    retq
  %vecext0 = extractelement <4 x double> %v, i32 0
  %vecext1 = extractelement <4 x double> %v, i32 1
  %idx0 = getelementptr double, double* %ptr, i64 0
  %idx1 = getelementptr double, double* %ptr, i64 1
  store double %vecext0, double* %idx0, align 8
  store double %vecext1, double* %idx1, align 8
  ret void
}
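
; Two adjacent i64 loads from elements 0-1 are stored to elements 5-6 (byte
; offsets 40 and 48). With fast unaligned accesses this becomes one movups
; load/store pair; otherwise it stays as scalar movq loads and stores. The
; accesses are only 4-byte aligned, so the merged 16-byte access is unaligned.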
define void @merge_vec_load_and_stores(i64* %ptr) {
; FAST-LABEL: merge_vec_load_and_stores:
; FAST:       # %bb.0:
; FAST-NEXT:    movups (%rdi), %xmm0
; FAST-NEXT:    movups %xmm0, 40(%rdi)
; FAST-NEXT:    retq
;
; SLOW-LABEL: merge_vec_load_and_stores:
; SLOW:       # %bb.0:
; SLOW-NEXT:    movq (%rdi), %rax
; SLOW-NEXT:    movq 8(%rdi), %rcx
; SLOW-NEXT:    movq %rax, 40(%rdi)
; SLOW-NEXT:    movq %rcx, 48(%rdi)
; SLOW-NEXT:    retq
  %idx0 = getelementptr i64, i64* %ptr, i64 0
  %idx1 = getelementptr i64, i64* %ptr, i64 1
  %ld0 = load i64, i64* %idx0, align 4
  %ld1 = load i64, i64* %idx1, align 4
  %idx4 = getelementptr i64, i64* %ptr, i64 5
  %idx5 = getelementptr i64, i64* %ptr, i64 6
  store i64 %ld0, i64* %idx4, align 4
  store i64 %ld1, i64* %idx5, align 4
  ret void
}