; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s
;
; Check that x86's peephole optimization doesn't fold a 64-bit load (movsd) into
; addpd.
; rdar://problem/18236850
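;
; MOVSD loads 64 bits into the low lane and zeroes the upper lane of its
; destination, while a folded ADDPD memory operand reads a full 128 bits, so
; performing the fold would let the adjacent stack slot leak into the high
; lane of the add.
;
; A rough C equivalent of the test (hypothetical, for illustration only):
;
;   struct S1 { double x, y; };
;   struct S1 g;
;   void foo1(struct S1 a, struct S1 b) {
;     struct S1 s;
;     foo3(&s);
;     g.x = s.x + 1.0;
;     g.y = s.y;
;   }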

%struct.S1 = type { double, double }

@g = common global %struct.S1 zeroinitializer, align 8

declare void @foo3(%struct.S1*)

define void @foo1(double %a.coerce0, double %a.coerce1, double %b.coerce0, double %b.coerce1) nounwind {
; CHECK-LABEL: foo1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    movq %rsp, %rdi
; CHECK-NEXT:    callq foo3
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movapd {{.*#+}} xmm1 = <1.0E+0,u>
; CHECK-NEXT:    movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    addpd %xmm0, %xmm1
; CHECK-NEXT:    movapd %xmm1, {{.*}}(%rip)
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    retq
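  ; The IR below builds <%2, 0.0> and <1.0, %3> and adds them; xmm0 holds
  ; <%2, 0.0> from the movsd above. Folding that load into the addpd would
  ; instead read the full 16 bytes <%2, %3> from the stack slot, doubling the
  ; high-lane result.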
  %1 = alloca <2 x double>, align 16
  %tmpcast = bitcast <2 x double>* %1 to %struct.S1*
  call void @foo3(%struct.S1* %tmpcast) #2
  %p2 = getelementptr inbounds %struct.S1, %struct.S1* %tmpcast, i64 0, i32 0
  %2 = load double, double* %p2, align 16
  %p3 = getelementptr inbounds %struct.S1, %struct.S1* %tmpcast, i64 0, i32 1
  %3 = load double, double* %p3, align 8
  %4 = insertelement <2 x double> undef, double %2, i32 0
  %5 = insertelement <2 x double> %4, double 0.000000e+00, i32 1
  %6 = insertelement <2 x double> undef, double %3, i32 1
  %7 = insertelement <2 x double> %6, double 1.000000e+00, i32 0
  %8 = fadd <2 x double> %5, %7
  store <2 x double> %8, <2 x double>* bitcast (%struct.S1* @g to <2 x double>*), align 16
  ret void
}