maskmovdqu.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- -mattr=+sse2,-avx | FileCheck %s --check-prefixes=ALL,i686_SSE2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2,-avx | FileCheck %s --check-prefixes=ALL,x86_64_SSE2
; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=ALL,i686_AVX
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=ALL,x86_64_AVX
; rdar://6573467
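
; The call below is expected to select to maskmovdqu (SSE2) or vmaskmovdqu
; (AVX). MASKMOVDQU addresses memory implicitly through EDI/RDI, so the
; checks expect the pointer argument %c to be copied into that register
; before the instruction is issued.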
define void @test(<16 x i8> %a, <16 x i8> %b, i32 %dummy, i8* %c) nounwind {
; i686_SSE2-LABEL: test:
; i686_SSE2:       # %bb.0: # %entry
; i686_SSE2-NEXT:    pushl %edi
; i686_SSE2-NEXT:    movl {{[0-9]+}}(%esp), %edi
; i686_SSE2-NEXT:    maskmovdqu %xmm1, %xmm0
; i686_SSE2-NEXT:    popl %edi
; i686_SSE2-NEXT:    retl
;
; x86_64_SSE2-LABEL: test:
; x86_64_SSE2:       # %bb.0: # %entry
; x86_64_SSE2-NEXT:    movq %rsi, %rdi
; x86_64_SSE2-NEXT:    maskmovdqu %xmm1, %xmm0
; x86_64_SSE2-NEXT:    retq
;
; i686_AVX-LABEL: test:
; i686_AVX:       # %bb.0: # %entry
; i686_AVX-NEXT:    pushl %edi
; i686_AVX-NEXT:    movl {{[0-9]+}}(%esp), %edi
; i686_AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0
; i686_AVX-NEXT:    popl %edi
; i686_AVX-NEXT:    retl
;
; x86_64_AVX-LABEL: test:
; x86_64_AVX:       # %bb.0: # %entry
; x86_64_AVX-NEXT:    movq %rsi, %rdi
; x86_64_AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0
; x86_64_AVX-NEXT:    retq
entry:
  tail call void @llvm.x86.sse2.maskmov.dqu( <16 x i8> %a, <16 x i8> %b, i8* %c )
  ret void
}
declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*) nounwind