//===------------------------- memcopy routines ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
  .macro FUNCTION_BEGIN name
  .text
  .p2align 5
  .globl \name
  .type \name, @function
\name:
  .endm

  .macro FUNCTION_END name
  .size \name, . - \name
  .endm
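
// Copies r2 bytes from src (r1) to dest (r0), following the standard Hexagon
// calling convention for memcpy(dest, src, bytes). The fast path assumes both
// pointers are 8-byte aligned and that the length is at least 32 bytes and a
// multiple of 8; only the alignment is checked at run time, so callers must
// guarantee the size constraints. Misaligned pointers fall through to the
// plain memcpy call at the end of the file.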
FUNCTION_BEGIN __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes
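  // Packet 1: p0 is set only if both src (r1) and dest (r0) have their low
  // three bits clear, i.e. both are 8-byte aligned (two predicate writes in
  // one packet AND together). Speculatively load the first doubleword and
  // seed the loop count with -3 for the doublewords copied outside the loop.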
  {
    p0 = bitsclr(r1,#7)
    p0 = bitsclr(r0,#7)
    if (p0.new) r5:4 = memd(r1)
    r3 = #-3
  }
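  // Packet 2: on a misaligned pointer, tail-call the generic memcpy instead.
  // Otherwise store the first doubleword, load the second, and finish the
  // loop count: r3 = bytes/8 - 3.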
  {
    if (!p0) jump .Lmemcpy_call
    if (p0) memd(r0++#8) = r5:4
    if (p0) r5:4 = memd(r1+#8)
    r3 += lsr(r2,#3)
  }
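  // Packet 3: store the second doubleword, load the third, advance the
  // source pointer past the three doublewords already read, and set up the
  // hardware loop for r3 iterations.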
  {
    memd(r0++#8) = r5:4
    r5:4 = memd(r1+#16)
    r1 = add(r1,#24)
    loop0(1f,r3)
  }
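  // Hardware loop: store the previously loaded doubleword and load the next
  // one, eight bytes per iteration.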
  .falign
1:
  {
    memd(r0++#8) = r5:4
    r5:4 = memd(r1++#8)
  }:endloop0
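  // Epilogue: store the last doubleword, then rewind r0 by (bytes - 8) so the
  // original destination pointer is returned, as memcpy does.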
  {
    memd(r0) = r5:4
    r0 -= add(r2,#-8)
    jumpr r31
  }
FUNCTION_END __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes
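
// Fallback for pointers that are not 8-byte aligned: tail-call the ordinary
// memcpy, through the PLT when built as position-independent code.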
.Lmemcpy_call:
#ifdef __PIC__
  jump memcpy@PLT
#else
  jump memcpy
#endif
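
// Export the same routine under the __qdsp_ prefix as an alias.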
  .globl __qdsp_memcpy_likely_aligned_min32bytes_mult8bytes
  .set __qdsp_memcpy_likely_aligned_min32bytes_mult8bytes, \
       __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes