/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <private/bionic_asm.h>

        /*
         * Optimized memcpy() for ARM.
         *
         * note that memcpy() always returns the destination pointer,
         * so we have to preserve R0.
         */

        .syntax unified

ENTRY(__memcpy_chk)
        cmp         r2, r3
        bls         memcpy
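        /* On entry r2 is the number of bytes to copy and r3 the size of
         * the destination object (as computed by __builtin_object_size).
         * Roughly, in C (a sketch of the intended semantics):
         *
         *   void* __memcpy_chk(void* dst, const void* src,
         *                      size_t count, size_t dst_len) {
         *       if (count <= dst_len) return memcpy(dst, src, count);
         *       __memcpy_chk_fail();  // diagnoses the overflow; no return
         *   }
         */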

        // Preserve lr for backtrace.
        push        {lr}
        .cfi_def_cfa_offset 4
        .cfi_rel_offset lr, 0

        bl          __memcpy_chk_fail
END(__memcpy_chk)

ENTRY(memcpy)
        /* The stack must always be 64-bit aligned to be compliant with the
         * ARM ABI. Since we have to save R0, we might as well save R4,
         * which we can use for better pipelining of the reads below.
         */
        stmfd       sp!, {r0, r4, lr}
        .cfi_def_cfa_offset 12
        .cfi_rel_offset r0, 0
        .cfi_rel_offset r4, 4
        .cfi_rel_offset lr, 8
        /* Making room for r5-r11 which will be spilled later */
        sub         sp, sp, #28
        .cfi_adjust_cfa_offset 28

        // preload the destination because we'll align it to a cache line
        // with small writes. Also start the source "pump".
        pld         [r0, #0]
        pld         [r1, #0]
        pld         [r1, #32]

        /* it simplifies things to take care of len < 4 early */
        cmp         r2, #4
        blo         .Lcopy_last_3_and_return

        /* compute the offset to align the source
         * offset = (4-(src&3))&3 = -src & 3
         */
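        /* e.g. src = 0x1003 gives offset = 1: copying a single byte brings
         * the source up to a word boundary.
         */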
        rsb         r3, r1, #0
        ands        r3, r3, #3
        beq         .Lsrc_aligned

        /* align source to 32 bits. We need to insert 2 instructions between
         * an ldr[b|h] and str[b|h] because byte and half-word instructions
         * stall 2 cycles.
         */
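        /* lsl #31 moves bit 0 of r3 into N and bit 1 into C, so the "mi"
         * pair copies one byte and the "cs" pairs copy two more: exactly
         * r3 (1, 2 or 3) bytes in total.
         */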
        movs        r12, r3, lsl #31
        sub         r2, r2, r3      /* we know that r3 <= r2 because r2 >= 4 */
        ldrbmi      r3, [r1], #1
        ldrbcs      r4, [r1], #1
        ldrbcs      r12,[r1], #1
        strbmi      r3, [r0], #1
        strbcs      r4, [r0], #1
        strbcs      r12,[r0], #1

.Lsrc_aligned:

        /* see if src and dst are aligned together (congruent) */
        eor         r12, r0, r1
        tst         r12, #3
        bne         .Lnon_congruent

        /* Use post-increment mode for stm to spill r5-r11 to reserved stack
         * frame. Don't update sp.
         */
        stmea       sp, {r5-r11}

        /* align the destination to a cache-line */
        rsb         r3, r0, #0
        ands        r3, r3, #0x1C
        beq         .Lcongruent_aligned32
        cmp         r3, r2
        andhi       r3, r2, #0x1C

        /* conditionally copies 0 to 7 words (length in r3) */
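        /* lsl #28 moves bit 4 of r3 into C and bit 3 into N: "cs" copies a
         * 16-byte block, "mi" an 8-byte block, and the tst below picks up
         * the final word.
         */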
        movs        r12, r3, lsl #28
        ldmcs       r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmi       r1!, {r8, r9}           /*  8 bytes */
        stmcs       r0!, {r4, r5, r6, r7}
        stmmi       r0!, {r8, r9}
        tst         r3, #0x4
        ldrne       r10,[r1], #4            /*  4 bytes */
        strne       r10,[r0], #4
        sub         r2, r2, r3

.Lcongruent_aligned32:
        /*
         * here the destination is aligned to a 32-byte cache line
         * (the source is only guaranteed to be word-aligned).
         */

.Lcached_aligned32:
        subs        r2, r2, #32
        blo         .Lless_than_32_left

        /*
         * We preload a cache-line up to 64 bytes ahead. On the 926, this will
         * stall only until the requested word is fetched, but the linefill
         * continues in the background.
         * While the linefill is going, we write our previous cache-line
         * into the write-buffer (which should have some free space).
         * When the linefill is done, the writebuffer will
         * start dumping its content into memory.
         *
         * While all this is going on, we load a full cache line into
         * 8 registers; this cache line should be in the cache by now
         * (or partly in the cache).
         *
         * This code should work well regardless of the source/dest alignment.
         *
         */

        // Align the preload register to a cache-line because the cpu does
        // "critical word first" (the first word requested is loaded first).
        bic         r12, r1, #0x1F
        add         r12, r12, #64
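        // r12 is now 33 to 64 bytes ahead of r1 and advances in lockstep
        // with it below, so the pld touches roughly two cache lines ahead
        // of the current read position.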

1:      ldmia       r1!, { r4-r11 }
        pld         [r12, #64]
        subs        r2, r2, #32

        // NOTE: if r12 were more than 64 bytes ahead of r1, the following
        // ldrhi would not be safely guarded by the preceding subs.
        // When it is safely guarded, the only way to get a SIGSEGV here
        // is if the caller overstates the length.
        ldrhi       r3, [r12], #32      /* cheap ARM9 preload */
        stmia       r0!, { r4-r11 }
        bhs         1b

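        /* the loop's subs overshot by 32; restore the true remaining count */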
        add         r2, r2, #32

.Lless_than_32_left:
        /*
         * less than 32 bytes left at this point (length in r2)
         */

        /* skip all this if there is nothing to do, which should
         * be a common case (the code below takes about 16 cycles
         * when executed)
         */
        tst         r2, #0x1F
        beq         1f

        /* conditionally copies 0 to 31 bytes */
        movs        r12, r2, lsl #28
        ldmcs       r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmi       r1!, {r8, r9}           /*  8 bytes */
        stmcs       r0!, {r4, r5, r6, r7}
        stmmi       r0!, {r8, r9}
        movs        r12, r2, lsl #30
        ldrcs       r3, [r1], #4            /*  4 bytes */
        ldrhmi      r4, [r1], #2            /*  2 bytes */
        strcs       r3, [r0], #4
        strhmi      r4, [r0], #2
        tst         r2, #0x1
        ldrbne      r3, [r1]                /*  last byte  */
        strbne      r3, [r0]

        /* we're done! restore everything and return */
1:      ldmfd       sp!, {r5-r11}
        ldmfd       sp!, {r0, r4, pc}

        /********************************************************************/

.Lnon_congruent:
        /*
         * here source is aligned to 4 bytes
         * but destination is not.
         *
         * in the code below r2 is the number of bytes read
         * (the number of bytes written is always smaller, because we have
         * partial words in the shift queue)
         */
        cmp         r2, #4
        blo         .Lcopy_last_3_and_return

        /* Use post-increment mode for stm to spill r5-r11 to reserved stack
         * frame. Don't update sp.
         */
        stmea       sp, {r5-r11}

        /* compute shifts needed to align src to dest */
        rsb         r5, r0, #0
        and         r5, r5, #3          /* r5 = # bytes in partial words */
        mov         r12, r5, lsl #3     /* r12 = right */
        rsb         lr, r12, #32        /* lr = left  */
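        /* e.g. if dst & 3 == 2, then r5 = 2, r12 = 16 and lr = 16: each
         * output word is built from the high half of one source word and
         * the low half of the next (handled by .Lloop16 below).
         */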

        /* read the first word */
        ldr         r3, [r1], #4
        sub         r2, r2, #4

        /* write a partial word (0 to 3 bytes), such that destination
         * becomes aligned to 32 bits (r5 = number of bytes to copy for
         * alignment)
         */
        movs        r5, r5, lsl #31
        strbmi      r3, [r0], #1
        movmi       r3, r3, lsr #8
        strbcs      r3, [r0], #1
        movcs       r3, r3, lsr #8
        strbcs      r3, [r0], #1
        movcs       r3, r3, lsr #8

        cmp         r2, #4
        blo         .Lpartial_word_tail

        /* Align destination to 32 bytes (cache line boundary) */
1:      tst         r0, #0x1c
        beq         2f
        ldr         r5, [r1], #4
        sub         r2, r2, #4
        orr         r4, r3, r5,     lsl lr
        mov         r3, r5,         lsr r12
        str         r4, [r0], #4
        cmp         r2, #4
        bhs         1b
        blo         .Lpartial_word_tail

        /* copy 32 bytes at a time */
2:      subs        r2, r2, #32
        blo         .Lless_than_thirtytwo

        /* Use immediate mode for the shifts, because there is an extra cycle
         * for register shifts, which could account for up to a 50%
         * performance hit.
         */
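        /* r12 (the right-shift amount) is 8, 16 or 24 here; each unrolled
         * loop below hard-codes the matching pair of shifts. The merge step
         * is, roughly, in C (a sketch of the 16-bit case, little-endian,
         * with 32-bit words):
         *
         *   w = *src++;
         *   *dst++ = leftover | (w << 16);
         *   leftover = w >> 16;
         */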

        cmp         r12, #24
        beq         .Lloop24
        cmp         r12, #8
        beq         .Lloop8

.Lloop16:
        ldr         r12, [r1], #4
1:      mov         r4, r12
        ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
        pld         [r1, #64]
        subs        r2, r2, #32
        ldrhs       r12, [r1], #4
        orr         r3, r3, r4,     lsl #16
        mov         r4, r4,         lsr #16
        orr         r4, r4, r5,     lsl #16
        mov         r5, r5,         lsr #16
        orr         r5, r5, r6,     lsl #16
        mov         r6, r6,         lsr #16
        orr         r6, r6, r7,     lsl #16
        mov         r7, r7,         lsr #16
        orr         r7, r7, r8,     lsl #16
        mov         r8, r8,         lsr #16
        orr         r8, r8, r9,     lsl #16
        mov         r9, r9,         lsr #16
        orr         r9, r9, r10,    lsl #16
        mov         r10, r10,       lsr #16
        orr         r10, r10, r11,  lsl #16
        stmia       r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov         r3, r11,        lsr #16
        bhs         1b
        b           .Lless_than_thirtytwo

.Lloop8:
        ldr         r12, [r1], #4
1:      mov         r4, r12
        ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
        pld         [r1, #64]
        subs        r2, r2, #32
        ldrhs       r12, [r1], #4
        orr         r3, r3, r4,     lsl #24
        mov         r4, r4,         lsr #8
        orr         r4, r4, r5,     lsl #24
        mov         r5, r5,         lsr #8
        orr         r5, r5, r6,     lsl #24
        mov         r6, r6,         lsr #8
        orr         r6, r6, r7,     lsl #24
        mov         r7, r7,         lsr #8
        orr         r7, r7, r8,     lsl #24
        mov         r8, r8,         lsr #8
        orr         r8, r8, r9,     lsl #24
        mov         r9, r9,         lsr #8
        orr         r9, r9, r10,    lsl #24
        mov         r10, r10,       lsr #8
        orr         r10, r10, r11,  lsl #24
        stmia       r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov         r3, r11,        lsr #8
        bhs         1b
        b           .Lless_than_thirtytwo

.Lloop24:
        ldr         r12, [r1], #4
1:      mov         r4, r12
        ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
        pld         [r1, #64]
        subs        r2, r2, #32
        ldrhs       r12, [r1], #4
        orr         r3, r3, r4,     lsl #8
        mov         r4, r4,         lsr #24
        orr         r4, r4, r5,     lsl #8
        mov         r5, r5,         lsr #24
        orr         r5, r5, r6,     lsl #8
        mov         r6, r6,         lsr #24
        orr         r6, r6, r7,     lsl #8
        mov         r7, r7,         lsr #24
        orr         r7, r7, r8,     lsl #8
        mov         r8, r8,         lsr #24
        orr         r8, r8, r9,     lsl #8
        mov         r9, r9,         lsr #24
        orr         r9, r9, r10,    lsl #8
        mov         r10, r10,       lsr #24
        orr         r10, r10, r11,  lsl #8
        stmia       r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov         r3, r11,        lsr #24
        bhs         1b

.Lless_than_thirtytwo:
        /* copy the last 0 to 31 bytes of the source */
        rsb         r12, lr, #32        /* we corrupted r12, recompute it  */
        add         r2, r2, #32
        cmp         r2, #4
        blo         .Lpartial_word_tail

1:      ldr         r5, [r1], #4
        sub         r2, r2, #4
        orr         r4, r3, r5,     lsl lr
        mov         r3, r5,         lsr r12
        str         r4, [r0], #4
        cmp         r2, #4
        bhs         1b

.Lpartial_word_tail:
        /* we have a partial word in the input buffer */
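        /* lr/8 bytes are left in r3; lsl #(31-3) moves bit 3 of lr into N
         * and bit 4 into C, selecting stores of 1, 2 or 3 bytes.
         */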
        movs        r5, lr, lsl #(31-3)
        strbmi      r3, [r0], #1
        movmi       r3, r3, lsr #8
        strbcs      r3, [r0], #1
        movcs       r3, r3, lsr #8
        strbcs      r3, [r0], #1

        /* Refill spilled registers from the stack. Don't update sp. */
        ldmfd       sp, {r5-r11}

.Lcopy_last_3_and_return:
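        /* same flag trick again: bit 0 of r2 goes to N, bit 1 to C */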
        movs        r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
        ldrbmi      r2, [r1], #1
        ldrbcs      r3, [r1], #1
        ldrbcs      r12,[r1]
        strbmi      r2, [r0], #1
        strbcs      r3, [r0], #1
        strbcs      r12,[r0]

        /* we're done! restore sp and spilled registers and return */
        add         sp,  sp, #28
        ldmfd       sp!, {r0, r4, pc}
END(memcpy)