/*
 * Copyright (c) 2013 ARM Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/cpu-features.h>
#include <private/bionic_asm.h>

#ifdef __ARMEB__
#define S2LOMEM lsl
#define S2LOMEMEQ lsleq
#define S2HIMEM lsr
#define MSB 0x000000ff
#define LSB 0xff000000
#define BYTE0_OFFSET 24
#define BYTE1_OFFSET 16
#define BYTE2_OFFSET 8
#define BYTE3_OFFSET 0
#else /* not  __ARMEB__ */
#define S2LOMEM lsr
#define S2LOMEMEQ lsreq
#define S2HIMEM lsl
#define BYTE0_OFFSET 0
#define BYTE1_OFFSET 8
#define BYTE2_OFFSET 16
#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
#endif /* not  __ARMEB__ */

.syntax         unified

#if defined (__thumb__)
        .thumb
        .thumb_func
#endif

ENTRY(strcmp)
      /* Use LDRD whenever possible.  */

/* The main thing to look out for when comparing large blocks is that
   the loads do not cross a page boundary when loading past the index
   of the byte with the first difference or the first string-terminator.

   For example, if the strings are identical and the string-terminator
   is at index k, byte-by-byte comparison will not load beyond address
   s1+k and s2+k; word-by-word comparison may load up to 3 bytes beyond
   k; double-word comparison, up to 7 bytes.  If the load of these bytes
   crosses a page boundary, it might cause a memory fault (if the page is
   not mapped) that would not have happened in byte-by-byte comparison.

   If an address is (double) word aligned, then a load of a (double) word
   from that address will not cross a page boundary.
   Therefore, the algorithm below considers word and double-word alignment
   of strings separately.  */

/* High-level description of the algorithm.

   * The fast path: if both strings are double-word aligned,
     use LDRD to load two words from each string in every loop iteration.
   * If the strings have the same offset from a word boundary,
     use LDRB to load and compare byte by byte until
     the first string is aligned to a word boundary (at most 3 bytes).
     This is optimized for quick return on short unaligned strings.
   * If the strings have the same offset from a double-word boundary,
     use LDRD to load two words from each string in every loop iteration, as in the fast path.
   * If the strings do not have the same offset from a double-word boundary,
     load a word from the second string before the loop to initialize the queue.
     Use LDRD to load two words from each string in every loop iteration.
     Inside the loop, load the second word from the second string only after comparing
     the first word, using the queued value, to guarantee safety across page boundaries.
   * If the strings do not have the same offset from a word boundary,
     use LDR and a shift queue. The order of loads and comparisons matters,
     similarly to the previous case.

   * Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
   * The only difference between ARM and Thumb modes is the use of the CBZ instruction.
   * The only difference between big and little endian is the use of REV in little endian
     to compute the return value, instead of MOV.
*/
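
/* For reference only: a rough C sketch of the word-at-a-time idea used
   below.  The helper names are made up for illustration, it assumes
   32-bit unsigned ints, and it is not a drop-in equivalent of the tuned
   assembly that follows; it covers only the simplest case, where both
   strings are word-aligned.

   // Mask with 0xff in every byte lane of w that is zero, i.e. what
   // magic_find_zero_bytes computes with UADD8/SEL.
   static unsigned zero_byte_mask(unsigned w) {
     unsigned mask = 0;
     for (int i = 0; i < 4; ++i)
       if (((w >> (8 * i)) & 0xffu) == 0)
         mask |= 0xffu << (8 * i);
     return mask;
   }

   // Compare a word at a time; fall back to bytes once a difference or a
   // terminator is seen inside the current pair of words.
   static int strcmp_words(const unsigned *s1, const unsigned *s2) {
     for (;;) {
       unsigned w1 = *s1++, w2 = *s2++;
       if (w1 != w2 || zero_byte_mask(w1) != 0) {
         const unsigned char *p1 = (const unsigned char *)(s1 - 1);
         const unsigned char *p2 = (const unsigned char *)(s2 - 1);
         for (int i = 0; i < 4; ++i)
           if (p1[i] != p2[i] || p1[i] == 0)
             return p1[i] - p2[i];
       }
     }
   }
*/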

        .macro m_cbz reg label
#ifdef __thumb2__
        cbz     \reg, \label
#else   /* not defined __thumb2__ */
        cmp     \reg, #0
        beq     \label
#endif /* not defined __thumb2__ */
        .endm /* m_cbz */

        .macro m_cbnz reg label
#ifdef __thumb2__
        cbnz    \reg, \label
#else   /* not defined __thumb2__ */
        cmp     \reg, #0
        bne     \label
#endif /* not defined __thumb2__ */
        .endm /* m_cbnz */

        .macro  init
        /* Macro to save temporary registers and prepare magic values.  */
        subs    sp, sp, #16
        .cfi_def_cfa_offset 16
        strd    r4, r5, [sp, #8]
        .cfi_rel_offset r4, 0
        .cfi_rel_offset r5, 4
        strd    r6, r7, [sp]
        .cfi_rel_offset r6, 8
        .cfi_rel_offset r7, 12
        mvn     r6, #0  /* all F */
        mov     r7, #0  /* all 0 */
        .endm   /* init */

        .macro  magic_compare_and_branch w1 w2 label
        /* Macro to compare registers w1 and w2 and conditionally branch to label.  */
        cmp     \w1, \w2        /* Are w1 and w2 the same?  */
        magic_find_zero_bytes \w1
        it      eq
        cmpeq   ip, #0          /* Is there a zero byte in w1?  */
        bne     \label
        .endm /* magic_compare_and_branch */

        .macro  magic_find_zero_bytes w1
        /* Macro to find all-zero bytes in w1, result is in ip.  */
        uadd8   ip, \w1, r6
        sel     ip, r7, r6
        .endm /* magic_find_zero_bytes */
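        /* How the UADD8/SEL pair above works (explanatory note): r6 holds
           0xffffffff and r7 holds 0.  UADD8 adds 0xff to every byte of w1
           and sets the per-byte GE flags on carry, i.e. exactly for the
           bytes of w1 that are non-zero.  SEL then picks 0x00 (from r7)
           for those bytes and 0xff (from r6) for the zero bytes, so ip
           ends up with 0xff in every byte lane where w1 has a zero byte
           and 0x00 everywhere else.  */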

        .macro  setup_return w1 w2
#ifdef __ARMEB__
        mov     r1, \w1
        mov     r2, \w2
#else /* not  __ARMEB__ */
        rev     r1, \w1
        rev     r2, \w2
#endif /* not  __ARMEB__ */
        .endm /* setup_return */
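        /* Note: setup_return leaves the two words that contain the first
           difference (or the terminator) in r1 and r2 with their bytes in
           memory order, i.e. the lowest-addressed byte in the most
           significant position.  .L_do_return below then uses the
           zero-byte mask, CLZ and a shift so that the final comparison is
           decided by the first differing byte, as strcmp requires.  */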

        pld [r0, #0]
        pld [r1, #0]

        /* Are both strings double-word aligned?  */
        orr     ip, r0, r1
        tst     ip, #7
        bne     .L_do_align

        /* Fast path.  */
        init

.L_doubleword_aligned:

        /* Get here when the strings to compare are double-word aligned.  */
        /* Compare two words in every iteration.  */
        .p2align        2
2:
        pld [r0, #16]
        pld [r1, #16]

        /* Load the next double-word from each string.  */
        ldrd    r2, r3, [r0], #8
        ldrd    r4, r5, [r1], #8

        magic_compare_and_branch w1=r2, w2=r4, label=.L_return_24
        magic_compare_and_branch w1=r3, w2=r5, label=.L_return_35
        b       2b

.L_do_align:
        /* Is the first string word-aligned?  */
        ands    ip, r0, #3
        beq     .L_word_aligned_r0

        /* Fast compare byte by byte until the first string is word-aligned.  */
        /* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
        to read until the next word boundary is 4-ip.  */
        bic     r0, r0, #3
        ldr     r2, [r0], #4
        lsls    ip, ip, #31
        beq     .L_byte2
        bcs     .L_byte3

.L_byte1:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE1_OFFSET
        subs    ip, r3, ip
        bne     .L_fast_return
        m_cbz   reg=r3, label=.L_fast_return

.L_byte2:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE2_OFFSET
        subs    ip, r3, ip
        bne     .L_fast_return
        m_cbz   reg=r3, label=.L_fast_return

.L_byte3:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE3_OFFSET
        subs    ip, r3, ip
        bne     .L_fast_return
        m_cbnz  reg=r3, label=.L_word_aligned_r0

.L_fast_return:
        mov     r0, ip
        bx      lr

.L_word_aligned_r0:
        init
        /* The first string is word-aligned.  */
        /* Is the second string word-aligned?  */
        ands    ip, r1, #3
        bne     .L_strcmp_unaligned

.L_word_aligned:
        /* The strings are word-aligned. */
        /* Is the first string double-word aligned?  */
        tst     r0, #4
        beq     .L_doubleword_aligned_r0

        /* If r0 is not double-word aligned yet, align it by loading
        and comparing the next word from each string.  */
        ldr     r2, [r0], #4
        ldr     r4, [r1], #4
        magic_compare_and_branch w1=r2 w2=r4 label=.L_return_24

.L_doubleword_aligned_r0:
        /* Get here when r0 is double-word aligned.  */
        /* Is r1 double-word aligned?  */
        tst     r1, #4
        beq     .L_doubleword_aligned

        /* Get here when the strings to compare are word-aligned,
        r0 is double-word aligned, but r1 is not double-word aligned.  */

        /* Initialize the queue.  */
        ldr     r5, [r1], #4

        /* Compare two words in every iteration.  */
        .p2align        2
3:
        pld [r0, #16]
        pld [r1, #16]

        /* Load the next double-word from each string and compare.  */
        ldrd    r2, r3, [r0], #8
        magic_compare_and_branch w1=r2 w2=r5 label=.L_return_25
        ldrd    r4, r5, [r1], #8
        magic_compare_and_branch w1=r3 w2=r4 label=.L_return_34
        b       3b

        .macro miscmp_word offsetlo offsethi
        /* Macro to compare misaligned strings.  */
        /* r0, r1 are word-aligned, and at least one of the strings
        is not double-word aligned.  */
        /* Compare one word in every loop iteration.  */
        /* OFFSETLO is the original bit-offset of r1 from a word boundary,
        OFFSETHI is 32 - OFFSETLO (i.e., the offset from the next word).  */
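        /* Per-iteration sketch (explanatory note): r5 is a "queue" holding
           the word most recently loaded from r1.  Each pass loads the next
           word from r0 into r3 and computes its zero-byte mask.  The low
           part of r3 is first compared against the still-unconsumed bytes
           of the queue (the queue shifted by OFFSETLO); only if they match
           and contain no terminator is the next word loaded from r1, after
           which the high part of r3 is compared against the first bytes of
           that new word (shifted by OFFSETHI).  Splitting the comparison
           this way keeps every load word-aligned and therefore safe with
           respect to page boundaries.  */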

        /* Initialize the shift queue.  */
        ldr     r5, [r1], #4

        /* Compare one word from each string in every loop iteration.  */
        .p2align        2
7:
        ldr     r3, [r0], #4
        S2LOMEM r5, r5, #\offsetlo
        magic_find_zero_bytes w1=r3
        cmp     r7, ip, S2HIMEM #\offsetlo
        and     r2, r3, r6, S2LOMEM #\offsetlo
        it      eq
        cmpeq   r2, r5
        bne     .L_return_25
        ldr     r5, [r1], #4
        cmp     ip, #0
        eor     r3, r2, r3
        S2HIMEM r2, r5, #\offsethi
        it      eq
        cmpeq   r3, r2
        bne     .L_return_32
        b       7b
        .endm /* miscmp_word */

.L_strcmp_unaligned:
        /* r0 is word-aligned, r1 is at offset ip from a word boundary.  */
        /* Align r1 to the (previous) word boundary.  */
        bic     r1, r1, #3

        /* Unaligned comparison word by word using LDRs. */
        cmp     ip, #2
        beq     .L_miscmp_word_16                 /* If ip == 2.  */
        bge     .L_miscmp_word_24                 /* If ip == 3.  */
        miscmp_word offsetlo=8 offsethi=24        /* If ip == 1.  */
.L_miscmp_word_24:  miscmp_word offsetlo=24 offsethi=8

.L_return_32:
        setup_return w1=r3, w2=r2
        b       .L_do_return
.L_return_34:
        setup_return w1=r3, w2=r4
        b       .L_do_return
.L_return_25:
        setup_return w1=r2, w2=r5
        b       .L_do_return
.L_return_35:
        setup_return w1=r3, w2=r5
        b       .L_do_return
.L_return_24:
        setup_return w1=r2, w2=r4

.L_do_return:

#ifdef __ARMEB__
        mov     r0, ip
#else /* not  __ARMEB__ */
        rev     r0, ip
#endif /* not  __ARMEB__ */

        /* Restore temporaries early, before computing the return value.  */
        ldrd    r6, r7, [sp]
        ldrd    r4, r5, [sp, #8]
        adds    sp, sp, #16
        .cfi_def_cfa_offset 0
        .cfi_restore r4
        .cfi_restore r5
        .cfi_restore r6
        .cfi_restore r7
        /* There is a zero byte or a differing byte in r1 and r2.  */
        /* r0 contains a mask of all-zero bytes in r1.  */
        /* Using r0 and not ip here because cbz requires a low register.  */
        m_cbz   reg=r0, label=.L_compute_return_value
        clz     r0, r0
        /* r0 contains the number of bits to the left of the first all-zero byte in r1.  */
        rsb     r0, r0, #24
        /* Here, r0 contains the number of bits to the right of the first all-zero byte in r1.  */
        lsr     r1, r1, r0
        lsr     r2, r2, r0

.L_compute_return_value:
        movs    r0, #1
        cmp     r1, r2
        /* The return value is computed as follows.
        If r1 > r2 then (C==1 and Z==0), LS does not hold, and r0 is #1 at return.
        If r1 < r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
        which means r0 := r0 - r0 - 1, so r0 is #-1 at return.
        If r1 == r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
        which means r0 := r0 - r0, so r0 is #0 at return.
        (C==0 and Z==1) cannot happen because the carry bit is "not borrow".  */
        it      ls
        sbcls   r0, r0, r0
        bx      lr

    /* The code from the previous version of strcmp.S handles this
     * particular case (the second string is 2 bytes off word alignment)
     * faster than any current version. In this very specific case, use the
     * previous version. See bionic/libc/arch-arm/cortex-a15/bionic/strcmp.S
     * for the unedited version of this code.
     */
.L_miscmp_word_16:
	wp1 .req r0
	wp2 .req r1
	b1  .req r2
	w1  .req r4
	w2  .req r5
	t1  .req ip
	@ r3 is scratch

    /* At this point, wp1 (r0) has already been word-aligned. */
2:
	mov	b1, #1
	orr	b1, b1, b1, lsl #8
	orr	b1, b1, b1, lsl #16

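	/* Note: b1 now holds the magic constant 0x01010101.  The loop below
	 * uses the classic (w - 0x01010101) & ~w & 0x80808080 trick: the
	 * resulting "syndrome" in r3 is non-zero if and only if the word w1
	 * contains a zero byte.
	 */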
	and	t1, wp2, #3
	bic	wp2, wp2, #3
	ldr	w1, [wp1], #4
	ldr	w2, [wp2], #4

	/* Critical inner Loop: Block with 2 bytes initial overlap */
	.p2align	2
2:
	S2HIMEM	t1, w1, #16
	sub	r3, w1, b1
	S2LOMEM	t1, t1, #16
	bic	r3, r3, w1
	cmp	t1, w2, S2LOMEM #16
	bne	4f
	ands	r3, r3, b1, lsl #7
	it	eq
	ldreq	w2, [wp2], #4
	bne	5f
	eor	t1, t1, w1
	cmp	t1, w2, S2HIMEM #16
	bne	6f
	ldr	w1, [wp1], #4
	b	2b

5:
#ifdef __ARMEB__
	/* The syndrome value may contain false ones if the string ends
	 * with the bytes 0x01 0x00
	 */
	tst	w1, #0xff000000
	it	ne
	tstne	w1, #0x00ff0000
	beq	7f
#else
	lsls	r3, r3, #16
	bne	7f
#endif
	ldrh	w2, [wp2]
	S2LOMEM	t1, w1, #16
#ifdef __ARMEB__
	lsl	w2, w2, #16
#endif
	b	8f

6:
	S2HIMEM	w2, w2, #16
	S2LOMEM	t1, w1, #16
4:
	S2LOMEM	w2, w2, #16
	b	8f

7:
	mov	r0, #0

    /* Restore registers and stack. */
    ldrd    r6, r7, [sp]
    ldrd    r4, r5, [sp, #8]
    adds    sp, sp, #16
    .cfi_def_cfa_offset 0
    .cfi_restore r4
    .cfi_restore r5
    .cfi_restore r6
    .cfi_restore r7

	bx	lr

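	/* Final byte-by-byte comparison (explanatory note): at this point t1
	 * holds data from the first string and w2 holds data from the second,
	 * aligned so that the next byte to check from each sits in the byte
	 * lane selected by the LSB mask.  Walk through the bytes until a
	 * mismatch or a NUL in the second string is found and return the
	 * difference of those bytes.
	 */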
8:
	and	r2, t1, #LSB
	and	r0, w2, #LSB
	cmp	r0, #1
	it	cs
	cmpcs	r0, r2
	itt	eq
	S2LOMEMEQ	t1, t1, #8
	S2LOMEMEQ	w2, w2, #8
	beq	8b
	sub	r0, r2, r0

    /* Restore registers and stack. */
    ldrd    r6, r7, [sp]
    ldrd    r4, r5, [sp, #8]
    adds    sp, sp, #16
    .cfi_def_cfa_offset 0
    .cfi_restore r4
    .cfi_restore r5
    .cfi_restore r6
    .cfi_restore r7

	bx	lr
END(strcmp)