/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <private/bionic_asm.h>

        /*
         * Optimized memcpy() for ARM.
         *
         * Note that memcpy() always returns the destination pointer,
         * so we have to preserve R0.
         */
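
        /* For reference only (never assembled): a rough C outline of the
         * strategy used below, ignoring strict-aliasing niceties. The
         * function name and includes (<stdint.h>, <stddef.h>) are
         * illustrative, not part of this file.
         *
         *   void *memcpy_sketch(void *dst, const void *src, size_t n) {
         *       unsigned char *d = dst;
         *       const unsigned char *s = src;
         *       // 1. copy 0-3 bytes so that s becomes word-aligned
         *       while (((uintptr_t)s & 3) && n) { *d++ = *s++; n--; }
         *       if ((((uintptr_t)d ^ (uintptr_t)s) & 3) == 0) {
         *           // 2a. congruent case: plain word copies, 32 bytes per
         *           //     iteration in the hot loop, with cache preloads
         *           while (n >= 4) { *(uint32_t *)d = *(const uint32_t *)s;
         *                            d += 4; s += 4; n -= 4; }
         *       } else {
         *           // 2b. non-congruent case: read aligned words from s and
         *           //     re-align them with shifts before storing
         *           //     (see .Lnon_congruent below)
         *       }
         *       // 3. copy the 0-3 trailing bytes
         *       while (n--) *d++ = *s++;
         *       return dst;
         *   }
         */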

        .syntax unified

ENTRY(memcpy)
        /* The stack must always be 64-bit aligned to be compliant with the
         * ARM ABI. Since we have to save R0, we might as well save R4,
         * which we can use for better pipelining of the reads below.
         */
        stmfd       sp!, {r0, r4, lr}
        .cfi_def_cfa_offset 12
        .cfi_rel_offset r0, 0
        .cfi_rel_offset r4, 4
        .cfi_rel_offset lr, 8
        /* Make room for r5-r11, which will be spilled later. */
        sub         sp, sp, #28
        .cfi_adjust_cfa_offset 28

        // preload the destination because we'll align it to a cache line
        // with small writes. Also start the source "pump".
        pld         [r0, #0]
        pld         [r1, #0]
        pld         [r1, #32]

        /* it simplifies things to take care of len<4 early */
        cmp         r2, #4
        blo         .Lcopy_last_3_and_return

        /* compute the offset to align the source
         * offset = (4-(src&3))&3 = -src & 3
         */
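        /* Sanity check of the identity above, as illustrative C (not
         * assembled):
         *
         *   for (uintptr_t src = 0; src < 4; ++src)
         *       assert(((4 - (src & 3)) & 3) == ((0 - src) & 3));
         *
         * which is exactly what the rsb/ands pair below computes into r3.
         */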
        rsb         r3, r1, #0
        ands        r3, r3, #3
        beq         .Lsrc_aligned

        /* align source to 32 bits. We need to insert 2 instructions between
         * a ldr[b|h] and str[b|h] because byte and half-word instructions
         * stall 2 cycles.
         */
        movs        r12, r3, lsl #31
        sub         r2, r2, r3      /* we know that r3 <= r2 because r2 >= 4 */
        ldrbmi      r3, [r1], #1
        ldrbcs      r4, [r1], #1
        ldrbcs      r12,[r1], #1
        strbmi      r3, [r0], #1
        strbcs      r4, [r0], #1
        strbcs      r12,[r0], #1

.Lsrc_aligned:

        /* see if src and dst are aligned together (congruent) */
        eor         r12, r0, r1
        tst         r12, #3
        bne         .Lnon_congruent

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea       sp, {r5-r11}

        /* align the destination to a cache-line */
        rsb         r3, r0, #0
        ands        r3, r3, #0x1C
        beq         .Lcongruent_aligned32
        cmp         r3, r2
        andhi       r3, r2, #0x1C

        /* conditionally copies 0 to 7 words (length in r3) */
        movs        r12, r3, lsl #28
        ldmcs       r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmi       r1!, {r8, r9}           /*  8 bytes */
        stmcs       r0!, {r4, r5, r6, r7}
        stmmi       r0!, {r8, r9}
        tst         r3, #0x4
        ldrne       r10,[r1], #4            /*  4 bytes */
        strne       r10,[r0], #4
        sub         r2, r2, r3
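
        /* The movs ... lsl #28 above is a flag trick: after the shift, the
         * carry flag holds bit 4 of r3 and the sign flag holds bit 3, so the
         * conditional ldm/stm pairs plus the tst amount to the following
         * illustrative C (copyN is a hypothetical N-byte word copy, not a
         * real helper):
         *
         *   if (r3 & 16) { copy16(d, s); d += 16; s += 16; }
         *   if (r3 &  8) { copy8(d, s);  d += 8;  s += 8;  }
         *   if (r3 &  4) { copy4(d, s);  d += 4;  s += 4;  }
         *
         * The same trick (with lsl #31 or lsl #30) is used for the 1/2/3-byte
         * head and tail copies elsewhere in this file.
         */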

.Lcongruent_aligned32:
        /*
         * here the destination is aligned to a 32-byte cache line
         * (the source is aligned to at least 4 bytes).
         */

.Lcached_aligned32:
        subs        r2, r2, #32
        blo         .Lless_than_32_left

        /*
         * We preload a cache-line up to 64 bytes ahead. On the 926, this will
         * stall only until the requested word is fetched, but the linefill
         * continues in the background.
         * While the linefill is going, we write our previous cache-line
         * into the write-buffer (which should have some free space).
         * When the linefill is done, the write-buffer will
         * start dumping its content into memory.
         *
         * While all this is going on, we load a full cache line into
         * 8 registers; this cache line should be in the cache by now
         * (or partly in the cache).
         *
         * This code should work well regardless of the source/dest alignment.
         */
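
        /* Rough C equivalent of the loop below, using the d/s/n names from
         * the outline at the top of this file (illustrative only;
         * __builtin_prefetch stands in for pld, and the count check is
         * folded differently in the assembly):
         *
         *   while (n >= 32) {
         *       __builtin_prefetch(s + 64);            // preload ahead
         *       uint32_t w[8];
         *       for (int i = 0; i < 8; i++) w[i] = ((const uint32_t *)s)[i];
         *       for (int i = 0; i < 8; i++) ((uint32_t *)d)[i] = w[i];
         *       s += 32; d += 32; n -= 32;
         *   }
         */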

        // Align the preload register to a cache-line because the cpu does
        // "critical word first" (the first word requested is loaded first).
        bic         r12, r1, #0x1F
        add         r12, r12, #64

1:      ldmia       r1!, { r4-r11 }
        pld         [r12, #64]
        subs        r2, r2, #32

        // NOTE: if r12 is more than 64 bytes ahead of r1, the following ldrhi
        // (the cheap ARM9 preload) is not safely guarded by the preceding subs.
        // When it is safely guarded, the only way to get a SIGSEGV here is if
        // the caller overstates the length.
        ldrhi       r3, [r12], #32      /* cheap ARM9 preload */
        stmia       r0!, { r4-r11 }
        bhs         1b

        add         r2, r2, #32

.Lless_than_32_left:
        /*
         * less than 32 bytes left at this point (length in r2)
         */

        /* skip all this if there is nothing to do, which should
         * be a common case (when not skipped, the code below takes
         * about 16 cycles)
         */
        tst         r2, #0x1F
        beq         1f

        /* conditionally copies 0 to 31 bytes */
        movs        r12, r2, lsl #28
        ldmcs       r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmi       r1!, {r8, r9}           /*  8 bytes */
        stmcs       r0!, {r4, r5, r6, r7}
        stmmi       r0!, {r8, r9}
        movs        r12, r2, lsl #30
        ldrcs       r3, [r1], #4            /*  4 bytes */
        ldrhmi      r4, [r1], #2            /*  2 bytes */
        strcs       r3, [r0], #4
        strhmi      r4, [r0], #2
        tst         r2, #0x1
        ldrbne      r3, [r1]                /*  last byte  */
        strbne      r3, [r0]

        /* we're done! restore everything and return */
1:      ldmfd       sp!, {r5-r11}
        ldmfd       sp!, {r0, r4, pc}

        /********************************************************************/

.Lnon_congruent:
        /*
         * here source is aligned to 4 bytes
         * but destination is not.
         *
         * in the code below r2 is the number of bytes read
         * (the number of bytes written is always smaller, because we have
         * partial words in the shift queue)
         */
        cmp         r2, #4
        blo         .Lcopy_last_3_and_return

        /* Use post-increment mode for stm to spill r5-r11 to reserved stack
         * frame. Don't update sp.
         */
        stmea       sp, {r5-r11}

        /* compute shifts needed to align src to dest */
        rsb         r5, r0, #0
        and         r5, r5, #3          /* r5 = # bytes in partial words */
        mov         r12, r5, lsl #3     /* r12 = right-shift amount in bits */
        rsb         lr, r12, #32        /* lr = left-shift amount in bits */
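
        /* The shift-queue technique used from here on, as an illustrative C
         * sketch (little-endian, as on this target; not assembled). queue
         * plays the role of r3 and holds the bytes read but not yet written;
         * left and right are the shift amounts in lr and r12:
         *
         *   while (n >= 4) {
         *       uint32_t next = *(const uint32_t *)s; s += 4; n -= 4;
         *       *(uint32_t *)d = queue | (next << left);   // left  = lr
         *       queue = next >> right;                     // right = r12
         *       d += 4;
         *   }
         */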

        /* read the first word */
        ldr         r3, [r1], #4
        sub         r2, r2, #4

        /* write a partial word (0 to 3 bytes), such that destination
         * becomes aligned to 32 bits (r5 = number of bytes to copy for
         * alignment)
         */
        movs        r5, r5, lsl #31
        strbmi      r3, [r0], #1
        movmi       r3, r3, lsr #8
        strbcs      r3, [r0], #1
        movcs       r3, r3, lsr #8
        strbcs      r3, [r0], #1
        movcs       r3, r3, lsr #8

        cmp         r2, #4
        blo         .Lpartial_word_tail

        /* Align destination to 32 bytes (cache line boundary) */
1:      tst         r0, #0x1c
        beq         2f
        ldr         r5, [r1], #4
        sub         r2, r2, #4
        orr         r4, r3, r5,     lsl lr
        mov         r3, r5,         lsr r12
        str         r4, [r0], #4
        cmp         r2, #4
        bhs         1b
        blo         .Lpartial_word_tail

        /* copy 32 bytes at a time */
2:      subs        r2, r2, #32
        blo         .Lless_than_thirtytwo

        /* Use immediate mode for the shifts, because register-specified
         * shifts cost an extra cycle, which could amount to as much as a
         * 50% performance hit.
         */

        cmp         r12, #24
        beq         .Lloop24
        cmp         r12, #8
        beq         .Lloop8
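
        /* The three loops below are the same merge shown above, but with the
         * shift amounts baked in as immediates. For example, the 16-bit case
         * (.Lloop16) is, in illustrative C:
         *
         *   out[i] = queue | (w[i] << 16);
         *   queue  = w[i] >> 16;
         *
         * applied to the eight words loaded on each iteration.
         */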

.Lloop16:
        ldr         r12, [r1], #4
1:      mov         r4, r12
        ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
        pld         [r1, #64]
        subs        r2, r2, #32
        ldrhs       r12, [r1], #4
        orr         r3, r3, r4,     lsl #16
        mov         r4, r4,         lsr #16
        orr         r4, r4, r5,     lsl #16
        mov         r5, r5,         lsr #16
        orr         r5, r5, r6,     lsl #16
        mov         r6, r6,         lsr #16
        orr         r6, r6, r7,     lsl #16
        mov         r7, r7,         lsr #16
        orr         r7, r7, r8,     lsl #16
        mov         r8, r8,         lsr #16
        orr         r8, r8, r9,     lsl #16
        mov         r9, r9,         lsr #16
        orr         r9, r9, r10,    lsl #16
        mov         r10, r10,       lsr #16
        orr         r10, r10, r11,  lsl #16
        stmia       r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov         r3, r11,        lsr #16
        bhs         1b
        b           .Lless_than_thirtytwo

.Lloop8:
        ldr         r12, [r1], #4
1:      mov         r4, r12
        ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
        pld         [r1, #64]
        subs        r2, r2, #32
        ldrhs       r12, [r1], #4
        orr         r3, r3, r4,     lsl #24
        mov         r4, r4,         lsr #8
        orr         r4, r4, r5,     lsl #24
        mov         r5, r5,         lsr #8
        orr         r5, r5, r6,     lsl #24
        mov         r6, r6,         lsr #8
        orr         r6, r6, r7,     lsl #24
        mov         r7, r7,         lsr #8
        orr         r7, r7, r8,     lsl #24
        mov         r8, r8,         lsr #8
        orr         r8, r8, r9,     lsl #24
        mov         r9, r9,         lsr #8
        orr         r9, r9, r10,    lsl #24
        mov         r10, r10,       lsr #8
        orr         r10, r10, r11,  lsl #24
        stmia       r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov         r3, r11,        lsr #8
        bhs         1b
        b           .Lless_than_thirtytwo

.Lloop24:
        ldr         r12, [r1], #4
1:      mov         r4, r12
        ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
        pld         [r1, #64]
        subs        r2, r2, #32
        ldrhs       r12, [r1], #4
        orr         r3, r3, r4,     lsl #8
        mov         r4, r4,         lsr #24
        orr         r4, r4, r5,     lsl #8
        mov         r5, r5,         lsr #24
        orr         r5, r5, r6,     lsl #8
        mov         r6, r6,         lsr #24
        orr         r6, r6, r7,     lsl #8
        mov         r7, r7,         lsr #24
        orr         r7, r7, r8,     lsl #8
        mov         r8, r8,         lsr #24
        orr         r8, r8, r9,     lsl #8
        mov         r9, r9,         lsr #24
        orr         r9, r9, r10,    lsl #8
        mov         r10, r10,       lsr #24
        orr         r10, r10, r11,  lsl #8
        stmia       r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov         r3, r11,        lsr #24
        bhs         1b


.Lless_than_thirtytwo:
        /* copy the last 0 to 31 bytes of the source */
        rsb         r12, lr, #32        /* we corrupted r12, recompute it  */
        add         r2, r2, #32
        cmp         r2, #4
        blo         .Lpartial_word_tail

1:      ldr         r5, [r1], #4
        sub         r2, r2, #4
        orr         r4, r3, r5,     lsl lr
        mov         r3, r5,         lsr r12
        str         r4, [r0], #4
        cmp         r2, #4
        bhs         1b

.Lpartial_word_tail:
        /* we have a partial word in the input buffer */
        movs        r5, lr, lsl #(31-3)
        strbmi      r3, [r0], #1
        movmi       r3, r3, lsr #8
        strbcs      r3, [r0], #1
        movcs       r3, r3, lsr #8
        strbcs      r3, [r0], #1
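
        /* The movs ... lsl #(31-3) above shifts lr so that the two low bits
         * of the pending byte count (lr >> 3) land in the N and C flags; in
         * illustrative C terms:
         *
         *   unsigned pending = left >> 3;              // 1, 2 or 3 bytes in r3
         *   if (pending & 1) { *d++ = (unsigned char)r3; r3 >>= 8; }
         *   if (pending & 2) { *d++ = (unsigned char)r3;
         *                      *d++ = (unsigned char)(r3 >> 8); }
         */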

        /* Refill spilled registers from the stack. Don't update sp. */
        ldmfd       sp, {r5-r11}

.Lcopy_last_3_and_return:
        movs        r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
        ldrbmi      r2, [r1], #1
        ldrbcs      r3, [r1], #1
        ldrbcs      r12,[r1]
        strbmi      r2, [r0], #1
        strbcs      r3, [r0], #1
        strbcs      r12,[r0]

        /* we're done! restore sp and spilled registers and return */
        add         sp,  sp, #28
        ldmfd       sp!, {r0, r4, pc}
END(memcpy)