.text
.p2align 2
.global ia_eld_decoder_sbr_pre_twiddle
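
@ Reference model (an assumption for documentation, not part of the original
@ sources): a plain C sketch of what this routine computes. Each 32-bit
@ twiddle word packs the cosine in its lower 16 bits and the sine in its
@ upper 16 bits, and 63 complex samples are rotated by the conjugate
@ twiddle. SMULWB/SMULWT are modelled as a 32x16 multiply that keeps the
@ top 32 bits of the 48-bit product:
@
@   void sbr_pre_twiddle_ref(int *pXre, int *pXim, const int *pTwiddles)
@   {
@       for (int i = 0; i < 63; i++) {
@           int   tw = *pTwiddles++;
@           short c  = (short)(tw & 0xFFFF);      /* cosine, lower half */
@           short s  = (short)(tw >> 16);         /* sine, upper half   */
@           int   re = pXre[i], im = pXim[i];
@           int   rc = (int)(((long long)re * c) >> 16) << 1; /* SMULWB + LSL #1 */
@           int   is = (int)(((long long)im * s) >> 16) << 1; /* SMULWT + LSL #1 */
@           int   ic = (int)(((long long)im * c) >> 16) << 1;
@           int   rs = (int)(((long long)re * s) >> 16) << 1;
@           pXre[i] = rc + is;                    /* re' = re*cos + im*sin */
@           pXim[i] = ic - rs;                    /* im' = im*cos - re*sin */
@       }
@   }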

ia_eld_decoder_sbr_pre_twiddle:

    STMFD           sp!, {r4-r12, r14}  @ Save working registers and LR
    LDR             r4, [r0, #0]        @ Xre = *pXre
    MOV             r3, #62             @ Loop count: 62 iterations here plus 1 in the epilogue
    LDR             r5, [r1, #0]        @ Xim = *pXim

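@ The loop is software-pipelined: the loads of the next Xre/Xim pair are
@ interleaved with the current pair's multiplies, so the load latency is
@ overlapped with the SMULWB/SMULWT work.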
LOOP:
    LDR             r6, [r2], #4        @ Load twiddle and post-increment: *pTwiddles++ (lower half cosine, upper half sine)
    SUBS            r3, r3, #1          @ Decrement loop count and set flags

    SMULWB          r8, r4, r6          @ mult32x16in32(Xre, cosine)
    LSL             r8, r8, #1          @ Left shift by 1: mult32x16in32_shl(Xre, cosine)

    SMULWT          r10, r5, r6         @ mult32x16in32(Xim, sine)

    ADD             r12, r8, r10, LSL #1 @ re = mult32x16in32_shl(Xre, cosine) + mult32x16in32_shl(Xim, sine)

    SMULWT          r7, r4, r6          @ mult32x16in32(Xre, sine)
    LDR             r4, [r0, #4]        @ Preload next iteration's Xre = pXre[1]

    SMULWB          r9, r5, r6          @ mult32x16in32(Xim, cosine)
    STR             r12, [r0], #4       @ Store and post-increment: *pXre++ = re

    LSL             r9, r9, #1          @ Left shift by 1: mult32x16in32_shl(Xim, cosine)
    LDR             r5, [r1, #4]        @ Preload next iteration's Xim = pXim[1]

    SUB             r14, r9, r7, LSL #1 @ im = mult32x16in32_shl(Xim, cosine) - mult32x16in32_shl(Xre, sine)

    STR             r14, [r1], #4       @ Store and post-increment: *pXim++ = im

    BNE             LOOP                @ Branch back while r3 != 0

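@ Final iteration, peeled out of the loop: the loop body preloads the next
@ Xre/Xim pair, so the last sample is handled here to avoid reading past the
@ end of the input buffers.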
EPILOGUE:

    LDR             r6, [r2], #4        @ Load the last twiddle

    SMULWB          r8, r4, r6          @ mult32x16in32(Xre, cosine)
    LSL             r8, r8, #1          @ mult32x16in32_shl(Xre, cosine)

    SMULWT          r10, r5, r6         @ mult32x16in32(Xim, sine)

    ADD             r12, r8, r10, LSL #1 @ re = mult32x16in32_shl(Xre, cosine) + mult32x16in32_shl(Xim, sine)

    SMULWB          r9, r5, r6          @ mult32x16in32(Xim, cosine)
    LSL             r9, r9, #1          @ mult32x16in32_shl(Xim, cosine)

    SMULWT          r7, r4, r6          @ mult32x16in32(Xre, sine)

    SUB             r14, r9, r7, LSL #1 @ im = mult32x16in32_shl(Xim, cosine) - mult32x16in32_shl(Xre, sine)

    STR             r12, [r0], #4       @ *pXre++ = re
    STR             r14, [r1], #4       @ *pXim++ = im

END_LOOP:

    LDMFD           sp!, {r4-r12, pc}   @ Restore registers and return