    Searched refs: pu1_src_tmp (Results 1 - 23 of 23)

  /external/libhevc/common/arm/
ihevc_inter_pred_chroma_copy.s 131 vld1.32 {d0[0]},[r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
132 add r5,r0,r2 @pu1_src_tmp += src_strd
135 vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
138 vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
141 vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
148 sub r0,r5,r11 @pu1_src = pu1_src_tmp
163 vld1.32 {d0[0]},[r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
164 add r5,r0,r2 @pu1_src_tmp += src_strd
167 vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
185 add r5,r0,r2 @pu1_src_tmp += src_strd
    [all...]
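
The chroma copy hits above are the wd == 4 path: each row moves as a single 32-bit lane load/store while a second pointer (r5) advances by src_strd. A minimal C-intrinsics sketch of that inner loop, assuming a plain row-copy kernel; the function and parameter names are illustrative, not the library's exported interface:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Hypothetical wd == 4 copy loop mirroring the comments above:
     * one 32-bit lane per row, pointers stepped by their strides. */
    static void copy_w4(const uint8_t *pu1_src, uint8_t *pu1_dst,
                        int src_strd, int dst_strd, int ht)
    {
        for (int row = 0; row < ht; row++)
        {
            uint32x2_t src_tmp = vdup_n_u32(0);
            src_tmp = vld1_lane_u32((const uint32_t *)pu1_src, src_tmp, 0);
            vst1_lane_u32((uint32_t *)pu1_dst, src_tmp, 0);
            pu1_src += src_strd;   /* pu1_src_tmp += src_strd */
            pu1_dst += dst_strd;
        }
    }
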
ihevc_inter_pred_luma_copy_w16out.s 107 vld1.8 {d0},[r0] @vld1_u8(pu1_src_tmp)
109 vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp))
113 vld1.8 {d22},[r5],r2 @vld1_u8(pu1_src_tmp)
117 vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp))
118 vld1.8 {d24},[r5],r2 @vld1_u8(pu1_src_tmp)
120 vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp))
123 vld1.8 {d26},[r5],r2 @vld1_u8(pu1_src_tmp)
125 vmovl.u8 q13,d26 @vmovl_u8(vld1_u8(pu1_src_tmp))
152 add r6,r0,r2 @pu1_src_tmp += src_strd
154 vld1.8 {d8},[r0]! @vld1_u8(pu1_src_tmp)
    [all...]
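
The w16out kernels load eight pixels, zero-extend them with vmovl.u8 and store 16-bit intermediates. A hedged sketch of one row; the 6-bit left shift into HEVC's intermediate range is an assumption based on the codec's usual 8-bit profile, since the hits only show the load/widen pair:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Illustrative widening copy: u8 pixels in, WORD16 intermediates out. */
    static void copy_w16out_w8(const uint8_t *pu1_src, int16_t *pi2_dst,
                               int src_strd, int dst_strd, int ht)
    {
        for (int row = 0; row < ht; row++)
        {
            uint8x8_t  src  = vld1_u8(pu1_src);   /* vld1_u8(pu1_src_tmp) */
            uint16x8_t wide = vmovl_u8(src);      /* vmovl_u8(...)        */
            /* Assumed: shift by 14 - bit_depth = 6 for 8-bit sources. */
            int16x8_t  dst  = vshlq_n_s16(vreinterpretq_s16_u16(wide), 6);
            vst1q_s16(pi2_dst, dst);
            pu1_src += src_strd;
            pi2_dst += dst_strd;
        }
    }
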
ihevc_inter_pred_chroma_copy_w16out.s 139 vld1.8 {d0},[r0] @vld1_u8(pu1_src_tmp)
141 vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp))
145 vld1.8 {d22},[r5],r2 @vld1_u8(pu1_src_tmp)
149 vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp))
150 vld1.8 {d24},[r5],r2 @vld1_u8(pu1_src_tmp)
152 vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp))
155 vld1.8 {d26},[r5],r2 @vld1_u8(pu1_src_tmp)
157 vmovl.u8 q13,d26 @vmovl_u8(vld1_u8(pu1_src_tmp))
181 vld1.8 {d0},[r0] @vld1_u8(pu1_src_tmp)
183 vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp))
    [all...]
ihevc_inter_pred_luma_copy.s 106 vld1.32 {d0[0]},[r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
107 add r5,r0,r2 @pu1_src_tmp += src_strd
110 vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
113 vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
116 vld1.32 {d0[0]},[r5],r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
124 sub r0,r5,r11 @pu1_src = pu1_src_tmp
141 add r5,r0,r2 @pu1_src_tmp += src_strd
142 vld1.8 {d0},[r0]! @vld1_u8(pu1_src_tmp)
145 vld1.8 {d1},[r5],r2 @vld1_u8(pu1_src_tmp)
148 vld1.8 {d2},[r5],r2 @vld1_u8(pu1_src_tmp)
    [all...]
ihevc_inter_pred_filters_luma_vert_w16inp.s 148 add r3,r0,r2 @pu1_src_tmp += src_strd@
149 vld1.16 {d1},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
150 vld1.16 {d0},[r0]! @src_tmp1 = vld1_u8(pu1_src_tmp)@
152 vld1.16 {d2},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
154 vld1.16 {d3},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
156 vld1.16 {d4},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
158 vld1.16 {d5},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
160 vld1.16 {d6},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
162 vld1.16 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
167 vld1.16 {d16},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)
    [all...]
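
This variant consumes 16-bit intermediates (vld1.16), so each tap is widened into a 32-bit accumulator; in the library that is vmlal.s16. A sketch of one four-sample output group, with the final 12-bit rounding shift an assumption for the 8-bit-output path:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Illustrative 8-tap vertical filter over WORD16 input rows. */
    static void vert_filter_w16inp_w4(const int16_t *pi2_src, int src_strd,
                                      const int16_t i2_coeff[8],
                                      uint8_t *pu1_dst)
    {
        const int16_t *pi2_src_tmp = pi2_src;
        int32x4_t sum = vdupq_n_s32(0);

        for (int tap = 0; tap < 8; tap++)
        {
            int16x4_t row = vld1_s16(pi2_src_tmp);      /* src_tmpN = vld1... */
            sum = vmlal_n_s16(sum, row, i2_coeff[tap]); /* widening MAC       */
            pi2_src_tmp += src_strd;                    /* += src_strd        */
        }
        /* Assumed rounding shift; saturate and narrow to 8 bits. */
        int16x4_t res = vqrshrn_n_s32(sum, 12);
        uint8x8_t out = vqmovun_s16(vcombine_s16(res, res));
        vst1_lane_u32((uint32_t *)pu1_dst, vreinterpret_u32_u8(out), 0);
    }
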
ihevc_inter_pred_luma_vert_w16inp_w16out.s 158 add r3,r0,r2 @pu1_src_tmp += src_strd@
159 vld1.16 {d1},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
160 vld1.16 {d0},[r0]! @src_tmp1 = vld1_u8(pu1_src_tmp)@
162 vld1.16 {d2},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
164 vld1.16 {d3},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
166 vld1.16 {d4},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
168 vld1.16 {d5},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
170 vld1.16 {d6},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
172 vld1.16 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
177 vld1.16 {d16},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)
    [all...]
ihevc_inter_pred_filters_luma_vert.s 159 add r3,r0,r2 @pu1_src_tmp += src_strd@
160 vld1.u8 {d1},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
161 vld1.u8 {d0},[r0]! @src_tmp1 = vld1_u8(pu1_src_tmp)@
163 vld1.u8 {d2},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
165 vld1.u8 {d3},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
167 vld1.u8 {d4},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
169 vld1.u8 {d5},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
171 vld1.u8 {d6},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
173 vld1.u8 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
175 vld1.u8 {d16},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)
    [all...]
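
The u8 vertical filters share this prologue: r3 walks eight successive rows while r0 keeps the column base, filling d0-d7 before the software pipeline starts. A simplified sketch of one 8-wide output row; the real kernel holds each tap in a duplicated lane and pairs vmull.u8/vmlal.u8/vmlsl.u8 by coefficient sign, which this vmla-based loop glosses over:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Illustrative 8-tap vertical filter, one 8-pixel output row.
     * Assumes taps sum to 64 (the usual 6-bit filter gain). */
    static void vert_filter_row_w8(const uint8_t *pu1_src, int src_strd,
                                   const int16_t i2_coeff[8], uint8_t *pu1_dst)
    {
        const uint8_t *pu1_src_tmp = pu1_src;
        int16x8_t sum = vdupq_n_s16(0);

        for (int tap = 0; tap < 8; tap++)
        {
            uint8x8_t row = vld1_u8(pu1_src_tmp);  /* src_tmpN = vld1_u8(...) */
            int16x8_t row_wide = vreinterpretq_s16_u16(vmovl_u8(row));
            sum = vmlaq_n_s16(sum, row_wide, i2_coeff[tap]);
            pu1_src_tmp += src_strd;               /* pu1_src_tmp += src_strd */
        }
        vst1_u8(pu1_dst, vqrshrun_n_s16(sum, 6));  /* round, saturate, narrow */
    }
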
ihevc_inter_pred_chroma_vert.s 188 vld1.32 {d6[0]},[r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp1, 0)
191 vld1.32 {d6[1]},[r6],r2 @loads pu1_src_tmp
193 vld1.32 {d7[1]},[r6],r2 @loads pu1_src_tmp
ihevc_inter_pred_chroma_vert_w16out.s 187 vld1.32 {d6[0]},[r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp1, 0)
190 vld1.32 {d6[1]},[r6],r2 @loads pu1_src_tmp
192 vld1.32 {d7[1]},[r6],r2 @loads pu1_src_tmp
  /external/libhevc/encoder/arm/
ihevce_decomp_pre_intra_pass_neon.c 74 UWORD8 *pu1_src_tmp = pu1_src - 3 * src_strd; local
82 tmp = (i4_ftaps[3] * pu1_src_tmp[j] +
83 i4_ftaps[2] * (pu1_src_tmp[j - 1] + pu1_src_tmp[j + 1]) +
84 i4_ftaps[1] * (pu1_src_tmp[j + 2] + pu1_src_tmp[j - 2]) +
85 i4_ftaps[0] * (pu1_src_tmp[j + 3] + pu1_src_tmp[j - 3]) +
91 pu1_src_tmp += src_strd;
169 UWORD8 *pu1_src_tmp = pu1_src + wd_offset + ht_offset * src_strd; local
    [all...]
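
The NEON pre-intra decomposition file keeps a scalar form of its symmetric 7-tap filter, which the truncated expression above comes from. A completed scalar version; the rounding constant and 6-bit normalisation are assumptions about the filter gain, not values taken from the hits:

    #include <stdint.h>

    /* Illustrative symmetric 7-tap filter: i4_ftaps[3] weights the centre
     * pixel, i4_ftaps[2..0] weight the pairs at distances 1..3. */
    static uint8_t filter7_sym(const uint8_t *pu1_src_tmp, int j,
                               const int32_t i4_ftaps[4])
    {
        int32_t tmp = i4_ftaps[3] * pu1_src_tmp[j] +
                      i4_ftaps[2] * (pu1_src_tmp[j - 1] + pu1_src_tmp[j + 1]) +
                      i4_ftaps[1] * (pu1_src_tmp[j - 2] + pu1_src_tmp[j + 2]) +
                      i4_ftaps[0] * (pu1_src_tmp[j - 3] + pu1_src_tmp[j + 3]) +
                      32;                 /* assumed rounding offset   */
        tmp >>= 6;                        /* assumed 6-bit filter gain */
        if (tmp < 0)   tmp = 0;           /* clip to the 8-bit range   */
        if (tmp > 255) tmp = 255;
        return (uint8_t)tmp;
    }
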
ihevce_scale_by_2_neon.c 74 UWORD8 *pu1_src_tmp = pu1_src + j * src_strd - 3; local
79 uint8x16x2_t src = vld2q_u8(pu1_src_tmp);
107 pu1_src_tmp += 16;
125 src[mod8] = vld1q_u8(pu1_src_tmp); \
126 pu1_src_tmp += src_strd; \
133 UWORD8 *pu1_src_tmp = pu1_src - 3 * src_strd + i; local
218 UWORD8 *pu1_src_tmp = pu1_src + j * src_strd; local
234 pu1_src_tmp - 3 * src_strd + i,
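
ihevce_scale_by_2_neon.c starts its horizontal 2:1 decimation with vld2q_u8, which de-interleaves 32 consecutive pixels into their even and odd columns in a single load, so the filter can address every second pixel directly. A sketch of just that setup step:

    #include <arm_neon.h>
    #include <stdint.h>

    /* De-interleaving load: src.val[0] holds columns 0,2,4,...,30 and
     * src.val[1] holds columns 1,3,5,...,31 of the 32 bytes read. */
    static void load_even_odd(const uint8_t *pu1_src_tmp,
                              uint8x16_t *even, uint8x16_t *odd)
    {
        uint8x16x2_t src = vld2q_u8(pu1_src_tmp);
        *even = src.val[0];
        *odd  = src.val[1];
    }
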
  /external/libavc/common/arm/
ih264_inter_pred_luma_copy_a9q.s 96 vld1.32 {d0[0]}, [r0] @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
97 add r5, r0, r2 @pu1_src_tmp += src_strd
100 vld1.32 {d0[0]}, [r5], r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
103 vld1.32 {d0[0]}, [r5], r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
106 vld1.32 {d0[0]}, [r5], r2 @vld1_lane_u32((uint32_t *)pu1_src_tmp, src_tmp, 0)
114 sub r0, r5, r11 @pu1_src = pu1_src_tmp
132 add r5, r0, r2 @pu1_src_tmp += src_strd
133 vld1.8 {d0}, [r0]! @vld1_u8(pu1_src_tmp)
136 vld1.8 {d1}, [r5], r2 @vld1_u8(pu1_src_tmp)
139 vld1.8 {d2}, [r5], r2 @vld1_u8(pu1_src_tmp)
    [all...]
  /external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve8_avg_vert_filter_type1_neon.asm 76 add r3, r0, r2 ;pu1_src_tmp += src_strd;
78 vld1.u8 {d1}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
79 vld1.u8 {d0}, [r0]! ;src_tmp1 = vld1_u8(pu1_src_tmp);
81 vld1.u8 {d2}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
84 vld1.u8 {d3}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
87 vld1.u8 {d4}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
90 vld1.u8 {d5}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
93 vld1.u8 {d6}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
96 vld1.u8 {d7}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
99 vld1.u8 {d16}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    [all...]
vpx_convolve8_avg_vert_filter_type2_neon.asm 77 add r3, r0, r2 ;pu1_src_tmp += src_strd;
79 vld1.u8 {d1}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
80 vld1.u8 {d0}, [r0]! ;src_tmp1 = vld1_u8(pu1_src_tmp);
82 vld1.u8 {d2}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
85 vld1.u8 {d3}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
88 vld1.u8 {d4}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
91 vld1.u8 {d5}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
94 vld1.u8 {d6}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
97 vld1.u8 {d7}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
100 vld1.u8 {d16}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    [all...]
vpx_convolve8_vert_filter_type1_neon.asm 77 add r3, r0, r2 ;pu1_src_tmp += src_strd;
79 vld1.u8 {d1}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
80 vld1.u8 {d0}, [r0]! ;src_tmp1 = vld1_u8(pu1_src_tmp);
82 vld1.u8 {d2}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
85 vld1.u8 {d3}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
88 vld1.u8 {d4}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
91 vld1.u8 {d5}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
94 vld1.u8 {d6}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
97 vld1.u8 {d7}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
100 vld1.u8 {d16}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    [all...]
vpx_convolve8_vert_filter_type2_neon.asm 77 add r3, r0, r2 ;pu1_src_tmp += src_strd;
79 vld1.u8 {d1}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
80 vld1.u8 {d0}, [r0]! ;src_tmp1 = vld1_u8(pu1_src_tmp);
82 vld1.u8 {d2}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
85 vld1.u8 {d3}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
88 vld1.u8 {d4}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
91 vld1.u8 {d5}, [r3], r2 ;src_tmp2 = vld1_u8(pu1_src_tmp);
94 vld1.u8 {d6}, [r3], r2 ;src_tmp3 = vld1_u8(pu1_src_tmp);
97 vld1.u8 {d7}, [r3], r2 ;src_tmp4 = vld1_u8(pu1_src_tmp);
100 vld1.u8 {d16}, [r3], r2 ;src_tmp1 = vld1_u8(pu1_src_tmp);
    [all...]
  /external/libvpx/config/arm-neon/vpx_dsp/arm/
vpx_convolve8_avg_vert_filter_type1_neon.asm.S 83 add r3, r0, r2 @pu1_src_tmp += src_strd;
85 vld1.u8 {d1}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
86 vld1.u8 {d0}, [r0]! @src_tmp1 = vld1_u8(pu1_src_tmp);
88 vld1.u8 {d2}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
91 vld1.u8 {d3}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
94 vld1.u8 {d4}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
97 vld1.u8 {d5}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
100 vld1.u8 {d6}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
103 vld1.u8 {d7}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
106 vld1.u8 {d16}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    [all...]
vpx_convolve8_avg_vert_filter_type2_neon.asm.S 84 add r3, r0, r2 @pu1_src_tmp += src_strd;
86 vld1.u8 {d1}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
87 vld1.u8 {d0}, [r0]! @src_tmp1 = vld1_u8(pu1_src_tmp);
89 vld1.u8 {d2}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
92 vld1.u8 {d3}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
95 vld1.u8 {d4}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
98 vld1.u8 {d5}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
101 vld1.u8 {d6}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
104 vld1.u8 {d7}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
107 vld1.u8 {d16}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    [all...]
vpx_convolve8_vert_filter_type1_neon.asm.S 84 add r3, r0, r2 @pu1_src_tmp += src_strd;
86 vld1.u8 {d1}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
87 vld1.u8 {d0}, [r0]! @src_tmp1 = vld1_u8(pu1_src_tmp);
89 vld1.u8 {d2}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
92 vld1.u8 {d3}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
95 vld1.u8 {d4}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
98 vld1.u8 {d5}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
101 vld1.u8 {d6}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
104 vld1.u8 {d7}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
107 vld1.u8 {d16}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    [all...]
vpx_convolve8_vert_filter_type2_neon.asm.S 84 add r3, r0, r2 @pu1_src_tmp += src_strd;
86 vld1.u8 {d1}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
87 vld1.u8 {d0}, [r0]! @src_tmp1 = vld1_u8(pu1_src_tmp);
89 vld1.u8 {d2}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
92 vld1.u8 {d3}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
95 vld1.u8 {d4}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
98 vld1.u8 {d5}, [r3], r2 @src_tmp2 = vld1_u8(pu1_src_tmp);
101 vld1.u8 {d6}, [r3], r2 @src_tmp3 = vld1_u8(pu1_src_tmp);
104 vld1.u8 {d7}, [r3], r2 @src_tmp4 = vld1_u8(pu1_src_tmp);
107 vld1.u8 {d16}, [r3], r2 @src_tmp1 = vld1_u8(pu1_src_tmp);
    [all...]
  /external/libhevc/decoder/
ihevcd_sao.c 243 UWORD8 *pu1_src_tmp = pu1_src_luma; local
260 pu1_src_tmp += MIN((WORD32)CTZ(u4_no_loop_filter_flag), tmp_wd);
270 pu1_src_copy[row * src_strd + col] = pu1_src_tmp[row * tmp_strd + col];
275 pu1_src_tmp += MIN((WORD32)CTZ(~u4_no_loop_filter_flag), tmp_wd);
281 pu1_src_tmp -= sao_wd_luma;
284 pu1_src_tmp += min_cu * src_strd;
323 UWORD8 *pu1_src_tmp = pu1_src_luma; local
338 pu1_src_tmp += MIN((WORD32)CTZ(u4_no_loop_filter_flag), tmp_wd);
348 pu1_src_tmp[row * src_strd + col] = pu1_src_copy[row * tmp_strd + col];
353 pu1_src_tmp += MIN((WORD32)CTZ(~u4_no_loop_filter_flag), tmp_wd);
392 UWORD8 *pu1_src_tmp = pu1_src_chroma; local
477 UWORD8 *pu1_src_tmp = pu1_src_chroma; local
    [all...]
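
The SAO hits treat u4_no_loop_filter_flag as a run-length mask: CTZ of the flag gives the length of the next filterable run to skip, and CTZ of its complement the length of the next protected run to back up. A simplified one-byte-per-bit sketch of that walk (the decoder scales each run by the minimum CU size and copies 2-D blocks, which is omitted here):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative run-length walk. Bit == 1 marks a column whose
     * pre-SAO pixels must be preserved, bit == 0 a column SAO may touch. */
    static void backup_nolf_runs(const uint8_t *pu1_src_tmp,
                                 uint8_t *pu1_src_copy,
                                 uint32_t u4_no_loop_filter_flag, int tmp_wd)
    {
        int pos = 0;
        while (pos < tmp_wd && u4_no_loop_filter_flag != 0)
        {
            /* Skip the run of zero bits (columns that will be filtered). */
            int skip = __builtin_ctz(u4_no_loop_filter_flag);
            if (skip > tmp_wd - pos)
                skip = tmp_wd - pos;
            pos += skip;
            u4_no_loop_filter_flag >>= skip;

            /* Copy the run of one bits (columns to preserve). */
            int keep = (~u4_no_loop_filter_flag != 0)
                           ? __builtin_ctz(~u4_no_loop_filter_flag)
                           : (tmp_wd - pos);
            if (keep > tmp_wd - pos)
                keep = tmp_wd - pos;
            memcpy(pu1_src_copy + pos, pu1_src_tmp + pos, (size_t)keep);
            pos += keep;
            if (keep >= 32)
                break;           /* whole mask consumed; avoid UB shift */
            u4_no_loop_filter_flag >>= keep;
        }
    }
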
  /external/libhevc/encoder/
ihevce_decomp_pre_intra_pass.c 1603 UWORD8 *pu1_src_tmp = pu1_src - 3 * src_strd; local
1700 UWORD8 *pu1_src_tmp = pu1_src + wd_offset + ht_offset * src_strd; local
    [all...]
hme_utils.c 2118 U08 *pu1_src, *pu1_dst, *pu1_src_tmp; local
    [all...]
