/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>
#include <time.h>

#include "libyuv/basic_types.h"
#include "libyuv/compare.h"
#include "libyuv/convert.h"
#include "libyuv/convert_argb.h"
#include "libyuv/convert_from.h"
#include "libyuv/convert_from_argb.h"
#include "libyuv/cpu_id.h"
#ifdef HAVE_JPEG
#include "libyuv/mjpeg_decoder.h"
#endif
#include "../unit_test/unit_test.h"
#include "libyuv/planar_functions.h"
#include "libyuv/rotate.h"
#include "libyuv/video_common.h"

namespace libyuv {

// Ceiling division: size of a plane dimension v after subsampling by a,
// rounding up so odd widths/heights still get a full chroma row/column.
#define SUBSAMPLE(v, a) ((((v) + (a)-1)) / (a))

// Defines one gtest case converting a tri-planar YUV format to another
// tri-planar YUV format.  The conversion is run once with SIMD disabled
// (disable_cpu_flags_, the C reference path) and benchmark_iterations_ times
// with the benchmarked CPU flags, then the two outputs are compared.
// Y must match exactly; U and V may differ by at most 3 (subsample rounding).
// W1280: test width (clamped to >= 1).  N: test-name suffix.  NEG: + or - to
// optionally invert the image via negative height.  OFF: byte offset applied
// to the source pointers to exercise unaligned access.
#define TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,          \
                       FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF)  \
  TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) {              \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                           \
    const int kHeight = benchmark_height_;                                    \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                      \
    align_buffer_page_end(src_u, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) *           \
                                     SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) +      \
                                 OFF);                                        \
    align_buffer_page_end(src_v, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) *           \
                                     SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) +      \
                                 OFF);                                        \
    align_buffer_page_end(dst_y_c, kWidth* kHeight);                          \
    align_buffer_page_end(dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X) *             \
                                       SUBSAMPLE(kHeight, SUBSAMP_Y));        \
    align_buffer_page_end(dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X) *             \
                                       SUBSAMPLE(kHeight, SUBSAMP_Y));        \
    align_buffer_page_end(dst_y_opt, kWidth* kHeight);                        \
    align_buffer_page_end(dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X) *           \
                                         SUBSAMPLE(kHeight, SUBSAMP_Y));      \
    align_buffer_page_end(dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X) *           \
                                         SUBSAMPLE(kHeight, SUBSAMP_Y));      \
    for (int i = 0; i < kHeight; ++i)                                         \
      for (int j = 0; j < kWidth; ++j)                                        \
        src_y[i * kWidth + j + OFF] = (fastrand() & 0xff);                    \
    for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) {             \
      for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) {            \
        src_u[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] =             \
            (fastrand() & 0xff);                                              \
        src_v[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] =             \
            (fastrand() & 0xff);                                              \
      }                                                                       \
    }                                                                         \
    /* Distinct fill values for C vs opt buffers so stale bytes are caught */ \
    memset(dst_y_c, 1, kWidth* kHeight);                                      \
    memset(dst_u_c, 2,                                                        \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    memset(dst_v_c, 3,                                                        \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    memset(dst_y_opt, 101, kWidth* kHeight);                                  \
    memset(dst_u_opt, 102,                                                    \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    memset(dst_v_opt, 103,                                                    \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    MaskCpuFlags(disable_cpu_flags_);                                         \
    SRC_FMT_PLANAR##To##FMT_PLANAR(                                           \
        src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X),   \
        src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), dst_y_c, kWidth,       \
        dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_c,                       \
        SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight);                   \
    MaskCpuFlags(benchmark_cpu_info_);                                        \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      SRC_FMT_PLANAR##To##FMT_PLANAR(                                         \
          src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
          src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), dst_y_opt, kWidth,   \
          dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_opt,                 \
          SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight);                 \
    }                                                                         \
    int max_diff = 0;                                                         \
    for (int i = 0; i < kHeight; ++i) {                                       \
      for (int j = 0; j < kWidth; ++j) {                                      \
        int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) -        \
                           static_cast<int>(dst_y_opt[i * kWidth + j]));      \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_EQ(0, max_diff);                                                   \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                 \
      for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) {                \
        int abs_diff = abs(                                                   \
            static_cast<int>(dst_u_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
            static_cast<int>(                                                 \
                dst_u_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]));            \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_LE(max_diff, 3);                                                   \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                 \
      for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) {                \
        int abs_diff = abs(                                                   \
            static_cast<int>(dst_v_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
            static_cast<int>(                                                 \
                dst_v_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]));            \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_LE(max_diff, 3);                                                   \
    free_aligned_buffer_page_end(dst_y_c);                                    \
    free_aligned_buffer_page_end(dst_u_c);                                    \
    free_aligned_buffer_page_end(dst_v_c);                                    \
    free_aligned_buffer_page_end(dst_y_opt);                                  \
    free_aligned_buffer_page_end(dst_u_opt);                                  \
    free_aligned_buffer_page_end(dst_v_opt);                                  \
    free_aligned_buffer_page_end(src_y);                                      \
    free_aligned_buffer_page_end(src_u);                                      \
    free_aligned_buffer_page_end(src_v);                                      \
  }

// Instantiates four variants of each conversion: _Any (odd width), _Unaligned
// (source offset by 1 byte), _Invert (negative height), and _Opt (aligned).
#define TESTPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,          \
                      FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y)                      \
  TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,   \
                 SUBSAMP_X, SUBSAMP_Y, benchmark_width_ - 4, _Any, +, 0)     \
  TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,   \
                 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Unaligned, +, 1)   \
  TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,   \
                 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Invert, -, 0)      \
  TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,   \
                 SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Opt, +, 0)

TESTPLANARTOP(I420, 2, 2, I420, 2, 2)
TESTPLANARTOP(I422, 2, 1, I420, 2, 2)
TESTPLANARTOP(I444, 1, 1, I420, 2, 2)
TESTPLANARTOP(I420, 2, 2, I422, 2, 1)
TESTPLANARTOP(I420, 2, 2, I444, 1, 1)
TESTPLANARTOP(I420, 2, 2, I420Mirror, 2, 2)
TESTPLANARTOP(I422, 2, 1, I422, 2, 1)
TESTPLANARTOP(I444, 1, 1, I444, 1, 1)

// Test Android 420 to I420.
// Android YUV_420_888 exposes U and V through one shared buffer with a pixel
// stride: PIXEL_STRIDE 1 is fully planar (V plane follows U), PIXEL_STRIDE 2
// is interleaved NV12/NV21 (OFF_U/OFF_V select which channel comes first).
#define TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X,          \
                        SRC_SUBSAMP_Y, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,      \
                        W1280, N, NEG, OFF, PN, OFF_U, OFF_V)                 \
  TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##_##PN##N) {       \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                           \
    const int kHeight = benchmark_height_;                                    \
    const int kSizeUV =                                                       \
        SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                      \
    align_buffer_page_end(src_uv,                                             \
                          kSizeUV*((PIXEL_STRIDE == 3) ? 3 : 2) + OFF);       \
    align_buffer_page_end(dst_y_c, kWidth* kHeight);                          \
    align_buffer_page_end(dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X) *             \
                                       SUBSAMPLE(kHeight, SUBSAMP_Y));        \
    align_buffer_page_end(dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X) *             \
                                       SUBSAMPLE(kHeight, SUBSAMP_Y));        \
    align_buffer_page_end(dst_y_opt, kWidth* kHeight);                        \
    align_buffer_page_end(dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X) *           \
                                         SUBSAMPLE(kHeight, SUBSAMP_Y));      \
    align_buffer_page_end(dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X) *           \
                                         SUBSAMPLE(kHeight, SUBSAMP_Y));      \
    uint8* src_u = src_uv + OFF_U;                                            \
    uint8* src_v = src_uv + (PIXEL_STRIDE == 1 ? kSizeUV : OFF_V);            \
    int src_stride_uv = SUBSAMPLE(kWidth, SUBSAMP_X) * PIXEL_STRIDE;          \
    for (int i = 0; i < kHeight; ++i)                                         \
      for (int j = 0; j < kWidth; ++j)                                        \
        src_y[i * kWidth + j + OFF] = (fastrand() & 0xff);                    \
    for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) {             \
      for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) {            \
        src_u[(i * src_stride_uv) + j * PIXEL_STRIDE + OFF] =                 \
            (fastrand() & 0xff);                                              \
        src_v[(i * src_stride_uv) + j * PIXEL_STRIDE + OFF] =                 \
            (fastrand() & 0xff);                                              \
      }                                                                       \
    }                                                                         \
    memset(dst_y_c, 1, kWidth* kHeight);                                      \
    memset(dst_u_c, 2,                                                        \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    memset(dst_v_c, 3,                                                        \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    memset(dst_y_opt, 101, kWidth* kHeight);                                  \
    memset(dst_u_opt, 102,                                                    \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    memset(dst_v_opt, 103,                                                    \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    MaskCpuFlags(disable_cpu_flags_);                                         \
    SRC_FMT_PLANAR##To##FMT_PLANAR(                                           \
        src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X),   \
        src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), PIXEL_STRIDE, dst_y_c, \
        kWidth, dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_c,               \
        SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight);                   \
    MaskCpuFlags(benchmark_cpu_info_);                                        \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      SRC_FMT_PLANAR##To##FMT_PLANAR(                                         \
          src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
          src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), PIXEL_STRIDE,        \
          dst_y_opt, kWidth, dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X),         \
          dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight);      \
    }                                                                         \
    int max_diff = 0;                                                         \
    for (int i = 0; i < kHeight; ++i) {                                       \
      for (int j = 0; j < kWidth; ++j) {                                      \
        int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) -        \
                           static_cast<int>(dst_y_opt[i * kWidth + j]));      \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_EQ(0, max_diff);                                                   \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                 \
      for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) {                \
        int abs_diff = abs(                                                   \
            static_cast<int>(dst_u_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
            static_cast<int>(                                                 \
                dst_u_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]));            \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_LE(max_diff, 3);                                                   \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                 \
      for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) {                \
        int abs_diff = abs(                                                   \
            static_cast<int>(dst_v_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
            static_cast<int>(                                                 \
                dst_v_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]));            \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_LE(max_diff, 3);                                                   \
    free_aligned_buffer_page_end(dst_y_c);                                    \
    free_aligned_buffer_page_end(dst_u_c);                                    \
    free_aligned_buffer_page_end(dst_v_c);                                    \
    free_aligned_buffer_page_end(dst_y_opt);                                  \
    free_aligned_buffer_page_end(dst_u_opt);                                  \
    free_aligned_buffer_page_end(dst_v_opt);                                  \
    free_aligned_buffer_page_end(src_y);                                      \
    free_aligned_buffer_page_end(src_uv);                                     \
  }

// Instantiates the four standard variants (_Any/_Unaligned/_Invert/_Opt) of
// an Android 420 conversion; PN names the pixel-stride layout (I420/NV12/NV21).
#define TESTAPLANARTOP(SRC_FMT_PLANAR, PN, PIXEL_STRIDE, OFF_U, OFF_V,        \
                       SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR, SUBSAMP_X,   \
                       SUBSAMP_Y)                                             \
  TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                  FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_ - 4,     \
                  _Any, +, 0, PN, OFF_U, OFF_V)                               \
  TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                  FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_,         \
                  _Unaligned, +, 1, PN, OFF_U, OFF_V)                         \
  TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                  FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Invert, \
                  -, 0, PN, OFF_U, OFF_V)                                     \
  TESTAPLANARTOPI(SRC_FMT_PLANAR, PIXEL_STRIDE, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                  FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Opt, +, \
                  0, PN, OFF_U, OFF_V)

TESTAPLANARTOP(Android420, I420, 1, 0, 0, 2, 2, I420, 2, 2)
TESTAPLANARTOP(Android420, NV12, 2, 0, 1, 2, 2, I420, 2, 2)
TESTAPLANARTOP(Android420, NV21, 2, 1, 0, 2, 2, I420, 2, 2)

// Defines one test converting tri-planar YUV to a bi-planar format (separate
// Y plane plus interleaved UV plane, e.g. NV12/NV21).  C vs optimized paths
// are compared; both Y and UV may differ by at most 1.
#define TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,         \
                        FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
  TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) {              \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                           \
    const int kHeight = benchmark_height_;                                    \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                      \
    align_buffer_page_end(src_u, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) *           \
                                     SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) +      \
                                 OFF);                                        \
    align_buffer_page_end(src_v, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) *           \
                                     SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) +      \
                                 OFF);                                        \
    align_buffer_page_end(dst_y_c, kWidth* kHeight);                          \
    align_buffer_page_end(dst_uv_c, SUBSAMPLE(kWidth * 2, SUBSAMP_X) *        \
                                        SUBSAMPLE(kHeight, SUBSAMP_Y));       \
    align_buffer_page_end(dst_y_opt, kWidth* kHeight);                        \
    align_buffer_page_end(dst_uv_opt, SUBSAMPLE(kWidth * 2, SUBSAMP_X) *      \
                                          SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    for (int i = 0; i < kHeight; ++i)                                         \
      for (int j = 0; j < kWidth; ++j)                                        \
        src_y[i * kWidth + j + OFF] = (fastrand() & 0xff);                    \
    for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) {             \
      for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) {            \
        src_u[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] =             \
            (fastrand() & 0xff);                                              \
        src_v[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] =             \
            (fastrand() & 0xff);                                              \
      }                                                                       \
    }                                                                         \
    memset(dst_y_c, 1, kWidth* kHeight);                                      \
    memset(dst_uv_c, 2,                                                       \
           SUBSAMPLE(kWidth * 2, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
    memset(dst_y_opt, 101, kWidth* kHeight);                                  \
    memset(dst_uv_opt, 102,                                                   \
           SUBSAMPLE(kWidth * 2, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
    MaskCpuFlags(disable_cpu_flags_);                                         \
    SRC_FMT_PLANAR##To##FMT_PLANAR(                                           \
        src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X),   \
        src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), dst_y_c, kWidth,       \
        dst_uv_c, SUBSAMPLE(kWidth * 2, SUBSAMP_X), kWidth, NEG kHeight);     \
    MaskCpuFlags(benchmark_cpu_info_);                                        \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      SRC_FMT_PLANAR##To##FMT_PLANAR(                                         \
          src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
          src_v + OFF, SUBSAMPLE(kWidth, SRC_SUBSAMP_X), dst_y_opt, kWidth,   \
          dst_uv_opt, SUBSAMPLE(kWidth * 2, SUBSAMP_X), kWidth, NEG kHeight); \
    }                                                                         \
    int max_diff = 0;                                                         \
    for (int i = 0; i < kHeight; ++i) {                                       \
      for (int j = 0; j < kWidth; ++j) {                                      \
        int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) -        \
                           static_cast<int>(dst_y_opt[i * kWidth + j]));      \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_LE(max_diff, 1);                                                   \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                 \
      for (int j = 0; j < SUBSAMPLE(kWidth * 2, SUBSAMP_X); ++j) {            \
        int abs_diff =                                                        \
            abs(static_cast<int>(                                             \
                    dst_uv_c[i * SUBSAMPLE(kWidth * 2, SUBSAMP_X) + j]) -     \
                static_cast<int>(                                             \
                    dst_uv_opt[i * SUBSAMPLE(kWidth * 2, SUBSAMP_X) + j]));   \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_LE(max_diff, 1);                                                   \
    free_aligned_buffer_page_end(dst_y_c);                                    \
    free_aligned_buffer_page_end(dst_uv_c);                                   \
    free_aligned_buffer_page_end(dst_y_opt);                                  \
    free_aligned_buffer_page_end(dst_uv_opt);                                 \
    free_aligned_buffer_page_end(src_y);                                      \
    free_aligned_buffer_page_end(src_u);                                      \
    free_aligned_buffer_page_end(src_v);                                      \
  }

// Standard four variants of a planar-to-biplanar conversion.
#define TESTPLANARTOBP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,         \
                       FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y)                     \
  TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,  \
                  SUBSAMP_X, SUBSAMP_Y, benchmark_width_ - 4, _Any, +, 0)    \
  TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,  \
                  SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Unaligned, +, 1)  \
  TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,  \
                  SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Invert, -, 0)     \
  TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,  \
                  SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Opt, +, 0)

TESTPLANARTOBP(I420, 2, 2, NV12, 2, 2)
TESTPLANARTOBP(I420, 2, 2, NV21, 2, 2)

// Defines one test converting bi-planar YUV (Y + interleaved UV) to tri-planar
// YUV.  DOY of 0 passes NULL for the destination Y plane (the _NullY variant),
// so only the chroma planes are produced and compared.
#define TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,        \
                         FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF, \
                         DOY)                                                 \
  TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) {              \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                           \
    const int kHeight = benchmark_height_;                                    \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                      \
    align_buffer_page_end(src_uv, 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X) *      \
                                      SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) +     \
                                  OFF);                                       \
    align_buffer_page_end(dst_y_c, kWidth* kHeight);                          \
    align_buffer_page_end(dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X) *             \
                                       SUBSAMPLE(kHeight, SUBSAMP_Y));        \
    align_buffer_page_end(dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X) *             \
                                       SUBSAMPLE(kHeight, SUBSAMP_Y));        \
    align_buffer_page_end(dst_y_opt, kWidth* kHeight);                        \
    align_buffer_page_end(dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X) *           \
                                         SUBSAMPLE(kHeight, SUBSAMP_Y));      \
    align_buffer_page_end(dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X) *           \
                                         SUBSAMPLE(kHeight, SUBSAMP_Y));      \
    for (int i = 0; i < kHeight; ++i)                                         \
      for (int j = 0; j < kWidth; ++j)                                        \
        src_y[i * kWidth + j + OFF] = (fastrand() & 0xff);                    \
    for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) {             \
      for (int j = 0; j < 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) {        \
        src_uv[(i * 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] =        \
            (fastrand() & 0xff);                                              \
      }                                                                       \
    }                                                                         \
    memset(dst_y_c, 1, kWidth* kHeight);                                      \
    memset(dst_u_c, 2,                                                        \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    memset(dst_v_c, 3,                                                        \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    memset(dst_y_opt, 101, kWidth* kHeight);                                  \
    memset(dst_u_opt, 102,                                                    \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    memset(dst_v_opt, 103,                                                    \
           SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y));     \
    MaskCpuFlags(disable_cpu_flags_);                                         \
    SRC_FMT_PLANAR##To##FMT_PLANAR(                                           \
        src_y + OFF, kWidth, src_uv + OFF,                                    \
        2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X), DOY ? dst_y_c : NULL, kWidth,   \
        dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_c,                       \
        SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight);                   \
    MaskCpuFlags(benchmark_cpu_info_);                                        \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      SRC_FMT_PLANAR##To##FMT_PLANAR(                                         \
          src_y + OFF, kWidth, src_uv + OFF,                                  \
          2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X), DOY ? dst_y_opt : NULL,       \
          kWidth, dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), dst_v_opt,         \
          SUBSAMPLE(kWidth, SUBSAMP_X), kWidth, NEG kHeight);                 \
    }                                                                         \
    int max_diff = 0;                                                         \
    if (DOY) {                                                                \
      for (int i = 0; i < kHeight; ++i) {                                     \
        for (int j = 0; j < kWidth; ++j) {                                    \
          int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) -      \
                             static_cast<int>(dst_y_opt[i * kWidth + j]));    \
          if (abs_diff > max_diff) {                                          \
            max_diff = abs_diff;                                              \
          }                                                                   \
        }                                                                     \
      }                                                                       \
      EXPECT_LE(max_diff, 1);                                                 \
    }                                                                         \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                 \
      for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) {                \
        int abs_diff = abs(                                                   \
            static_cast<int>(dst_u_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
            static_cast<int>(                                                 \
                dst_u_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]));            \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_LE(max_diff, 1);                                                   \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                 \
      for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) {                \
        int abs_diff = abs(                                                   \
            static_cast<int>(dst_v_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
            static_cast<int>(                                                 \
                dst_v_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]));            \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_LE(max_diff, 1);                                                   \
    free_aligned_buffer_page_end(dst_y_c);                                    \
    free_aligned_buffer_page_end(dst_u_c);                                    \
    free_aligned_buffer_page_end(dst_v_c);                                    \
    free_aligned_buffer_page_end(dst_y_opt);                                  \
    free_aligned_buffer_page_end(dst_u_opt);                                  \
    free_aligned_buffer_page_end(dst_v_opt);                                  \
    free_aligned_buffer_page_end(src_y);                                      \
    free_aligned_buffer_page_end(src_uv);                                     \
  }

// Standard four variants plus _NullY (DOY == 0: no Y-plane output).
#define TESTBIPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,         \
                        FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y)                     \
  TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,  \
                   SUBSAMP_X, SUBSAMP_Y, benchmark_width_ - 4, _Any, +, 0, 1) \
  TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,  \
                   SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Unaligned, +, 1,  \
                   1)                                                         \
  TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,  \
                   SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Invert, -, 0, 1)  \
  TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,  \
                   SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _Opt, +, 0, 1)     \
  TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, FMT_PLANAR,  \
                   SUBSAMP_X, SUBSAMP_Y, benchmark_width_, _NullY, +, 0, 0)

TESTBIPLANARTOP(NV12, 2, 2, I420, 2, 2)
TESTBIPLANARTOP(NV21, 2, 2, I420, 2, 2)

// Rounds V up to the nearest multiple of ALIGN.
#define ALIGNINT(V, ALIGN) (((V) + (ALIGN)-1) / (ALIGN) * (ALIGN))

// Defines one test converting planar YUV to a packed/RGB format FMT_B.
// Because some destinations (e.g. RGB565) lose precision, both the C and the
// optimized outputs are re-expanded to FMT_C (normally 32-bit ARGB) before
// comparison, and the per-byte difference must not exceed DIFF.
#define TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                       YALIGN, W1280, DIFF, N, NEG, OFF, FMT_C, BPP_C)        \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) {                       \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                           \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                  \
    const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN);                     \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                       \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y);            \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                      \
    align_buffer_page_end(src_u, kSizeUV + OFF);                              \
    align_buffer_page_end(src_v, kSizeUV + OFF);                              \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight + OFF);               \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + OFF);             \
    for (int i = 0; i < kWidth * kHeight; ++i) {                              \
      src_y[i + OFF] = (fastrand() & 0xff);                                   \
    }                                                                         \
    for (int i = 0; i < kSizeUV; ++i) {                                       \
      src_u[i + OFF] = (fastrand() & 0xff);                                   \
      src_v[i + OFF] = (fastrand() & 0xff);                                   \
    }                                                                         \
    memset(dst_argb_c + OFF, 1, kStrideB * kHeight);                          \
    memset(dst_argb_opt + OFF, 101, kStrideB * kHeight);                      \
    MaskCpuFlags(disable_cpu_flags_);                                         \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV,        \
                          src_v + OFF, kStrideUV, dst_argb_c + OFF, kStrideB, \
                          kWidth, NEG kHeight);                               \
    MaskCpuFlags(benchmark_cpu_info_);                                        \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV,      \
                            src_v + OFF, kStrideUV, dst_argb_opt + OFF,       \
                            kStrideB, kWidth, NEG kHeight);                   \
    }                                                                         \
    int max_diff = 0;                                                         \
    /* Convert to ARGB so 565 is expanded to bytes that can be compared. */   \
    align_buffer_page_end(dst_argb32_c, kWidth* BPP_C* kHeight);              \
    align_buffer_page_end(dst_argb32_opt, kWidth* BPP_C* kHeight);            \
    memset(dst_argb32_c, 2, kWidth* BPP_C* kHeight);                          \
    memset(dst_argb32_opt, 102, kWidth* BPP_C* kHeight);                      \
    FMT_B##To##FMT_C(dst_argb_c + OFF, kStrideB, dst_argb32_c,                \
                     kWidth * BPP_C, kWidth, kHeight);                        \
    FMT_B##To##FMT_C(dst_argb_opt + OFF, kStrideB, dst_argb32_opt,            \
                     kWidth * BPP_C, kWidth, kHeight);                        \
    for (int i = 0; i < kWidth * BPP_C * kHeight; ++i) {                      \
      int abs_diff = abs(static_cast<int>(dst_argb32_c[i]) -                  \
                         static_cast<int>(dst_argb32_opt[i]));                \
      if (abs_diff > max_diff) {                                              \
        max_diff = abs_diff;                                                  \
      }                                                                       \
    }                                                                         \
    EXPECT_LE(max_diff, DIFF);                                                \
    free_aligned_buffer_page_end(src_y);                                      \
    free_aligned_buffer_page_end(src_u);                                      \
    free_aligned_buffer_page_end(src_v);                                      \
    free_aligned_buffer_page_end(dst_argb_c);                                 \
    free_aligned_buffer_page_end(dst_argb_opt);                               \
    free_aligned_buffer_page_end(dst_argb32_c);                               \
    free_aligned_buffer_page_end(dst_argb32_opt);                             \
  }

// Standard four variants of a planar-to-packed conversion.
#define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,   \
                      YALIGN, DIFF, FMT_C, BPP_C)                              \
  TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,        \
                 YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, FMT_C, BPP_C) \
  TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,        \
                 YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, FMT_C,      \
                 BPP_C)                                                        \
  TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,        \
                 YALIGN, benchmark_width_, DIFF, _Invert, -, 0, FMT_C, BPP_C)  \
  TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,        \
                 YALIGN, benchmark_width_, DIFF, _Opt, +, 0, FMT_C, BPP_C)

TESTPLANARTOB(I420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(J420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(J420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(H420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(H420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BGRA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGBA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RAW, 3, 3, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGB24, 3, 3, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGB565, 2, 2, 1, 9, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ARGB1555, 2, 2, 1, 9, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ARGB4444, 2, 2, 1, 17, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, RGB565, 2, 2, 1, 9, ARGB, 4)
TESTPLANARTOB(J422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(J422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(H422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(H422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, BGRA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, RGBA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I444, 1, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(J444, 1, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I444, 1, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, YUY2, 2, 4, 1, 1, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, UYVY, 2, 4, 1, 1, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, YUY2, 2, 4, 1, 0, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, UYVY, 2, 4, 1, 0, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, I400, 1, 1, 1, 0, ARGB, 4)
TESTPLANARTOB(J420, 2, 2, J400, 1, 1, 1, 0, ARGB, 4)

// Defines one test converting planar YUV plus a full-size alpha plane
// ("quad planar", e.g. I420Alpha) to a packed format.  ATTEN of 1 asks the
// converter to premultiply by alpha (the _Premult variant).
#define TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                        YALIGN, W1280, DIFF, N, NEG, OFF, ATTEN)               \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) {                        \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                            \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                   \
    const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN);                      \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                        \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y);             \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(src_u, kSizeUV + OFF);                               \
    align_buffer_page_end(src_v, kSizeUV + OFF);                               \
    align_buffer_page_end(src_a, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight + OFF);                \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + OFF);              \
    for (int i = 0; i < kWidth * kHeight; ++i) {                               \
      src_y[i + OFF] = (fastrand() & 0xff);                                    \
      src_a[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    for (int i = 0; i < kSizeUV; ++i) {                                        \
      src_u[i + OFF] = (fastrand() & 0xff);                                    \
      src_v[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    memset(dst_argb_c + OFF, 1, kStrideB * kHeight);                           \
    memset(dst_argb_opt + OFF, 101, kStrideB * kHeight);                       \
    MaskCpuFlags(disable_cpu_flags_);                                          \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV,         \
                          src_v + OFF, kStrideUV, src_a + OFF, kWidth,         \
                          dst_argb_c + OFF, kStrideB, kWidth, NEG kHeight,     \
                          ATTEN);                                              \
    MaskCpuFlags(benchmark_cpu_info_);                                         \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV,       \
                            src_v + OFF, kStrideUV, src_a + OFF, kWidth,       \
                            dst_argb_opt + OFF, kStrideB, kWidth, NEG kHeight, \
                            ATTEN);                                            \
    }                                                                          \
    int max_diff = 0;                                                          \
    for (int i = 0; i < kWidth * BPP_B * kHeight; ++i) {                       \
      int abs_diff = abs(static_cast<int>(dst_argb_c[i + OFF]) -               \
                         static_cast<int>(dst_argb_opt[i + OFF]));             \
      if (abs_diff > max_diff) {                                               \
        max_diff = abs_diff;                                                   \
      }                                                                        \
    }                                                                          \
    EXPECT_LE(max_diff, DIFF);                                                 \
    free_aligned_buffer_page_end(src_y);                                       \
    free_aligned_buffer_page_end(src_u);                                       \
    free_aligned_buffer_page_end(src_v);                                       \
    free_aligned_buffer_page_end(src_a);                                       \
    free_aligned_buffer_page_end(dst_argb_c);                                  \
    free_aligned_buffer_page_end(dst_argb_opt);                                \
  }

// Standard four variants plus _Premult (alpha attenuation enabled).
#define TESTQPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                       YALIGN, DIFF)                                          \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                  YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, 0)          \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                  YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, 0)        \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                  YALIGN, benchmark_width_, DIFF, _Invert, -, 0, 0)           \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                  YALIGN, benchmark_width_, DIFF, _Opt, +, 0, 0)              \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                  YALIGN, benchmark_width_, DIFF, _Premult, +, 0, 1)

TESTQPLANARTOB(I420Alpha, 2, 2, ARGB, 4, 4, 1, 2)
TESTQPLANARTOB(I420Alpha, 2, 2, ABGR, 4, 4, 1, 2)

// Defines one test converting bi-planar YUV (e.g. NV12/NV21) to a packed
// format.  Outputs are expanded to 32-bit ARGB before comparison so lossy
// destinations such as RGB565 can be checked byte-by-byte against DIFF.
#define TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,      \
                         W1280, DIFF, N, NEG, OFF)                            \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) {                       \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                           \
    const int kHeight = benchmark_height_;                                    \
    const int kStrideB = kWidth * BPP_B;                                      \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                       \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                      \
    align_buffer_page_end(src_uv,                                             \
                          kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y) * 2 + OFF); \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight);                     \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight);                   \
    for (int i = 0; i < kHeight; ++i)                                         \
      for (int j = 0; j < kWidth; ++j)                                        \
        src_y[i * kWidth + j + OFF] = (fastrand() & 0xff);                    \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                 \
      for (int j = 0; j < kStrideUV * 2; ++j) {                               \
        src_uv[i * kStrideUV * 2 + j + OFF] = (fastrand() & 0xff);            \
      }                                                                       \
    }                                                                         \
    memset(dst_argb_c, 1, kStrideB* kHeight);                                 \
    memset(dst_argb_opt, 101, kStrideB* kHeight);                             \
    MaskCpuFlags(disable_cpu_flags_);                                         \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_uv + OFF, kStrideUV * 2,   \
                          dst_argb_c, kWidth * BPP_B, kWidth, NEG kHeight);   \
    MaskCpuFlags(benchmark_cpu_info_);                                        \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_uv + OFF, kStrideUV * 2, \
                            dst_argb_opt, kWidth * BPP_B, kWidth,             \
                            NEG kHeight);                                     \
    }                                                                         \
    /* Convert to ARGB so 565 is expanded to bytes that can be compared. */   \
    align_buffer_page_end(dst_argb32_c, kWidth * 4 * kHeight);                \
    align_buffer_page_end(dst_argb32_opt, kWidth * 4 * kHeight);              \
    memset(dst_argb32_c, 2, kWidth * 4 * kHeight);                            \
    memset(dst_argb32_opt, 102, kWidth * 4 * kHeight);                        \
    FMT_B##ToARGB(dst_argb_c, kStrideB, dst_argb32_c, kWidth * 4, kWidth,     \
                  kHeight);                                                   \
    FMT_B##ToARGB(dst_argb_opt, kStrideB, dst_argb32_opt, kWidth * 4, kWidth, \
                  kHeight);                                                   \
    int max_diff = 0;                                                         \
    for (int i = 0; i < kHeight; ++i) {                                       \
      for (int j = 0; j < kWidth * 4; ++j) {                                  \
        int abs_diff =                                                        \
            abs(static_cast<int>(dst_argb32_c[i * kWidth * 4 + j]) -          \
                static_cast<int>(dst_argb32_opt[i * kWidth * 4 + j]));        \
        if (abs_diff > max_diff) {                                            \
          max_diff = abs_diff;                                                \
        }                                                                     \
      }                                                                       \
    }                                                                         \
    EXPECT_LE(max_diff, DIFF);                                                \
    free_aligned_buffer_page_end(src_y);                                      \
    free_aligned_buffer_page_end(src_uv);                                     \
    free_aligned_buffer_page_end(dst_argb_c);                                 \
    free_aligned_buffer_page_end(dst_argb_opt);                               \
    free_aligned_buffer_page_end(dst_argb32_c);                               \
    free_aligned_buffer_page_end(dst_argb32_opt);                             \
  }

// Standard four variants of a biplanar-to-packed conversion.
#define TESTBIPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, DIFF) \
  TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,            \
                   benchmark_width_ - 4, DIFF, _Any, +, 0)                    \
  TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,            \
                   benchmark_width_, DIFF, _Unaligned, +, 1)                  \
  TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,            \
                   benchmark_width_, DIFF, _Invert, -, 0)                     \
  TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,            \
                   benchmark_width_, DIFF, _Opt, +, 0)

TESTBIPLANARTOB(NV12, 2, 2, ARGB, 4, 2)
TESTBIPLANARTOB(NV21, 2, 2, ARGB, 4, 2)
TESTBIPLANARTOB(NV12, 2, 2, ABGR, 4, 2)
TESTBIPLANARTOB(NV21, 2, 2, ABGR, 4, 2)
TESTBIPLANARTOB(NV12, 2, 2, RGB565, 2, 9)

#ifdef DO_THREE_PLANES
// Do 3 allocations for yuv.  conventional but slower.
// Generates a gtest that converts a packed format FMT_A (BPP_A bytes/pixel,
// rows aligned to YALIGN) into planar FMT_PLANAR, running the conversion once
// with SIMD disabled (C reference) and benchmark_iterations_ times with SIMD
// enabled, then requiring every Y/U/V byte to agree within DIFF.
// This variant allocates three separate planes (conventional I420 layout).
#define TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                       W1280, DIFF, N, NEG, OFF)                               \
  TEST_F(LibYUVConvertTest, FMT_A##To##FMT_PLANAR##N) {                        \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                            \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                   \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                        \
    const int kStride = (kStrideUV * SUBSAMP_X * 8 * BPP_A + 7) / 8;           \
    align_buffer_page_end(src_argb, kStride* kHeight + OFF);                   \
    align_buffer_page_end(dst_y_c, kWidth* kHeight);                           \
    align_buffer_page_end(dst_u_c, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y));  \
    align_buffer_page_end(dst_v_c, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y));  \
    align_buffer_page_end(dst_y_opt, kWidth* kHeight);                         \
    align_buffer_page_end(dst_u_opt,                                           \
                          kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y));           \
    align_buffer_page_end(dst_v_opt,                                           \
                          kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y));           \
    memset(dst_y_c, 1, kWidth* kHeight);                                       \
    memset(dst_u_c, 2, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y));              \
    memset(dst_v_c, 3, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y));              \
    memset(dst_y_opt, 101, kWidth* kHeight);                                   \
    memset(dst_u_opt, 102, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y));          \
    memset(dst_v_opt, 103, kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y));          \
    for (int i = 0; i < kHeight; ++i)                                          \
      for (int j = 0; j < kStride; ++j)                                        \
        src_argb[(i * kStride) + j + OFF] = (fastrand() & 0xff);               \
    MaskCpuFlags(disable_cpu_flags_);                                          \
    FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_c, kWidth, dst_u_c,   \
                          kStrideUV, dst_v_c, kStrideUV, kWidth, NEG kHeight); \
    MaskCpuFlags(benchmark_cpu_info_);                                         \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_opt, kWidth,       \
                            dst_u_opt, kStrideUV, dst_v_opt, kStrideUV,        \
                            kWidth, NEG kHeight);                              \
    }                                                                          \
    for (int i = 0; i < kHeight; ++i) {                                        \
      for (int j = 0; j < kWidth; ++j) {                                       \
        EXPECT_NEAR(static_cast<int>(dst_y_c[i * kWidth + j]),                 \
                    static_cast<int>(dst_y_opt[i * kWidth + j]), DIFF);        \
      }                                                                        \
    }                                                                          \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                  \
      for (int j = 0; j < kStrideUV; ++j) {                                    \
        EXPECT_NEAR(static_cast<int>(dst_u_c[i * kStrideUV + j]),              \
                    static_cast<int>(dst_u_opt[i * kStrideUV + j]), DIFF);     \
      }                                                                        \
    }                                                                          \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                  \
      for (int j = 0; j < kStrideUV; ++j) {                                    \
        EXPECT_NEAR(static_cast<int>(dst_v_c[i * kStrideUV + j]),              \
                    static_cast<int>(dst_v_opt[i * kStrideUV + j]), DIFF);     \
      }                                                                        \
    }                                                                          \
    free_aligned_buffer_page_end(dst_y_c);                                     \
    free_aligned_buffer_page_end(dst_u_c);                                     \
    free_aligned_buffer_page_end(dst_v_c);                                     \
    free_aligned_buffer_page_end(dst_y_opt);                                   \
    free_aligned_buffer_page_end(dst_u_opt);                                   \
    free_aligned_buffer_page_end(dst_v_opt);                                   \
    free_aligned_buffer_page_end(src_argb);                                    \
  }
#else
// Default variant: U and V share a single buffer, with U rows at even
// row offsets and V rows starting kStrideUV bytes in, both on a 2*kStrideUV
// stride. Behaviorally equivalent to the three-plane variant above but uses
// one allocation for chroma.
#define TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                       W1280, DIFF, N, NEG, OFF)                               \
  TEST_F(LibYUVConvertTest, FMT_A##To##FMT_PLANAR##N) {                        \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                            \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                   \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                        \
    const int kStride = (kStrideUV * SUBSAMP_X * 8 * BPP_A + 7) / 8;           \
    align_buffer_page_end(src_argb, kStride* kHeight + OFF);                   \
    align_buffer_page_end(dst_y_c, kWidth* kHeight);                           \
    align_buffer_page_end(dst_uv_c,                                            \
                          kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y));      \
    align_buffer_page_end(dst_y_opt, kWidth* kHeight);                         \
    align_buffer_page_end(dst_uv_opt,                                          \
                          kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y));      \
    memset(dst_y_c, 1, kWidth* kHeight);                                       \
    memset(dst_uv_c, 2, kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y));        \
    memset(dst_y_opt, 101, kWidth* kHeight);                                   \
    memset(dst_uv_opt, 102, kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y));    \
    for (int i = 0; i < kHeight; ++i)                                          \
      for (int j = 0; j < kStride; ++j)                                        \
        src_argb[(i * kStride) + j + OFF] = (fastrand() & 0xff);               \
    MaskCpuFlags(disable_cpu_flags_);                                          \
    FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_c, kWidth, dst_uv_c,  \
                          kStrideUV * 2, dst_uv_c + kStrideUV, kStrideUV * 2,  \
                          kWidth, NEG kHeight);                                \
    MaskCpuFlags(benchmark_cpu_info_);                                         \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_opt, kWidth,        \
                            dst_uv_opt, kStrideUV * 2, dst_uv_opt + kStrideUV, \
                            kStrideUV * 2, kWidth, NEG kHeight);               \
    }                                                                          \
    for (int i = 0; i < kHeight; ++i) {                                        \
      for (int j = 0; j < kWidth; ++j) {                                       \
        EXPECT_NEAR(static_cast<int>(dst_y_c[i * kWidth + j]),                 \
                    static_cast<int>(dst_y_opt[i * kWidth + j]), DIFF);        \
      }                                                                        \
    }                                                                          \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y) * 2; ++i) {              \
      for (int j = 0; j < kStrideUV; ++j) {                                    \
        EXPECT_NEAR(static_cast<int>(dst_uv_c[i * kStrideUV + j]),             \
                    static_cast<int>(dst_uv_opt[i * kStrideUV + j]), DIFF);    \
      }                                                                        \
    }                                                                          \
    free_aligned_buffer_page_end(dst_y_c);                                     \
    free_aligned_buffer_page_end(dst_uv_c);                                    \
    free_aligned_buffer_page_end(dst_y_opt);                                   \
    free_aligned_buffer_page_end(dst_uv_opt);                                  \
    free_aligned_buffer_page_end(src_argb);                                    \
  }
#endif

// Instantiates four variants of the test above: _Any (odd width), _Unaligned
// (1-byte offset source), _Invert (negative height), and _Opt (aligned,
// benchmark-sized).
#define TESTATOPLANAR(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                      DIFF)                                                   \
  TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,      \
                 benchmark_width_ - 4, DIFF, _Any, +, 0)                      \
  TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,      \
                 benchmark_width_, DIFF, _Unaligned, +, 1)                    \
  TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,      \
                 benchmark_width_, DIFF, _Invert, -, 0)                       \
  TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y,      \
                 benchmark_width_, DIFF, _Opt, +, 0)

TESTATOPLANAR(ARGB, 4, 1, I420, 2, 2, 4)
#if defined(__arm__) || defined(__aarch64__)
// arm version subsamples by summing 4 pixels then multiplying by matrix with
// 4x smaller coefficients which are rounded to nearest integer.
TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 4)
TESTATOPLANAR(ARGB, 4, 1, J422, 2, 1, 4)
#else
TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 0)
TESTATOPLANAR(ARGB, 4, 1, J422, 2, 1, 0)
#endif
TESTATOPLANAR(BGRA, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(ABGR, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGBA, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(RAW, 3, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGB24, 3, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGB565, 2, 1, I420, 2, 2, 5)
// TODO(fbarchard): Make 1555 neon work same as C code, reduce to diff 9.
884 TESTATOPLANAR(ARGB1555, 2, 1, I420, 2, 2, 15) 885 TESTATOPLANAR(ARGB4444, 2, 1, I420, 2, 2, 17) 886 TESTATOPLANAR(ARGB, 4, 1, I422, 2, 1, 2) 887 TESTATOPLANAR(ARGB, 4, 1, I444, 1, 1, 2) 888 TESTATOPLANAR(YUY2, 2, 1, I420, 2, 2, 2) 889 TESTATOPLANAR(UYVY, 2, 1, I420, 2, 2, 2) 890 TESTATOPLANAR(YUY2, 2, 1, I422, 2, 1, 2) 891 TESTATOPLANAR(UYVY, 2, 1, I422, 2, 1, 2) 892 TESTATOPLANAR(I400, 1, 1, I420, 2, 2, 2) 893 TESTATOPLANAR(J400, 1, 1, J420, 2, 2, 2) 894 895 #define TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, \ 896 SUBSAMP_Y, W1280, N, NEG, OFF) \ 897 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_PLANAR##N) { \ 898 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \ 899 const int kHeight = benchmark_height_; \ 900 const int kStride = SUBSAMPLE(kWidth, SUB_A) * BPP_A; \ 901 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \ 902 align_buffer_page_end(src_argb, kStride* kHeight + OFF); \ 903 align_buffer_page_end(dst_y_c, kWidth* kHeight); \ 904 align_buffer_page_end(dst_uv_c, \ 905 kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \ 906 align_buffer_page_end(dst_y_opt, kWidth* kHeight); \ 907 align_buffer_page_end(dst_uv_opt, \ 908 kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \ 909 for (int i = 0; i < kHeight; ++i) \ 910 for (int j = 0; j < kStride; ++j) \ 911 src_argb[(i * kStride) + j + OFF] = (fastrand() & 0xff); \ 912 memset(dst_y_c, 1, kWidth* kHeight); \ 913 memset(dst_uv_c, 2, kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \ 914 memset(dst_y_opt, 101, kWidth* kHeight); \ 915 memset(dst_uv_opt, 102, kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \ 916 MaskCpuFlags(disable_cpu_flags_); \ 917 FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_c, kWidth, dst_uv_c, \ 918 kStrideUV * 2, kWidth, NEG kHeight); \ 919 MaskCpuFlags(benchmark_cpu_info_); \ 920 for (int i = 0; i < benchmark_iterations_; ++i) { \ 921 FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, dst_y_opt, kWidth, \ 922 dst_uv_opt, kStrideUV * 2, kWidth, NEG kHeight); \ 923 } \ 924 
int max_diff = 0; \ 925 for (int i = 0; i < kHeight; ++i) { \ 926 for (int j = 0; j < kWidth; ++j) { \ 927 int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \ 928 static_cast<int>(dst_y_opt[i * kWidth + j])); \ 929 if (abs_diff > max_diff) { \ 930 max_diff = abs_diff; \ 931 } \ 932 } \ 933 } \ 934 EXPECT_LE(max_diff, 4); \ 935 for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \ 936 for (int j = 0; j < kStrideUV * 2; ++j) { \ 937 int abs_diff = \ 938 abs(static_cast<int>(dst_uv_c[i * kStrideUV * 2 + j]) - \ 939 static_cast<int>(dst_uv_opt[i * kStrideUV * 2 + j])); \ 940 if (abs_diff > max_diff) { \ 941 max_diff = abs_diff; \ 942 } \ 943 } \ 944 } \ 945 EXPECT_LE(max_diff, 4); \ 946 free_aligned_buffer_page_end(dst_y_c); \ 947 free_aligned_buffer_page_end(dst_uv_c); \ 948 free_aligned_buffer_page_end(dst_y_opt); \ 949 free_aligned_buffer_page_end(dst_uv_opt); \ 950 free_aligned_buffer_page_end(src_argb); \ 951 } 952 953 #define TESTATOBIPLANAR(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \ 954 TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ 955 benchmark_width_ - 4, _Any, +, 0) \ 956 TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ 957 benchmark_width_, _Unaligned, +, 1) \ 958 TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ 959 benchmark_width_, _Invert, -, 0) \ 960 TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \ 961 benchmark_width_, _Opt, +, 0) 962 963 TESTATOBIPLANAR(ARGB, 1, 4, NV12, 2, 2) 964 TESTATOBIPLANAR(ARGB, 1, 4, NV21, 2, 2) 965 TESTATOBIPLANAR(YUY2, 2, 4, NV12, 2, 2) 966 TESTATOBIPLANAR(UYVY, 2, 4, NV12, 2, 2) 967 968 #define TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 969 HEIGHT_B, W1280, DIFF, N, NEG, OFF) \ 970 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##N) { \ 971 const int kWidth = ((W1280) > 0) ? 
(W1280) : 1; \ 972 const int kHeight = benchmark_height_; \ 973 const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \ 974 const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \ 975 const int kStrideA = \ 976 (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \ 977 const int kStrideB = \ 978 (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \ 979 align_buffer_page_end(src_argb, kStrideA* kHeightA + OFF); \ 980 align_buffer_page_end(dst_argb_c, kStrideB* kHeightB); \ 981 align_buffer_page_end(dst_argb_opt, kStrideB* kHeightB); \ 982 for (int i = 0; i < kStrideA * kHeightA; ++i) { \ 983 src_argb[i + OFF] = (fastrand() & 0xff); \ 984 } \ 985 memset(dst_argb_c, 1, kStrideB* kHeightB); \ 986 memset(dst_argb_opt, 101, kStrideB* kHeightB); \ 987 MaskCpuFlags(disable_cpu_flags_); \ 988 FMT_A##To##FMT_B(src_argb + OFF, kStrideA, dst_argb_c, kStrideB, kWidth, \ 989 NEG kHeight); \ 990 MaskCpuFlags(benchmark_cpu_info_); \ 991 for (int i = 0; i < benchmark_iterations_; ++i) { \ 992 FMT_A##To##FMT_B(src_argb + OFF, kStrideA, dst_argb_opt, kStrideB, \ 993 kWidth, NEG kHeight); \ 994 } \ 995 int max_diff = 0; \ 996 for (int i = 0; i < kStrideB * kHeightB; ++i) { \ 997 int abs_diff = abs(static_cast<int>(dst_argb_c[i]) - \ 998 static_cast<int>(dst_argb_opt[i])); \ 999 if (abs_diff > max_diff) { \ 1000 max_diff = abs_diff; \ 1001 } \ 1002 } \ 1003 EXPECT_LE(max_diff, DIFF); \ 1004 free_aligned_buffer_page_end(src_argb); \ 1005 free_aligned_buffer_page_end(dst_argb_c); \ 1006 free_aligned_buffer_page_end(dst_argb_opt); \ 1007 } 1008 1009 #define TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, \ 1010 STRIDE_B, HEIGHT_B, DIFF) \ 1011 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##_Random) { \ 1012 for (int times = 0; times < benchmark_iterations_; ++times) { \ 1013 const int kWidth = (fastrand() & 63) + 1; \ 1014 const int kHeight = (fastrand() & 31) + 1; \ 1015 const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \ 
1016 const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \ 1017 const int kStrideA = \ 1018 (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \ 1019 const int kStrideB = \ 1020 (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \ 1021 align_buffer_page_end(src_argb, kStrideA* kHeightA); \ 1022 align_buffer_page_end(dst_argb_c, kStrideB* kHeightB); \ 1023 align_buffer_page_end(dst_argb_opt, kStrideB* kHeightB); \ 1024 for (int i = 0; i < kStrideA * kHeightA; ++i) { \ 1025 src_argb[i] = (fastrand() & 0xff); \ 1026 } \ 1027 memset(dst_argb_c, 123, kStrideB* kHeightB); \ 1028 memset(dst_argb_opt, 123, kStrideB* kHeightB); \ 1029 MaskCpuFlags(disable_cpu_flags_); \ 1030 FMT_A##To##FMT_B(src_argb, kStrideA, dst_argb_c, kStrideB, kWidth, \ 1031 kHeight); \ 1032 MaskCpuFlags(benchmark_cpu_info_); \ 1033 FMT_A##To##FMT_B(src_argb, kStrideA, dst_argb_opt, kStrideB, kWidth, \ 1034 kHeight); \ 1035 int max_diff = 0; \ 1036 for (int i = 0; i < kStrideB * kHeightB; ++i) { \ 1037 int abs_diff = abs(static_cast<int>(dst_argb_c[i]) - \ 1038 static_cast<int>(dst_argb_opt[i])); \ 1039 if (abs_diff > max_diff) { \ 1040 max_diff = abs_diff; \ 1041 } \ 1042 } \ 1043 EXPECT_LE(max_diff, DIFF); \ 1044 free_aligned_buffer_page_end(src_argb); \ 1045 free_aligned_buffer_page_end(dst_argb_c); \ 1046 free_aligned_buffer_page_end(dst_argb_opt); \ 1047 } \ 1048 } 1049 1050 #define TESTATOB(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1051 HEIGHT_B, DIFF) \ 1052 TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1053 HEIGHT_B, benchmark_width_ - 4, DIFF, _Any, +, 0) \ 1054 TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1055 HEIGHT_B, benchmark_width_, DIFF, _Unaligned, +, 1) \ 1056 TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1057 HEIGHT_B, benchmark_width_, DIFF, _Invert, -, 0) \ 1058 TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1059 HEIGHT_B, benchmark_width_, 
DIFF, _Opt, +, 0) \ 1060 TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1061 HEIGHT_B, DIFF) 1062 1063 TESTATOB(ARGB, 4, 4, 1, ARGB, 4, 4, 1, 0) 1064 TESTATOB(ARGB, 4, 4, 1, BGRA, 4, 4, 1, 0) 1065 TESTATOB(ARGB, 4, 4, 1, ABGR, 4, 4, 1, 0) 1066 TESTATOB(ARGB, 4, 4, 1, RGBA, 4, 4, 1, 0) 1067 TESTATOB(ARGB, 4, 4, 1, RAW, 3, 3, 1, 0) 1068 TESTATOB(ARGB, 4, 4, 1, RGB24, 3, 3, 1, 0) 1069 TESTATOB(ARGB, 4, 4, 1, RGB565, 2, 2, 1, 0) 1070 TESTATOB(ARGB, 4, 4, 1, ARGB1555, 2, 2, 1, 0) 1071 TESTATOB(ARGB, 4, 4, 1, ARGB4444, 2, 2, 1, 0) 1072 TESTATOB(ARGB, 4, 4, 1, YUY2, 2, 4, 1, 4) 1073 TESTATOB(ARGB, 4, 4, 1, UYVY, 2, 4, 1, 4) 1074 TESTATOB(ARGB, 4, 4, 1, I400, 1, 1, 1, 2) 1075 TESTATOB(ARGB, 4, 4, 1, J400, 1, 1, 1, 2) 1076 TESTATOB(BGRA, 4, 4, 1, ARGB, 4, 4, 1, 0) 1077 TESTATOB(ABGR, 4, 4, 1, ARGB, 4, 4, 1, 0) 1078 TESTATOB(RGBA, 4, 4, 1, ARGB, 4, 4, 1, 0) 1079 TESTATOB(RAW, 3, 3, 1, ARGB, 4, 4, 1, 0) 1080 TESTATOB(RAW, 3, 3, 1, RGB24, 3, 3, 1, 0) 1081 TESTATOB(RGB24, 3, 3, 1, ARGB, 4, 4, 1, 0) 1082 TESTATOB(RGB565, 2, 2, 1, ARGB, 4, 4, 1, 0) 1083 TESTATOB(ARGB1555, 2, 2, 1, ARGB, 4, 4, 1, 0) 1084 TESTATOB(ARGB4444, 2, 2, 1, ARGB, 4, 4, 1, 0) 1085 TESTATOB(YUY2, 2, 4, 1, ARGB, 4, 4, 1, 4) 1086 TESTATOB(UYVY, 2, 4, 1, ARGB, 4, 4, 1, 4) 1087 TESTATOB(YUY2, 2, 4, 1, Y, 1, 1, 1, 0) 1088 TESTATOB(I400, 1, 1, 1, ARGB, 4, 4, 1, 0) 1089 TESTATOB(J400, 1, 1, 1, ARGB, 4, 4, 1, 0) 1090 TESTATOB(I400, 1, 1, 1, I400, 1, 1, 1, 0) 1091 TESTATOB(J400, 1, 1, 1, J400, 1, 1, 1, 0) 1092 TESTATOB(I400, 1, 1, 1, I400Mirror, 1, 1, 1, 0) 1093 TESTATOB(ARGB, 4, 4, 1, ARGBMirror, 4, 4, 1, 0) 1094 1095 #define TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1096 HEIGHT_B, W1280, DIFF, N, NEG, OFF) \ 1097 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##Dither##N) { \ 1098 const int kWidth = ((W1280) > 0) ? 
(W1280) : 1; \ 1099 const int kHeight = benchmark_height_; \ 1100 const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \ 1101 const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \ 1102 const int kStrideA = \ 1103 (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \ 1104 const int kStrideB = \ 1105 (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \ 1106 align_buffer_page_end(src_argb, kStrideA* kHeightA + OFF); \ 1107 align_buffer_page_end(dst_argb_c, kStrideB* kHeightB); \ 1108 align_buffer_page_end(dst_argb_opt, kStrideB* kHeightB); \ 1109 for (int i = 0; i < kStrideA * kHeightA; ++i) { \ 1110 src_argb[i + OFF] = (fastrand() & 0xff); \ 1111 } \ 1112 memset(dst_argb_c, 1, kStrideB* kHeightB); \ 1113 memset(dst_argb_opt, 101, kStrideB* kHeightB); \ 1114 MaskCpuFlags(disable_cpu_flags_); \ 1115 FMT_A##To##FMT_B##Dither(src_argb + OFF, kStrideA, dst_argb_c, kStrideB, \ 1116 NULL, kWidth, NEG kHeight); \ 1117 MaskCpuFlags(benchmark_cpu_info_); \ 1118 for (int i = 0; i < benchmark_iterations_; ++i) { \ 1119 FMT_A##To##FMT_B##Dither(src_argb + OFF, kStrideA, dst_argb_opt, \ 1120 kStrideB, NULL, kWidth, NEG kHeight); \ 1121 } \ 1122 int max_diff = 0; \ 1123 for (int i = 0; i < kStrideB * kHeightB; ++i) { \ 1124 int abs_diff = abs(static_cast<int>(dst_argb_c[i]) - \ 1125 static_cast<int>(dst_argb_opt[i])); \ 1126 if (abs_diff > max_diff) { \ 1127 max_diff = abs_diff; \ 1128 } \ 1129 } \ 1130 EXPECT_LE(max_diff, DIFF); \ 1131 free_aligned_buffer_page_end(src_argb); \ 1132 free_aligned_buffer_page_end(dst_argb_c); \ 1133 free_aligned_buffer_page_end(dst_argb_opt); \ 1134 } 1135 1136 #define TESTATOBDRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, \ 1137 STRIDE_B, HEIGHT_B, DIFF) \ 1138 TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##Dither_Random) { \ 1139 for (int times = 0; times < benchmark_iterations_; ++times) { \ 1140 const int kWidth = (fastrand() & 63) + 1; \ 1141 const int kHeight = (fastrand() & 31) + 1; \ 1142 const 
int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \ 1143 const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \ 1144 const int kStrideA = \ 1145 (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \ 1146 const int kStrideB = \ 1147 (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \ 1148 align_buffer_page_end(src_argb, kStrideA* kHeightA); \ 1149 align_buffer_page_end(dst_argb_c, kStrideB* kHeightB); \ 1150 align_buffer_page_end(dst_argb_opt, kStrideB* kHeightB); \ 1151 for (int i = 0; i < kStrideA * kHeightA; ++i) { \ 1152 src_argb[i] = (fastrand() & 0xff); \ 1153 } \ 1154 memset(dst_argb_c, 123, kStrideB* kHeightB); \ 1155 memset(dst_argb_opt, 123, kStrideB* kHeightB); \ 1156 MaskCpuFlags(disable_cpu_flags_); \ 1157 FMT_A##To##FMT_B##Dither(src_argb, kStrideA, dst_argb_c, kStrideB, NULL, \ 1158 kWidth, kHeight); \ 1159 MaskCpuFlags(benchmark_cpu_info_); \ 1160 FMT_A##To##FMT_B##Dither(src_argb, kStrideA, dst_argb_opt, kStrideB, \ 1161 NULL, kWidth, kHeight); \ 1162 int max_diff = 0; \ 1163 for (int i = 0; i < kStrideB * kHeightB; ++i) { \ 1164 int abs_diff = abs(static_cast<int>(dst_argb_c[i]) - \ 1165 static_cast<int>(dst_argb_opt[i])); \ 1166 if (abs_diff > max_diff) { \ 1167 max_diff = abs_diff; \ 1168 } \ 1169 } \ 1170 EXPECT_LE(max_diff, DIFF); \ 1171 free_aligned_buffer_page_end(src_argb); \ 1172 free_aligned_buffer_page_end(dst_argb_c); \ 1173 free_aligned_buffer_page_end(dst_argb_opt); \ 1174 } \ 1175 } 1176 1177 #define TESTATOBD(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1178 HEIGHT_B, DIFF) \ 1179 TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1180 HEIGHT_B, benchmark_width_ - 4, DIFF, _Any, +, 0) \ 1181 TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1182 HEIGHT_B, benchmark_width_, DIFF, _Unaligned, +, 1) \ 1183 TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1184 HEIGHT_B, benchmark_width_, DIFF, _Invert, -, 0) \ 1185 
TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1186 HEIGHT_B, benchmark_width_, DIFF, _Opt, +, 0) \ 1187 TESTATOBDRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B, \ 1188 HEIGHT_B, DIFF) 1189 1190 TESTATOBD(ARGB, 4, 4, 1, RGB565, 2, 2, 1, 0) 1191 1192 #define TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, W1280, N, NEG, OFF) \ 1193 TEST_F(LibYUVConvertTest, FMT_ATOB##_Symetric##N) { \ 1194 const int kWidth = ((W1280) > 0) ? (W1280) : 1; \ 1195 const int kHeight = benchmark_height_; \ 1196 const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \ 1197 const int kStrideA = \ 1198 (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \ 1199 align_buffer_page_end(src_argb, kStrideA* kHeightA + OFF); \ 1200 align_buffer_page_end(dst_argb_c, kStrideA* kHeightA); \ 1201 align_buffer_page_end(dst_argb_opt, kStrideA* kHeightA); \ 1202 for (int i = 0; i < kStrideA * kHeightA; ++i) { \ 1203 src_argb[i + OFF] = (fastrand() & 0xff); \ 1204 } \ 1205 memset(dst_argb_c, 1, kStrideA* kHeightA); \ 1206 memset(dst_argb_opt, 101, kStrideA* kHeightA); \ 1207 MaskCpuFlags(disable_cpu_flags_); \ 1208 FMT_ATOB(src_argb + OFF, kStrideA, dst_argb_c, kStrideA, kWidth, \ 1209 NEG kHeight); \ 1210 MaskCpuFlags(benchmark_cpu_info_); \ 1211 for (int i = 0; i < benchmark_iterations_; ++i) { \ 1212 FMT_ATOB(src_argb + OFF, kStrideA, dst_argb_opt, kStrideA, kWidth, \ 1213 NEG kHeight); \ 1214 } \ 1215 MaskCpuFlags(disable_cpu_flags_); \ 1216 FMT_ATOB(dst_argb_c, kStrideA, dst_argb_c, kStrideA, kWidth, NEG kHeight); \ 1217 MaskCpuFlags(benchmark_cpu_info_); \ 1218 FMT_ATOB(dst_argb_opt, kStrideA, dst_argb_opt, kStrideA, kWidth, \ 1219 NEG kHeight); \ 1220 for (int i = 0; i < kStrideA * kHeightA; ++i) { \ 1221 EXPECT_EQ(src_argb[i + OFF], dst_argb_opt[i]); \ 1222 EXPECT_EQ(dst_argb_c[i], dst_argb_opt[i]); \ 1223 } \ 1224 free_aligned_buffer_page_end(src_argb); \ 1225 free_aligned_buffer_page_end(dst_argb_c); \ 1226 
free_aligned_buffer_page_end(dst_argb_opt); \ 1227 } 1228 1229 #define TESTSYM(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A) \ 1230 TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, benchmark_width_ - 4, _Any, +, \ 1231 0) \ 1232 TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, benchmark_width_, _Unaligned, \ 1233 +, 1) \ 1234 TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, benchmark_width_, _Opt, +, 0) 1235 1236 TESTSYM(ARGBToARGB, 4, 4, 1) 1237 TESTSYM(ARGBToBGRA, 4, 4, 1) 1238 TESTSYM(ARGBToABGR, 4, 4, 1) 1239 TESTSYM(BGRAToARGB, 4, 4, 1) 1240 TESTSYM(ABGRToARGB, 4, 4, 1) 1241 1242 TEST_F(LibYUVConvertTest, Test565) { 1243 SIMD_ALIGNED(uint8 orig_pixels[256][4]); 1244 SIMD_ALIGNED(uint8 pixels565[256][2]); 1245 1246 for (int i = 0; i < 256; ++i) { 1247 for (int j = 0; j < 4; ++j) { 1248 orig_pixels[i][j] = i; 1249 } 1250 } 1251 ARGBToRGB565(&orig_pixels[0][0], 0, &pixels565[0][0], 0, 256, 1); 1252 uint32 checksum = HashDjb2(&pixels565[0][0], sizeof(pixels565), 5381); 1253 EXPECT_EQ(610919429u, checksum); 1254 } 1255 1256 #ifdef HAVE_JPEG 1257 TEST_F(LibYUVConvertTest, ValidateJpeg) { 1258 const int kOff = 10; 1259 const int kMinJpeg = 64; 1260 const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg 1261 ? benchmark_width_ * benchmark_height_ 1262 : kMinJpeg; 1263 const int kSize = kImageSize + kOff; 1264 align_buffer_page_end(orig_pixels, kSize); 1265 1266 // No SOI or EOI. Expect fail. 1267 memset(orig_pixels, 0, kSize); 1268 EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize)); 1269 1270 // Test special value that matches marker start. 1271 memset(orig_pixels, 0xff, kSize); 1272 EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize)); 1273 1274 // EOI, SOI. Expect pass. 1275 orig_pixels[0] = 0xff; 1276 orig_pixels[1] = 0xd8; // SOI. 1277 orig_pixels[kSize - kOff + 0] = 0xff; 1278 orig_pixels[kSize - kOff + 1] = 0xd9; // EOI. 
1279 for (int times = 0; times < benchmark_iterations_; ++times) { 1280 EXPECT_TRUE(ValidateJpeg(orig_pixels, kSize)); 1281 } 1282 free_aligned_buffer_page_end(orig_pixels); 1283 } 1284 1285 TEST_F(LibYUVConvertTest, ValidateJpegLarge) { 1286 const int kOff = 10; 1287 const int kMinJpeg = 64; 1288 const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg 1289 ? benchmark_width_ * benchmark_height_ 1290 : kMinJpeg; 1291 const int kSize = kImageSize + kOff; 1292 const int kMultiple = 10; 1293 const int kBufSize = kImageSize * kMultiple + kOff; 1294 align_buffer_page_end(orig_pixels, kBufSize); 1295 1296 // No SOI or EOI. Expect fail. 1297 memset(orig_pixels, 0, kBufSize); 1298 EXPECT_FALSE(ValidateJpeg(orig_pixels, kBufSize)); 1299 1300 // EOI, SOI. Expect pass. 1301 orig_pixels[0] = 0xff; 1302 orig_pixels[1] = 0xd8; // SOI. 1303 orig_pixels[kSize - kOff + 0] = 0xff; 1304 orig_pixels[kSize - kOff + 1] = 0xd9; // EOI. 1305 for (int times = 0; times < benchmark_iterations_; ++times) { 1306 EXPECT_TRUE(ValidateJpeg(orig_pixels, kBufSize)); 1307 } 1308 free_aligned_buffer_page_end(orig_pixels); 1309 } 1310 1311 TEST_F(LibYUVConvertTest, InvalidateJpeg) { 1312 const int kOff = 10; 1313 const int kMinJpeg = 64; 1314 const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg 1315 ? benchmark_width_ * benchmark_height_ 1316 : kMinJpeg; 1317 const int kSize = kImageSize + kOff; 1318 align_buffer_page_end(orig_pixels, kSize); 1319 1320 // NULL pointer. Expect fail. 1321 EXPECT_FALSE(ValidateJpeg(NULL, kSize)); 1322 1323 // Negative size. Expect fail. 1324 EXPECT_FALSE(ValidateJpeg(orig_pixels, -1)); 1325 1326 // Too large size. Expect fail. 1327 EXPECT_FALSE(ValidateJpeg(orig_pixels, 0xfb000000ull)); 1328 1329 // No SOI or EOI. Expect fail. 1330 memset(orig_pixels, 0, kSize); 1331 EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize)); 1332 1333 // SOI but no EOI. Expect fail. 1334 orig_pixels[0] = 0xff; 1335 orig_pixels[1] = 0xd8; // SOI. 
1336 for (int times = 0; times < benchmark_iterations_; ++times) { 1337 EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize)); 1338 } 1339 1340 // EOI but no SOI. Expect fail. 1341 orig_pixels[0] = 0; 1342 orig_pixels[1] = 0; 1343 orig_pixels[kSize - kOff + 0] = 0xff; 1344 orig_pixels[kSize - kOff + 1] = 0xd9; // EOI. 1345 EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize)); 1346 1347 free_aligned_buffer_page_end(orig_pixels); 1348 } 1349 1350 TEST_F(LibYUVConvertTest, FuzzJpeg) { 1351 // SOI but no EOI. Expect fail. 1352 for (int times = 0; times < benchmark_iterations_; ++times) { 1353 const int kSize = fastrand() % 5000 + 2; 1354 align_buffer_page_end(orig_pixels, kSize); 1355 MemRandomize(orig_pixels, kSize); 1356 1357 // Add SOI so frame will be scanned. 1358 orig_pixels[0] = 0xff; 1359 orig_pixels[1] = 0xd8; // SOI. 1360 orig_pixels[kSize - 1] = 0xff; 1361 ValidateJpeg(orig_pixels, kSize); // Failure normally expected. 1362 free_aligned_buffer_page_end(orig_pixels); 1363 } 1364 } 1365 1366 TEST_F(LibYUVConvertTest, MJPGToI420) { 1367 const int kOff = 10; 1368 const int kMinJpeg = 64; 1369 const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg 1370 ? benchmark_width_ * benchmark_height_ 1371 : kMinJpeg; 1372 const int kSize = kImageSize + kOff; 1373 align_buffer_page_end(orig_pixels, kSize); 1374 align_buffer_page_end(dst_y_opt, benchmark_width_ * benchmark_height_); 1375 align_buffer_page_end(dst_u_opt, SUBSAMPLE(benchmark_width_, 2) * 1376 SUBSAMPLE(benchmark_height_, 2)); 1377 align_buffer_page_end(dst_v_opt, SUBSAMPLE(benchmark_width_, 2) * 1378 SUBSAMPLE(benchmark_height_, 2)); 1379 1380 // EOI, SOI to make MJPG appear valid. 1381 memset(orig_pixels, 0, kSize); 1382 orig_pixels[0] = 0xff; 1383 orig_pixels[1] = 0xd8; // SOI. 1384 orig_pixels[kSize - kOff + 0] = 0xff; 1385 orig_pixels[kSize - kOff + 1] = 0xd9; // EOI. 
1386 1387 for (int times = 0; times < benchmark_iterations_; ++times) { 1388 int ret = 1389 MJPGToI420(orig_pixels, kSize, dst_y_opt, benchmark_width_, dst_u_opt, 1390 SUBSAMPLE(benchmark_width_, 2), dst_v_opt, 1391 SUBSAMPLE(benchmark_width_, 2), benchmark_width_, 1392 benchmark_height_, benchmark_width_, benchmark_height_); 1393 // Expect failure because image is not really valid. 1394 EXPECT_EQ(1, ret); 1395 } 1396 1397 free_aligned_buffer_page_end(dst_y_opt); 1398 free_aligned_buffer_page_end(dst_u_opt); 1399 free_aligned_buffer_page_end(dst_v_opt); 1400 free_aligned_buffer_page_end(orig_pixels); 1401 } 1402 1403 TEST_F(LibYUVConvertTest, MJPGToARGB) { 1404 const int kOff = 10; 1405 const int kMinJpeg = 64; 1406 const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg 1407 ? benchmark_width_ * benchmark_height_ 1408 : kMinJpeg; 1409 const int kSize = kImageSize + kOff; 1410 align_buffer_page_end(orig_pixels, kSize); 1411 align_buffer_page_end(dst_argb_opt, benchmark_width_ * benchmark_height_ * 4); 1412 1413 // EOI, SOI to make MJPG appear valid. 1414 memset(orig_pixels, 0, kSize); 1415 orig_pixels[0] = 0xff; 1416 orig_pixels[1] = 0xd8; // SOI. 1417 orig_pixels[kSize - kOff + 0] = 0xff; 1418 orig_pixels[kSize - kOff + 1] = 0xd9; // EOI. 1419 1420 for (int times = 0; times < benchmark_iterations_; ++times) { 1421 int ret = MJPGToARGB(orig_pixels, kSize, dst_argb_opt, benchmark_width_ * 4, 1422 benchmark_width_, benchmark_height_, benchmark_width_, 1423 benchmark_height_); 1424 // Expect failure because image is not really valid. 
1425 EXPECT_EQ(1, ret); 1426 } 1427 1428 free_aligned_buffer_page_end(dst_argb_opt); 1429 free_aligned_buffer_page_end(orig_pixels); 1430 } 1431 1432 #endif // HAVE_JPEG 1433 1434 TEST_F(LibYUVConvertTest, NV12Crop) { 1435 const int SUBSAMP_X = 2; 1436 const int SUBSAMP_Y = 2; 1437 const int kWidth = benchmark_width_; 1438 const int kHeight = benchmark_height_; 1439 const int crop_y = 1440 ((benchmark_height_ - (benchmark_height_ * 360 / 480)) / 2 + 1) & ~1; 1441 const int kDestWidth = benchmark_width_; 1442 const int kDestHeight = benchmark_height_ - crop_y * 2; 1443 const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); 1444 const int sample_size = 1445 kWidth * kHeight + kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y) * 2; 1446 align_buffer_page_end(src_y, sample_size); 1447 uint8* src_uv = src_y + kWidth * kHeight; 1448 1449 align_buffer_page_end(dst_y, kDestWidth * kDestHeight); 1450 align_buffer_page_end(dst_u, SUBSAMPLE(kDestWidth, SUBSAMP_X) * 1451 SUBSAMPLE(kDestHeight, SUBSAMP_Y)); 1452 align_buffer_page_end(dst_v, SUBSAMPLE(kDestWidth, SUBSAMP_X) * 1453 SUBSAMPLE(kDestHeight, SUBSAMP_Y)); 1454 1455 align_buffer_page_end(dst_y_2, kDestWidth * kDestHeight); 1456 align_buffer_page_end(dst_u_2, SUBSAMPLE(kDestWidth, SUBSAMP_X) * 1457 SUBSAMPLE(kDestHeight, SUBSAMP_Y)); 1458 align_buffer_page_end(dst_v_2, SUBSAMPLE(kDestWidth, SUBSAMP_X) * 1459 SUBSAMPLE(kDestHeight, SUBSAMP_Y)); 1460 1461 for (int i = 0; i < kHeight * kWidth; ++i) { 1462 src_y[i] = (fastrand() & 0xff); 1463 } 1464 for (int i = 0; i < (SUBSAMPLE(kHeight, SUBSAMP_Y) * kStrideUV) * 2; ++i) { 1465 src_uv[i] = (fastrand() & 0xff); 1466 } 1467 memset(dst_y, 1, kDestWidth * kDestHeight); 1468 memset(dst_u, 2, 1469 SUBSAMPLE(kDestWidth, SUBSAMP_X) * SUBSAMPLE(kDestHeight, SUBSAMP_Y)); 1470 memset(dst_v, 3, 1471 SUBSAMPLE(kDestWidth, SUBSAMP_X) * SUBSAMPLE(kDestHeight, SUBSAMP_Y)); 1472 memset(dst_y_2, 1, kDestWidth * kDestHeight); 1473 memset(dst_u_2, 2, 1474 SUBSAMPLE(kDestWidth, SUBSAMP_X) * 
SUBSAMPLE(kDestHeight, SUBSAMP_Y)); 1475 memset(dst_v_2, 3, 1476 SUBSAMPLE(kDestWidth, SUBSAMP_X) * SUBSAMPLE(kDestHeight, SUBSAMP_Y)); 1477 1478 ConvertToI420(src_y, sample_size, dst_y_2, kDestWidth, dst_u_2, 1479 SUBSAMPLE(kDestWidth, SUBSAMP_X), dst_v_2, 1480 SUBSAMPLE(kDestWidth, SUBSAMP_X), 0, crop_y, kWidth, kHeight, 1481 kDestWidth, kDestHeight, libyuv::kRotate0, libyuv::FOURCC_NV12); 1482 1483 NV12ToI420(src_y + crop_y * kWidth, kWidth, 1484 src_uv + (crop_y / 2) * kStrideUV * 2, kStrideUV * 2, dst_y, 1485 kDestWidth, dst_u, SUBSAMPLE(kDestWidth, SUBSAMP_X), dst_v, 1486 SUBSAMPLE(kDestWidth, SUBSAMP_X), kDestWidth, kDestHeight); 1487 1488 for (int i = 0; i < kDestHeight; ++i) { 1489 for (int j = 0; j < kDestWidth; ++j) { 1490 EXPECT_EQ(dst_y[i * kWidth + j], dst_y_2[i * kWidth + j]); 1491 } 1492 } 1493 for (int i = 0; i < SUBSAMPLE(kDestHeight, SUBSAMP_Y); ++i) { 1494 for (int j = 0; j < SUBSAMPLE(kDestWidth, SUBSAMP_X); ++j) { 1495 EXPECT_EQ(dst_u[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j], 1496 dst_u_2[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j]); 1497 } 1498 } 1499 for (int i = 0; i < SUBSAMPLE(kDestHeight, SUBSAMP_Y); ++i) { 1500 for (int j = 0; j < SUBSAMPLE(kDestWidth, SUBSAMP_X); ++j) { 1501 EXPECT_EQ(dst_v[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j], 1502 dst_v_2[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j]); 1503 } 1504 } 1505 free_aligned_buffer_page_end(dst_y); 1506 free_aligned_buffer_page_end(dst_u); 1507 free_aligned_buffer_page_end(dst_v); 1508 free_aligned_buffer_page_end(dst_y_2); 1509 free_aligned_buffer_page_end(dst_u_2); 1510 free_aligned_buffer_page_end(dst_v_2); 1511 free_aligned_buffer_page_end(src_y); 1512 } 1513 1514 TEST_F(LibYUVConvertTest, TestYToARGB) { 1515 uint8 y[32]; 1516 uint8 expectedg[32]; 1517 for (int i = 0; i < 32; ++i) { 1518 y[i] = i * 5 + 17; 1519 expectedg[i] = static_cast<int>((y[i] - 16) * 1.164f + 0.5f); 1520 } 1521 uint8 argb[32 * 4]; 1522 YToARGB(y, 0, argb, 0, 32, 1); 1523 1524 for (int i = 0; i < 32; ++i) { 1525 
printf("%2d %d: %d <-> %d,%d,%d,%d\n", i, y[i], expectedg[i], 1526 argb[i * 4 + 0], argb[i * 4 + 1], argb[i * 4 + 2], argb[i * 4 + 3]); 1527 } 1528 for (int i = 0; i < 32; ++i) { 1529 EXPECT_EQ(expectedg[i], argb[i * 4 + 0]); 1530 } 1531 } 1532 1533 static const uint8 kNoDither4x4[16] = { 1534 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1535 }; 1536 1537 TEST_F(LibYUVConvertTest, TestNoDither) { 1538 align_buffer_page_end(src_argb, benchmark_width_ * benchmark_height_ * 4); 1539 align_buffer_page_end(dst_rgb565, benchmark_width_ * benchmark_height_ * 2); 1540 align_buffer_page_end(dst_rgb565dither, 1541 benchmark_width_ * benchmark_height_ * 2); 1542 MemRandomize(src_argb, benchmark_width_ * benchmark_height_ * 4); 1543 MemRandomize(dst_rgb565, benchmark_width_ * benchmark_height_ * 2); 1544 MemRandomize(dst_rgb565dither, benchmark_width_ * benchmark_height_ * 2); 1545 ARGBToRGB565(src_argb, benchmark_width_ * 4, dst_rgb565, benchmark_width_ * 2, 1546 benchmark_width_, benchmark_height_); 1547 ARGBToRGB565Dither(src_argb, benchmark_width_ * 4, dst_rgb565dither, 1548 benchmark_width_ * 2, kNoDither4x4, benchmark_width_, 1549 benchmark_height_); 1550 for (int i = 0; i < benchmark_width_ * benchmark_height_ * 2; ++i) { 1551 EXPECT_EQ(dst_rgb565[i], dst_rgb565dither[i]); 1552 } 1553 1554 free_aligned_buffer_page_end(src_argb); 1555 free_aligned_buffer_page_end(dst_rgb565); 1556 free_aligned_buffer_page_end(dst_rgb565dither); 1557 } 1558 1559 // Ordered 4x4 dither for 888 to 565. Values from 0 to 7. 
static const uint8 kDither565_4x4[16] = {
    0, 4, 1, 5, 6, 2, 7, 3, 1, 5, 0, 4, 7, 3, 6, 2,
};

// Round-trips random ARGB through RGB565 with and without the ordered dither
// table, expands both back to ARGB, and requires the per-byte difference to
// stay within 9 (EXPECT_NEAR tolerance below).
TEST_F(LibYUVConvertTest, TestDither) {
  align_buffer_page_end(src_argb, benchmark_width_ * benchmark_height_ * 4);
  align_buffer_page_end(dst_rgb565, benchmark_width_ * benchmark_height_ * 2);
  align_buffer_page_end(dst_rgb565dither,
                        benchmark_width_ * benchmark_height_ * 2);
  align_buffer_page_end(dst_argb, benchmark_width_ * benchmark_height_ * 4);
  align_buffer_page_end(dst_argbdither,
                        benchmark_width_ * benchmark_height_ * 4);
  MemRandomize(src_argb, benchmark_width_ * benchmark_height_ * 4);
  MemRandomize(dst_rgb565, benchmark_width_ * benchmark_height_ * 2);
  MemRandomize(dst_rgb565dither, benchmark_width_ * benchmark_height_ * 2);
  MemRandomize(dst_argb, benchmark_width_ * benchmark_height_ * 4);
  MemRandomize(dst_argbdither, benchmark_width_ * benchmark_height_ * 4);
  ARGBToRGB565(src_argb, benchmark_width_ * 4, dst_rgb565, benchmark_width_ * 2,
               benchmark_width_, benchmark_height_);
  ARGBToRGB565Dither(src_argb, benchmark_width_ * 4, dst_rgb565dither,
                     benchmark_width_ * 2, kDither565_4x4, benchmark_width_,
                     benchmark_height_);
  // Expand both 565 results back to ARGB so bytes can be compared directly.
  RGB565ToARGB(dst_rgb565, benchmark_width_ * 2, dst_argb, benchmark_width_ * 4,
               benchmark_width_, benchmark_height_);
  RGB565ToARGB(dst_rgb565dither, benchmark_width_ * 2, dst_argbdither,
               benchmark_width_ * 4, benchmark_width_, benchmark_height_);

  for (int i = 0; i < benchmark_width_ * benchmark_height_ * 4; ++i) {
    EXPECT_NEAR(dst_argb[i], dst_argbdither[i], 9);
  }
  free_aligned_buffer_page_end(src_argb);
  free_aligned_buffer_page_end(dst_rgb565);
  free_aligned_buffer_page_end(dst_rgb565dither);
  free_aligned_buffer_page_end(dst_argb);
  free_aligned_buffer_page_end(dst_argbdither);
}

// Defines a test that runs a planar-to-dithered-RGB conversion once on the C
// path (MaskCpuFlags(disable_cpu_flags_)) and benchmark_iterations_ times on
// the optimized path, then converts both outputs to FMT_C and requires the
// max per-byte difference to be <= DIFF.  N names the variant; NEG (+/-)
// optionally inverts the height; OFF shifts all buffers for alignment tests.
#define TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                        YALIGN, W1280, DIFF, N, NEG, OFF, FMT_C, BPP_C)        \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##Dither##N) {                \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                            \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                   \
    const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN);                      \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                        \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y);             \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(src_u, kSizeUV + OFF);                               \
    align_buffer_page_end(src_v, kSizeUV + OFF);                               \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight + OFF);                \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + OFF);              \
    for (int i = 0; i < kWidth * kHeight; ++i) {                               \
      src_y[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    for (int i = 0; i < kSizeUV; ++i) {                                        \
      src_u[i + OFF] = (fastrand() & 0xff);                                    \
      src_v[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    memset(dst_argb_c + OFF, 1, kStrideB * kHeight);                           \
    memset(dst_argb_opt + OFF, 101, kStrideB * kHeight);                       \
    /* Reference pass with SIMD disabled. */                                   \
    MaskCpuFlags(disable_cpu_flags_);                                          \
    FMT_PLANAR##To##FMT_B##Dither(src_y + OFF, kWidth, src_u + OFF, kStrideUV, \
                                  src_v + OFF, kStrideUV, dst_argb_c + OFF,    \
                                  kStrideB, NULL, kWidth, NEG kHeight);        \
    MaskCpuFlags(benchmark_cpu_info_);                                         \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_PLANAR##To##FMT_B##Dither(                                           \
          src_y + OFF, kWidth, src_u + OFF, kStrideUV, src_v + OFF, kStrideUV, \
          dst_argb_opt + OFF, kStrideB, NULL, kWidth, NEG kHeight);            \
    }                                                                          \
    int max_diff = 0;                                                          \
    /* Convert to ARGB so 565 is expanded to bytes that can be compared. */    \
    align_buffer_page_end(dst_argb32_c, kWidth* BPP_C* kHeight);               \
    align_buffer_page_end(dst_argb32_opt, kWidth* BPP_C* kHeight);             \
    memset(dst_argb32_c, 2, kWidth* BPP_C* kHeight);                           \
    memset(dst_argb32_opt, 102, kWidth* BPP_C* kHeight);                       \
    FMT_B##To##FMT_C(dst_argb_c + OFF, kStrideB, dst_argb32_c, kWidth * BPP_C, \
                     kWidth, kHeight);                                         \
    FMT_B##To##FMT_C(dst_argb_opt + OFF, kStrideB, dst_argb32_opt,             \
                     kWidth * BPP_C, kWidth, kHeight);                         \
    for (int i = 0; i < kWidth * BPP_C * kHeight; ++i) {                       \
      int abs_diff = abs(static_cast<int>(dst_argb32_c[i]) -                   \
                         static_cast<int>(dst_argb32_opt[i]));                 \
      if (abs_diff > max_diff) {                                               \
        max_diff = abs_diff;                                                   \
      }                                                                        \
    }                                                                          \
    EXPECT_LE(max_diff, DIFF);                                                 \
    free_aligned_buffer_page_end(src_y);                                       \
    free_aligned_buffer_page_end(src_u);                                       \
    free_aligned_buffer_page_end(src_v);                                       \
    free_aligned_buffer_page_end(dst_argb_c);                                  \
    free_aligned_buffer_page_end(dst_argb_opt);                                \
    free_aligned_buffer_page_end(dst_argb32_c);                                \
    free_aligned_buffer_page_end(dst_argb32_opt);                              \
  }

// Instantiates the four standard variants of the dither test:
// _Any (odd width), _Unaligned (+1 byte offset), _Invert (negative height),
// and _Opt (aligned, positive height).
#define TESTPLANARTOBD(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                       YALIGN, DIFF, FMT_C, BPP_C)                            \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                  YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, FMT_C,      \
                  BPP_C)                                                      \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                  YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, FMT_C,    \
                  BPP_C)                                                      \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                  YALIGN, benchmark_width_, DIFF, _Invert, -, 0, FMT_C,       \
                  BPP_C)                                                      \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                  YALIGN, benchmark_width_, DIFF, _Opt, +, 0, FMT_C, BPP_C)

TESTPLANARTOBD(I420, 2, 2, RGB565, 2, 2, 1, 9, ARGB, 4)

// Defines a test comparing a one-step packed-to-NV12 conversion (UYVYTONV12)
// against a two-step reference (UYVYTOI420 followed by I420ToNV12); the Y and
// interleaved UV planes must match exactly.
#define TESTPTOB(NAME, UYVYTOI420, UYVYTONV12)                                \
  TEST_F(LibYUVConvertTest, NAME) {                                           \
    const int kWidth = benchmark_width_;                                      \
    const int kHeight = benchmark_height_;                                    \
                                                                              \
    align_buffer_page_end(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2) * kHeight);     \
    align_buffer_page_end(orig_y, kWidth* kHeight);                           \
    align_buffer_page_end(orig_u,                                             \
                          SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2));      \
    align_buffer_page_end(orig_v,                                             \
                          SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2));      \
                                                                              \
    align_buffer_page_end(dst_y_orig, kWidth* kHeight);                       \
    align_buffer_page_end(dst_uv_orig,                                        \
                          2 * SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2));  \
                                                                              \
    align_buffer_page_end(dst_y, kWidth* kHeight);                            \
    align_buffer_page_end(dst_uv,                                             \
                          2 * SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2));  \
                                                                              \
    MemRandomize(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2) * kHeight);              \
                                                                              \
    /* Convert UYVY to NV12 in 2 steps for reference */                       \
    libyuv::UYVYTOI420(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2), orig_y, kWidth,   \
                       orig_u, SUBSAMPLE(kWidth, 2), orig_v,                  \
                       SUBSAMPLE(kWidth, 2), kWidth, kHeight);                \
    libyuv::I420ToNV12(orig_y, kWidth, orig_u, SUBSAMPLE(kWidth, 2), orig_v,  \
                       SUBSAMPLE(kWidth, 2), dst_y_orig, kWidth, dst_uv_orig, \
                       2 * SUBSAMPLE(kWidth, 2), kWidth, kHeight);            \
                                                                              \
    /* Convert to NV12 */                                                     \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      libyuv::UYVYTONV12(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2), dst_y, kWidth,  \
                         dst_uv, 2 * SUBSAMPLE(kWidth, 2), kWidth, kHeight);  \
    }                                                                         \
                                                                              \
    for (int i = 0; i < kWidth * kHeight; ++i) {                              \
      EXPECT_EQ(orig_y[i], dst_y[i]);                                         \
    }                                                                         \
    for (int i = 0; i < kWidth * kHeight; ++i) {                              \
      EXPECT_EQ(dst_y_orig[i], dst_y[i]);                                     \
    }                                                                         \
    for (int i = 0; i < 2 * SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2);     \
         ++i) {                                                               \
      EXPECT_EQ(dst_uv_orig[i], dst_uv[i]);                                   \
    }                                                                         \
                                                                              \
    free_aligned_buffer_page_end(orig_uyvy);                                  \
    free_aligned_buffer_page_end(orig_y);                                     \
    free_aligned_buffer_page_end(orig_u);                                     \
    free_aligned_buffer_page_end(orig_v);                                     \
    free_aligned_buffer_page_end(dst_y_orig);                                 \
    free_aligned_buffer_page_end(dst_uv_orig);                                \
    free_aligned_buffer_page_end(dst_y);                                      \
    free_aligned_buffer_page_end(dst_uv);                                     \
  }

TESTPTOB(TestYUY2ToNV12, YUY2ToI420, YUY2ToNV12)
TESTPTOB(TestUYVYToNV12, UYVYToI420, UYVYToNV12)

// Defines a test that converts planar YUV to FMT_C two ways - directly, and
// via the intermediate format FMT_B - and requires the two results to match
// byte-for-byte.
#define TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                       W1280, N, NEG, OFF, FMT_C, BPP_C)                      \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##_##FMT_C##N) {             \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                           \
    const int kHeight = benchmark_height_;                                    \
    const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B;                    \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                       \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y);            \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                      \
    align_buffer_page_end(src_u, kSizeUV + OFF);                              \
    align_buffer_page_end(src_v, kSizeUV + OFF);                              \
    align_buffer_page_end(dst_argb_b, kStrideB* kHeight + OFF);               \
    for (int i = 0; i < kWidth * kHeight; ++i) {                              \
      src_y[i + OFF] = (fastrand() & 0xff);                                   \
    }                                                                         \
    for (int i = 0; i < kSizeUV; ++i) {                                       \
      src_u[i + OFF] = (fastrand() & 0xff);                                   \
      src_v[i + OFF] = (fastrand() & 0xff);                                   \
    }                                                                         \
    memset(dst_argb_b + OFF, 1, kStrideB * kHeight);                          \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV,      \
                            src_v + OFF, kStrideUV, dst_argb_b + OFF,         \
                            kStrideB, kWidth, NEG kHeight);                   \
    }                                                                         \
    /* Convert to a 3rd format in 1 step and 2 steps and compare  */          \
    const int kStrideC = kWidth * BPP_C;                                      \
    align_buffer_page_end(dst_argb_c, kStrideC* kHeight + OFF);               \
    align_buffer_page_end(dst_argb_bc, kStrideC* kHeight + OFF);              \
    memset(dst_argb_c + OFF, 2, kStrideC * kHeight);                          \
    memset(dst_argb_bc + OFF, 3, kStrideC * kHeight);                         \
    FMT_PLANAR##To##FMT_C(src_y + OFF, kWidth, src_u + OFF, kStrideUV,        \
                          src_v + OFF, kStrideUV, dst_argb_c + OFF, kStrideC, \
                          kWidth, NEG kHeight);                               \
    /* Convert B to C */                                                      \
    FMT_B##To##FMT_C(dst_argb_b + OFF, kStrideB, dst_argb_bc + OFF, kStrideC, \
                     kWidth, kHeight);                                        \
    for (int i = 0; i < kStrideC * kHeight; ++i) {                            \
      EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_bc[i + OFF]);                   \
    }                                                                         \
    free_aligned_buffer_page_end(src_y);                                      \
    free_aligned_buffer_page_end(src_u);                                      \
    free_aligned_buffer_page_end(src_v);                                      \
    free_aligned_buffer_page_end(dst_argb_b);                                 \
    free_aligned_buffer_page_end(dst_argb_c);                                 \
    free_aligned_buffer_page_end(dst_argb_bc);                                \
  }

// Instantiates the _Any, _Unaligned, _Invert and _Opt variants of the
// two-path comparison above.
#define TESTPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                      FMT_C, BPP_C)                                          \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                 benchmark_width_ - 4, _Any, +, 0, FMT_C, BPP_C)             \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                 benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C)           \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                 benchmark_width_, _Invert, -, 0, FMT_C, BPP_C)              \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                 benchmark_width_, _Opt, +, 0, FMT_C, BPP_C)

TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTPLANARTOE(J420, 2, 2, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(J420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(H420, 2, 2, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(H420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, BGRA, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RGBA, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, RGB24, 3)
TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, RAW, 3)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RAW, 3)
TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RGB565, 2)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB1555, 2)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB4444, 2)
TESTPLANARTOE(I422, 2, 1, ARGB, 1, 4, RGB565, 2)
TESTPLANARTOE(J422, 2, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(J422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(H422, 2, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(H422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, BGRA, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, RGBA, 1, 4, ARGB, 4)
TESTPLANARTOE(I444, 1, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(J444, 1, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(I444, 1, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, YUY2, 2, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, UYVY, 2, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, YUY2, 2, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4)

// Same two-path comparison as TESTPLANARTOEI but for four-plane (alpha)
// sources; ATTEN selects the premultiply-alpha flag.
#define TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                        W1280, N, NEG, OFF, FMT_C, BPP_C, ATTEN)               \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##_##FMT_C##N) {              \
    const int kWidth = ((W1280) > 0) ? (W1280) : 1;                            \
    const int kHeight = benchmark_height_;                                     \
    const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B;                     \
    const int kSizeUV =                                                        \
        SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y);          \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(src_u, kSizeUV + OFF);                               \
    align_buffer_page_end(src_v, kSizeUV + OFF);                               \
    align_buffer_page_end(src_a, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(dst_argb_b, kStrideB* kHeight + OFF);                \
    for (int i = 0; i < kWidth * kHeight; ++i) {                               \
      src_y[i + OFF] = (fastrand() & 0xff);                                    \
      src_a[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    for (int i = 0; i < kSizeUV; ++i) {                                        \
      src_u[i + OFF] = (fastrand() & 0xff);                                    \
      src_v[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    memset(dst_argb_b + OFF, 1, kStrideB * kHeight);                           \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_PLANAR##To##FMT_B(                                                   \
          src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X),      \
          src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), src_a + OFF, kWidth,      \
          dst_argb_b + OFF, kStrideB, kWidth, NEG kHeight, ATTEN);             \
    }                                                                          \
    /* Convert to a 3rd format in 1 step and 2 steps and compare */            \
    const int kStrideC = kWidth * BPP_C;                                       \
    align_buffer_page_end(dst_argb_c, kStrideC* kHeight + OFF);                \
    align_buffer_page_end(dst_argb_bc, kStrideC* kHeight + OFF);               \
    memset(dst_argb_c + OFF, 2, kStrideC * kHeight);                           \
    memset(dst_argb_bc + OFF, 3, kStrideC * kHeight);                          \
    FMT_PLANAR##To##FMT_C(                                                     \
        src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X),        \
        src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), src_a + OFF, kWidth,        \
        dst_argb_c + OFF, kStrideC, kWidth, NEG kHeight, ATTEN);               \
    /* Convert B to C */                                                       \
    FMT_B##To##FMT_C(dst_argb_b + OFF, kStrideB, dst_argb_bc + OFF, kStrideC,  \
                     kWidth, kHeight);                                         \
    for (int i = 0; i < kStrideC * kHeight; ++i) {                             \
      EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_bc[i + OFF]);                    \
    }                                                                          \
    free_aligned_buffer_page_end(src_y);                                       \
    free_aligned_buffer_page_end(src_u);                                       \
    free_aligned_buffer_page_end(src_v);                                       \
    free_aligned_buffer_page_end(src_a);                                       \
    free_aligned_buffer_page_end(dst_argb_b);                                  \
    free_aligned_buffer_page_end(dst_argb_c);                                  \
    free_aligned_buffer_page_end(dst_argb_bc);                                 \
  }

// Instantiates the standard variants plus a _Premult variant (ATTEN = 1).
#define TESTQPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                       FMT_C, BPP_C)                                          \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_ - 4, _Any, +, 0, FMT_C, BPP_C, 0)          \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C, 0)        \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_, _Invert, -, 0, FMT_C, BPP_C, 0)           \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_, _Opt, +, 0, FMT_C, BPP_C, 0)              \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_, _Premult, +, 0, FMT_C, BPP_C, 1)

TESTQPLANARTOE(I420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(I420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)

// Verifies that ConvertToARGB with FOURCC_ARGB input and kRotate90 performs a
// pure 90-degree rotation of a 2x2 frame (pixels pass through unconverted).
TEST_F(LibYUVConvertTest, RotateWithARGBSource) {
  // 2x2 frames
  uint32_t src[4];
  uint32_t dst[4];
  // some random input
  src[0] = 0x11000000;
  src[1] = 0x00450000;
  src[2] = 0x00009f00;
  src[3] = 0x000000ff;
  // zeros on destination
  dst[0] = 0x00000000;
  dst[1] = 0x00000000;
  dst[2] = 0x00000000;
  dst[3] = 0x00000000;

  int r = ConvertToARGB(reinterpret_cast<uint8_t*>(src),
                        16,  // input size
                        reinterpret_cast<uint8_t*>(dst),
                        8,  // destination stride
                        0,  // crop_x
                        0,  // crop_y
                        2,  // width
                        2,  // height
                        2,  // crop width
                        2,  // crop height
                        kRotate90, FOURCC_ARGB);

  // ConvertToARGB returns 0 on success.
  EXPECT_EQ(r, 0);
  // 90 degrees rotation, no conversion
  EXPECT_EQ(dst[0], src[2]);
  EXPECT_EQ(dst[1], src[0]);
  EXPECT_EQ(dst[2], src[3]);
  EXPECT_EQ(dst[3], src[1]);
}

}  // namespace libyuv