/*
 * libjingle
 * Copyright 2014 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <algorithm>
#include <string>

#include "libyuv/convert.h"
#include "libyuv/convert_from.h"
#include "libyuv/convert_from_argb.h"
#include "libyuv/mjpeg_decoder.h"
#include "libyuv/planar_functions.h"
#include "talk/media/base/testutils.h"
#include "talk/media/base/videocommon.h"
#include "webrtc/base/flags.h"
#include "webrtc/base/gunit.h"
#include "webrtc/base/scoped_ptr.h"

// Undefine macros for the windows build.
#undef max
#undef min

using cricket::DumpPlanarYuvTestImage;

DEFINE_bool(planarfunctions_dump, false,
            "whether to write out converted images for inspection");
DEFINE_int(planarfunctions_repeat, 1,
           "how many times to perform each conversion (for perf testing)");

namespace cricket {

// Number of testing colors in each color channel.
static const int kTestingColorChannelResolution = 6;

// The total number of testing colors:
// kTestingColorNum = kTestingColorChannelResolution^3.
static const int kTestingColorNum = kTestingColorChannelResolution *
    kTestingColorChannelResolution * kTestingColorChannelResolution;

static const int kWidth = 1280;
static const int kHeight = 720;
static const int kAlignment = 16;

class PlanarFunctionsTest : public testing::TestWithParam<int> {
 protected:
  PlanarFunctionsTest() : dump_(false), repeat_(1) {
    InitializeColorBand();
  }

  virtual void SetUp() {
    dump_ = FLAG_planarfunctions_dump;
    repeat_ = FLAG_planarfunctions_repeat;
  }

  // Initialize the color band for testing.
  void InitializeColorBand() {
    testing_color_y_.reset(new uint8_t[kTestingColorNum]);
    testing_color_u_.reset(new uint8_t[kTestingColorNum]);
    testing_color_v_.reset(new uint8_t[kTestingColorNum]);
    testing_color_r_.reset(new uint8_t[kTestingColorNum]);
    testing_color_g_.reset(new uint8_t[kTestingColorNum]);
    testing_color_b_.reset(new uint8_t[kTestingColorNum]);
    int color_counter = 0;
    for (int i = 0; i < kTestingColorChannelResolution; ++i) {
      uint8_t color_r =
          static_cast<uint8_t>(i * 255 / (kTestingColorChannelResolution - 1));
      for (int j = 0; j < kTestingColorChannelResolution; ++j) {
        uint8_t color_g = static_cast<uint8_t>(
            j * 255 / (kTestingColorChannelResolution - 1));
        for (int k = 0; k < kTestingColorChannelResolution; ++k) {
          uint8_t color_b = static_cast<uint8_t>(
              k * 255 / (kTestingColorChannelResolution - 1));
          testing_color_r_[color_counter] = color_r;
          testing_color_g_[color_counter] = color_g;
          testing_color_b_[color_counter] = color_b;
          // Convert the testing RGB colors to YUV colors.
          ConvertRgbPixel(color_r, color_g, color_b,
                          &(testing_color_y_[color_counter]),
                          &(testing_color_u_[color_counter]),
                          &(testing_color_v_[color_counter]));
          ++color_counter;
        }
      }
    }
  }

  // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia.
  // (Adapted from lmivideoframe_unittest.cc.)
  void ConvertRgbPixel(uint8_t r,
                       uint8_t g,
                       uint8_t b,
                       uint8_t* y,
                       uint8_t* u,
                       uint8_t* v) {
    *y = ClampUint8(.257 * r + .504 * g + .098 * b + 16);
    *u = ClampUint8(-.148 * r - .291 * g + .439 * b + 128);
    *v = ClampUint8(.439 * r - .368 * g - .071 * b + 128);
  }

  uint8_t ClampUint8(double value) {
    value = std::max(0., std::min(255., value));
    uint8_t uint8_value = static_cast<uint8_t>(value);
    return uint8_value;
  }
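
  // Spot-check values for ConvertRgbPixel() above, computed by hand from the
  // same formulas (approximate, since the cast in ClampUint8 truncates):
  //   ConvertRgbPixel(255, 0, 0)     -> (Y, U, V) ~= (81, 90, 239)   // red
  //   ConvertRgbPixel(255, 255, 255) -> (Y, U, V) ~= (235, 128, 128) // white
  //   ConvertRgbPixel(0, 0, 0)       -> (Y, U, V) ~= (16, 128, 128)  // black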

  // Generate a Red-Green-Blue inter-weaving chessboard-like
  // YUV testing image (I420/I422/I444).
  // The pattern looks like c0 c1 c2 c3 ...
  //                        c1 c2 c3 c4 ...
  //                        c2 c3 c4 c5 ...
  //                        ...............
  // The size of each color block is (block_size) x (block_size).
  uint8_t* CreateFakeYuvTestingImage(int height,
                                     int width,
                                     int block_size,
                                     libyuv::JpegSubsamplingType subsample_type,
                                     uint8_t*& y_pointer,
                                     uint8_t*& u_pointer,
                                     uint8_t*& v_pointer) {
    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }
    int y_size = height * width;
    int u_size, v_size;
    int vertical_sample_ratio = 1, horizontal_sample_ratio = 1;
    switch (subsample_type) {
      case libyuv::kJpegYuv420:
        u_size = ((height + 1) >> 1) * ((width + 1) >> 1);
        v_size = u_size;
        vertical_sample_ratio = 2, horizontal_sample_ratio = 2;
        break;
      case libyuv::kJpegYuv422:
        u_size = height * ((width + 1) >> 1);
        v_size = u_size;
        vertical_sample_ratio = 1, horizontal_sample_ratio = 2;
        break;
      case libyuv::kJpegYuv444:
        v_size = u_size = y_size;
        vertical_sample_ratio = 1, horizontal_sample_ratio = 1;
        break;
      case libyuv::kJpegUnknown:
      default:
        return NULL;
    }
    uint8_t* image_pointer = new uint8_t[y_size + u_size + v_size + kAlignment];
    y_pointer = ALIGNP(image_pointer, kAlignment);
    u_pointer = ALIGNP(&image_pointer[y_size], kAlignment);
    v_pointer = ALIGNP(&image_pointer[y_size + u_size], kAlignment);
    uint8_t* current_y_pointer = y_pointer;
    uint8_t* current_u_pointer = u_pointer;
    uint8_t* current_v_pointer = v_pointer;
    for (int j = 0; j < height; ++j) {
      for (int i = 0; i < width; ++i) {
        int color = ((i / block_size) + (j / block_size)) % kTestingColorNum;
        *(current_y_pointer++) = testing_color_y_[color];
        if (i % horizontal_sample_ratio == 0 &&
            j % vertical_sample_ratio == 0) {
          *(current_u_pointer++) = testing_color_u_[color];
          *(current_v_pointer++) = testing_color_v_[color];
        }
      }
    }
    return image_pointer;
  }

  // Generate a Red-Green-Blue inter-weaving chessboard-like
  // YUY2/UYVY testing image.
  // The pattern looks like c0 c1 c2 c3 ...
  //                        c1 c2 c3 c4 ...
  //                        c2 c3 c4 c5 ...
  //                        ...............
  // The size of each color block is (block_size) x (block_size).
  uint8_t* CreateFakeInterleaveYuvTestingImage(int height,
                                               int width,
                                               int block_size,
                                               uint8_t*& yuv_pointer,
                                               FourCC fourcc_type) {
    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }
    if (fourcc_type != FOURCC_YUY2 && fourcc_type != FOURCC_UYVY) {
      LOG(LS_ERROR) << "Format " << static_cast<int>(fourcc_type)
                    << " is not supported.";
      return NULL;
    }
    // Regularize the width of the output to be even.
    int awidth = (width + 1) & ~1;

    uint8_t* image_pointer = new uint8_t[2 * height * awidth + kAlignment];
    yuv_pointer = ALIGNP(image_pointer, kAlignment);
    uint8_t* current_yuv_pointer = yuv_pointer;
    switch (fourcc_type) {
      case FOURCC_YUY2: {
        for (int j = 0; j < height; ++j) {
          for (int i = 0; i < awidth; i += 2, current_yuv_pointer += 4) {
            int color1 = ((i / block_size) + (j / block_size)) %
                kTestingColorNum;
            int color2 = (((i + 1) / block_size) + (j / block_size)) %
                kTestingColorNum;
            current_yuv_pointer[0] = testing_color_y_[color1];
            if (i < width) {
              current_yuv_pointer[1] = static_cast<uint8_t>(
                  (static_cast<uint32_t>(testing_color_u_[color1]) +
                   static_cast<uint32_t>(testing_color_u_[color2])) / 2);
              current_yuv_pointer[2] = testing_color_y_[color2];
              current_yuv_pointer[3] = static_cast<uint8_t>(
                  (static_cast<uint32_t>(testing_color_v_[color1]) +
                   static_cast<uint32_t>(testing_color_v_[color2])) / 2);
            } else {
              current_yuv_pointer[1] = testing_color_u_[color1];
              current_yuv_pointer[2] = 0;
              current_yuv_pointer[3] = testing_color_v_[color1];
            }
          }
        }
        break;
      }
      case FOURCC_UYVY: {
        for (int j = 0; j < height; ++j) {
          for (int i = 0; i < awidth; i += 2, current_yuv_pointer += 4) {
            int color1 = ((i / block_size) + (j / block_size)) %
                kTestingColorNum;
            int color2 = (((i + 1) / block_size) + (j / block_size)) %
                kTestingColorNum;
            if (i < width) {
              current_yuv_pointer[0] = static_cast<uint8_t>(
                  (static_cast<uint32_t>(testing_color_u_[color1]) +
                   static_cast<uint32_t>(testing_color_u_[color2])) / 2);
              current_yuv_pointer[1] = testing_color_y_[color1];
              current_yuv_pointer[2] = static_cast<uint8_t>(
                  (static_cast<uint32_t>(testing_color_v_[color1]) +
                   static_cast<uint32_t>(testing_color_v_[color2])) / 2);
              current_yuv_pointer[3] = testing_color_y_[color2];
            } else {
              current_yuv_pointer[0] = testing_color_u_[color1];
              current_yuv_pointer[1] = testing_color_y_[color1];
              current_yuv_pointer[2] = testing_color_v_[color1];
              current_yuv_pointer[3] = 0;
            }
          }
        }
        break;
      }
    }
    return image_pointer;
  }
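
  // Byte-order reference for the two packed formats produced above (a general
  // property of these FourCCs, not specific to this test); each group of four
  // bytes covers two horizontally adjacent pixels:
  //   YUY2: Y0 U0 Y1 V0
  //   UYVY: U0 Y0 V0 Y1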

  // Generate a Red-Green-Blue inter-weaving chessboard-like
  // NV12 testing image.
  // (Note: No interpolation is used.)
  // The pattern looks like c0 c1 c2 c3 ...
  //                        c1 c2 c3 c4 ...
  //                        c2 c3 c4 c5 ...
  //                        ...............
  // The size of each color block is (block_size) x (block_size).
  uint8_t* CreateFakeNV12TestingImage(int height,
                                      int width,
                                      int block_size,
                                      uint8_t*& y_pointer,
                                      uint8_t*& uv_pointer) {
    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }

    uint8_t* image_pointer =
        new uint8_t[height * width +
                    ((height + 1) / 2) * ((width + 1) / 2) * 2 + kAlignment];
    y_pointer = ALIGNP(image_pointer, kAlignment);
    uv_pointer = y_pointer + height * width;
    uint8_t* current_uv_pointer = uv_pointer;
    uint8_t* current_y_pointer = y_pointer;
    for (int j = 0; j < height; ++j) {
      for (int i = 0; i < width; ++i) {
        int color = ((i / block_size) + (j / block_size)) %
            kTestingColorNum;
        *(current_y_pointer++) = testing_color_y_[color];
      }
      if (j % 2 == 0) {
        for (int i = 0; i < width; i += 2, current_uv_pointer += 2) {
          int color = ((i / block_size) + (j / block_size)) %
              kTestingColorNum;
          current_uv_pointer[0] = testing_color_u_[color];
          current_uv_pointer[1] = testing_color_v_[color];
        }
      }
    }
    return image_pointer;
  }

  // Generate a Red-Green-Blue inter-weaving chessboard-like
  // M420 testing image.
  // (Note: No interpolation is used.)
  // The pattern looks like c0 c1 c2 c3 ...
  //                        c1 c2 c3 c4 ...
  //                        c2 c3 c4 c5 ...
  //                        ...............
  // The size of each color block is (block_size) x (block_size).
  uint8_t* CreateFakeM420TestingImage(int height,
                                      int width,
                                      int block_size,
                                      uint8_t*& m420_pointer) {
    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }

    uint8_t* image_pointer =
        new uint8_t[height * width +
                    ((height + 1) / 2) * ((width + 1) / 2) * 2 + kAlignment];
    m420_pointer = ALIGNP(image_pointer, kAlignment);
    uint8_t* current_m420_pointer = m420_pointer;
    for (int j = 0; j < height; ++j) {
      for (int i = 0; i < width; ++i) {
        int color = ((i / block_size) + (j / block_size)) %
            kTestingColorNum;
        *(current_m420_pointer++) = testing_color_y_[color];
      }
      if (j % 2 == 1) {
        for (int i = 0; i < width; i += 2, current_m420_pointer += 2) {
          int color = ((i / block_size) + ((j - 1) / block_size)) %
              kTestingColorNum;
          current_m420_pointer[0] = testing_color_u_[color];
          current_m420_pointer[1] = testing_color_v_[color];
        }
      }
    }
    return image_pointer;
  }
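
  // Layout reference for the M420 buffer built above: rows are stored in
  // groups of three, two rows of Y followed by one row of interleaved U/V
  // that covers those two rows, i.e. Y(row 0), Y(row 1), UV(rows 0-1),
  // Y(row 2), Y(row 3), UV(rows 2-3), and so on.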

  // Generate a Red-Green-Blue inter-weaving chessboard-like
  // ARGB/ABGR/RAW/BG24 testing image.
  // The pattern looks like c0 c1 c2 c3 ...
  //                        c1 c2 c3 c4 ...
  //                        c2 c3 c4 c5 ...
  //                        ...............
  // The size of each color block is (block_size) x (block_size).
  uint8_t* CreateFakeArgbTestingImage(int height,
                                      int width,
                                      int block_size,
                                      uint8_t*& argb_pointer,
                                      FourCC fourcc_type) {
    if (height <= 0 || width <= 0 || block_size <= 0) { return NULL; }
    uint8_t* image_pointer = NULL;
    if (fourcc_type == FOURCC_ABGR || fourcc_type == FOURCC_BGRA ||
        fourcc_type == FOURCC_ARGB) {
      image_pointer = new uint8_t[height * width * 4 + kAlignment];
    } else if (fourcc_type == FOURCC_RAW || fourcc_type == FOURCC_24BG) {
      image_pointer = new uint8_t[height * width * 3 + kAlignment];
    } else {
      LOG(LS_ERROR) << "Format " << static_cast<int>(fourcc_type)
                    << " is not supported.";
      return NULL;
    }
    argb_pointer = ALIGNP(image_pointer, kAlignment);
    uint8_t* current_pointer = argb_pointer;
    switch (fourcc_type) {
      case FOURCC_ARGB: {
        for (int j = 0; j < height; ++j) {
          for (int i = 0; i < width; ++i) {
            int color = ((i / block_size) + (j / block_size)) %
                kTestingColorNum;
            *(current_pointer++) = testing_color_b_[color];
            *(current_pointer++) = testing_color_g_[color];
            *(current_pointer++) = testing_color_r_[color];
            *(current_pointer++) = 255;
          }
        }
        break;
      }
      case FOURCC_ABGR: {
        for (int j = 0; j < height; ++j) {
          for (int i = 0; i < width; ++i) {
            int color = ((i / block_size) + (j / block_size)) %
                kTestingColorNum;
            *(current_pointer++) = testing_color_r_[color];
            *(current_pointer++) = testing_color_g_[color];
            *(current_pointer++) = testing_color_b_[color];
            *(current_pointer++) = 255;
          }
        }
        break;
      }
      case FOURCC_BGRA: {
        for (int j = 0; j < height; ++j) {
          for (int i = 0; i < width; ++i) {
            int color = ((i / block_size) + (j / block_size)) %
                kTestingColorNum;
            *(current_pointer++) = 255;
            *(current_pointer++) = testing_color_r_[color];
            *(current_pointer++) = testing_color_g_[color];
            *(current_pointer++) = testing_color_b_[color];
          }
        }
        break;
      }
      case FOURCC_24BG: {
        for (int j = 0; j < height; ++j) {
          for (int i = 0; i < width; ++i) {
            int color = ((i / block_size) + (j / block_size)) %
                kTestingColorNum;
            *(current_pointer++) = testing_color_b_[color];
            *(current_pointer++) = testing_color_g_[color];
            *(current_pointer++) = testing_color_r_[color];
          }
        }
        break;
      }
      case FOURCC_RAW: {
        for (int j = 0; j < height; ++j) {
          for (int i = 0; i < width; ++i) {
            int color = ((i / block_size) + (j / block_size)) %
                kTestingColorNum;
            *(current_pointer++) = testing_color_r_[color];
            *(current_pointer++) = testing_color_g_[color];
            *(current_pointer++) = testing_color_b_[color];
          }
        }
        break;
      }
      default: {
        LOG(LS_ERROR) << "Format " << static_cast<int>(fourcc_type)
                      << " is not supported.";
      }
    }
    return image_pointer;
  }

  // Check whether two memory chunks are equal, tolerating a mean squared
  // error (MSE) below the given threshold.
  static bool IsMemoryEqual(const uint8_t* ibuf,
                            const uint8_t* obuf,
                            int osize,
                            double average_error) {
    double sse = cricket::ComputeSumSquareError(ibuf, obuf, osize);
    double error = sse / osize;  // Mean squared error.
    double PSNR = cricket::ComputePSNR(sse, osize);
    LOG(LS_INFO) << "Image MSE: " << error << " Image PSNR: " << PSNR
                 << " First Diff Byte: " << FindDiff(ibuf, obuf, osize);
    return (error < average_error);
  }

  // Returns the index of the first differing byte, or -1 if none.
  // Easier to debug than memcmp.
  static int FindDiff(const uint8_t* buf1, const uint8_t* buf2, int len) {
    int i = 0;
    while (i < len && buf1[i] == buf2[i]) {
      i++;
    }
    return (i < len) ? i : -1;
  }

  // Dump the result image (ARGB format).
  void DumpArgbImage(const uint8_t* obuf, int width, int height) {
    DumpPlanarArgbTestImage(GetTestName(), obuf, width, height);
  }

  // Dump the result image (YUV420 format).
  void DumpYuvImage(const uint8_t* obuf, int width, int height) {
    DumpPlanarYuvTestImage(GetTestName(), obuf, width, height);
  }

  std::string GetTestName() {
    const testing::TestInfo* const test_info =
        testing::UnitTest::GetInstance()->current_test_info();
    std::string test_name(test_info->name());
    return test_name;
  }

  bool dump_;
  int repeat_;

  // Y, U, V and R, G, B channels of the testing colors.
  rtc::scoped_ptr<uint8_t[]> testing_color_y_;
  rtc::scoped_ptr<uint8_t[]> testing_color_u_;
  rtc::scoped_ptr<uint8_t[]> testing_color_v_;
  rtc::scoped_ptr<uint8_t[]> testing_color_r_;
  rtc::scoped_ptr<uint8_t[]> testing_color_g_;
  rtc::scoped_ptr<uint8_t[]> testing_color_b_;
};

TEST_F(PlanarFunctionsTest, I420Copy) {
  uint8_t* y_pointer = nullptr;
  uint8_t* u_pointer = nullptr;
  uint8_t* v_pointer = nullptr;
  int y_pitch = kWidth;
  int u_pitch = (kWidth + 1) >> 1;
  int v_pitch = (kWidth + 1) >> 1;
  int y_size = kHeight * kWidth;
  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
  int block_size = 3;
  // Generate a fake input image.
  rtc::scoped_ptr<uint8_t[]> yuv_input(CreateFakeYuvTestingImage(
      kHeight, kWidth, block_size, libyuv::kJpegYuv420, y_pointer, u_pointer,
      v_pointer));
  // Allocate space for the output image.
  rtc::scoped_ptr<uint8_t[]> yuv_output(
      new uint8_t[I420_SIZE(kHeight, kWidth) + kAlignment]);
  uint8_t* y_output_pointer = ALIGNP(yuv_output.get(), kAlignment);
  uint8_t* u_output_pointer = y_output_pointer + y_size;
  uint8_t* v_output_pointer = u_output_pointer + uv_size;

  for (int i = 0; i < repeat_; ++i) {
    libyuv::I420Copy(y_pointer, y_pitch,
                     u_pointer, u_pitch,
                     v_pointer, v_pitch,
                     y_output_pointer, y_pitch,
                     u_output_pointer, u_pitch,
                     v_output_pointer, v_pitch,
                     kWidth, kHeight);
  }

  // Expect the copied frame to be exactly the same.
  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_pointer,
                            I420_SIZE(kHeight, kWidth), 1.e-6));

  if (dump_) { DumpYuvImage(y_output_pointer, kWidth, kHeight); }
}

TEST_F(PlanarFunctionsTest, I422ToI420) {
  uint8_t* y_pointer = nullptr;
  uint8_t* u_pointer = nullptr;
  uint8_t* v_pointer = nullptr;
  int y_pitch = kWidth;
  int u_pitch = (kWidth + 1) >> 1;
  int v_pitch = (kWidth + 1) >> 1;
  int y_size = kHeight * kWidth;
  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
  int block_size = 2;
  // Generate a fake input image.
  rtc::scoped_ptr<uint8_t[]> yuv_input(CreateFakeYuvTestingImage(
      kHeight, kWidth, block_size, libyuv::kJpegYuv422, y_pointer, u_pointer,
      v_pointer));
  // Allocate space for the output image.
  rtc::scoped_ptr<uint8_t[]> yuv_output(
      new uint8_t[I420_SIZE(kHeight, kWidth) + kAlignment]);
  uint8_t* y_output_pointer = ALIGNP(yuv_output.get(), kAlignment);
  uint8_t* u_output_pointer = y_output_pointer + y_size;
  uint8_t* v_output_pointer = u_output_pointer + uv_size;
  // Generate the expected output.
  uint8_t* y_expected_pointer = nullptr;
  uint8_t* u_expected_pointer = nullptr;
  uint8_t* v_expected_pointer = nullptr;
  rtc::scoped_ptr<uint8_t[]> yuv_output_expected(CreateFakeYuvTestingImage(
      kHeight, kWidth, block_size, libyuv::kJpegYuv420, y_expected_pointer,
      u_expected_pointer, v_expected_pointer));

  for (int i = 0; i < repeat_; ++i) {
    libyuv::I422ToI420(y_pointer, y_pitch,
                       u_pointer, u_pitch,
                       v_pointer, v_pitch,
                       y_output_pointer, y_pitch,
                       u_output_pointer, u_pitch,
                       v_output_pointer, v_pitch,
                       kWidth, kHeight);
  }

  // Compare the output frame with what is expected; expect exactly the same.
  // Note: MSE should be set to a larger threshold if an odd block width
  // is used, since the conversion will be lossy.
  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_expected_pointer,
                            I420_SIZE(kHeight, kWidth), 1.e-6));

  if (dump_) { DumpYuvImage(y_output_pointer, kWidth, kHeight); }
}

TEST_P(PlanarFunctionsTest, M420ToI420) {
  // Get the unalignment offset.
  int unalignment = GetParam();
  uint8_t* m420_pointer = NULL;
  int y_pitch = kWidth;
  int m420_pitch = kWidth;
  int u_pitch = (kWidth + 1) >> 1;
  int v_pitch = (kWidth + 1) >> 1;
  int y_size = kHeight * kWidth;
  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
  int block_size = 2;
  // Generate a fake input image.
  rtc::scoped_ptr<uint8_t[]> yuv_input(
      CreateFakeM420TestingImage(kHeight, kWidth, block_size, m420_pointer));
  // Allocate space for the output image.
  rtc::scoped_ptr<uint8_t[]> yuv_output(
      new uint8_t[I420_SIZE(kHeight, kWidth) + kAlignment + unalignment]);
  uint8_t* y_output_pointer =
      ALIGNP(yuv_output.get(), kAlignment) + unalignment;
  uint8_t* u_output_pointer = y_output_pointer + y_size;
  uint8_t* v_output_pointer = u_output_pointer + uv_size;
  // Generate the expected output.
  uint8_t* y_expected_pointer = nullptr;
  uint8_t* u_expected_pointer = nullptr;
  uint8_t* v_expected_pointer = nullptr;
  rtc::scoped_ptr<uint8_t[]> yuv_output_expected(CreateFakeYuvTestingImage(
      kHeight, kWidth, block_size, libyuv::kJpegYuv420, y_expected_pointer,
      u_expected_pointer, v_expected_pointer));

  for (int i = 0; i < repeat_; ++i) {
    libyuv::M420ToI420(m420_pointer, m420_pitch,
                       y_output_pointer, y_pitch,
                       u_output_pointer, u_pitch,
                       v_output_pointer, v_pitch,
                       kWidth, kHeight);
  }
  // Compare the output frame with what is expected; expect exactly the same.
  // Note: MSE should be set to a larger threshold if an odd block width
  // is used, since the conversion will be lossy.
  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_expected_pointer,
                            I420_SIZE(kHeight, kWidth), 1.e-6));

  if (dump_) { DumpYuvImage(y_output_pointer, kWidth, kHeight); }
}

TEST_P(PlanarFunctionsTest, NV12ToI420) {
  // Get the unalignment offset.
  int unalignment = GetParam();
  uint8_t* y_pointer = nullptr;
  uint8_t* uv_pointer = nullptr;
  int y_pitch = kWidth;
  int uv_pitch = 2 * ((kWidth + 1) >> 1);
  int u_pitch = (kWidth + 1) >> 1;
  int v_pitch = (kWidth + 1) >> 1;
  int y_size = kHeight * kWidth;
  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);
  int block_size = 2;
  // Generate a fake input image.
  rtc::scoped_ptr<uint8_t[]> yuv_input(CreateFakeNV12TestingImage(
      kHeight, kWidth, block_size, y_pointer, uv_pointer));
  // Allocate space for the output image.
  rtc::scoped_ptr<uint8_t[]> yuv_output(
      new uint8_t[I420_SIZE(kHeight, kWidth) + kAlignment + unalignment]);
  uint8_t* y_output_pointer =
      ALIGNP(yuv_output.get(), kAlignment) + unalignment;
  uint8_t* u_output_pointer = y_output_pointer + y_size;
  uint8_t* v_output_pointer = u_output_pointer + uv_size;
  // Generate the expected output.
  uint8_t* y_expected_pointer = nullptr;
  uint8_t* u_expected_pointer = nullptr;
  uint8_t* v_expected_pointer = nullptr;
  rtc::scoped_ptr<uint8_t[]> yuv_output_expected(CreateFakeYuvTestingImage(
      kHeight, kWidth, block_size, libyuv::kJpegYuv420, y_expected_pointer,
      u_expected_pointer, v_expected_pointer));

  for (int i = 0; i < repeat_; ++i) {
    libyuv::NV12ToI420(y_pointer, y_pitch,
                       uv_pointer, uv_pitch,
                       y_output_pointer, y_pitch,
                       u_output_pointer, u_pitch,
                       v_output_pointer, v_pitch,
                       kWidth, kHeight);
  }
  // Compare the output frame with what is expected; expect exactly the same.
  // Note: MSE should be set to a larger threshold if an odd block width
  // is used, since the conversion will be lossy.
  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_expected_pointer,
                            I420_SIZE(kHeight, kWidth), 1.e-6));

  if (dump_) { DumpYuvImage(y_output_pointer, kWidth, kHeight); }
}

// A common macro for testing conversion from YUY2/UYVY to I420.
#define TEST_YUVTOI420(SRC_NAME, MSE, BLOCK_SIZE) \
  TEST_P(PlanarFunctionsTest, SRC_NAME##ToI420) { \
    /* Get the unalignment offset.*/ \
    int unalignment = GetParam(); \
    uint8_t* yuv_pointer = nullptr; \
    int yuv_pitch = 2 * ((kWidth + 1) & ~1); \
    int y_pitch = kWidth; \
    int u_pitch = (kWidth + 1) >> 1; \
    int v_pitch = (kWidth + 1) >> 1; \
    int y_size = kHeight * kWidth; \
    int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1); \
    int block_size = BLOCK_SIZE; \
    /* Generate a fake input image.*/ \
    rtc::scoped_ptr<uint8_t[]> yuv_input(CreateFakeInterleaveYuvTestingImage( \
        kHeight, kWidth, BLOCK_SIZE, yuv_pointer, FOURCC_##SRC_NAME)); \
    /* Allocate space for the output image.*/ \
    rtc::scoped_ptr<uint8_t[]> yuv_output( \
        new uint8_t[I420_SIZE(kHeight, kWidth) + kAlignment + unalignment]); \
    uint8_t* y_output_pointer = \
        ALIGNP(yuv_output.get(), kAlignment) + unalignment; \
    uint8_t* u_output_pointer = y_output_pointer + y_size; \
    uint8_t* v_output_pointer = u_output_pointer + uv_size; \
    /* Generate the expected output.*/ \
    uint8_t* y_expected_pointer = nullptr; \
    uint8_t* u_expected_pointer = nullptr; \
    uint8_t* v_expected_pointer = nullptr; \
    rtc::scoped_ptr<uint8_t[]> yuv_output_expected(CreateFakeYuvTestingImage( \
        kHeight, kWidth, block_size, libyuv::kJpegYuv420, y_expected_pointer, \
        u_expected_pointer, v_expected_pointer)); \
    for (int i = 0; i < repeat_; ++i) { \
      libyuv::SRC_NAME##ToI420(yuv_pointer, yuv_pitch, y_output_pointer, \
                               y_pitch, u_output_pointer, u_pitch, \
                               v_output_pointer, v_pitch, kWidth, kHeight); \
    } \
    /* Compare the output frame with what is expected.*/ \
    /* Note: MSE should be set to a larger threshold if an odd block width*/ \
    /* is used, since the conversion will be lossy.*/ \
    EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_expected_pointer, \
                              I420_SIZE(kHeight, kWidth), MSE)); \
    if (dump_) { \
      DumpYuvImage(y_output_pointer, kWidth, kHeight); \
    } \
  }

// TEST_P(PlanarFunctionsTest, YUY2ToI420)
TEST_YUVTOI420(YUY2, 1.e-6, 2);
// TEST_P(PlanarFunctionsTest, UYVYToI420)
TEST_YUVTOI420(UYVY, 1.e-6, 2);

// A common macro for testing conversion from I420/I422/I444 to ARGB,
// BGRA, and ABGR.
#define TEST_YUVTORGB(SRC_NAME, DST_NAME, JPG_TYPE, MSE, BLOCK_SIZE) \
  TEST_F(PlanarFunctionsTest, SRC_NAME##To##DST_NAME) { \
    uint8_t* y_pointer = nullptr; \
    uint8_t* u_pointer = nullptr; \
    uint8_t* v_pointer = nullptr; \
    uint8_t* argb_expected_pointer = NULL; \
    int y_pitch = kWidth; \
    int u_pitch = (kWidth + 1) >> 1; \
    int v_pitch = (kWidth + 1) >> 1; \
    /* Generate a fake input image.*/ \
    rtc::scoped_ptr<uint8_t[]> yuv_input( \
        CreateFakeYuvTestingImage(kHeight, kWidth, BLOCK_SIZE, JPG_TYPE, \
                                  y_pointer, u_pointer, v_pointer)); \
    /* Generate the expected output.*/ \
    rtc::scoped_ptr<uint8_t[]> argb_expected( \
        CreateFakeArgbTestingImage(kHeight, kWidth, BLOCK_SIZE, \
                                   argb_expected_pointer, FOURCC_##DST_NAME)); \
    /* Allocate space for the output.*/ \
    rtc::scoped_ptr<uint8_t[]> argb_output( \
        new uint8_t[kHeight * kWidth * 4 + kAlignment]); \
    uint8_t* argb_pointer = ALIGNP(argb_output.get(), kAlignment); \
    for (int i = 0; i < repeat_; ++i) { \
      libyuv::SRC_NAME##To##DST_NAME(y_pointer, y_pitch, u_pointer, u_pitch, \
                                     v_pointer, v_pitch, argb_pointer, \
                                     kWidth * 4, kWidth, kHeight); \
    } \
    EXPECT_TRUE(IsMemoryEqual(argb_expected_pointer, argb_pointer, \
                              kHeight * kWidth * 4, MSE)); \
    if (dump_) { \
      DumpArgbImage(argb_pointer, kWidth, kHeight); \
    } \
  }

// TEST_F(PlanarFunctionsTest, I420ToARGB)
TEST_YUVTORGB(I420, ARGB, libyuv::kJpegYuv420, 3., 2);
// TEST_F(PlanarFunctionsTest, I420ToABGR)
TEST_YUVTORGB(I420, ABGR, libyuv::kJpegYuv420, 3., 2);
// TEST_F(PlanarFunctionsTest, I420ToBGRA)
TEST_YUVTORGB(I420, BGRA, libyuv::kJpegYuv420, 3., 2);
// TEST_F(PlanarFunctionsTest, I422ToARGB)
TEST_YUVTORGB(I422, ARGB, libyuv::kJpegYuv422, 3., 2);
// TEST_F(PlanarFunctionsTest, I444ToARGB)
TEST_YUVTORGB(I444, ARGB, libyuv::kJpegYuv444, 3., 3);
// Note: an empirical MSE tolerance of 3.0 is used here for the probable
// error from float-to-uint8_t type conversion.

TEST_F(PlanarFunctionsTest, I400ToARGB_Reference) {
  uint8_t* y_pointer = nullptr;
  uint8_t* u_pointer = nullptr;
  uint8_t* v_pointer = nullptr;
  int y_pitch = kWidth;
  int u_pitch = (kWidth + 1) >> 1;
  int v_pitch = (kWidth + 1) >> 1;
  int block_size = 3;
  // Generate a fake input image.
  rtc::scoped_ptr<uint8_t[]> yuv_input(CreateFakeYuvTestingImage(
      kHeight, kWidth, block_size, libyuv::kJpegYuv420, y_pointer, u_pointer,
      v_pointer));
  // As the comparison standard, we convert a grayscale image (by setting both
  // U and V channels to 128) using an I420 converter.
  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);

  rtc::scoped_ptr<uint8_t[]> uv(new uint8_t[uv_size + kAlignment]);
  u_pointer = v_pointer = ALIGNP(uv.get(), kAlignment);
  memset(u_pointer, 128, uv_size);

  // Allocate space for the output image and generate the expected output.
  rtc::scoped_ptr<uint8_t[]> argb_expected(
      new uint8_t[kHeight * kWidth * 4 + kAlignment]);
  rtc::scoped_ptr<uint8_t[]> argb_output(
      new uint8_t[kHeight * kWidth * 4 + kAlignment]);
  uint8_t* argb_expected_pointer = ALIGNP(argb_expected.get(), kAlignment);
  uint8_t* argb_pointer = ALIGNP(argb_output.get(), kAlignment);

  libyuv::I420ToARGB(y_pointer, y_pitch,
                     u_pointer, u_pitch,
                     v_pointer, v_pitch,
                     argb_expected_pointer, kWidth * 4,
                     kWidth, kHeight);
  for (int i = 0; i < repeat_; ++i) {
    libyuv::I400ToARGB_Reference(y_pointer, y_pitch,
                                 argb_pointer, kWidth * 4,
                                 kWidth, kHeight);
  }

  // Note: I420ToARGB and I400ToARGB_Reference should produce nearly
  // identical results.
  EXPECT_TRUE(IsMemoryEqual(argb_expected_pointer, argb_pointer,
                            kHeight * kWidth * 4, 2.));
  if (dump_) { DumpArgbImage(argb_pointer, kWidth, kHeight); }
}

TEST_P(PlanarFunctionsTest, I400ToARGB) {
  // Get the unalignment offset.
  int unalignment = GetParam();
  uint8_t* y_pointer = nullptr;
  uint8_t* u_pointer = nullptr;
  uint8_t* v_pointer = nullptr;
  int y_pitch = kWidth;
  int u_pitch = (kWidth + 1) >> 1;
  int v_pitch = (kWidth + 1) >> 1;
  int block_size = 3;
  // Generate a fake input image.
  rtc::scoped_ptr<uint8_t[]> yuv_input(CreateFakeYuvTestingImage(
      kHeight, kWidth, block_size, libyuv::kJpegYuv420, y_pointer, u_pointer,
      v_pointer));
  // As the comparison standard, we convert a grayscale image (by setting both
  // U and V channels to 128) using an I420 converter.
  int uv_size = ((kHeight + 1) >> 1) * ((kWidth + 1) >> 1);

  // Allocate separate U and V planes filled with 128 (neutral chroma).
  rtc::scoped_ptr<uint8_t[]> uv(new uint8_t[uv_size * 2 + kAlignment]);
  u_pointer = ALIGNP(uv.get(), kAlignment);
  v_pointer = u_pointer + uv_size;
  memset(u_pointer, 128, uv_size);
  memset(v_pointer, 128, uv_size);

  // Allocate space for the output image and generate the expected output.
  rtc::scoped_ptr<uint8_t[]> argb_expected(
      new uint8_t[kHeight * kWidth * 4 + kAlignment]);
  // 1 byte extra if in the misaligned mode.
  rtc::scoped_ptr<uint8_t[]> argb_output(
      new uint8_t[kHeight * kWidth * 4 + kAlignment + unalignment]);
  uint8_t* argb_expected_pointer = ALIGNP(argb_expected.get(), kAlignment);
  uint8_t* argb_pointer = ALIGNP(argb_output.get(), kAlignment) + unalignment;

  libyuv::I420ToARGB(y_pointer, y_pitch,
                     u_pointer, u_pitch,
                     v_pointer, v_pitch,
                     argb_expected_pointer, kWidth * 4,
                     kWidth, kHeight);
  for (int i = 0; i < repeat_; ++i) {
    libyuv::I400ToARGB(y_pointer, y_pitch,
                       argb_pointer, kWidth * 4,
                       kWidth, kHeight);
  }

  // Note: the current I400ToARGB uses an approximate method,
  // so the error tolerance is larger here.
  EXPECT_TRUE(IsMemoryEqual(argb_expected_pointer, argb_pointer,
                            kHeight * kWidth * 4, 64.0));
  if (dump_) { DumpArgbImage(argb_pointer, kWidth, kHeight); }
}

TEST_P(PlanarFunctionsTest, ARGBToI400) {
  // Get the unalignment offset.
  int unalignment = GetParam();
  // Create a fake ARGB input image.
  uint8_t* y_pointer = NULL, * u_pointer = NULL, * v_pointer = NULL;
  uint8_t* argb_pointer = NULL;
  int block_size = 3;
  // Generate a fake input image.
  rtc::scoped_ptr<uint8_t[]> argb_input(CreateFakeArgbTestingImage(
      kHeight, kWidth, block_size, argb_pointer, FOURCC_ARGB));
  // Generate the expected output; only the Y channel is used.
  rtc::scoped_ptr<uint8_t[]> yuv_expected(CreateFakeYuvTestingImage(
      kHeight, kWidth, block_size, libyuv::kJpegYuv420, y_pointer, u_pointer,
      v_pointer));
  // Allocate space for the Y output.
  rtc::scoped_ptr<uint8_t[]> y_output(
      new uint8_t[kHeight * kWidth + kAlignment + unalignment]);
  uint8_t* y_output_pointer = ALIGNP(y_output.get(), kAlignment) + unalignment;

  for (int i = 0; i < repeat_; ++i) {
    libyuv::ARGBToI400(argb_pointer, kWidth * 4, y_output_pointer, kWidth,
                       kWidth, kHeight);
  }
  // Check if the output matches the input Y channel.
  // Note: an empirical MSE tolerance of 2.0 is used here for the probable
  // error from float-to-uint8_t type conversion.
  EXPECT_TRUE(IsMemoryEqual(y_output_pointer, y_pointer,
                            kHeight * kWidth, 2.));
  if (dump_) { DumpArgbImage(argb_pointer, kWidth, kHeight); }
}

// A common macro for testing conversion from RAW, BG24, BGRA, and ABGR
// to ARGB.
#define TEST_ARGB(SRC_NAME, FC_ID, BPP, BLOCK_SIZE) \
  TEST_P(PlanarFunctionsTest, SRC_NAME##ToARGB) { \
    int unalignment = GetParam(); /* Get the unalignment offset.*/ \
    uint8_t* argb_expected_pointer = NULL, * src_pointer = NULL; \
    /* Generate a fake input image.*/ \
    rtc::scoped_ptr<uint8_t[]> src_input(CreateFakeArgbTestingImage( \
        kHeight, kWidth, BLOCK_SIZE, src_pointer, FOURCC_##FC_ID)); \
    /* Generate the expected output.*/ \
    rtc::scoped_ptr<uint8_t[]> argb_expected(CreateFakeArgbTestingImage( \
        kHeight, kWidth, BLOCK_SIZE, argb_expected_pointer, FOURCC_ARGB)); \
    /* Allocate space for the output; 1 byte extra if in the unaligned mode.*/ \
    rtc::scoped_ptr<uint8_t[]> argb_output( \
        new uint8_t[kHeight * kWidth * 4 + kAlignment + unalignment]); \
    uint8_t* argb_pointer = \
        ALIGNP(argb_output.get(), kAlignment) + unalignment; \
    for (int i = 0; i < repeat_; ++i) { \
      libyuv::SRC_NAME##ToARGB(src_pointer, kWidth * (BPP), argb_pointer, \
                               kWidth * 4, kWidth, kHeight); \
    } \
    /* Compare the result; expect identical.*/ \
    EXPECT_TRUE(IsMemoryEqual(argb_expected_pointer, argb_pointer, \
                              kHeight * kWidth * 4, 1.e-6)); \
    if (dump_) { \
      DumpArgbImage(argb_pointer, kWidth, kHeight); \
    } \
  }

TEST_ARGB(RAW, RAW, 3, 3);    // TEST_P(PlanarFunctionsTest, RAWToARGB)
TEST_ARGB(BG24, 24BG, 3, 3);  // TEST_P(PlanarFunctionsTest, BG24ToARGB)
TEST_ARGB(ABGR, ABGR, 4, 3);  // TEST_P(PlanarFunctionsTest, ABGRToARGB)
TEST_ARGB(BGRA, BGRA, 4, 3);  // TEST_P(PlanarFunctionsTest, BGRAToARGB)

// Parameter test: the parameter is the unalignment offset.
// Aligned data for testing assembly versions.
INSTANTIATE_TEST_CASE_P(PlanarFunctionsAligned, PlanarFunctionsTest,
                        ::testing::Values(0));

// Purposely unalign the output argb pointer to test the slow path (C version).
INSTANTIATE_TEST_CASE_P(PlanarFunctionsMisaligned, PlanarFunctionsTest,
                        ::testing::Values(1));

}  // namespace cricket
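
// Usage sketch (the test binary name below is an assumption; use whatever
// target builds this file in the local checkout):
//   libjingle_media_unittest --gtest_filter='PlanarFunctions*' --planarfunctions_dump=true --planarfunctions_repeat=100
// The aligned/misaligned instantiations above run each TEST_P twice: once with
// a 16-byte-aligned output pointer and once with the pointer offset by 1 byte.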