/*
 * libjingle
 * Copyright 2010 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <iomanip>  // std::setw, std::setprecision
#include <sstream>

#include "libyuv/cpu_id.h"
#include "libyuv/scale.h"
#include "talk/media/base/testutils.h"
#include "webrtc/base/basictypes.h"
#include "webrtc/base/flags.h"
#include "webrtc/base/gunit.h"
#include "webrtc/base/scoped_ptr.h"

#if defined(_MSC_VER)
#define ALIGN16(var) __declspec(align(16)) var
#else
#define ALIGN16(var) var __attribute__((aligned(16)))
#endif

using cricket::LoadPlanarYuvTestImage;
using cricket::DumpPlanarYuvTestImage;
using rtc::scoped_ptr;

DEFINE_bool(yuvscaler_dump, false,
            "whether to write out scaled images for inspection");
DEFINE_int(yuvscaler_repeat, 1,
           "how many times to perform each scaling operation (for perf testing)");

static const int kAlignment = 16;

// TEST_UNCACHED flushes the cache to test real memory performance.
// TEST_RSTSC uses CPU cycles for a more accurate benchmark of the scale
// function.
#ifndef __arm__
// #define TEST_UNCACHED 1
// #define TEST_RSTSC 1
#endif
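
// When enabled, the helpers below implement the benchmark support:
// __rdtsc() reads the CPU time-stamp counter (EDX:EAX) so each scaling pass
// can be timed in cycles, _mm_clflush() evicts the cache line containing its
// argument, and FlushCache() walks a buffer in 32-byte steps so every line of
// it is evicted before the next pass.  The inline-asm fallbacks are only
// compiled for 32-bit GCC builds, where the corresponding compiler intrinsics
// may not be available.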

#if defined(TEST_UNCACHED) || defined(TEST_RSTSC)
#ifdef _MSC_VER
#include <emmintrin.h>  // NOLINT
#endif

#if defined(__GNUC__) && defined(__i386__)
static inline uint64_t __rdtsc(void) {
  uint32_t a, d;
  __asm__ volatile("rdtsc" : "=a" (a), "=d" (d));
  return (static_cast<uint64_t>(d) << 32) + a;
}

static inline void _mm_clflush(volatile void *__p) {
  asm volatile("clflush %0" : "+m" (*(volatile char *)__p));
}
#endif

static void FlushCache(uint8_t* dst, int count) {
  while (count >= 32) {
    _mm_clflush(dst);
    dst += 32;
    count -= 32;
  }
}
#endif

class YuvScalerTest : public testing::Test {
 protected:
  virtual void SetUp() {
    dump_ = *rtc::FlagList::Lookup("yuvscaler_dump")->bool_variable();
    repeat_ = *rtc::FlagList::Lookup("yuvscaler_repeat")->int_variable();
  }

  // Scale an image and compare against a Lanczos-filtered test image.
  // Lanczos is considered to be the "ideal" image resampling method, so we try
  // to get as close to that as possible, while being as fast as possible.
  bool TestScale(int iw, int ih, int ow, int oh, int offset, bool usefile,
                 bool optimize, int cpuflags, bool interpolate,
                 int memoffset, double* error) {
    *error = 0.;
    size_t isize = I420_SIZE(iw, ih);
    size_t osize = I420_SIZE(ow, oh);
    scoped_ptr<uint8_t[]> ibuffer(
        new uint8_t[isize + kAlignment + memoffset]());
    scoped_ptr<uint8_t[]> obuffer(
        new uint8_t[osize + kAlignment + memoffset]());
    scoped_ptr<uint8_t[]> xbuffer(
        new uint8_t[osize + kAlignment + memoffset]());

    uint8_t* ibuf = ALIGNP(ibuffer.get(), kAlignment) + memoffset;
    uint8_t* obuf = ALIGNP(obuffer.get(), kAlignment) + memoffset;
    uint8_t* xbuf = ALIGNP(xbuffer.get(), kAlignment) + memoffset;
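
    // Buffer layout notes (assuming I420_SIZE(w, h) expands to w * h * 3 / 2,
    // i.e. a full-resolution Y plane followed by quarter-resolution U and V
    // planes): for the default 640x360 input this is
    //   640 * 360 + 2 * (320 * 180) = 345600 bytes.
    // ALIGNP() presumably rounds the raw allocation up to the next
    // kAlignment-byte boundary, roughly
    //   (reinterpret_cast<uintptr_t>(p) + 15) & ~uintptr_t(15)
    // for kAlignment == 16, and memoffset then shifts the pointers by that
    // many bytes (1 in the *Unaligned tests) to deliberately exercise the
    // unaligned code paths.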

    if (usefile) {
      if (!LoadPlanarYuvTestImage("faces", iw, ih, ibuf) ||
          !LoadPlanarYuvTestImage("faces", ow, oh, xbuf)) {
        LOG(LS_ERROR) << "Failed to load image";
        return false;
      }
    } else {
      // These are used to test huge images.
      memset(ibuf, 213, isize);  // Input is constant color.
      memset(obuf, 100, osize);  // Output set to something wrong for now.
      memset(xbuf, 213, osize);  // Expected result.
    }

#ifdef TEST_UNCACHED
    FlushCache(ibuf, isize);
    FlushCache(obuf, osize);
    FlushCache(xbuf, osize);
#endif

    // Scale the image.
    // cpuflags masks the CPU optimizations libyuv may use; ALLFLAGS (-1)
    // leaves auto-detection untouched.
    // TODO(fbarchard): set flags for libyuv
    libyuv::MaskCpuFlags(cpuflags);
#ifdef TEST_RSTSC
    uint64_t t = 0;
#endif
    for (int i = 0; i < repeat_; ++i) {
#ifdef TEST_UNCACHED
      FlushCache(ibuf, isize);
      FlushCache(obuf, osize);
#endif
#ifdef TEST_RSTSC
      uint64_t t1 = __rdtsc();
#endif
      EXPECT_EQ(0, libyuv::ScaleOffset(ibuf, iw, ih, obuf, ow, oh,
                                       offset, interpolate));
#ifdef TEST_RSTSC
      uint64_t t2 = __rdtsc();
      t += t2 - t1;
#endif
    }

#ifdef TEST_RSTSC
    LOG(LS_INFO) << "Time: " << std::setw(9) << t;
#endif

    if (dump_) {
      const testing::TestInfo* const test_info =
          testing::UnitTest::GetInstance()->current_test_info();
      std::string test_name(test_info->name());
      DumpPlanarYuvTestImage(test_name, obuf, ow, oh);
    }

    double sse = cricket::ComputeSumSquareError(obuf, xbuf, osize);
    *error = sse / osize;  // Mean Squared Error.
    double PSNR = cricket::ComputePSNR(sse, osize);
    LOG(LS_INFO) << "Image MSE: "
                 << std::setw(6) << std::setprecision(4) << *error
                 << " Image PSNR: " << PSNR;
    return true;
  }

  // Returns the index of the first differing byte, or -1 if the buffers are
  // identical.  Easier to debug than memcmp.
  static int FindDiff(const uint8_t* buf1, const uint8_t* buf2, int len) {
    int i = 0;
    while (i < len && buf1[i] == buf2[i]) {
      i++;
    }
    return (i < len) ? i : -1;
  }

 protected:
  bool dump_;
  int repeat_;
};

// Tests straight copy of data.
TEST_F(YuvScalerTest, TestCopy) {
  const int iw = 640, ih = 360;
  const int ow = 640, oh = 360;
  ALIGN16(uint8_t ibuf[I420_SIZE(iw, ih)]);
  ALIGN16(uint8_t obuf[I420_SIZE(ow, oh)]);

  // Load the frame, scale it, check it.
  ASSERT_TRUE(LoadPlanarYuvTestImage("faces", iw, ih, ibuf));
  for (int i = 0; i < repeat_; ++i) {
    libyuv::ScaleOffset(ibuf, iw, ih, obuf, ow, oh, 0, false);
  }
  if (dump_) DumpPlanarYuvTestImage("TestCopy", obuf, ow, oh);
  EXPECT_EQ(-1, FindDiff(obuf, ibuf, sizeof(ibuf)));
}

// Tests copying a 16:9 frame into the center of a taller 4:3 frame.
TEST_F(YuvScalerTest, TestOffset16_10Copy) {
  const int iw = 640, ih = 360;
  const int ow = 640, oh = 480;
  const int offset = (480 - 360) / 2;
  scoped_ptr<uint8_t[]> ibuffer(new uint8_t[I420_SIZE(iw, ih) + kAlignment]);
  scoped_ptr<uint8_t[]> obuffer(new uint8_t[I420_SIZE(ow, oh) + kAlignment]);

  uint8_t* ibuf = ALIGNP(ibuffer.get(), kAlignment);
  uint8_t* obuf = ALIGNP(obuffer.get(), kAlignment);

  // Load the frame, scale it, check it.
  ASSERT_TRUE(LoadPlanarYuvTestImage("faces", iw, ih, ibuf));

  // Clear to black, which is Y = 0 and U = V = 128.
  memset(obuf, 0, ow * oh);
  memset(obuf + ow * oh, 128, ow * oh / 2);
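
  // The checks below rely on the I420 plane layout of obuf: the Y plane
  // starts at obuf (ow bytes per row, so offset rows = ow * offset bytes),
  // the U plane starts at obuf + ow * oh, and the V plane starts at
  // obuf + ow * oh * 5 / 4.  The chroma planes are subsampled 2x2, so the
  // same vertical offset is (ow / 2) * (offset / 2) == ow * offset / 4 bytes.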
  for (int i = 0; i < repeat_; ++i) {
    libyuv::ScaleOffset(ibuf, iw, ih, obuf, ow, oh, offset, false);
  }
  if (dump_) DumpPlanarYuvTestImage("TestOffsetCopy16_9", obuf, ow, oh);
  EXPECT_EQ(-1, FindDiff(obuf + ow * offset,
                         ibuf,
                         iw * ih));
  EXPECT_EQ(-1, FindDiff(obuf + ow * oh + ow * offset / 4,
                         ibuf + iw * ih,
                         iw * ih / 4));
  EXPECT_EQ(-1, FindDiff(obuf + ow * oh * 5 / 4 + ow * offset / 4,
                         ibuf + iw * ih * 5 / 4,
                         iw * ih / 4));
}

// The following are 'cpu' flag values:
// Allow all SIMD optimizations.
#define ALLFLAGS -1
// Disable SSSE3 but allow other forms of SIMD (SSE2).
#define NOSSSE3 (~libyuv::kCpuHasSSSE3)
// Disable SSE2 and SSSE3.
#define NOSSE (~libyuv::kCpuHasSSE2 & ~libyuv::kCpuHasSSSE3)
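
// These masks are passed to TestScale() as cpuflags and forwarded to
// libyuv::MaskCpuFlags(), which presumably ANDs them with the detected CPU
// features.  Illustrative usage (not part of the tests):
//   libyuv::MaskCpuFlags(NOSSE);     // force the portable C scaling paths
//   libyuv::MaskCpuFlags(ALLFLAGS);  // re-enable everything that was detected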

// TEST_M tests a scale factor with variations of optimization (Ref vs Opt),
// CPU feature masking, buffer alignment, and interpolation (Int).
#define TEST_M(name, iwidth, iheight, owidth, oheight, mse) \
  TEST_F(YuvScalerTest, name##Ref) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, true, false, ALLFLAGS, false, 0, &error)); \
    EXPECT_LE(error, mse); \
  } \
  TEST_F(YuvScalerTest, name##OptAligned) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, true, true, ALLFLAGS, false, 0, &error)); \
    EXPECT_LE(error, mse); \
  } \
  TEST_F(YuvScalerTest, name##OptUnaligned) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, true, true, ALLFLAGS, false, 1, &error)); \
    EXPECT_LE(error, mse); \
  } \
  TEST_F(YuvScalerTest, name##OptSSE2) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, true, true, NOSSSE3, false, 0, &error)); \
    EXPECT_LE(error, mse); \
  } \
  TEST_F(YuvScalerTest, name##OptC) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, true, true, NOSSE, false, 0, &error)); \
    EXPECT_LE(error, mse); \
  } \
  TEST_F(YuvScalerTest, name##IntRef) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, true, false, ALLFLAGS, true, 0, &error)); \
    EXPECT_LE(error, mse); \
  } \
  TEST_F(YuvScalerTest, name##IntOptAligned) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, true, true, ALLFLAGS, true, 0, &error)); \
    EXPECT_LE(error, mse); \
  } \
  TEST_F(YuvScalerTest, name##IntOptUnaligned) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, true, true, ALLFLAGS, true, 1, &error)); \
    EXPECT_LE(error, mse); \
  } \
  TEST_F(YuvScalerTest, name##IntOptSSE2) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, true, true, NOSSSE3, true, 0, &error)); \
    EXPECT_LE(error, mse); \
  } \
  TEST_F(YuvScalerTest, name##IntOptC) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, true, true, NOSSE, true, 0, &error)); \
    EXPECT_LE(error, mse); \
  }

#define TEST_H(name, iwidth, iheight, owidth, oheight, opt, cpu, intr, mse) \
  TEST_F(YuvScalerTest, name) { \
    double error; \
    EXPECT_TRUE(TestScale(iwidth, iheight, owidth, oheight, \
                          0, false, opt, cpu, intr, 0, &error)); \
    EXPECT_LE(error, mse); \
  }
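
// In both macros the final 'mse' argument is the allowed mean squared error
// against the expected image.  Assuming cricket::ComputePSNR() uses the
// standard 10 * log10(255^2 / MSE) formula, an MSE bound of 60 corresponds to
// roughly 30.3 dB PSNR and a bound of 150 to roughly 26.4 dB.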

// Test 4x3 aspect ratio scaling.

// Tests 1/1x scale down.
TEST_M(TestScale4by3Down11, 640, 480, 640, 480, 0)

// Tests 3/4x scale down.
TEST_M(TestScale4by3Down34, 640, 480, 480, 360, 60)

// Tests 1/2x scale down.
TEST_M(TestScale4by3Down12, 640, 480, 320, 240, 60)

// Tests 3/8x scale down.
TEST_M(TestScale4by3Down38, 640, 480, 240, 180, 60)

// Tests 1/4x scale down.
TEST_M(TestScale4by3Down14, 640, 480, 160, 120, 60)

// Tests 3/16x scale down.
TEST_M(TestScale4by3Down316, 640, 480, 120, 90, 120)

// Tests 1/8x scale down.
TEST_M(TestScale4by3Down18, 640, 480, 80, 60, 150)

// Tests 2/3x scale down.
TEST_M(TestScale4by3Down23, 480, 360, 320, 240, 60)

// Tests 4/3x scale up.
TEST_M(TestScale4by3Up43, 480, 360, 640, 480, 60)

// Tests 2/1x scale up.
TEST_M(TestScale4by3Up21, 320, 240, 640, 480, 60)

// Tests 4/1x scale up.
TEST_M(TestScale4by3Up41, 160, 120, 640, 480, 80)

// Test 16x10 aspect ratio scaling.

// Tests 1/1x scale down.
TEST_M(TestScale16by10Down11, 640, 400, 640, 400, 0)

// Tests 3/4x scale down.
TEST_M(TestScale16by10Down34, 640, 400, 480, 300, 60)

// Tests 1/2x scale down.
TEST_M(TestScale16by10Down12, 640, 400, 320, 200, 60)

// Tests 3/8x scale down.
TEST_M(TestScale16by10Down38, 640, 400, 240, 150, 60)

// Tests 1/4x scale down.
TEST_M(TestScale16by10Down14, 640, 400, 160, 100, 60)

// Tests 3/16x scale down.
TEST_M(TestScale16by10Down316, 640, 400, 120, 75, 120)

// Tests 1/8x scale down.
TEST_M(TestScale16by10Down18, 640, 400, 80, 50, 150)

// Tests 2/3x scale down.
TEST_M(TestScale16by10Down23, 480, 300, 320, 200, 60)

// Tests 4/3x scale up.
TEST_M(TestScale16by10Up43, 480, 300, 640, 400, 60)

// Tests 2/1x scale up.
TEST_M(TestScale16by10Up21, 320, 200, 640, 400, 60)

// Tests 4/1x scale up.
TEST_M(TestScale16by10Up41, 160, 100, 640, 400, 80)

// Test 16x9 aspect ratio scaling.

// Tests 1/1x scale down.
TEST_M(TestScaleDown11, 640, 360, 640, 360, 0)

// Tests 3/4x scale down.
TEST_M(TestScaleDown34, 640, 360, 480, 270, 60)

// Tests 1/2x scale down.
TEST_M(TestScaleDown12, 640, 360, 320, 180, 60)

// Tests 3/8x scale down.
TEST_M(TestScaleDown38, 640, 360, 240, 135, 60)

// Tests 1/4x scale down.
TEST_M(TestScaleDown14, 640, 360, 160, 90, 60)

// Tests 3/16x scale down.
TEST_M(TestScaleDown316, 640, 360, 120, 68, 120)

// Tests 1/8x scale down.
TEST_M(TestScaleDown18, 640, 360, 80, 45, 150)

// Tests 2/3x scale down.
TEST_M(TestScaleDown23, 480, 270, 320, 180, 60)

// Tests 4/3x scale up.
TEST_M(TestScaleUp43, 480, 270, 640, 360, 60)

// Tests 2/1x scale up.
TEST_M(TestScaleUp21, 320, 180, 640, 360, 60)

// Tests 4/1x scale up.
TEST_M(TestScaleUp41, 160, 90, 640, 360, 80)

// Test HD 4x3 aspect ratio scaling.

// Tests 1/1x scale down.
TEST_M(TestScaleHD4x3Down11, 1280, 960, 1280, 960, 0)

// Tests 3/4x scale down.
TEST_M(TestScaleHD4x3Down34, 1280, 960, 960, 720, 60)

// Tests 1/2x scale down.
TEST_M(TestScaleHD4x3Down12, 1280, 960, 640, 480, 60)

// Tests 3/8x scale down.
TEST_M(TestScaleHD4x3Down38, 1280, 960, 480, 360, 60)

// Tests 1/4x scale down.
TEST_M(TestScaleHD4x3Down14, 1280, 960, 320, 240, 60)

// Tests 3/16x scale down.
TEST_M(TestScaleHD4x3Down316, 1280, 960, 240, 180, 120)

// Tests 1/8x scale down.
TEST_M(TestScaleHD4x3Down18, 1280, 960, 160, 120, 150)

// Tests 2/3x scale down.
TEST_M(TestScaleHD4x3Down23, 960, 720, 640, 480, 60)

// Tests 4/3x scale up.
TEST_M(TestScaleHD4x3Up43, 960, 720, 1280, 960, 60)

// Tests 2/1x scale up.
TEST_M(TestScaleHD4x3Up21, 640, 480, 1280, 960, 60)

// Tests 4/1x scale up.
TEST_M(TestScaleHD4x3Up41, 320, 240, 1280, 960, 80)

// Test HD 16x10 aspect ratio scaling.

// Tests 1/1x scale down.
TEST_M(TestScaleHD16x10Down11, 1280, 800, 1280, 800, 0)

// Tests 3/4x scale down.
TEST_M(TestScaleHD16x10Down34, 1280, 800, 960, 600, 60)

// Tests 1/2x scale down.
TEST_M(TestScaleHD16x10Down12, 1280, 800, 640, 400, 60)

// Tests 3/8x scale down.
TEST_M(TestScaleHD16x10Down38, 1280, 800, 480, 300, 60)

// Tests 1/4x scale down.
TEST_M(TestScaleHD16x10Down14, 1280, 800, 320, 200, 60)

// Tests 3/16x scale down.
TEST_M(TestScaleHD16x10Down316, 1280, 800, 240, 150, 120)

// Tests 1/8x scale down.
TEST_M(TestScaleHD16x10Down18, 1280, 800, 160, 100, 150)

// Tests 2/3x scale down.
TEST_M(TestScaleHD16x10Down23, 960, 600, 640, 400, 60)

// Tests 4/3x scale up.
TEST_M(TestScaleHD16x10Up43, 960, 600, 1280, 800, 60)

// Tests 2/1x scale up.
TEST_M(TestScaleHD16x10Up21, 640, 400, 1280, 800, 60)

// Tests 4/1x scale up.
TEST_M(TestScaleHD16x10Up41, 320, 200, 1280, 800, 80)

// Test HD 16x9 aspect ratio scaling.

// Tests 1/1x scale down.
TEST_M(TestScaleHDDown11, 1280, 720, 1280, 720, 0)

// Tests 3/4x scale down.
TEST_M(TestScaleHDDown34, 1280, 720, 960, 540, 60)

// Tests 1/2x scale down.
TEST_M(TestScaleHDDown12, 1280, 720, 640, 360, 60)

// Tests 3/8x scale down.
TEST_M(TestScaleHDDown38, 1280, 720, 480, 270, 60)

// Tests 1/4x scale down.
TEST_M(TestScaleHDDown14, 1280, 720, 320, 180, 60)

// Tests 3/16x scale down.
TEST_M(TestScaleHDDown316, 1280, 720, 240, 135, 120)

// Tests 1/8x scale down.
TEST_M(TestScaleHDDown18, 1280, 720, 160, 90, 150)

// Tests 2/3x scale down.
TEST_M(TestScaleHDDown23, 960, 540, 640, 360, 60)

// Tests 4/3x scale up.
TEST_M(TestScaleHDUp43, 960, 540, 1280, 720, 60)

// Tests 2/1x scale up.
TEST_M(TestScaleHDUp21, 640, 360, 1280, 720, 60)

// Tests 4/1x scale up.
TEST_M(TestScaleHDUp41, 320, 180, 1280, 720, 80)

// Tests 1366x768 resolution for comparison to the Chromium scaler_bench.
TEST_M(TestScaleHDUp1366, 1280, 720, 1366, 768, 10)

// Tests odd source/dest sizes; 3 less than the even size so the chroma planes
// are odd-sized as well.
TEST_M(TestScaleHDUp1363, 1277, 717, 1363, 765, 10)

// Tests 1/2x scale down, using optimized algorithm.
TEST_M(TestScaleOddDown12, 180, 100, 90, 50, 50)

// Tests bilinear scale down.
TEST_M(TestScaleOddDownBilin, 160, 100, 90, 50, 120)

// Test huge buffer scales that are expected to use a different code path
// that avoids stack overflow but still work using point sampling.
// Max output size is 640 wide.
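// These TEST_H cases pass usefile == false, so TestScale() fills the input
// with a constant color and simply expects the same constant color out; that
// keeps verification cheap even at these extreme aspect ratios.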

// Tests interpolated 1/8x scale down, using optimized algorithm.
TEST_H(TestScaleDown18HDOptInt, 6144, 48, 768, 6, true, ALLFLAGS, true, 1)

// Tests interpolated 1/8x scale down, using C-only optimized algorithm.
TEST_H(TestScaleDown18HDCOnlyOptInt, 6144, 48, 768, 6, true, NOSSE, true, 1)

// Tests interpolated 3/8x scale down, using optimized algorithm.
TEST_H(TestScaleDown38HDOptInt, 2048, 16, 768, 6, true, ALLFLAGS, true, 1)

// Tests interpolated 3/8x scale down, using no-SSSE3 optimized algorithm.
TEST_H(TestScaleDown38HDNoSSSE3OptInt, 2048, 16, 768, 6, true, NOSSSE3, true, 1)

// Tests interpolated 3/8x scale down, using C-only optimized algorithm.
TEST_H(TestScaleDown38HDCOnlyOptInt, 2048, 16, 768, 6, true, NOSSE, true, 1)

// Tests interpolated 3/16x scale down, using optimized algorithm.
TEST_H(TestScaleDown316HDOptInt, 4096, 32, 768, 6, true, ALLFLAGS, true, 1)

// Tests interpolated 3/16x scale down, using no-SSSE3 optimized algorithm.
TEST_H(TestScaleDown316HDNoSSSE3OptInt, 4096, 32, 768, 6, true, NOSSSE3, true,
       1)

// Tests interpolated 3/16x scale down, using C-only optimized algorithm.
TEST_H(TestScaleDown316HDCOnlyOptInt, 4096, 32, 768, 6, true, NOSSE, true, 1)

// Test that special sizes don't crash.
// Tests scaling down to 1 pixel width.
TEST_H(TestScaleDown1x6OptInt, 3, 24, 1, 6, true, ALLFLAGS, true, 4)

// Tests scaling down to 1 pixel height.
TEST_H(TestScaleDown6x1OptInt, 24, 3, 6, 1, true, ALLFLAGS, true, 4)

// Tests scaling up from 1 pixel width.
TEST_H(TestScaleUp1x6OptInt, 1, 6, 3, 24, true, ALLFLAGS, true, 4)

// Tests scaling up from 1 pixel height.
TEST_H(TestScaleUp6x1OptInt, 6, 1, 24, 3, true, ALLFLAGS, true, 4)

// Test performance of a range of box filter scale sizes.

// Tests interpolated 1/2x scale down, using optimized algorithm.
TEST_H(TestScaleDown2xHDOptInt, 1280, 720, 1280 / 2, 720 / 2, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/3x scale down, using optimized algorithm.
TEST_H(TestScaleDown3xHDOptInt, 1280, 720, 1280 / 3, 720 / 3, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/4x scale down, using optimized algorithm.
TEST_H(TestScaleDown4xHDOptInt, 1280, 720, 1280 / 4, 720 / 4, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/5x scale down, using optimized algorithm.
TEST_H(TestScaleDown5xHDOptInt, 1280, 720, 1280 / 5, 720 / 5, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/6x scale down, using optimized algorithm.
TEST_H(TestScaleDown6xHDOptInt, 1280, 720, 1280 / 6, 720 / 6, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/7x scale down, using optimized algorithm.
TEST_H(TestScaleDown7xHDOptInt, 1280, 720, 1280 / 7, 720 / 7, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/8x scale down, using optimized algorithm.
TEST_H(TestScaleDown8xHDOptInt, 1280, 720, 1280 / 8, 720 / 8, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/9x scale down, using optimized algorithm.
TEST_H(TestScaleDown9xHDOptInt, 1280, 720, 1280 / 9, 720 / 9, true, ALLFLAGS,
       true, 1)

// Tests interpolated 1/10x scale down, using optimized algorithm.
TEST_H(TestScaleDown10xHDOptInt, 1280, 720, 1280 / 10, 720 / 10, true,
       ALLFLAGS, true, 1)