/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>
#include <time.h>

#include "../unit_test/unit_test.h"
#include "libyuv/cpu_id.h"
#include "libyuv/scale.h"

#define STRINGIZE(line) #line
#define FILELINESTR(file, line) file ":" STRINGIZE(line)

namespace libyuv {

// Test scaling with C vs Opt and return maximum pixel difference. 0 = exact.
static int TestFilter(int src_width,
                      int src_height,
                      int dst_width,
                      int dst_height,
                      FilterMode f,
                      int benchmark_iterations,
                      int disable_cpu_flags,
                      int benchmark_cpu_info) {
  if (!SizeValid(src_width, src_height, dst_width, dst_height)) {
    return 0;
  }

  int i, j;
  const int b = 0;  // 128 to test for padding/stride.
  int src_width_uv = (Abs(src_width) + 1) >> 1;
  int src_height_uv = (Abs(src_height) + 1) >> 1;

  int64 src_y_plane_size =
      (Abs(src_width) + b * 2) * (Abs(src_height) + b * 2);
  int64 src_uv_plane_size = (src_width_uv + b * 2) * (src_height_uv + b * 2);

  int src_stride_y = b * 2 + Abs(src_width);
  int src_stride_uv = b * 2 + src_width_uv;

  align_buffer_page_end(src_y, src_y_plane_size)
  align_buffer_page_end(src_u, src_uv_plane_size)
  align_buffer_page_end(src_v, src_uv_plane_size)
  if (!src_y || !src_u || !src_v) {
    printf("Skipped. Alloc failed " FILELINESTR(__FILE__, __LINE__) "\n");
    return 0;
  }
  MemRandomize(src_y, src_y_plane_size);
  MemRandomize(src_u, src_uv_plane_size);
  MemRandomize(src_v, src_uv_plane_size);

  int dst_width_uv = (dst_width + 1) >> 1;
  int dst_height_uv = (dst_height + 1) >> 1;

  int64 dst_y_plane_size = (dst_width + b * 2) * (dst_height + b * 2);
  int64 dst_uv_plane_size = (dst_width_uv + b * 2) * (dst_height_uv + b * 2);

  int dst_stride_y = b * 2 + dst_width;
  int dst_stride_uv = b * 2 + dst_width_uv;

  align_buffer_page_end(dst_y_c, dst_y_plane_size)
  align_buffer_page_end(dst_u_c, dst_uv_plane_size)
  align_buffer_page_end(dst_v_c, dst_uv_plane_size)
  align_buffer_page_end(dst_y_opt, dst_y_plane_size)
  align_buffer_page_end(dst_u_opt, dst_uv_plane_size)
  align_buffer_page_end(dst_v_opt, dst_uv_plane_size)
  if (!dst_y_c || !dst_u_c || !dst_v_c || !dst_y_opt || !dst_u_opt ||
      !dst_v_opt) {
    printf("Skipped. Alloc failed " FILELINESTR(__FILE__, __LINE__) "\n");
    return 0;
  }

  MaskCpuFlags(disable_cpu_flags);  // Disable all CPU optimization.
  double c_time = get_time();
  I420Scale(src_y + (src_stride_y * b) + b, src_stride_y,
            src_u + (src_stride_uv * b) + b, src_stride_uv,
            src_v + (src_stride_uv * b) + b, src_stride_uv, src_width,
            src_height, dst_y_c + (dst_stride_y * b) + b, dst_stride_y,
            dst_u_c + (dst_stride_uv * b) + b, dst_stride_uv,
            dst_v_c + (dst_stride_uv * b) + b, dst_stride_uv, dst_width,
            dst_height, f);
  c_time = (get_time() - c_time);

  MaskCpuFlags(benchmark_cpu_info);  // Enable all CPU optimization.
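  // Time the optimized path over benchmark_iterations runs; the C reference
  // above runs once, and the per-iteration average is reported below.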
  double opt_time = get_time();
  for (i = 0; i < benchmark_iterations; ++i) {
    I420Scale(src_y + (src_stride_y * b) + b, src_stride_y,
              src_u + (src_stride_uv * b) + b, src_stride_uv,
              src_v + (src_stride_uv * b) + b, src_stride_uv, src_width,
              src_height, dst_y_opt + (dst_stride_y * b) + b, dst_stride_y,
              dst_u_opt + (dst_stride_uv * b) + b, dst_stride_uv,
              dst_v_opt + (dst_stride_uv * b) + b, dst_stride_uv, dst_width,
              dst_height, f);
  }
  opt_time = (get_time() - opt_time) / benchmark_iterations;
  // Report performance of C vs OPT.
  printf("filter %d - %8d us C - %8d us OPT\n", f,
         static_cast<int>(c_time * 1e6), static_cast<int>(opt_time * 1e6));

  // C version may be a little off from the optimized. Order of
  // operations may introduce rounding somewhere. So do a difference
  // of the buffers and look to see that the max difference isn't
  // over 2.
  int max_diff = 0;
  for (i = b; i < (dst_height + b); ++i) {
    for (j = b; j < (dst_width + b); ++j) {
      int abs_diff = Abs(dst_y_c[(i * dst_stride_y) + j] -
                         dst_y_opt[(i * dst_stride_y) + j]);
      if (abs_diff > max_diff) {
        max_diff = abs_diff;
      }
    }
  }

  for (i = b; i < (dst_height_uv + b); ++i) {
    for (j = b; j < (dst_width_uv + b); ++j) {
      int abs_diff = Abs(dst_u_c[(i * dst_stride_uv) + j] -
                         dst_u_opt[(i * dst_stride_uv) + j]);
      if (abs_diff > max_diff) {
        max_diff = abs_diff;
      }
      abs_diff = Abs(dst_v_c[(i * dst_stride_uv) + j] -
                     dst_v_opt[(i * dst_stride_uv) + j]);
      if (abs_diff > max_diff) {
        max_diff = abs_diff;
      }
    }
  }

  free_aligned_buffer_page_end(dst_y_c)
  free_aligned_buffer_page_end(dst_u_c)
  free_aligned_buffer_page_end(dst_v_c)
  free_aligned_buffer_page_end(dst_y_opt)
  free_aligned_buffer_page_end(dst_u_opt)
  free_aligned_buffer_page_end(dst_v_opt)

  free_aligned_buffer_page_end(src_y)
  free_aligned_buffer_page_end(src_u)
  free_aligned_buffer_page_end(src_v)

  return max_diff;
}

// Test scaling with 8 bit C vs 16 bit C and return maximum pixel difference.
// 0 = exact.
static int TestFilter_16(int src_width,
                         int src_height,
                         int dst_width,
                         int dst_height,
                         FilterMode f,
                         int benchmark_iterations) {
  if (!SizeValid(src_width, src_height, dst_width, dst_height)) {
    return 0;
  }

  int i, j;
  const int b = 0;  // 128 to test for padding/stride.
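  // Plane sizes and strides mirror TestFilter; 16 bit shadow copies of the
  // source planes are filled below from the same random 8 bit data so both
  // paths see identical input.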
  int src_width_uv = (Abs(src_width) + 1) >> 1;
  int src_height_uv = (Abs(src_height) + 1) >> 1;

  int64 src_y_plane_size =
      (Abs(src_width) + b * 2) * (Abs(src_height) + b * 2);
  int64 src_uv_plane_size = (src_width_uv + b * 2) * (src_height_uv + b * 2);

  int src_stride_y = b * 2 + Abs(src_width);
  int src_stride_uv = b * 2 + src_width_uv;

  align_buffer_page_end(src_y, src_y_plane_size)
  align_buffer_page_end(src_u, src_uv_plane_size)
  align_buffer_page_end(src_v, src_uv_plane_size)
  align_buffer_page_end(src_y_16, src_y_plane_size * 2)
  align_buffer_page_end(src_u_16, src_uv_plane_size * 2)
  align_buffer_page_end(src_v_16, src_uv_plane_size * 2)
  uint16* p_src_y_16 = reinterpret_cast<uint16*>(src_y_16);
  uint16* p_src_u_16 = reinterpret_cast<uint16*>(src_u_16);
  uint16* p_src_v_16 = reinterpret_cast<uint16*>(src_v_16);

  MemRandomize(src_y, src_y_plane_size);
  MemRandomize(src_u, src_uv_plane_size);
  MemRandomize(src_v, src_uv_plane_size);

  for (i = b; i < src_height + b; ++i) {
    for (j = b; j < src_width + b; ++j) {
      p_src_y_16[(i * src_stride_y) + j] = src_y[(i * src_stride_y) + j];
    }
  }

  for (i = b; i < (src_height_uv + b); ++i) {
    for (j = b; j < (src_width_uv + b); ++j) {
      p_src_u_16[(i * src_stride_uv) + j] = src_u[(i * src_stride_uv) + j];
      p_src_v_16[(i * src_stride_uv) + j] = src_v[(i * src_stride_uv) + j];
    }
  }

  int dst_width_uv = (dst_width + 1) >> 1;
  int dst_height_uv = (dst_height + 1) >> 1;

  int dst_y_plane_size = (dst_width + b * 2) * (dst_height + b * 2);
  int dst_uv_plane_size = (dst_width_uv + b * 2) * (dst_height_uv + b * 2);

  int dst_stride_y = b * 2 + dst_width;
  int dst_stride_uv = b * 2 + dst_width_uv;

  align_buffer_page_end(dst_y_8, dst_y_plane_size)
  align_buffer_page_end(dst_u_8, dst_uv_plane_size)
  align_buffer_page_end(dst_v_8, dst_uv_plane_size)
  align_buffer_page_end(dst_y_16, dst_y_plane_size * 2)
  align_buffer_page_end(dst_u_16, dst_uv_plane_size * 2)
  align_buffer_page_end(dst_v_16, dst_uv_plane_size * 2)

  uint16* p_dst_y_16 = reinterpret_cast<uint16*>(dst_y_16);
  uint16* p_dst_u_16 = reinterpret_cast<uint16*>(dst_u_16);
  uint16* p_dst_v_16 = reinterpret_cast<uint16*>(dst_v_16);

  I420Scale(src_y + (src_stride_y * b) + b, src_stride_y,
            src_u + (src_stride_uv * b) + b, src_stride_uv,
            src_v + (src_stride_uv * b) + b, src_stride_uv, src_width,
            src_height, dst_y_8 + (dst_stride_y * b) + b, dst_stride_y,
            dst_u_8 + (dst_stride_uv * b) + b, dst_stride_uv,
            dst_v_8 + (dst_stride_uv * b) + b, dst_stride_uv, dst_width,
            dst_height, f);

  for (i = 0; i < benchmark_iterations; ++i) {
    I420Scale_16(p_src_y_16 + (src_stride_y * b) + b, src_stride_y,
                 p_src_u_16 + (src_stride_uv * b) + b, src_stride_uv,
                 p_src_v_16 + (src_stride_uv * b) + b, src_stride_uv,
                 src_width, src_height,
                 p_dst_y_16 + (dst_stride_y * b) + b, dst_stride_y,
                 p_dst_u_16 + (dst_stride_uv * b) + b, dst_stride_uv,
                 p_dst_v_16 + (dst_stride_uv * b) + b, dst_stride_uv,
                 dst_width, dst_height, f);
  }

  // Expect an exact match.
  int max_diff = 0;
  for (i = b; i < (dst_height + b); ++i) {
    for (j = b; j < (dst_width + b); ++j) {
      int abs_diff = Abs(dst_y_8[(i * dst_stride_y) + j] -
                         p_dst_y_16[(i * dst_stride_y) + j]);
      if (abs_diff > max_diff) {
        max_diff = abs_diff;
      }
    }
  }

  for (i = b; i < (dst_height_uv + b); ++i) {
    for (j = b; j < (dst_width_uv + b); ++j) {
      int abs_diff = Abs(dst_u_8[(i * dst_stride_uv) + j] -
                         p_dst_u_16[(i * dst_stride_uv) + j]);
      if (abs_diff > max_diff) {
        max_diff = abs_diff;
      }
      abs_diff = Abs(dst_v_8[(i * dst_stride_uv) + j] -
                     p_dst_v_16[(i * dst_stride_uv) + j]);
      if (abs_diff > max_diff) {
        max_diff = abs_diff;
      }
    }
  }

  free_aligned_buffer_page_end(dst_y_8)
  free_aligned_buffer_page_end(dst_u_8)
  free_aligned_buffer_page_end(dst_v_8)
  free_aligned_buffer_page_end(dst_y_16)
  free_aligned_buffer_page_end(dst_u_16)
  free_aligned_buffer_page_end(dst_v_16)

  free_aligned_buffer_page_end(src_y)
  free_aligned_buffer_page_end(src_u)
  free_aligned_buffer_page_end(src_v)
  free_aligned_buffer_page_end(src_y_16)
  free_aligned_buffer_page_end(src_u_16)
  free_aligned_buffer_page_end(src_v_16)

  return max_diff;
}

// The following adjustments in dimensions ensure the scale factor will be
// exactly achieved.
// 2 is the chroma subsample factor.
#define DX(x, nom, denom) static_cast<int>(((Abs(x) / nom + 1) / 2) * nom * 2)
#define SX(x, nom, denom) static_cast<int>(((x / nom + 1) / 2) * denom * 2)

#define TEST_FACTOR1(name, filter, nom, denom, max_diff)                     \
  TEST_F(LibYUVScaleTest, ScaleDownBy##name##_##filter) {                    \
    int diff = TestFilter(                                                   \
        SX(benchmark_width_, nom, denom), SX(benchmark_height_, nom, denom), \
        DX(benchmark_width_, nom, denom), DX(benchmark_height_, nom, denom), \
        kFilter##filter, benchmark_iterations_, disable_cpu_flags_,          \
        benchmark_cpu_info_);                                                \
    EXPECT_LE(diff, max_diff);                                               \
  }                                                                          \
  TEST_F(LibYUVScaleTest, DISABLED_ScaleDownBy##name##_##filter##_16) {      \
    int diff = TestFilter_16(                                                \
        SX(benchmark_width_, nom, denom), SX(benchmark_height_, nom, denom), \
        DX(benchmark_width_, nom, denom), DX(benchmark_height_, nom, denom), \
        kFilter##filter, benchmark_iterations_);                             \
    EXPECT_LE(diff, max_diff);                                               \
  }

// Test a scale factor with all 4 filters. Expect unfiltered (None) to be
// exact, but the filtered modes use different fixed point implementations
// on SSSE3, Neon and C, so a small difference is allowed.
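// For example, TEST_FACTOR(3by4, 3, 4, 1) below instantiates
// ScaleDownBy3by4_None, _Linear, _Bilinear and _Box, plus the corresponding
// DISABLED 16 bit variants. With benchmark_width_ = 1280,
// SX(1280, 3, 4) = ((1280 / 3 + 1) / 2) * 4 * 2 = 1704 and
// DX(1280, 3, 4) = ((1280 / 3 + 1) / 2) * 3 * 2 = 1278, so the test scales a
// 1704 pixel wide source to 1278, exactly 3/4, with both widths kept even
// for I420 chroma subsampling.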
#define TEST_FACTOR(name, nom, denom, boxdiff) \
  TEST_FACTOR1(name, None, nom, denom, 0)      \
  TEST_FACTOR1(name, Linear, nom, denom, 3)    \
  TEST_FACTOR1(name, Bilinear, nom, denom, 3)  \
  TEST_FACTOR1(name, Box, nom, denom, boxdiff)

TEST_FACTOR(2, 1, 2, 0)
TEST_FACTOR(4, 1, 4, 0)
TEST_FACTOR(8, 1, 8, 0)
TEST_FACTOR(3by4, 3, 4, 1)
TEST_FACTOR(3by8, 3, 8, 1)
TEST_FACTOR(3, 1, 3, 0)
#undef TEST_FACTOR1
#undef TEST_FACTOR
#undef SX
#undef DX

#define TEST_SCALETO1(name, width, height, filter, max_diff)                  \
  TEST_F(LibYUVScaleTest, name##To##width##x##height##_##filter) {            \
    int diff = TestFilter(benchmark_width_, benchmark_height_, width, height, \
                          kFilter##filter, benchmark_iterations_,             \
                          disable_cpu_flags_, benchmark_cpu_info_);           \
    EXPECT_LE(diff, max_diff);                                                \
  }                                                                           \
  TEST_F(LibYUVScaleTest, name##From##width##x##height##_##filter) {          \
    int diff = TestFilter(width, height, Abs(benchmark_width_),               \
                          Abs(benchmark_height_), kFilter##filter,            \
                          benchmark_iterations_, disable_cpu_flags_,          \
                          benchmark_cpu_info_);                               \
    EXPECT_LE(diff, max_diff);                                                \
  }                                                                           \
  TEST_F(LibYUVScaleTest,                                                     \
         DISABLED_##name##To##width##x##height##_##filter##_16) {             \
    int diff = TestFilter_16(benchmark_width_, benchmark_height_, width,      \
                             height, kFilter##filter, benchmark_iterations_); \
    EXPECT_LE(diff, max_diff);                                                \
  }                                                                           \
  TEST_F(LibYUVScaleTest,                                                     \
         DISABLED_##name##From##width##x##height##_##filter##_16) {           \
    int diff = TestFilter_16(width, height, Abs(benchmark_width_),            \
                             Abs(benchmark_height_), kFilter##filter,         \
                             benchmark_iterations_);                          \
    EXPECT_LE(diff, max_diff);                                                \
  }

// Test scale to a specified size with all 4 filters.
#define TEST_SCALETO(name, width, height)         \
  TEST_SCALETO1(name, width, height, None, 0)     \
  TEST_SCALETO1(name, width, height, Linear, 0)   \
  TEST_SCALETO1(name, width, height, Bilinear, 0) \
  TEST_SCALETO1(name, width, height, Box, 0)

TEST_SCALETO(Scale, 1, 1)
TEST_SCALETO(Scale, 320, 240)
TEST_SCALETO(Scale, 352, 288)
TEST_SCALETO(Scale, 569, 480)
TEST_SCALETO(Scale, 640, 360)
TEST_SCALETO(Scale, 1280, 720)
#undef TEST_SCALETO1
#undef TEST_SCALETO

}  // namespace libyuv