// Copyright 2012 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// main entry for the decoder
//
// Authors: Vikas Arora (vikaas.arora (at) gmail.com)
//          Jyrki Alakuijala (jyrki (at) google.com)

#include <stdio.h>
#include <stdlib.h>
#include "./vp8li.h"
#include "../dsp/lossless.h"
#include "../dsp/yuv.h"
#include "../utils/huffman.h"
#include "../utils/utils.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#define NUM_ARGB_CACHE_ROWS          16

static const int kCodeLengthLiterals = 16;
static const int kCodeLengthRepeatCode = 16;
static const int kCodeLengthExtraBits[3] = { 2, 3, 7 };
static const int kCodeLengthRepeatOffsets[3] = { 3, 3, 11 };

// -----------------------------------------------------------------------------
//  Five Huffman codes are used at each meta code:
//  1. green + length prefix codes + color cache codes,
//  2. alpha,
//  3. red,
//  4. blue, and,
//  5. distance prefix codes.
typedef enum {
  GREEN = 0,
  RED   = 1,
  BLUE  = 2,
  ALPHA = 3,
  DIST  = 4
} HuffIndex;

static const uint16_t kAlphabetSize[HUFFMAN_CODES_PER_META_CODE] = {
  NUM_LITERAL_CODES + NUM_LENGTH_CODES,
  NUM_LITERAL_CODES, NUM_LITERAL_CODES, NUM_LITERAL_CODES,
  NUM_DISTANCE_CODES
};


#define NUM_CODE_LENGTH_CODES       19
static const uint8_t kCodeLengthCodeOrder[NUM_CODE_LENGTH_CODES] = {
  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

#define CODE_TO_PLANE_CODES        120
static const uint8_t code_to_plane_lut[CODE_TO_PLANE_CODES] = {
  0x18, 0x07, 0x17, 0x19, 0x28, 0x06, 0x27, 0x29, 0x16, 0x1a,
  0x26, 0x2a, 0x38, 0x05, 0x37, 0x39, 0x15, 0x1b, 0x36, 0x3a,
  0x25, 0x2b, 0x48, 0x04, 0x47, 0x49, 0x14, 0x1c, 0x35, 0x3b,
  0x46, 0x4a, 0x24, 0x2c, 0x58, 0x45, 0x4b, 0x34, 0x3c, 0x03,
  0x57, 0x59, 0x13, 0x1d, 0x56, 0x5a, 0x23, 0x2d, 0x44, 0x4c,
  0x55, 0x5b, 0x33, 0x3d, 0x68, 0x02, 0x67, 0x69, 0x12, 0x1e,
  0x66, 0x6a, 0x22, 0x2e, 0x54, 0x5c, 0x43, 0x4d, 0x65, 0x6b,
  0x32, 0x3e, 0x78, 0x01, 0x77, 0x79, 0x53, 0x5d, 0x11, 0x1f,
  0x64, 0x6c, 0x42, 0x4e, 0x76, 0x7a, 0x21, 0x2f, 0x75, 0x7b,
  0x31, 0x3f, 0x63, 0x6d, 0x52, 0x5e, 0x00, 0x74, 0x7c, 0x41,
  0x4f, 0x10, 0x20, 0x62, 0x6e, 0x30, 0x73, 0x7d, 0x51, 0x5f,
  0x40, 0x72, 0x7e, 0x61, 0x6f, 0x50, 0x71, 0x7f, 0x60, 0x70
};

static int DecodeImageStream(int xsize, int ysize,
                             int is_level0,
                             VP8LDecoder* const dec,
                             uint32_t** const decoded_data);

//------------------------------------------------------------------------------

int VP8LCheckSignature(const uint8_t* const data, size_t size) {
  return (size >= VP8L_FRAME_HEADER_SIZE &&
          data[0] == VP8L_MAGIC_BYTE &&
          (data[4] >> 5) == 0);  // version
}

static int ReadImageInfo(VP8LBitReader* const br,
                         int* const width, int* const height,
                         int* const has_alpha) {
  if (VP8LReadBits(br, 8) != VP8L_MAGIC_BYTE) return 0;
  *width = VP8LReadBits(br, VP8L_IMAGE_SIZE_BITS) + 1;
  *height = VP8LReadBits(br, VP8L_IMAGE_SIZE_BITS) + 1;
  *has_alpha = VP8LReadBits(br, 1);
  if (VP8LReadBits(br, VP8L_VERSION_BITS) != 0) return 0;
  return 1;
}

int VP8LGetInfo(const uint8_t* data, size_t data_size,
                int* const width, int* const height, int* const has_alpha) {
  if (data == NULL || data_size < VP8L_FRAME_HEADER_SIZE) {
    return 0;         // not enough data
  } else if (!VP8LCheckSignature(data, data_size)) {
    return 0;         // bad signature
  } else {
    int w, h, a;
    VP8LBitReader br;
    VP8LInitBitReader(&br, data, data_size);
    if (!ReadImageInfo(&br, &w, &h, &a)) {
      return 0;
    }
    if (width != NULL) *width = w;
    if (height != NULL) *height = h;
    if (has_alpha != NULL) *has_alpha = a;
    return 1;
  }
}

//------------------------------------------------------------------------------

static WEBP_INLINE int GetCopyDistance(int distance_symbol,
                                       VP8LBitReader* const br) {
  int extra_bits, offset;
  if (distance_symbol < 4) {
    return distance_symbol + 1;
  }
  extra_bits = (distance_symbol - 2) >> 1;
  offset = (2 + (distance_symbol & 1)) << extra_bits;
  return offset + VP8LReadBits(br, extra_bits) + 1;
}

static WEBP_INLINE int GetCopyLength(int length_symbol,
                                     VP8LBitReader* const br) {
  // Length and distance prefixes are encoded the same way.
  return GetCopyDistance(length_symbol, br);
}

static WEBP_INLINE int PlaneCodeToDistance(int xsize, int plane_code) {
  if (plane_code > CODE_TO_PLANE_CODES) {
    return plane_code - CODE_TO_PLANE_CODES;
  } else {
    const int dist_code = code_to_plane_lut[plane_code - 1];
    const int yoffset = dist_code >> 4;
    const int xoffset = 8 - (dist_code & 0xf);
    const int dist = yoffset * xsize + xoffset;
    return (dist >= 1) ? dist : 1;
  }
}

//------------------------------------------------------------------------------
// Decodes the next Huffman code from bit-stream.
// FillBitWindow(br) needs to be called at minimum every second call
// to ReadSymbol, in order to pre-fetch enough bits.
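// The tree is walked from the root one prefetched bit at a time (LSB first);
// the bits actually consumed are only discarded from the bit-reader once a
// leaf node (the decoded symbol) is reached.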
static WEBP_INLINE int ReadSymbol(const HuffmanTree* tree,
                                  VP8LBitReader* const br) {
  const HuffmanTreeNode* node = tree->root_;
  int num_bits = 0;
  uint32_t bits = VP8LPrefetchBits(br);
  assert(node != NULL);
  while (!HuffmanTreeNodeIsLeaf(node)) {
    node = HuffmanTreeNextNode(node, bits & 1);
    bits >>= 1;
    ++num_bits;
  }
  VP8LDiscardBits(br, num_bits);
  return node->symbol_;
}

static int ReadHuffmanCodeLengths(
    VP8LDecoder* const dec, const int* const code_length_code_lengths,
    int num_symbols, int* const code_lengths) {
  int ok = 0;
  VP8LBitReader* const br = &dec->br_;
  int symbol;
  int max_symbol;
  int prev_code_len = DEFAULT_CODE_LENGTH;
  HuffmanTree tree;

  if (!HuffmanTreeBuildImplicit(&tree, code_length_code_lengths,
                                NUM_CODE_LENGTH_CODES)) {
    dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
    return 0;
  }

  if (VP8LReadBits(br, 1)) {    // use length
    const int length_nbits = 2 + 2 * VP8LReadBits(br, 3);
    max_symbol = 2 + VP8LReadBits(br, length_nbits);
    if (max_symbol > num_symbols) {
      dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
      goto End;
    }
  } else {
    max_symbol = num_symbols;
  }

  symbol = 0;
  while (symbol < num_symbols) {
    int code_len;
    if (max_symbol-- == 0) break;
    VP8LFillBitWindow(br);
    code_len = ReadSymbol(&tree, br);
    if (code_len < kCodeLengthLiterals) {
      code_lengths[symbol++] = code_len;
      if (code_len != 0) prev_code_len = code_len;
    } else {
      const int use_prev = (code_len == kCodeLengthRepeatCode);
      const int slot = code_len - kCodeLengthLiterals;
      const int extra_bits = kCodeLengthExtraBits[slot];
      const int repeat_offset = kCodeLengthRepeatOffsets[slot];
      int repeat = VP8LReadBits(br, extra_bits) + repeat_offset;
      if (symbol + repeat > num_symbols) {
        dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
        goto End;
      } else {
        const int length = use_prev ? prev_code_len : 0;
        while (repeat-- > 0) code_lengths[symbol++] = length;
      }
    }
  }
  ok = 1;

 End:
  HuffmanTreeRelease(&tree);
  return ok;
}

static int ReadHuffmanCode(int alphabet_size, VP8LDecoder* const dec,
                           HuffmanTree* const tree) {
  int ok = 0;
  VP8LBitReader* const br = &dec->br_;
  const int simple_code = VP8LReadBits(br, 1);

  if (simple_code) {  // Read symbols, codes & code lengths directly.
    int symbols[2];
    int codes[2];
    int code_lengths[2];
    const int num_symbols = VP8LReadBits(br, 1) + 1;
    const int first_symbol_len_code = VP8LReadBits(br, 1);
    // The first code is either a 1-bit or an 8-bit code.
    symbols[0] = VP8LReadBits(br, (first_symbol_len_code == 0) ? 1 : 8);
    codes[0] = 0;
    code_lengths[0] = num_symbols - 1;
    // The second code (if present) is always 8 bits long.
    if (num_symbols == 2) {
      symbols[1] = VP8LReadBits(br, 8);
      codes[1] = 1;
      code_lengths[1] = num_symbols - 1;
    }
    ok = HuffmanTreeBuildExplicit(tree, code_lengths, codes, symbols,
                                  alphabet_size, num_symbols);
  } else {  // Decode Huffman-coded code lengths.
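    // Normal case: first read the code lengths of the code-length alphabet
    // itself (transmitted in kCodeLengthCodeOrder order), then use the
    // resulting temporary Huffman tree to decode the symbol code lengths.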
    int* code_lengths = NULL;
    int i;
    int code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
    const int num_codes = VP8LReadBits(br, 4) + 4;
    if (num_codes > NUM_CODE_LENGTH_CODES) {
      dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
      return 0;
    }

    code_lengths =
        (int*)WebPSafeCalloc((uint64_t)alphabet_size, sizeof(*code_lengths));
    if (code_lengths == NULL) {
      dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
      return 0;
    }

    for (i = 0; i < num_codes; ++i) {
      code_length_code_lengths[kCodeLengthCodeOrder[i]] = VP8LReadBits(br, 3);
    }
    ok = ReadHuffmanCodeLengths(dec, code_length_code_lengths, alphabet_size,
                                code_lengths);
    if (ok) {
      ok = HuffmanTreeBuildImplicit(tree, code_lengths, alphabet_size);
    }
    free(code_lengths);
  }
  ok = ok && !br->error_;
  if (!ok) {
    dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
    return 0;
  }
  return 1;
}

static void DeleteHtreeGroups(HTreeGroup* htree_groups, int num_htree_groups) {
  if (htree_groups != NULL) {
    int i, j;
    for (i = 0; i < num_htree_groups; ++i) {
      HuffmanTree* const htrees = htree_groups[i].htrees_;
      for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
        HuffmanTreeRelease(&htrees[j]);
      }
    }
    free(htree_groups);
  }
}

static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
                            int color_cache_bits, int allow_recursion) {
  int i, j;
  VP8LBitReader* const br = &dec->br_;
  VP8LMetadata* const hdr = &dec->hdr_;
  uint32_t* huffman_image = NULL;
  HTreeGroup* htree_groups = NULL;
  int num_htree_groups = 1;

  if (allow_recursion && VP8LReadBits(br, 1)) {
    // use meta Huffman codes.
    const int huffman_precision = VP8LReadBits(br, 3) + 2;
    const int huffman_xsize = VP8LSubSampleSize(xsize, huffman_precision);
    const int huffman_ysize = VP8LSubSampleSize(ysize, huffman_precision);
    const int huffman_pixs = huffman_xsize * huffman_ysize;
    if (!DecodeImageStream(huffman_xsize, huffman_ysize, 0, dec,
                           &huffman_image)) {
      dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
      goto Error;
    }
    hdr->huffman_subsample_bits_ = huffman_precision;
    for (i = 0; i < huffman_pixs; ++i) {
      // The huffman data is stored in red and green bytes.
      const int group = (huffman_image[i] >> 8) & 0xffff;
      huffman_image[i] = group;
      if (group >= num_htree_groups) {
        num_htree_groups = group + 1;
      }
    }
  }

  if (br->error_) goto Error;

  assert(num_htree_groups <= 0x10000);
  htree_groups =
      (HTreeGroup*)WebPSafeCalloc((uint64_t)num_htree_groups,
                                  sizeof(*htree_groups));
  if (htree_groups == NULL) {
    dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
    goto Error;
  }

  for (i = 0; i < num_htree_groups; ++i) {
    HuffmanTree* const htrees = htree_groups[i].htrees_;
    for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
      int alphabet_size = kAlphabetSize[j];
      if (j == 0 && color_cache_bits > 0) {
        alphabet_size += 1 << color_cache_bits;
      }
      if (!ReadHuffmanCode(alphabet_size, dec, htrees + j)) goto Error;
    }
  }

  // All OK. Finalize pointers and return.
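  // Ownership of 'huffman_image' and 'htree_groups' is transferred to 'hdr';
  // both are released later by ClearMetadata().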
  hdr->huffman_image_ = huffman_image;
  hdr->num_htree_groups_ = num_htree_groups;
  hdr->htree_groups_ = htree_groups;
  return 1;

 Error:
  free(huffman_image);
  DeleteHtreeGroups(htree_groups, num_htree_groups);
  return 0;
}

//------------------------------------------------------------------------------
// Scaling.

static int AllocateAndInitRescaler(VP8LDecoder* const dec, VP8Io* const io) {
  const int num_channels = 4;
  const int in_width = io->mb_w;
  const int out_width = io->scaled_width;
  const int in_height = io->mb_h;
  const int out_height = io->scaled_height;
  const uint64_t work_size = 2 * num_channels * (uint64_t)out_width;
  int32_t* work;          // Rescaler work area.
  const uint64_t scaled_data_size = num_channels * (uint64_t)out_width;
  uint32_t* scaled_data;  // Temporary storage for scaled BGRA data.
  const uint64_t memory_size = sizeof(*dec->rescaler) +
                               work_size * sizeof(*work) +
                               scaled_data_size * sizeof(*scaled_data);
  uint8_t* memory = (uint8_t*)WebPSafeCalloc(memory_size, sizeof(*memory));
  if (memory == NULL) {
    dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
    return 0;
  }
  assert(dec->rescaler_memory == NULL);
  dec->rescaler_memory = memory;

  dec->rescaler = (WebPRescaler*)memory;
  memory += sizeof(*dec->rescaler);
  work = (int32_t*)memory;
  memory += work_size * sizeof(*work);
  scaled_data = (uint32_t*)memory;

  WebPRescalerInit(dec->rescaler, in_width, in_height, (uint8_t*)scaled_data,
                   out_width, out_height, 0, num_channels,
                   in_width, out_width, in_height, out_height, work);
  return 1;
}

//------------------------------------------------------------------------------
// Export to ARGB

// We have a special "export" function since we need to convert from BGRA.
static int Export(WebPRescaler* const rescaler, WEBP_CSP_MODE colorspace,
                  int rgba_stride, uint8_t* const rgba) {
  const uint32_t* const src = (const uint32_t*)rescaler->dst;
  const int dst_width = rescaler->dst_width;
  int num_lines_out = 0;
  while (WebPRescalerHasPendingOutput(rescaler)) {
    uint8_t* const dst = rgba + num_lines_out * rgba_stride;
    WebPRescalerExportRow(rescaler);
    VP8LConvertFromBGRA(src, dst_width, colorspace, dst);
    ++num_lines_out;
  }
  return num_lines_out;
}

// Emit scaled rows.
static int EmitRescaledRows(const VP8LDecoder* const dec,
                            const uint32_t* const data, int in_stride, int mb_h,
                            uint8_t* const out, int out_stride) {
  const WEBP_CSP_MODE colorspace = dec->output_->colorspace;
  const uint8_t* const in = (const uint8_t*)data;
  int num_lines_in = 0;
  int num_lines_out = 0;
  while (num_lines_in < mb_h) {
    const uint8_t* const row_in = in + num_lines_in * in_stride;
    uint8_t* const row_out = out + num_lines_out * out_stride;
    num_lines_in += WebPRescalerImport(dec->rescaler, mb_h - num_lines_in,
                                       row_in, in_stride);
    num_lines_out += Export(dec->rescaler, colorspace, out_stride, row_out);
  }
  return num_lines_out;
}

// Emit rows without any scaling.
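// Each row is converted from BGRA to the requested colorspace and written
// directly to its destination line.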
static int EmitRows(WEBP_CSP_MODE colorspace,
                    const uint32_t* const data, int in_stride,
                    int mb_w, int mb_h,
                    uint8_t* const out, int out_stride) {
  int lines = mb_h;
  const uint8_t* row_in = (const uint8_t*)data;
  uint8_t* row_out = out;
  while (lines-- > 0) {
    VP8LConvertFromBGRA((const uint32_t*)row_in, mb_w, colorspace, row_out);
    row_in += in_stride;
    row_out += out_stride;
  }
  return mb_h;  // Num rows out == num rows in.
}

//------------------------------------------------------------------------------
// Export to YUVA

static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos,
                          const WebPDecBuffer* const output) {
  const WebPYUVABuffer* const buf = &output->u.YUVA;
  // first, the luma plane
  {
    int i;
    uint8_t* const y = buf->y + y_pos * buf->y_stride;
    for (i = 0; i < width; ++i) {
      const uint32_t p = src[i];
      y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff);
    }
  }

  // then U/V planes
  {
    uint8_t* const u = buf->u + (y_pos >> 1) * buf->u_stride;
    uint8_t* const v = buf->v + (y_pos >> 1) * buf->v_stride;
    const int uv_width = width >> 1;
    int i;
    for (i = 0; i < uv_width; ++i) {
      const uint32_t v0 = src[2 * i + 0];
      const uint32_t v1 = src[2 * i + 1];
      // VP8RGBToU/V expects four accumulated pixels. Hence we need to
      // scale the r/g/b values by a factor of 2. We just shift v0/v1
      // one bit less.
      const int r = ((v0 >> 15) & 0x1fe) + ((v1 >> 15) & 0x1fe);
      const int g = ((v0 >> 7) & 0x1fe) + ((v1 >> 7) & 0x1fe);
      const int b = ((v0 << 1) & 0x1fe) + ((v1 << 1) & 0x1fe);
      if (!(y_pos & 1)) {  // even lines: store values
        u[i] = VP8RGBToU(r, g, b);
        v[i] = VP8RGBToV(r, g, b);
      } else {             // odd lines: average with previous values
        const int tmp_u = VP8RGBToU(r, g, b);
        const int tmp_v = VP8RGBToV(r, g, b);
        // Approximated average-of-four. But it's an acceptable diff.
        u[i] = (u[i] + tmp_u + 1) >> 1;
        v[i] = (v[i] + tmp_v + 1) >> 1;
      }
    }
    if (width & 1) {       // last pixel
      const uint32_t v0 = src[2 * i + 0];
      const int r = (v0 >> 14) & 0x3fc;
      const int g = (v0 >> 6) & 0x3fc;
      const int b = (v0 << 2) & 0x3fc;
      if (!(y_pos & 1)) {  // even lines
        u[i] = VP8RGBToU(r, g, b);
        v[i] = VP8RGBToV(r, g, b);
      } else {             // odd lines (note: we could just skip this)
        const int tmp_u = VP8RGBToU(r, g, b);
        const int tmp_v = VP8RGBToV(r, g, b);
        u[i] = (u[i] + tmp_u + 1) >> 1;
        v[i] = (v[i] + tmp_v + 1) >> 1;
      }
    }
  }
  // Lastly, store alpha if needed.
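  // (alpha is taken from the most-significant byte of each ARGB pixel)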
  if (buf->a != NULL) {
    int i;
    uint8_t* const a = buf->a + y_pos * buf->a_stride;
    for (i = 0; i < width; ++i) a[i] = (src[i] >> 24);
  }
}

static int ExportYUVA(const VP8LDecoder* const dec, int y_pos) {
  WebPRescaler* const rescaler = dec->rescaler;
  const uint32_t* const src = (const uint32_t*)rescaler->dst;
  const int dst_width = rescaler->dst_width;
  int num_lines_out = 0;
  while (WebPRescalerHasPendingOutput(rescaler)) {
    WebPRescalerExportRow(rescaler);
    ConvertToYUVA(src, dst_width, y_pos, dec->output_);
    ++y_pos;
    ++num_lines_out;
  }
  return num_lines_out;
}

static int EmitRescaledRowsYUVA(const VP8LDecoder* const dec,
                                const uint32_t* const data,
                                int in_stride, int mb_h) {
  const uint8_t* const in = (const uint8_t*)data;
  int num_lines_in = 0;
  int y_pos = dec->last_out_row_;
  while (num_lines_in < mb_h) {
    const uint8_t* const row_in = in + num_lines_in * in_stride;
    num_lines_in += WebPRescalerImport(dec->rescaler, mb_h - num_lines_in,
                                       row_in, in_stride);
    y_pos += ExportYUVA(dec, y_pos);
  }
  return y_pos;
}

static int EmitRowsYUVA(const VP8LDecoder* const dec,
                        const uint32_t* const data, int in_stride,
                        int mb_w, int num_rows) {
  int y_pos = dec->last_out_row_;
  const uint8_t* row_in = (const uint8_t*)data;
  while (num_rows-- > 0) {
    ConvertToYUVA((const uint32_t*)row_in, mb_w, y_pos, dec->output_);
    row_in += in_stride;
    ++y_pos;
  }
  return y_pos;
}

//------------------------------------------------------------------------------
// Cropping.

// Sets io->mb_y, io->mb_h & io->mb_w according to start row, end row and
// crop options. Also updates the input data pointer, so that it points to the
// start of the cropped window.
// Note that 'pixel_stride' is in units of 'uint32_t' (and not 'bytes').
// Returns true if the crop window is not empty.
static int SetCropWindow(VP8Io* const io, int y_start, int y_end,
                         const uint32_t** const in_data, int pixel_stride) {
  assert(y_start < y_end);
  assert(io->crop_left < io->crop_right);
  if (y_end > io->crop_bottom) {
    y_end = io->crop_bottom;  // make sure we don't overflow on last row.
  }
  if (y_start < io->crop_top) {
    const int delta = io->crop_top - y_start;
    y_start = io->crop_top;
    *in_data += pixel_stride * delta;
  }
  if (y_start >= y_end) return 0;  // Crop window is empty.

  *in_data += io->crop_left;

  io->mb_y = y_start - io->crop_top;
  io->mb_w = io->crop_right - io->crop_left;
  io->mb_h = y_end - y_start;
  return 1;  // Non-empty crop window.
}

//------------------------------------------------------------------------------

static WEBP_INLINE int GetMetaIndex(
    const uint32_t* const image, int xsize, int bits, int x, int y) {
  if (bits == 0) return 0;
  return image[xsize * (y >> bits) + (x >> bits)];
}

static WEBP_INLINE HTreeGroup* GetHtreeGroupForPos(VP8LMetadata* const hdr,
                                                   int x, int y) {
  const int meta_index = GetMetaIndex(hdr->huffman_image_, hdr->huffman_xsize_,
                                      hdr->huffman_subsample_bits_, x, y);
  assert(meta_index < hdr->num_htree_groups_);
  return hdr->htree_groups_ + meta_index;
}

//------------------------------------------------------------------------------
// Main loop, with custom row-processing function

typedef void (*ProcessRowsFunc)(VP8LDecoder* const dec, int row);

static void ApplyInverseTransforms(VP8LDecoder* const dec, int num_rows,
                                   const uint32_t* const rows) {
  int n = dec->next_transform_;
  const int cache_pixs = dec->width_ * num_rows;
  const int start_row = dec->last_row_;
  const int end_row = start_row + num_rows;
  const uint32_t* rows_in = rows;
  uint32_t* const rows_out = dec->argb_cache_;

  // Inverse transforms.
  // TODO: most transforms only need to operate on the cropped region.
  memcpy(rows_out, rows_in, cache_pixs * sizeof(*rows_out));
  while (n-- > 0) {
    VP8LTransform* const transform = &dec->transforms_[n];
    VP8LInverseTransform(transform, start_row, end_row, rows_in, rows_out);
    rows_in = rows_out;
  }
}

// Special method for paletted alpha data.
static void ApplyInverseTransformsAlpha(VP8LDecoder* const dec, int num_rows,
                                        const uint8_t* const rows) {
  const int start_row = dec->last_row_;
  const int end_row = start_row + num_rows;
  const uint8_t* rows_in = rows;
  uint8_t* rows_out = (uint8_t*)dec->io_->opaque + dec->io_->width * start_row;
  VP8LTransform* const transform = &dec->transforms_[0];
  assert(dec->next_transform_ == 1);
  assert(transform->type_ == COLOR_INDEXING_TRANSFORM);
  VP8LColorIndexInverseTransformAlpha(transform, start_row, end_row, rows_in,
                                      rows_out);
}

// Processes (transforms, scales & color-converts) the rows decoded after the
// last call.
static void ProcessRows(VP8LDecoder* const dec, int row) {
  const uint32_t* const rows = dec->pixels_ + dec->width_ * dec->last_row_;
  const int num_rows = row - dec->last_row_;

  if (num_rows <= 0) return;  // Nothing to be done.
  ApplyInverseTransforms(dec, num_rows, rows);

  // Emit output.
  {
    VP8Io* const io = dec->io_;
    const uint32_t* rows_data = dec->argb_cache_;
    if (!SetCropWindow(io, dec->last_row_, row, &rows_data, io->width)) {
      // Nothing to output (this time).
    } else {
      const WebPDecBuffer* const output = dec->output_;
      const int in_stride = io->width * sizeof(*rows_data);
      if (output->colorspace < MODE_YUV) {  // convert to RGBA
        const WebPRGBABuffer* const buf = &output->u.RGBA;
        uint8_t* const rgba = buf->rgba + dec->last_out_row_ * buf->stride;
        const int num_rows_out = io->use_scaling ?
            EmitRescaledRows(dec, rows_data, in_stride, io->mb_h,
                             rgba, buf->stride) :
            EmitRows(output->colorspace, rows_data, in_stride,
                     io->mb_w, io->mb_h, rgba, buf->stride);
        // Update 'last_out_row_'.
        dec->last_out_row_ += num_rows_out;
      } else {                              // convert to YUVA
        dec->last_out_row_ = io->use_scaling ?
            EmitRescaledRowsYUVA(dec, rows_data, in_stride, io->mb_h) :
            EmitRowsYUVA(dec, rows_data, in_stride, io->mb_w, io->mb_h);
      }
      assert(dec->last_out_row_ <= output->height);
    }
  }

  // Update 'last_row_'.
  dec->last_row_ = row;
  assert(dec->last_row_ <= dec->height_);
}

#define DECODE_DATA_FUNC(FUNC_NAME, TYPE, STORE_PIXEL)                        \
static int FUNC_NAME(VP8LDecoder* const dec, TYPE* const data, int width,    \
                     int height, ProcessRowsFunc process_func) {             \
  int ok = 1;                                                                 \
  int col = 0, row = 0;                                                       \
  VP8LBitReader* const br = &dec->br_;                                        \
  VP8LMetadata* const hdr = &dec->hdr_;                                       \
  HTreeGroup* htree_group = hdr->htree_groups_;                               \
  TYPE* src = data;                                                           \
  TYPE* last_cached = data;                                                   \
  TYPE* const src_end = data + width * height;                                \
  const int len_code_limit = NUM_LITERAL_CODES + NUM_LENGTH_CODES;            \
  const int color_cache_limit = len_code_limit + hdr->color_cache_size_;      \
  VP8LColorCache* const color_cache =                                         \
      (hdr->color_cache_size_ > 0) ? &hdr->color_cache_ : NULL;               \
  const int mask = hdr->huffman_mask_;                                        \
  assert(htree_group != NULL);                                                \
  while (!br->eos_ && src < src_end) {                                        \
    int code;                                                                 \
    /* Only update when changing tile. Note we could use this test: */        \
    /* if "((((prev_col ^ col) | prev_row ^ row)) > mask)" -> tile changed */ \
    /* but that's actually slower and needs storing the previous col/row. */  \
    if ((col & mask) == 0) {                                                  \
      htree_group = GetHtreeGroupForPos(hdr, col, row);                       \
    }                                                                         \
    VP8LFillBitWindow(br);                                                    \
    code = ReadSymbol(&htree_group->htrees_[GREEN], br);                      \
    if (code < NUM_LITERAL_CODES) {  /* Literal */                            \
      int red, green, blue, alpha;                                            \
      red = ReadSymbol(&htree_group->htrees_[RED], br);                       \
      green = code;                                                           \
      VP8LFillBitWindow(br);                                                  \
      blue = ReadSymbol(&htree_group->htrees_[BLUE], br);                     \
      alpha = ReadSymbol(&htree_group->htrees_[ALPHA], br);                   \
      *src = STORE_PIXEL(alpha, red, green, blue);                            \
 AdvanceByOne:                                                                \
      ++src;                                                                  \
      ++col;                                                                  \
      if (col >= width) {                                                     \
        col = 0;                                                              \
        ++row;                                                                \
        if ((process_func != NULL) && (row % NUM_ARGB_CACHE_ROWS == 0)) {     \
          process_func(dec, row);                                             \
        }                                                                     \
        if (color_cache != NULL) {                                            \
          while (last_cached < src) {                                         \
            VP8LColorCacheInsert(color_cache, *last_cached++);                \
          }                                                                   \
        }                                                                     \
      }                                                                       \
    } else if (code < len_code_limit) {  /* Backward reference */             \
      int dist_code, dist;                                                    \
      const int length_sym = code - NUM_LITERAL_CODES;                        \
      const int length = GetCopyLength(length_sym, br);                       \
      const int dist_symbol = ReadSymbol(&htree_group->htrees_[DIST], br);    \
      VP8LFillBitWindow(br);                                                  \
      dist_code = GetCopyDistance(dist_symbol, br);                           \
      dist = PlaneCodeToDistance(width, dist_code);                           \
      if (src - data < dist || src_end - src < length) {                      \
        ok = 0;                                                               \
        goto End;                                                             \
      }                                                                       \
      {                                                                       \
        int i;                                                                \
        for (i = 0; i < length; ++i) src[i] = src[i - dist];                  \
        src += length;                                                        \
      }                                                                       \
      col += length;                                                          \
      while (col >= width) {                                                  \
        col -= width;                                                         \
        ++row;                                                                \
        if ((process_func != NULL) && (row % NUM_ARGB_CACHE_ROWS == 0)) {     \
          process_func(dec, row);                                             \
        }                                                                     \
      }                                                                       \
      if (src < src_end) {                                                    \
        htree_group = GetHtreeGroupForPos(hdr, col, row);                     \
        if (color_cache != NULL) {                                            \
          while (last_cached < src) {                                         \
            VP8LColorCacheInsert(color_cache, *last_cached++);                \
          }                                                                   \
        }                                                                     \
      }                                                                       \
    } else if (code < color_cache_limit) {  /* Color cache */                 \
      const int key = code - len_code_limit;                                  \
      assert(color_cache != NULL);                                            \
      while (last_cached < src) {                                             \
        VP8LColorCacheInsert(color_cache, *last_cached++);                    \
      }                                                                       \
      *src = VP8LColorCacheLookup(color_cache, key);                          \
      goto AdvanceByOne;                                                      \
    } else {  /* Not reached */                                               \
      ok = 0;                                                                 \
      goto End;                                                               \
    }                                                                         \
    ok = !br->error_;                                                         \
    if (!ok) goto End;                                                        \
  }                                                                           \
  /* Process the remaining rows corresponding to last row-block. */           \
  if (process_func != NULL) process_func(dec, row);                           \
 End:                                                                         \
  if (br->error_ || !ok || (br->eos_ && src < src_end)) {                     \
    ok = 0;                                                                   \
    dec->status_ =                                                            \
        (!br->eos_) ? VP8_STATUS_BITSTREAM_ERROR : VP8_STATUS_SUSPENDED;      \
  } else if (src == src_end) {                                                \
    dec->state_ = READ_DATA;                                                  \
  }                                                                           \
  return ok;                                                                  \
}

static WEBP_INLINE uint32_t GetARGBPixel(int alpha, int red, int green,
                                         int blue) {
  return (alpha << 24) | (red << 16) | (green << 8) | blue;
}

static WEBP_INLINE uint8_t GetAlphaPixel(int alpha, int red, int green,
                                         int blue) {
  (void)alpha;
  (void)red;
  (void)blue;
  return green;  // Alpha value is stored in green channel.
}

DECODE_DATA_FUNC(DecodeImageData, uint32_t, GetARGBPixel)
DECODE_DATA_FUNC(DecodeAlphaData, uint8_t, GetAlphaPixel)

#undef DECODE_DATA_FUNC

// -----------------------------------------------------------------------------
// VP8LTransform

static void ClearTransform(VP8LTransform* const transform) {
  free(transform->data_);
  transform->data_ = NULL;
}

// For security reasons, we need to remap the color map to span
// the total possible bundled values, and not just the num_colors.
static int ExpandColorMap(int num_colors, VP8LTransform* const transform) {
  int i;
  const int final_num_colors = 1 << (8 >> transform->bits_);
  uint32_t* const new_color_map =
      (uint32_t*)WebPSafeMalloc((uint64_t)final_num_colors,
                                sizeof(*new_color_map));
  if (new_color_map == NULL) {
    return 0;
  } else {
    uint8_t* const data = (uint8_t*)transform->data_;
    uint8_t* const new_data = (uint8_t*)new_color_map;
    new_color_map[0] = transform->data_[0];
    for (i = 4; i < 4 * num_colors; ++i) {
      // Equivalent to AddPixelEq(), on a byte-basis.
      new_data[i] = (data[i] + new_data[i - 4]) & 0xff;
    }
    for (; i < 4 * final_num_colors; ++i)
      new_data[i] = 0;  // black tail.
    free(transform->data_);
    transform->data_ = new_color_map;
  }
  return 1;
}

static int ReadTransform(int* const xsize, int const* ysize,
                         VP8LDecoder* const dec) {
  int ok = 1;
  VP8LBitReader* const br = &dec->br_;
  VP8LTransform* transform = &dec->transforms_[dec->next_transform_];
  const VP8LImageTransformType type =
      (VP8LImageTransformType)VP8LReadBits(br, 2);

  // Each transform type can only be present once in the stream.
  if (dec->transforms_seen_ & (1U << type)) {
    return 0;  // Already there, let's not accept the second same transform.
  }
  dec->transforms_seen_ |= (1U << type);

  transform->type_ = type;
  transform->xsize_ = *xsize;
  transform->ysize_ = *ysize;
  transform->data_ = NULL;
  ++dec->next_transform_;
  assert(dec->next_transform_ <= NUM_TRANSFORMS);

  switch (type) {
    case PREDICTOR_TRANSFORM:
    case CROSS_COLOR_TRANSFORM:
      transform->bits_ = VP8LReadBits(br, 3) + 2;
      ok = DecodeImageStream(VP8LSubSampleSize(transform->xsize_,
                                               transform->bits_),
                             VP8LSubSampleSize(transform->ysize_,
                                               transform->bits_),
                             0, dec, &transform->data_);
      break;
    case COLOR_INDEXING_TRANSFORM: {
      const int num_colors = VP8LReadBits(br, 8) + 1;
      const int bits = (num_colors > 16) ? 0
                     : (num_colors > 4) ? 1
                     : (num_colors > 2) ? 2
                     : 3;
      *xsize = VP8LSubSampleSize(transform->xsize_, bits);
      transform->bits_ = bits;
      ok = DecodeImageStream(num_colors, 1, 0, dec, &transform->data_);
      ok = ok && ExpandColorMap(num_colors, transform);
      break;
    }
    case SUBTRACT_GREEN:
      break;
    default:
      assert(0);    // can't happen
      break;
  }

  return ok;
}

// -----------------------------------------------------------------------------
// VP8LMetadata

static void InitMetadata(VP8LMetadata* const hdr) {
  assert(hdr);
  memset(hdr, 0, sizeof(*hdr));
}

static void ClearMetadata(VP8LMetadata* const hdr) {
  assert(hdr);

  free(hdr->huffman_image_);
  DeleteHtreeGroups(hdr->htree_groups_, hdr->num_htree_groups_);
  VP8LColorCacheClear(&hdr->color_cache_);
  InitMetadata(hdr);
}

// -----------------------------------------------------------------------------
// VP8LDecoder

VP8LDecoder* VP8LNew(void) {
  VP8LDecoder* const dec = (VP8LDecoder*)calloc(1, sizeof(*dec));
  if (dec == NULL) return NULL;
  dec->status_ = VP8_STATUS_OK;
  dec->action_ = READ_DIM;
  dec->state_ = READ_DIM;
  return dec;
}

void VP8LClear(VP8LDecoder* const dec) {
  int i;
  if (dec == NULL) return;
  ClearMetadata(&dec->hdr_);

  free(dec->pixels_);
  dec->pixels_ = NULL;
  for (i = 0; i < dec->next_transform_; ++i) {
    ClearTransform(&dec->transforms_[i]);
  }
  dec->next_transform_ = 0;
  dec->transforms_seen_ = 0;

  free(dec->rescaler_memory);
  dec->rescaler_memory = NULL;

  dec->output_ = NULL;   // leave no trace behind
}

void VP8LDelete(VP8LDecoder* const dec) {
  if (dec != NULL) {
    VP8LClear(dec);
    free(dec);
  }
}

static void UpdateDecoder(VP8LDecoder* const dec, int width, int height) {
  VP8LMetadata* const hdr = &dec->hdr_;
  const int num_bits = hdr->huffman_subsample_bits_;
  dec->width_ = width;
  dec->height_ = height;

  hdr->huffman_xsize_ = VP8LSubSampleSize(width, num_bits);
  hdr->huffman_mask_ = (num_bits == 0) ? ~0 : (1 << num_bits) - 1;
}

static int DecodeImageStream(int xsize, int ysize,
                             int is_level0,
                             VP8LDecoder* const dec,
                             uint32_t** const decoded_data) {
  int ok = 1;
  int transform_xsize = xsize;
  int transform_ysize = ysize;
  VP8LBitReader* const br = &dec->br_;
  VP8LMetadata* const hdr = &dec->hdr_;
  uint32_t* data = NULL;
  int color_cache_bits = 0;

  // Read the transforms (may recurse).
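  // Transforms are only present in the top-level (level 0) image; the
  // sub-resolution images decoded recursively never carry transforms.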
  if (is_level0) {
    while (ok && VP8LReadBits(br, 1)) {
      ok = ReadTransform(&transform_xsize, &transform_ysize, dec);
    }
  }

  // Color cache
  if (ok && VP8LReadBits(br, 1)) {
    color_cache_bits = VP8LReadBits(br, 4);
    ok = (color_cache_bits >= 1 && color_cache_bits <= MAX_CACHE_BITS);
    if (!ok) {
      dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
      goto End;
    }
  }

  // Read the Huffman codes (may recurse).
  ok = ok && ReadHuffmanCodes(dec, transform_xsize, transform_ysize,
                              color_cache_bits, is_level0);
  if (!ok) {
    dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
    goto End;
  }

  // Finish setting up the color-cache
  if (color_cache_bits > 0) {
    hdr->color_cache_size_ = 1 << color_cache_bits;
    if (!VP8LColorCacheInit(&hdr->color_cache_, color_cache_bits)) {
      dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
      ok = 0;
      goto End;
    }
  } else {
    hdr->color_cache_size_ = 0;
  }
  UpdateDecoder(dec, transform_xsize, transform_ysize);

  if (is_level0) {   // level 0 complete
    dec->state_ = READ_HDR;
    goto End;
  }

  {
    const uint64_t total_size = (uint64_t)transform_xsize * transform_ysize;
    data = (uint32_t*)WebPSafeMalloc(total_size, sizeof(*data));
    if (data == NULL) {
      dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
      ok = 0;
      goto End;
    }
  }

  // Use the Huffman trees to decode the LZ77 encoded data.
  ok = DecodeImageData(dec, data, transform_xsize, transform_ysize, NULL);
  ok = ok && !br->error_;

 End:

  if (!ok) {
    free(data);
    ClearMetadata(hdr);
    // If not enough data (br.eos_) resulted in BIT_STREAM_ERROR, update the
    // status appropriately.
    if (dec->status_ == VP8_STATUS_BITSTREAM_ERROR && dec->br_.eos_) {
      dec->status_ = VP8_STATUS_SUSPENDED;
    }
  } else {
    if (decoded_data != NULL) {
      *decoded_data = data;
    } else {
      // We allocate image data in this function only for transforms. At level 0
      // (that is: not the transforms), we shouldn't have allocated anything.
      assert(data == NULL);
      assert(is_level0);
    }
    if (!is_level0) ClearMetadata(hdr);  // Clean up temporary data behind.
  }
  return ok;
}

//------------------------------------------------------------------------------
// Allocate internal buffers dec->pixels_ and dec->argb_cache_.
static int AllocateInternalBuffers(VP8LDecoder* const dec, int final_width,
                                   size_t bytes_per_pixel) {
  const int argb_cache_needed = (bytes_per_pixel == sizeof(uint32_t));
  const uint64_t num_pixels = (uint64_t)dec->width_ * dec->height_;
  // Scratch buffer corresponding to top-prediction row for transforming the
  // first row in the row-blocks. Not needed for paletted alpha.
  const uint64_t cache_top_pixels =
      argb_cache_needed ? (uint16_t)final_width : 0ULL;
  // Scratch buffer for temporary BGRA storage. Not needed for paletted alpha.
  const uint64_t cache_pixels =
      argb_cache_needed ? (uint64_t)final_width * NUM_ARGB_CACHE_ROWS : 0ULL;
  const uint64_t total_num_pixels =
      num_pixels + cache_top_pixels + cache_pixels;

  assert(dec->width_ <= final_width);
  dec->pixels_ = (uint32_t*)WebPSafeMalloc(total_num_pixels, bytes_per_pixel);
  if (dec->pixels_ == NULL) {
    dec->argb_cache_ = NULL;    // for sanity check
    dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
    return 0;
  }
  dec->argb_cache_ =
      argb_cache_needed ? dec->pixels_ + num_pixels + cache_top_pixels : NULL;
  return 1;
}

//------------------------------------------------------------------------------

// Special row-processing that only stores the alpha data.
static void ExtractAlphaRows(VP8LDecoder* const dec, int row) {
  const int num_rows = row - dec->last_row_;
  const uint32_t* const in = dec->pixels_ + dec->width_ * dec->last_row_;

  if (num_rows <= 0) return;  // Nothing to be done.
  ApplyInverseTransforms(dec, num_rows, in);

  // Extract alpha (which is stored in the green plane).
  {
    const int width = dec->io_->width;      // the final width (!= dec->width_)
    const int cache_pixs = width * num_rows;
    uint8_t* const dst = (uint8_t*)dec->io_->opaque + width * dec->last_row_;
    const uint32_t* const src = dec->argb_cache_;
    int i;
    for (i = 0; i < cache_pixs; ++i) dst[i] = (src[i] >> 8) & 0xff;
  }
  dec->last_row_ = dec->last_out_row_ = row;
}

// Row-processing for the special case when alpha data contains only one
// transform: color indexing.
static void ExtractPalettedAlphaRows(VP8LDecoder* const dec, int row) {
  const int num_rows = row - dec->last_row_;
  const uint8_t* const in =
      (uint8_t*)dec->pixels_ + dec->width_ * dec->last_row_;
  if (num_rows <= 0) return;  // Nothing to be done.
  ApplyInverseTransformsAlpha(dec, num_rows, in);
  dec->last_row_ = dec->last_out_row_ = row;
}

int VP8LDecodeAlphaImageStream(int width, int height, const uint8_t* const data,
                               size_t data_size, uint8_t* const output) {
  VP8Io io;
  int ok = 0;
  VP8LDecoder* const dec = VP8LNew();
  size_t bytes_per_pixel = sizeof(uint32_t);  // Default: BGRA mode.
  if (dec == NULL) return 0;

  dec->width_ = width;
  dec->height_ = height;
  dec->io_ = &io;

  VP8InitIo(&io);
  WebPInitCustomIo(NULL, &io);    // Just a sanity Init. io won't be used.
  io.opaque = output;
  io.width = width;
  io.height = height;

  dec->status_ = VP8_STATUS_OK;
  VP8LInitBitReader(&dec->br_, data, data_size);

  dec->action_ = READ_HDR;
  if (!DecodeImageStream(width, height, 1, dec, NULL)) goto Err;

  // Special case: if alpha data uses only the color indexing transform and
  // doesn't use color cache (a frequent case), we will use DecodeAlphaData()
  // method that only needs allocation of 1 byte per pixel (alpha channel).
  if (dec->next_transform_ == 1 &&
      dec->transforms_[0].type_ == COLOR_INDEXING_TRANSFORM &&
      dec->hdr_.color_cache_size_ == 0) {
    bytes_per_pixel = sizeof(uint8_t);
  }

  // Allocate internal buffers (note that dec->width_ may have changed here).
  if (!AllocateInternalBuffers(dec, width, bytes_per_pixel)) goto Err;

  // Decode (with special row processing).
  dec->action_ = READ_DATA;
  ok = (bytes_per_pixel == sizeof(uint8_t)) ?
      DecodeAlphaData(dec, (uint8_t*)dec->pixels_, dec->width_, dec->height_,
                      ExtractPalettedAlphaRows) :
      DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_,
                      ExtractAlphaRows);

 Err:
  VP8LDelete(dec);
  return ok;
}

//------------------------------------------------------------------------------

int VP8LDecodeHeader(VP8LDecoder* const dec, VP8Io* const io) {
  int width, height, has_alpha;

  if (dec == NULL) return 0;
  if (io == NULL) {
    dec->status_ = VP8_STATUS_INVALID_PARAM;
    return 0;
  }

  dec->io_ = io;
  dec->status_ = VP8_STATUS_OK;
  VP8LInitBitReader(&dec->br_, io->data, io->data_size);
  if (!ReadImageInfo(&dec->br_, &width, &height, &has_alpha)) {
    dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
    goto Error;
  }
  dec->state_ = READ_DIM;
  io->width = width;
  io->height = height;

  dec->action_ = READ_HDR;
  if (!DecodeImageStream(width, height, 1, dec, NULL)) goto Error;
  return 1;

 Error:
  VP8LClear(dec);
  assert(dec->status_ != VP8_STATUS_OK);
  return 0;
}

int VP8LDecodeImage(VP8LDecoder* const dec) {
  const size_t bytes_per_pixel = sizeof(uint32_t);
  VP8Io* io = NULL;
  WebPDecParams* params = NULL;

  // Sanity checks.
  if (dec == NULL) return 0;

  io = dec->io_;
  assert(io != NULL);
  params = (WebPDecParams*)io->opaque;
  assert(params != NULL);
  dec->output_ = params->output;
  assert(dec->output_ != NULL);

  // Initialization.
  if (!WebPIoInitFromOptions(params->options, io, MODE_BGRA)) {
    dec->status_ = VP8_STATUS_INVALID_PARAM;
    goto Err;
  }

  if (!AllocateInternalBuffers(dec, io->width, bytes_per_pixel)) goto Err;

  if (io->use_scaling && !AllocateAndInitRescaler(dec, io)) goto Err;

  // Decode.
  dec->action_ = READ_DATA;
  if (!DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_,
                       ProcessRows)) {
    goto Err;
  }

  // Cleanup.
  params->last_y = dec->last_out_row_;
  VP8LClear(dec);
  return 1;

 Err:
  VP8LClear(dec);
  assert(dec->status_ != VP8_STATUS_OK);
  return 0;
}

//------------------------------------------------------------------------------

#if defined(__cplusplus) || defined(c_plusplus)
}    // extern "C"
#endif