/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */


#include <stdlib.h>
#include <string.h>
#include "vpx/vpx_image.h"

#define ADDRESS_STORAGE_SIZE sizeof(size_t)
/* Returns an addr aligned to the byte boundary specified by align. */
#define align_addr(addr, align) \
  (void *)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align))

/* Memalign code is copied from vpx_mem.c */
static void *img_buf_memalign(size_t align, size_t size) {
  void *addr, *x = NULL;

  addr = malloc(size + align - 1 + ADDRESS_STORAGE_SIZE);

  if (addr) {
    x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
    /* Save the actual malloc address */
    ((size_t *)x)[-1] = (size_t)addr;
  }

  return x;
}

static void img_buf_free(void *memblk) {
  if (memblk) {
    void *addr = (void *)(((size_t *)memblk)[-1]);
    free(addr);
  }
}

static vpx_image_t *img_alloc_helper(vpx_image_t *img,
                                     vpx_img_fmt_t fmt,
                                     unsigned int d_w,
                                     unsigned int d_h,
                                     unsigned int buf_align,
                                     unsigned int stride_align,
                                     unsigned char *img_data) {
  unsigned int h, w, s, xcs, ycs, bps;
  int align;

  /* Treat align==0 like align==1 */
  if (!buf_align)
    buf_align = 1;

  /* Validate alignment (must be power of 2) */
  if (buf_align & (buf_align - 1))
    goto fail;

  /* Treat align==0 like align==1 */
  if (!stride_align)
    stride_align = 1;

  /* Validate alignment (must be power of 2) */
  if (stride_align & (stride_align - 1))
    goto fail;

  /* Get sample size for this format */
  switch (fmt) {
    case VPX_IMG_FMT_RGB32:
    case VPX_IMG_FMT_RGB32_LE:
    case VPX_IMG_FMT_ARGB:
    case VPX_IMG_FMT_ARGB_LE:
      bps = 32;
      break;
    case VPX_IMG_FMT_RGB24:
    case VPX_IMG_FMT_BGR24:
      bps = 24;
      break;
    case VPX_IMG_FMT_RGB565:
    case VPX_IMG_FMT_RGB565_LE:
    case VPX_IMG_FMT_RGB555:
    case VPX_IMG_FMT_RGB555_LE:
    case VPX_IMG_FMT_UYVY:
    case VPX_IMG_FMT_YUY2:
    case VPX_IMG_FMT_YVYU:
      bps = 16;
      break;
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_VPXI420:
    case VPX_IMG_FMT_VPXYV12:
      bps = 12;
      break;
    default:
      bps = 16;
      break;
  }

  /* Get chroma shift values for this format */
  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_VPXI420:
    case VPX_IMG_FMT_VPXYV12:
      xcs = 1;
      break;
    default:
      xcs = 0;
      break;
  }

  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_VPXI420:
    case VPX_IMG_FMT_VPXYV12:
      ycs = 1;
      break;
    default:
      ycs = 0;
      break;
  }

  /* Calculate storage sizes given the chroma subsampling */
  align = (1 << xcs) - 1;
  w = (d_w + align) & ~align;
  align = (1 << ycs) - 1;
  h = (d_h + align) & ~align;
  s = (fmt & VPX_IMG_FMT_PLANAR) ? w : bps * w / 8;
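  /* Round the stride up to a multiple of stride_align, which was
   * validated above to be a power of two. */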
  s = (s + stride_align - 1) & ~(stride_align - 1);

  /* Allocate the new image */
  if (!img) {
    img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));

    if (!img)
      goto fail;

    img->self_allocd = 1;
  } else {
    memset(img, 0, sizeof(vpx_image_t));
  }

  img->img_data = img_data;

  if (!img_data) {
    img->img_data = img_buf_memalign(buf_align,
                                     (fmt & VPX_IMG_FMT_PLANAR)
                                         ? h * s * bps / 8
                                         : h * s);
    img->img_data_owner = 1;
  }

  if (!img->img_data)
    goto fail;

  img->fmt = fmt;
  img->w = w;
  img->h = h;
  img->x_chroma_shift = xcs;
  img->y_chroma_shift = ycs;
  img->bps = bps;

  /* Calculate strides */
  img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = s;
  img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = s >> xcs;

  /* Default viewport to entire image */
  if (!vpx_img_set_rect(img, 0, 0, d_w, d_h))
    return img;

fail:
  vpx_img_free(img);
  return NULL;
}

vpx_image_t *vpx_img_alloc(vpx_image_t *img,
                           vpx_img_fmt_t fmt,
                           unsigned int d_w,
                           unsigned int d_h,
                           unsigned int align) {
  return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
}

vpx_image_t *vpx_img_wrap(vpx_image_t *img,
                          vpx_img_fmt_t fmt,
                          unsigned int d_w,
                          unsigned int d_h,
                          unsigned int stride_align,
                          unsigned char *img_data) {
  /* By setting buf_align = 1, we don't change buffer alignment in this
   * function. */
  return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
}

int vpx_img_set_rect(vpx_image_t *img,
                     unsigned int x,
                     unsigned int y,
                     unsigned int w,
                     unsigned int h) {
  unsigned char *data;

  if (x + w <= img->w && y + h <= img->h) {
    img->d_w = w;
    img->d_h = h;

    /* Calculate plane pointers */
    if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
      img->planes[VPX_PLANE_PACKED] =
          img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
    } else {
      data = img->img_data;

      if (img->fmt & VPX_IMG_FMT_HAS_ALPHA) {
        img->planes[VPX_PLANE_ALPHA] =
            data + x + y * img->stride[VPX_PLANE_ALPHA];
        data += img->h * img->stride[VPX_PLANE_ALPHA];
      }

      img->planes[VPX_PLANE_Y] = data + x + y * img->stride[VPX_PLANE_Y];
      data += img->h * img->stride[VPX_PLANE_Y];

      if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
        img->planes[VPX_PLANE_U] =
            data + (x >> img->x_chroma_shift)
                 + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
        data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
        img->planes[VPX_PLANE_V] =
            data + (x >> img->x_chroma_shift)
                 + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
      } else {
        img->planes[VPX_PLANE_V] =
            data + (x >> img->x_chroma_shift)
                 + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
        data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
        img->planes[VPX_PLANE_U] =
            data + (x >> img->x_chroma_shift)
                 + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
      }
    }

    return 0;
  }

  return -1;
}

void vpx_img_flip(vpx_image_t *img) {
  /* Note: In the pointer adjustment calculations below, we want the rhs
   * to be promoted to a signed type.
   * Section 6.3.1.8 of the ISO C99 standard indicates that if the
   * adjustment parameter is unsigned, the stride parameter will be
   * promoted to unsigned, causing errors when the lhs is a larger type
   * than the rhs.
   */
  img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
  img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];

  img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
                              * img->stride[VPX_PLANE_U];
  img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];

  img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
                              * img->stride[VPX_PLANE_V];
  img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];

  img->planes[VPX_PLANE_ALPHA] += (signed)(img->d_h - 1)
                                  * img->stride[VPX_PLANE_ALPHA];
  img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
}

void vpx_img_free(vpx_image_t *img) {
  if (img) {
    if (img->img_data && img->img_data_owner)
      img_buf_free(img->img_data);

    if (img->self_allocd)
      free(img);
  }
}
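
/* Usage sketch (not part of the original file): a minimal example of how
 * the functions above fit together, using only the public API implemented
 * here plus <string.h>, which this file already includes. The function
 * name, the 320x240 size, the 16-byte alignment, and the mid-gray fill
 * value are arbitrary illustration choices, not library conventions.
 */
#if 0
static int vpx_image_usage_example(void) {
  vpx_image_t img;
  unsigned int row;

  /* Allocate an I420 image with 16-byte buffer and stride alignment. */
  if (!vpx_img_alloc(&img, VPX_IMG_FMT_I420, 320, 240, 16))
    return -1;

  /* Fill the visible luma rows with mid-gray, honoring the padded stride. */
  for (row = 0; row < img.d_h; row++)
    memset(img.planes[VPX_PLANE_Y] + row * img.stride[VPX_PLANE_Y], 128,
           img.d_w);

  /* Flip the image vertically in place; this negates the plane strides. */
  vpx_img_flip(&img);

  /* img was caller-provided, so only the pixel buffer is released here. */
  vpx_img_free(&img);
  return 0;
}
#endif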