/developers/samples/android/media/HdrViewfinder/ |
template-params.xml | 89 necessary YUV->RGB conversion. The camera subsystem outputs YUV images naturally, while the GPU 91 fused/composited, a standard YUV->RGB color transform is applied before the data is written
|
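The HdrViewfinder entry above describes a standard YUV->RGB color transform applied after the exposures are fused on the GPU. As a rough illustration only (not the sample's actual RenderScript kernel; class and method names here are hypothetical), a full-range JFIF-style conversion of a single pixel looks like this:

```java
/** Minimal sketch of a full-range (JFIF-style) YUV -> RGB conversion for one pixel. */
public final class YuvToRgbSketch {
    private static int clamp(int v) {
        return Math.max(0, Math.min(255, v));
    }

    /** y, u, v are unsigned 8-bit samples (0..255); returns packed ARGB. */
    public static int toArgb(int y, int u, int v) {
        int cb = u - 128;
        int cr = v - 128;
        int r = clamp((int) Math.round(y + 1.402 * cr));
        int g = clamp((int) Math.round(y - 0.344136 * cb - 0.714136 * cr));
        int b = clamp((int) Math.round(y + 1.772 * cb));
        return 0xFF000000 | (r << 16) | (g << 8) | b;
    }
}
```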
/external/libvpx/libvpx/test/ |
cpu_speed_test.cc | 82 ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0, 117 ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0, 132 ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0,
|
vp9_end_to_end_test.cc | 50 {"park_joy_90p_8_440.yuv", 8, VPX_IMG_FMT_I440, VPX_BITS_8, 1}, 55 {"park_joy_90p_10_440.yuv", 10, VPX_IMG_FMT_I44016, VPX_BITS_10, 3}, 59 {"park_joy_90p_12_440.yuv", 12, VPX_IMG_FMT_I44016, VPX_BITS_12, 3},
|
datarate_test.cc | 152 ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 181 ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 210 ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 239 ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 464 ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 523 ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 572 ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 612 ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 660 ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 695 ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288 [all...] |
/hardware/qcom/display/msm8996/libgralloc1/ |
gr_adreno_info.h | 65 ADRENO_PIXELFORMAT_UYVY = 614, // YUV 4:2:2 packed progressive (1 plane) 67 ADRENO_PIXELFORMAT_Y8U8V8A8 = 620, // YUV 4:4:4 packed (1 plane) 68 ADRENO_PIXELFORMAT_Y8 = 625, // Single 8-bit luma only channel YUV format
|
/external/libjpeg-turbo/doc/html/ |
group___turbo_j_p_e_g.html | 233 Compress a YUV planar image into a JPEG image. 242 The size of the buffer (in bytes) required to hold a YUV planar image with the given parameters. 245 The size of the buffer (in bytes) required to hold a YUV image plane with the given parameters. 248 The plane width of a YUV image plane with the given parameters. 251 The plane height of a YUV image plane with the given parameters. (…) [all...] |
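The TurboJPEG documentation rows above describe helpers that report the buffer and plane sizes needed to hold a YUV planar image. As a hedged illustration of what those sizes amount to for 4:2:0 data, ignoring the library's stride/padding options and not using the TurboJPEG API itself, a plain computation could look like the following sketch (class and method names are hypothetical):

```java
/** Rough sketch of plane/buffer sizes for an I420 (4:2:0 planar) image; no stride padding. */
public final class I420SizeSketch {
    public static int lumaPlaneSize(int width, int height) {
        return width * height;
    }

    public static int chromaPlaneSize(int width, int height) {
        // Each chroma plane is subsampled by 2 in both directions; round odd dimensions up.
        return ((width + 1) / 2) * ((height + 1) / 2);
    }

    public static int bufferSize(int width, int height) {
        return lumaPlaneSize(width, height) + 2 * chromaPlaneSize(width, height);
    }
}
```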
/ndk/docs/Additional_library_docs/renderscript/ |
classandroid_1_1RSC_1_1ScriptIntrinsicYuvToRGB.html | 64 Intrinsic for converting an Android YUV buffer to RGB. 65 The input allocation should be supplied in a supported YUV format as a YUV element Allocation. The output is RGBA; the alpha channel will be set to 255. 93 Create an intrinsic for converting YUV to RGB. 144 Set the input YUV allocation.
|
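The page above documents the NDK C++ ScriptIntrinsicYuvToRGB wrapper. For orientation, here is a minimal usage sketch of the corresponding Java framework intrinsic (android.renderscript); it assumes yuvAlloc already holds a YUV-element Allocation and rgbaAlloc is a matching U8_4 output Allocation, both created elsewhere.

```java
import android.renderscript.Allocation;
import android.renderscript.Element;
import android.renderscript.RenderScript;
import android.renderscript.ScriptIntrinsicYuvToRGB;

/** Sketch only: yuvAlloc and rgbaAlloc are assumed to be pre-built Allocations. */
public final class YuvToRgbIntrinsicSketch {
    public static void convert(RenderScript rs, Allocation yuvAlloc, Allocation rgbaAlloc) {
        ScriptIntrinsicYuvToRGB yuvToRgb = ScriptIntrinsicYuvToRGB.create(rs, Element.U8_4(rs));
        yuvToRgb.setInput(yuvAlloc);   // supply the YUV input allocation
        yuvToRgb.forEach(rgbaAlloc);   // write RGBA output; the alpha channel is set to 255
    }
}
```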
/cts/tests/camera/src/android/hardware/camera2/cts/ |
AllocationTest.java | 279 * Convert a single YUV pixel (3 byte elements) to an RGB pixel. 302 * @param yuvData An array of a YUV pixel (at least 3 bytes large) 307 final int CHANNELS = 3; // yuv 310 assertTrue("YUV pixel must be at least 3 bytes large", CHANNELS <= yuvData.length); 318 // convert YUV -> RGB (from JFIF's "Conversion to and from RGB" section) 424 // Minimal required size to represent YUV 4:2:0 image 432 "YUV 420 packed size (%d) should be at least as large as the actual size " + 472 // Create a script graph that converts YUV to RGB 666 * output: mean YUV value of a central section of the image, 667 * YUV 4:4:4 encoded as U8_ [all...] |
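The AllocationTest snippets above mention, among other things, computing the mean YUV value of a central section of the image, returned as packed YUV 4:4:4 samples. As a rough sketch of that idea only (not the CTS test's RenderScript graph; names are hypothetical, and the window is assumed to fit inside the image), averaging a centered region of a packed 3-bytes-per-pixel buffer could look like:

```java
/** Sketch: mean Y, U, V over a centered square window of a packed YUV 4:4:4 image. */
public final class MeanYuvSketch {
    public static float[] centerMean(byte[] yuv444, int width, int height, int window) {
        int x0 = (width - window) / 2;
        int y0 = (height - window) / 2;
        long[] sum = new long[3];
        for (int y = y0; y < y0 + window; y++) {
            for (int x = x0; x < x0 + window; x++) {
                int base = (y * width + x) * 3;       // 3 bytes per pixel: Y, U, V
                for (int c = 0; c < 3; c++) {
                    sum[c] += yuv444[base + c] & 0xFF; // treat samples as unsigned
                }
            }
        }
        float n = (float) window * window;
        return new float[] { sum[0] / n, sum[1] / n, sum[2] / n };
    }
}
```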
/device/moto/shamu/camera/QCamera/stack/mm-camera-test/src/ |
mm_qcamera_display.c | 69 } yuv; variable in typeref:union:__anon5723 178 /* Initialize yuv structure */ 179 yuv.list.count = 1; 181 e = &yuv.list.req[0]; 240 result = ioctl(fb_fd, MSMFB_BLIT, &yuv.list); 461 /* Initialize yuv structure */ 462 yuv.list.count = 1; 463 e = &yuv.list.req[0]; 521 /* Initialize yuv structure */ 522 yuv.list.count = 1 [all...] |
/cts/tests/camera/src/android/hardware/camera2/cts/rs/ |
AllocationInfo.java | 95 element = Element.YUV(rs); 236 if (element.equals(Element.YUV(mRS))) { 288 YUV, 305 if (element.equals(Element.YUV(RenderScriptSingleton.getRS()))) { 306 return YUV; 333 case YUV: 334 comparison = Element.YUV(mRS);
|
/external/webrtc/talk/app/webrtc/androidtests/src/org/webrtc/ |
GlRectDrawerTest.java | 140 // Create YUV byte buffer planes with random content. 154 // Upload the YUV byte buffer data as textures. 163 // Draw the YUV frame onto the pixel buffer. 172 // Compare the YUV data with the RGBA result. 175 // YUV color space. Y in [0, 1], UV in [-0.5, 0.5]. The constants are taken from the YUV 192 // Assert rendered image is close to pixel perfect from source YUV.
|
/frameworks/rs/ |
rsType.cpp | 122 // YUV only supports basic 2d 240 if (t->getDimYuv() != params->yuv) continue; 272 nt->mHal.state.dimYuv = params->yuv; 316 p.yuv = getDimYuv(); 365 uint32_t dimY, uint32_t dimZ, bool mipmaps, bool faces, uint32_t yuv) { 375 p.yuv = yuv;
|
/hardware/intel/img/hwcomposer/merrifield/ips/common/ |
OverlayPlaneBase.cpp | 409 stride.yuv.yStride = yStride; 410 stride.yuv.uvStride = uvStride; 415 stride.yuv.yStride = yStride; 416 stride.yuv.uvStride = uvStride; 421 stride.yuv.yStride = payload->scaling_luma_stride; 422 stride.yuv.uvStride = payload->scaling_chroma_u_stride; 426 stride.yuv.yStride = yStride; 427 stride.yuv.uvStride = uvStride; 434 stride.yuv.yStride = yStride; 435 stride.yuv.uvStride = uvStride [all...] |
/packages/apps/Camera2/src/com/android/camera/processing/imagebackend/ |
TaskConvertImageToRGBPreview.java | 59 * Quick n' Dirty YUV to RGB conversion 147 // for chroma subsampled images such as YUV 159 * Calculates the memory offset of a YUV 420 plane, given the parameters of 160 * the separate YUV color planes and the fact that UV components may be 168 * @param colorSubsample Color subsample due to the YUV color space (In YUV, 190 * YUV data. This version of the function should be used in production and 206 * subsample of the YUV space with an acceptable color conversion, but w/o any 233 + ") in YUV Image Object"); 492 * YUV data. No crop is applied [all...] |
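The comments excerpted above describe computing the memory offset into a YUV 420 plane from the plane's pixel stride, row stride, and chroma subsampling factor. A hedged sketch of that arithmetic follows; it is not the Camera2 app's actual helper, and the parameter set is assumed for illustration.

```java
/** Sketch: byte offset of the sample at full-resolution pixel (x, y) within one image plane. */
public final class PlaneOffsetSketch {
    // pixelStride: bytes between horizontally adjacent samples in this plane.
    // rowStride:   bytes between vertically adjacent rows in this plane.
    // subsample:   subsampling factor for this plane (1 for Y, 2 for U/V in 4:2:0).
    public static int offset(int x, int y, int pixelStride, int rowStride, int subsample) {
        return (y / subsample) * rowStride + (x / subsample) * pixelStride;
    }
}
```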
/hardware/qcom/display/msm8084/libcopybit/ |
copybit_c2d.cpp | 107 #define MAX_YUV_2_PLANE_SURFACES 4// Max. 2-plane YUV layers currently supported per draw 108 #define MAX_YUV_3_PLANE_SURFACES 1// Max. 3-plane YUV layers currently supported per draw 150 int blit_yuv_2_plane_count; // Total 2 plane YUV surfaces being 151 int blit_yuv_3_plane_count; // Total 3 plane YUV surfaces being blit 278 /* Get the C2D formats needed for conversion to YUV */ 281 // We do not swap the RB when the target is YUV 285 // The U and V need to be interchanged when the target is YUV 588 ALOGE("%s: YUV Surface c2dUpdateSurface ERROR", __FUNCTION__); [all...] |
/hardware/qcom/display/msm8226/libcopybit/ |
copybit_c2d.cpp | 107 #define MAX_YUV_2_PLANE_SURFACES 4// Max. 2-plane YUV layers currently supported per draw 108 #define MAX_YUV_3_PLANE_SURFACES 1// Max. 3-plane YUV layers currently supported per draw 150 int blit_yuv_2_plane_count; // Total 2 plane YUV surfaces being 151 int blit_yuv_3_plane_count; // Total 3 plane YUV surfaces being blit 280 /* Get the C2D formats needed for conversion to YUV */ 283 // We do not swap the RB when the target is YUV 287 // The U and V need to be interchanged when the target is YUV 594 ALOGE("%s: YUV Surface c2dUpdateSurface ERROR", __FUNCTION__); [all...] |
/hardware/qcom/display/msm8909/libcopybit/ |
copybit_c2d.cpp | 106 #define MAX_YUV_2_PLANE_SURFACES 4// Max. 2-plane YUV layers currently supported per draw 107 #define MAX_YUV_3_PLANE_SURFACES 1// Max. 3-plane YUV layers currently supported per draw 149 int blit_yuv_2_plane_count; // Total 2 plane YUV surfaces being 150 int blit_yuv_3_plane_count; // Total 3 plane YUV surfaces being blit 279 /* Get the C2D formats needed for conversion to YUV */ 282 // We do not swap the RB when the target is YUV 286 // The U and V need to be interchanged when the target is YUV 593 ALOGE("%s: YUV Surface c2dUpdateSurface ERROR", __FUNCTION__); 921 // need to convert based on this param. YUV formats have bpp=1, so checking [all...] |
/hardware/qcom/display/msm8960/libcopybit/ |
copybit_c2d.cpp | 109 #define MAX_YUV_2_PLANE_SURFACES 4// Max. 2-plane YUV layers currently supported per draw 110 #define MAX_YUV_3_PLANE_SURFACES 1// Max. 3-plane YUV layers currently supported per draw 152 int blit_yuv_2_plane_count; // Total 2 plane YUV surfaces being 153 int blit_yuv_3_plane_count; // Total 3 plane YUV surfaces being blit 276 /* Get the C2D formats needed for conversion to YUV */ 279 // We do not swap the RB when the target is YUV 283 // The U and V need to be interchanged when the target is YUV 578 ALOGE("%s: YUV Surface c2dUpdateSurface ERROR", __FUNCTION__); 917 // need to convert based on this param. YUV formats have bpp=1, so checking [all...] |
/hardware/qcom/display/msm8994/libcopybit/ |
copybit_c2d.cpp | 106 #define MAX_YUV_2_PLANE_SURFACES 4// Max. 2-plane YUV layers currently supported per draw 107 #define MAX_YUV_3_PLANE_SURFACES 1// Max. 3-plane YUV layers currently supported per draw 149 int blit_yuv_2_plane_count; // Total 2 plane YUV surfaces being 150 int blit_yuv_3_plane_count; // Total 3 plane YUV surfaces being blit 281 /* Get the C2D formats needed for conversion to YUV */ 284 // We do not swap the RB when the target is YUV 288 // The U and V need to be interchanged when the target is YUV 597 ALOGE("%s: YUV Surface c2dUpdateSurface ERROR", __FUNCTION__); [all...] |
/hardware/qcom/display/msm8996/libcopybit/ |
copybit_c2d.cpp | 106 #define MAX_YUV_2_PLANE_SURFACES 4// Max. 2-plane YUV layers currently supported per draw 107 #define MAX_YUV_3_PLANE_SURFACES 1// Max. 3-plane YUV layers currently supported per draw 150 int blit_yuv_2_plane_count; // Total 2 plane YUV surfaces being 151 int blit_yuv_3_plane_count; // Total 3 plane YUV surfaces being blit 284 /* Get the C2D formats needed for conversion to YUV */ 287 // We do not swap the RB when the target is YUV 291 // The U and V need to be interchanged when the target is YUV 599 ALOGE("%s: YUV Surface c2dUpdateSurface ERROR", __FUNCTION__); [all...] |
/cts/tests/tests/media/src/android/media/cts/ |
Vp8CodecTestBase.java | 82 // were calculated and were written to yuv file. 259 outputIvfBaseName + resolutionScales[i]+ ".yuv"; 316 private static byte[] YUV420ToNV(int width, int height, byte[] yuv) { 317 byte[] nv = new byte[yuv.length]; 319 System.arraycopy(yuv, 0, nv, 0, width * height); 326 nv[nv_offset++] = yuv[u_offset++]; 327 nv[nv_offset++] = yuv[v_offset++]; 338 byte[] yuv = new byte[width * height * 3 / 2]; 342 System.arraycopy(nv12, i * stride, yuv, i * width, width); 352 yuv[u_offset++] = nv12[nv_offset++] 541 FileOutputStream yuv = null; local [all...] |
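The YUV420ToNV helper excerpted above repacks a planar I420 buffer into a semi-planar NV layout by copying the luma plane and interleaving the U and V samples. A self-contained sketch of that repacking (mirroring the excerpt, not the test's exact code; even width and height are assumed):

```java
/** Sketch: repack planar I420 (Y, then U, then V) into NV12-style semi-planar (Y, then interleaved UV). */
public final class I420ToNvSketch {
    public static byte[] convert(byte[] yuv, int width, int height) {
        byte[] nv = new byte[yuv.length];
        int lumaSize = width * height;
        System.arraycopy(yuv, 0, nv, 0, lumaSize);  // copy the Y plane unchanged
        int uOffset = lumaSize;                     // U plane follows Y in I420
        int vOffset = lumaSize + lumaSize / 4;      // V plane follows U
        int nvOffset = lumaSize;                    // interleaved UV follows Y in NV layout
        for (int i = 0; i < lumaSize / 4; i++) {
            nv[nvOffset++] = yuv[uOffset++];        // U sample
            nv[nvOffset++] = yuv[vOffset++];        // V sample
        }
        return nv;
    }
}
```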
/external/libjpeg-turbo/java/doc/org/libjpegturbo/turbojpeg/ |
TJCompressor.html | 269 instance into a unified YUV planar image buffer and return a 278 instance into a YUV planar image and store it in the given 339 Associate an uncompressed YUV planar source image with this compressor 478 the JPEG or YUV image should be compressed/encoded; y - y offset (in pixels) of the region in the source image from which 479 the JPEG or YUV image should be compressed/encoded; width - width (in pixels) of the region in the source image from 480 which the JPEG or YUV image should be compressed/encoded; pitch - bytes per line of the source image. Normally, this should be 484 compress/encode a JPEG or YUV image from a region of a larger source 488 which the JPEG or YUV image should be compressed/encoded; pixelFormat - pixel format of the source image (one of 529 the JPEG or YUV image should be compressed/encoded; y - y offset (in pixels) of the region in the source image from which 530 the JPEG or YUV image should be compressed/encoded; width - width (in pixels) of the region in (…) [all...] |
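The TJCompressor documentation above covers compressing or encoding a region of a source image to JPEG or to a YUV planar image. The following is a hedged usage sketch of the Java TurboJPEG binding for the plain JPEG path; method signatures are written from memory and should be treated as assumptions to check against the bundled javadoc.

```java
import org.libjpegturbo.turbojpeg.TJ;
import org.libjpegturbo.turbojpeg.TJCompressor;

/** Hedged sketch: compress a packed RGB buffer to JPEG with 4:2:0 chroma subsampling. */
public final class TjCompressSketch {
    public static byte[] toJpeg(byte[] rgb, int width, int height) throws Exception {
        TJCompressor tjc = new TJCompressor();
        try {
            // x/y offset 0 and pitch 0 select the whole image with the default packed pitch.
            tjc.setSourceImage(rgb, 0, 0, width, 0, height, TJ.PF_RGB);
            tjc.setSubsamp(TJ.SAMP_420);
            tjc.setJPEGQuality(90);
            byte[] jpeg = tjc.compress(0);
            int size = tjc.getCompressedSize();
            return java.util.Arrays.copyOf(jpeg, size); // compress() may return a larger buffer
        } finally {
            tjc.close();
        }
    }
}
```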
/external/opencv3/modules/cudacodec/src/cuda/ |
nv12_to_rgb.cu | 46 * This sample uses CUDA to perform a simple NV12 (YUV 4:2:0 planar) 78 // Convert YUV To RGB with hue adjustment 176 // YUV to RGB Transformation conversion
|
/external/webrtc/talk/app/webrtc/java/android/org/webrtc/ |
GlRectDrawer.java | 45 * be an OES texture or YUV textures in I420 format. The GL state must be preserved between draw 50 // Simple vertex shader, used for both YUV and OES. 164 * Draw a YUV frame with specified texture transformation matrix. Required resources are
|
/external/webrtc/tools/python_charts/webrtc/ |
data_helper.py | 147 {'name': 'input_filename', 'value': 'foreman_cif.yuv'}, 166 'input_filename': 'foreman_cif.yuv' }, 169 'input_filename': 'foreman_cif.yuv' },
|