      1 /*
      2  * Copyright (C) 2016 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 package android.hardware.graphics.common@1.0;
     18 
     19 /**
     20  * Common enumeration and structure definitions for all graphics HALs.
     21  */
     22 
/**
 * Pixel formats for graphics buffers.
 */
@export(name="android_pixel_format_t", value_prefix="HAL_PIXEL_FORMAT_")
enum PixelFormat : int32_t {
    /**
     * 32-bit format that has 8-bit R, G, B, and A components, in that order,
     * from the lowest memory address to the highest memory address.
     *
     * The component values are unsigned normalized to the range [0, 1], whose
     * interpretation is defined by the dataspace.
     */
    RGBA_8888          = 0x1,

    /**
     * 32-bit format that has 8-bit R, G, B, and unused components, in that
     * order, from the lowest memory address to the highest memory address.
     *
     * The component values are unsigned normalized to the range [0, 1], whose
     * interpretation is defined by the dataspace.
     */
    RGBX_8888          = 0x2,

    /**
     * 24-bit format that has 8-bit R, G, and B components, in that order,
     * from the lowest memory address to the highest memory address.
     *
     * The component values are unsigned normalized to the range [0, 1], whose
     * interpretation is defined by the dataspace.
     */
    RGB_888            = 0x3,

    /**
     * 16-bit packed format that has 5-bit R, 6-bit G, and 5-bit B components,
     * in that order, from the most-significant bits to the least-significant
     * bits.
     *
     * The component values are unsigned normalized to the range [0, 1], whose
     * interpretation is defined by the dataspace.
     */
    RGB_565            = 0x4,

    /**
     * 32-bit format that has 8-bit B, G, R, and A components, in that order,
     * from the lowest memory address to the highest memory address.
     *
     * The component values are unsigned normalized to the range [0, 1], whose
     * interpretation is defined by the dataspace.
     */
    BGRA_8888          = 0x5,

    /**
     * Legacy formats deprecated in favor of YCBCR_420_888.
     */
    YCBCR_422_SP       = 0x10,  // NV16
    YCRCB_420_SP       = 0x11,  // NV21
    YCBCR_422_I        = 0x14,  // YUY2

    /**
     * 64-bit format that has 16-bit R, G, B, and A components, in that order,
     * from the lowest memory address to the highest memory address.
     *
     * The component values are signed floats, whose interpretation is defined
     * by the dataspace.
     */
    RGBA_FP16          = 0x16,

    /**
     * RAW16 is a single-channel, 16-bit, little endian format, typically
     * representing raw Bayer-pattern images from an image sensor, with minimal
     * processing.
     *
     * The exact pixel layout of the data in the buffer is sensor-dependent, and
     * needs to be queried from the camera device.
     *
     * Generally, not all 16 bits are used; more common values are 10 or 12
     * bits. If not all bits are used, the lower-order bits are filled first.
     * All parameters to interpret the raw data (black and white points,
     * color space, etc) must be queried from the camera device.
     *
     * This format assumes
     * - an even width
     * - an even height
     * - a horizontal stride multiple of 16 pixels
     * - a vertical stride equal to the height
     * - strides are specified in pixels, not in bytes
     *
     *   size = stride * height * 2
     *
     * This format must be accepted by the allocator when used with the
     * following usage flags:
     *
     *    - BufferUsage::CAMERA_*
     *    - BufferUsage::CPU_*
     *    - BufferUsage::RENDERSCRIPT
     *
     * The mapping of the dataspace to buffer contents for RAW16 is as
     * follows:
     *
     *  Dataspace value               | Buffer contents
     * -------------------------------+-----------------------------------------
     *  Dataspace::ARBITRARY          | Raw image sensor data, layout is as
     *                                | defined above.
     *  Dataspace::DEPTH              | Unprocessed implementation-dependent raw
     *                                | depth measurements, opaque with 16 bit
     *                                | samples.
     *  Other                         | Unsupported
     */
    RAW16              = 0x20,

    /**
     * BLOB is used to carry task-specific data which does not have a standard
     * image structure. The details of the format are left to the two
     * endpoints.
     *
     * A typical use case is for transporting JPEG-compressed images from the
     * Camera HAL to the framework or to applications.
     *
     * Buffers of this format must have a height of 1, and width equal to their
     * size in bytes.
     *
     * The mapping of the dataspace to buffer contents for BLOB is as
     * follows:
     *
     *  Dataspace value               | Buffer contents
     * -------------------------------+-----------------------------------------
     *  Dataspace::JFIF               | An encoded JPEG image
     *  Dataspace::DEPTH              | An android_depth_points buffer
     *  Dataspace::SENSOR             | Sensor event data.
     *  Other                         | Unsupported
     */
    BLOB               = 0x21,

    /**
     * A format indicating that the choice of format is entirely up to the
     * allocator.
     *
     * The allocator should examine the usage bits passed in when allocating a
     * buffer with this format, and it should derive the pixel format from
     * those usage flags. This format must never be used with any of the
     * BufferUsage::CPU_* usage flags.
     *
     * Even when the internally chosen format has an alpha component, the
     * clients must assume the alpha value to be 1.0.
     *
     * The interpretation of the component values is defined by the dataspace.
     */
    IMPLEMENTATION_DEFINED = 0x22,

    /**
     * This format allows platforms to use an efficient YCbCr/YCrCb 4:2:0
     * buffer layout, while still describing the general format in a
     * layout-independent manner. While called YCbCr, it can be used to
     * describe formats with either chromatic ordering, as well as
     * whole planar or semiplanar layouts.
     *
     * This format must be accepted by the allocator when BufferUsage::CPU_*
     * are set.
     *
     * Buffers with this format must be locked with IMapper::lockYCbCr.
     * Locking with IMapper::lock must return an error.
     *
     * The interpretation of the component values is defined by the dataspace.
     */
    YCBCR_420_888      = 0x23,

    /**
     * RAW_OPAQUE is a format for unprocessed raw image buffers coming from an
     * image sensor. The actual structure of buffers of this format is
     * implementation-dependent.
     *
     * This format must be accepted by the allocator when used with the
     * following usage flags:
     *
     *    - BufferUsage::CAMERA_*
     *    - BufferUsage::CPU_*
     *    - BufferUsage::RENDERSCRIPT
     *
     * The mapping of the dataspace to buffer contents for RAW_OPAQUE is as
     * follows:
     *
     *  Dataspace value               | Buffer contents
     * -------------------------------+-----------------------------------------
     *  Dataspace::ARBITRARY          | Raw image sensor data.
     *  Other                         | Unsupported
     */
    RAW_OPAQUE         = 0x24,

    /**
     * RAW10 is a single-channel, 10-bit per pixel, densely packed in each row,
     * unprocessed format, usually representing raw Bayer-pattern images coming from
     * an image sensor.
     *
     * In an image buffer with this format, starting from the first pixel of each
     * row, each 4 consecutive pixels are packed into 5 bytes (40 bits). Each one
     * of the first 4 bytes contains the top 8 bits of each pixel, The fifth byte
     * contains the 2 least significant bits of the 4 pixels, the exact layout data
     * for each 4 consecutive pixels is illustrated below (Pi[j] stands for the jth
     * bit of the ith pixel):
     *
     *          bit 7                                     bit 0
     *          =====|=====|=====|=====|=====|=====|=====|=====|
     * Byte 0: |P0[9]|P0[8]|P0[7]|P0[6]|P0[5]|P0[4]|P0[3]|P0[2]|
     *         |-----|-----|-----|-----|-----|-----|-----|-----|
     * Byte 1: |P1[9]|P1[8]|P1[7]|P1[6]|P1[5]|P1[4]|P1[3]|P1[2]|
     *         |-----|-----|-----|-----|-----|-----|-----|-----|
     * Byte 2: |P2[9]|P2[8]|P2[7]|P2[6]|P2[5]|P2[4]|P2[3]|P2[2]|
     *         |-----|-----|-----|-----|-----|-----|-----|-----|
     * Byte 3: |P3[9]|P3[8]|P3[7]|P3[6]|P3[5]|P3[4]|P3[3]|P3[2]|
     *         |-----|-----|-----|-----|-----|-----|-----|-----|
     * Byte 4: |P3[1]|P3[0]|P2[1]|P2[0]|P1[1]|P1[0]|P0[1]|P0[0]|
     *          ===============================================
     *
     * This format assumes
     * - a width multiple of 4 pixels
     * - an even height
     * - a vertical stride equal to the height
     * - strides are specified in bytes, not in pixels
     *
     *   size = stride * height
     *
     * When stride is equal to width * (10 / 8), there will be no padding bytes at
     * the end of each row, the entire image data is densely packed. When stride is
     * larger than width * (10 / 8), padding bytes will be present at the end of each
     * row (including the last row).
     *
     * This format must be accepted by the allocator when used with the
     * following usage flags:
     *
     *    - BufferUsage::CAMERA_*
     *    - BufferUsage::CPU_*
     *    - BufferUsage::RENDERSCRIPT
     *
     * The mapping of the dataspace to buffer contents for RAW10 is as
     * follows:
     *
     *  Dataspace value               | Buffer contents
     * -------------------------------+-----------------------------------------
     *  Dataspace::ARBITRARY          | Raw image sensor data.
     *  Other                         | Unsupported
     */
    RAW10              = 0x25,

    /**
     * RAW12 is a single-channel, 12-bit per pixel, densely packed in each row,
     * unprocessed format, usually representing raw Bayer-pattern images coming from
     * an image sensor.
     *
     * In an image buffer with this format, starting from the first pixel of each
     * row, each two consecutive pixels are packed into 3 bytes (24 bits). The first
     * and second byte contains the top 8 bits of first and second pixel. The third
     * byte contains the 4 least significant bits of the two pixels, the exact layout
     * data for each two consecutive pixels is illustrated below (Pi[j] stands for
     * the jth bit of the ith pixel):
     *
     *           bit 7                                            bit 0
     *          ======|======|======|======|======|======|======|======|
     * Byte 0: |P0[11]|P0[10]|P0[ 9]|P0[ 8]|P0[ 7]|P0[ 6]|P0[ 5]|P0[ 4]|
     *         |------|------|------|------|------|------|------|------|
     * Byte 1: |P1[11]|P1[10]|P1[ 9]|P1[ 8]|P1[ 7]|P1[ 6]|P1[ 5]|P1[ 4]|
     *         |------|------|------|------|------|------|------|------|
     * Byte 2: |P1[ 3]|P1[ 2]|P1[ 1]|P1[ 0]|P0[ 3]|P0[ 2]|P0[ 1]|P0[ 0]|
     *          =======================================================
     *
     * This format assumes:
     * - a width multiple of 4 pixels
     * - an even height
     * - a vertical stride equal to the height
     * - strides are specified in bytes, not in pixels
     *
     *   size = stride * height
     *
     * When stride is equal to width * (12 / 8), there will be no padding bytes at
     * the end of each row, the entire image data is densely packed. When stride is
     * larger than width * (12 / 8), padding bytes will be present at the end of
     * each row (including the last row).
     *
     * This format must be accepted by the allocator when used with the
     * following usage flags:
     *
     *    - BufferUsage::CAMERA_*
     *    - BufferUsage::CPU_*
     *    - BufferUsage::RENDERSCRIPT
     *
     * The mapping of the dataspace to buffer contents for RAW12 is as
     * follows:
     *
     *  Dataspace value               | Buffer contents
     * -------------------------------+-----------------------------------------
     *  Dataspace::ARBITRARY          | Raw image sensor data.
     *  Other                         | Unsupported
     */
    RAW12              = 0x26,

    /** 0x27 to 0x2A are reserved for flexible formats */

    /**
     * 32-bit packed format that has 2-bit A, 10-bit B, G, and R components,
     * in that order, from the most-significant bits to the least-significant
     * bits.
     *
     * The component values are unsigned normalized to the range [0, 1], whose
     * interpretation is defined by the dataspace.
     */
    RGBA_1010102       = 0x2B,

    /**
     * 0x100 - 0x1FF
     *
     * This range is reserved for vendor extensions. Formats in this range
     * must support BufferUsage::GPU_TEXTURE. Clients must assume they do not
     * have an alpha component.
     */

    /**
     * Y8 is a YUV planar format comprised of a WxH Y plane, with each pixel
     * being represented by 8 bits. It is equivalent to just the Y plane from
     * YV12.
     *
     * This format assumes
     * - an even width
     * - an even height
     * - a horizontal stride multiple of 16 pixels
     * - a vertical stride equal to the height
     *
     *   size = stride * height
     *
     * This format must be accepted by the allocator when used with the
     * following usage flags:
     *
     *    - BufferUsage::CAMERA_*
     *    - BufferUsage::CPU_*
     *
     * The component values are unsigned normalized to the range [0, 1], whose
     * interpretation is defined by the dataspace.
     */
    Y8                 = 0x20203859,

    /**
     * Y16 is a YUV planar format comprised of a WxH Y plane, with each pixel
     * being represented by 16 bits. It is just like Y8, but has double the
     * bits per pixel (little endian).
     *
     * This format assumes
     * - an even width
     * - an even height
     * - a horizontal stride multiple of 16 pixels
     * - a vertical stride equal to the height
     * - strides are specified in pixels, not in bytes
     *
     *   size = stride * height * 2
     *
     * This format must be accepted by the allocator when used with the
     * following usage flags:
     *
     *    - BufferUsage::CAMERA_*
     *    - BufferUsage::CPU_*
     *
     * The component values are unsigned normalized to the range [0, 1], whose
     * interpretation is defined by the dataspace. When the dataspace is
     * Dataspace::DEPTH, each pixel is a distance value measured by a depth
     * camera, plus an associated confidence value.
     */
    Y16                = 0x20363159,

    /**
     * YV12 is a 4:2:0 YCrCb planar format comprised of a WxH Y plane followed
     * by (W/2) x (H/2) Cr and Cb planes.
     *
     * This format assumes
     * - an even width
     * - an even height
     * - a horizontal stride multiple of 16 pixels
     * - a vertical stride equal to the height
     *
     *   y_size = stride * height
     *   c_stride = ALIGN(stride/2, 16)
     *   c_size = c_stride * height/2
     *   size = y_size + c_size * 2
     *   cr_offset = y_size
     *   cb_offset = y_size + c_size
     *
     * This format must be accepted by the allocator when used with the
     * following usage flags:
     *
     *    - BufferUsage::CAMERA_*
     *    - BufferUsage::CPU_*
     *    - BufferUsage::GPU_TEXTURE
     *
     * The component values are unsigned normalized to the range [0, 1], whose
     * interpretation is defined by the dataspace.
     */
    YV12               = 0x32315659, // YCrCb 4:2:0 Planar
};
    421 
/**
 * Buffer usage definitions.
 *
 * Bits 0-3 and 4-7 hold small enums (CPU read/write frequency); the
 * remaining defined bits are independent flags that may be OR'd together.
 */
enum BufferUsage : uint64_t {
    /** bits 0-3 form an enum describing CPU read frequency */
    CPU_READ_MASK   = 0xfULL,
    /** buffer is never read by CPU */
    CPU_READ_NEVER  = 0,
    /** buffer is rarely read by CPU */
    CPU_READ_RARELY = 2,
    /** buffer is often read by CPU */
    CPU_READ_OFTEN  = 3,

    /** bits 4-7 form an enum describing CPU write frequency */
    CPU_WRITE_MASK   = 0xfULL << 4,
    /** buffer is never written by CPU */
    CPU_WRITE_NEVER  = 0 << 4,
    /** buffer is rarely written by CPU */
    CPU_WRITE_RARELY = 2 << 4,
    /** buffer is often written by CPU */
    CPU_WRITE_OFTEN  = 3 << 4,

    /** buffer is used as a GPU texture */
    GPU_TEXTURE       = 1ULL << 8,

    /** buffer is used as a GPU render target */
    GPU_RENDER_TARGET = 1ULL << 9,

    /** bit 10 must be zero */

    /** buffer is used as a composer HAL overlay layer */
    COMPOSER_OVERLAY  = 1ULL << 11,
    /** buffer is used as a composer HAL client target */
    COMPOSER_CLIENT_TARGET = 1ULL << 12,

    /** bit 13 must be zero */

    /**
     * Buffer is allocated with hardware-level protection against copying the
     * contents (or information derived from the contents) into unprotected
     * memory.
     */
    PROTECTED         = 1ULL << 14,

    /** buffer is used as a hwcomposer HAL cursor layer */
    COMPOSER_CURSOR   = 1ULL << 15,

    /** buffer is used as a video encoder input */
    VIDEO_ENCODER     = 1ULL << 16,

    /** buffer is used as a camera HAL output */
    CAMERA_OUTPUT     = 1ULL << 17,

    /** buffer is used as a camera HAL input */
    CAMERA_INPUT      = 1ULL << 18,

    /** bit 19 must be zero */

    /** buffer is used as a renderscript allocation */
    RENDERSCRIPT      = 1ULL << 20,

    /** bit 21 must be zero */

    /** buffer is used as a video decoder output */
    VIDEO_DECODER     = 1ULL << 22,

    /** buffer is used as a sensor direct report output */
    SENSOR_DIRECT_DATA = 1ULL << 23,

    /**
     * buffer is used as an OpenGL shader storage or uniform
     * buffer object
     */
    GPU_DATA_BUFFER   = 1ULL << 24,

    /** bits 25-27 must be zero and are reserved for future versions */
    /** bits 28-31 are reserved for vendor extensions */
    VENDOR_MASK       = 0xfULL << 28,

    /** bits 32-47 must be zero and are reserved for future versions */
    /** bits 48-63 are reserved for vendor extensions */
    VENDOR_MASK_HI    = 0xffffULL << 48,
};
    505 
/**
 * Transformation definitions
 *
 * These values may be OR'd together to describe a combined transform; the
 * flips are always applied before the rotation.
 */
@export(name="android_transform_t", value_prefix="HAL_TRANSFORM_")
enum Transform : int32_t {
    /**
     * Horizontal flip. FLIP_H/FLIP_V is applied before ROT_90.
     */
    FLIP_H    = 1 << 0,

    /**
     * Vertical flip. FLIP_H/FLIP_V is applied before ROT_90.
     */
    FLIP_V    = 1 << 1,

    /**
     * 90 degree clockwise rotation. FLIP_H/FLIP_V is applied before ROT_90.
     */
    ROT_90    = 1 << 2,

    /**
     * Commonly used combinations.
     *
     * ROT_180 (value 0x3) is both flips, which is equivalent to a 180 degree
     * rotation. ROT_270 (value 0x7) adds a further 90 degree clockwise
     * rotation on top of that, yielding 270 degrees clockwise overall.
     */
    ROT_180   = FLIP_H | FLIP_V,
    ROT_270   = FLIP_H | FLIP_V | ROT_90,
};
    532 
    533 /**
    534  * Dataspace Definitions
    535  * ======================
    536  *
    537  * Dataspace is the definition of how pixel values should be interpreted.
    538  *
    539  * For many formats, this is the colorspace of the image data, which includes
    540  * primaries (including white point) and the transfer characteristic function,
    541  * which describes both gamma curve and numeric range (within the bit depth).
    542  *
    543  * Other dataspaces include depth measurement data from a depth camera.
    544  *
    545  * A dataspace is comprised of a number of fields.
    546  *
    547  * Version
    548  * --------
    549  * The top 2 bits represent the revision of the field specification. This is
    550  * currently always 0.
    551  *
    552  *
    553  * bits    31-30 29                      -                          0
    554  *        +-----+----------------------------------------------------+
    555  * fields | Rev |            Revision specific fields                |
    556  *        +-----+----------------------------------------------------+
    557  *
    558  * Field layout for version = 0:
    559  * ----------------------------
    560  *
    561  * A dataspace is comprised of the following fields:
    562  *      Standard
    563  *      Transfer function
    564  *      Range
    565  *
    566  * bits    31-30 29-27 26 -  22 21 -  16 15             -           0
    567  *        +-----+-----+--------+--------+----------------------------+
    568  * fields |  0  |Range|Transfer|Standard|    Legacy and custom       |
    569  *        +-----+-----+--------+--------+----------------------------+
    570  *          VV    RRR   TTTTT    SSSSSS    LLLLLLLL       LLLLLLLL
    571  *
    572  * If range, transfer and standard fields are all 0 (e.g. top 16 bits are
    573  * all zeroes), the bottom 16 bits contain either a legacy dataspace value,
    574  * or a custom value.
    575  */
    576 @export(name="android_dataspace_t", value_prefix="HAL_DATASPACE_")
    577 enum Dataspace : int32_t {
    578     /**
    579      * Default-assumption data space, when not explicitly specified.
    580      *
    581      * It is safest to assume the buffer is an image with sRGB primaries and
    582      * encoding ranges, but the consumer and/or the producer of the data may
    583      * simply be using defaults. No automatic gamma transform should be
    584      * expected, except for a possible display gamma transform when drawn to a
    585      * screen.
    586      */
    587     UNKNOWN = 0x0,
    588 
    589     /**
    590      * Arbitrary dataspace with manually defined characteristics.  Definition
    591      * for colorspaces or other meaning must be communicated separately.
    592      *
    593      * This is used when specifying primaries, transfer characteristics,
    594      * etc. separately.
    595      *
    596      * A typical use case is in video encoding parameters (e.g. for H.264),
    597      * where a colorspace can have separately defined primaries, transfer
    598      * characteristics, etc.
    599      */
    600     ARBITRARY = 0x1,
    601 
    602     /**
    603      * Color-description aspects
    604      *
    605      * The following aspects define various characteristics of the color
    606      * specification. These represent bitfields, so that a data space value
    607      * can specify each of them independently.
    608      */
    609 
    610     STANDARD_SHIFT = 16,
    611 
    612     /**
    613      * Standard aspect
    614      *
    615      * Defines the chromaticity coordinates of the source primaries in terms of
    616      * the CIE 1931 definition of x and y specified in ISO 11664-1.
    617      */
    618     STANDARD_MASK = 63 << STANDARD_SHIFT,  // 0x3F
    619 
    620     /**
    621      * Chromacity coordinates are unknown or are determined by the application.
    622      * Implementations shall use the following suggested standards:
    623      *
    624      * All YCbCr formats: BT709 if size is 720p or larger (since most video
    625      *                    content is letterboxed this corresponds to width is
    626      *                    1280 or greater, or height is 720 or greater).
    627      *                    BT601_625 if size is smaller than 720p or is JPEG.
    628      * All RGB formats:   BT709.
    629      *
    630      * For all other formats standard is undefined, and implementations should use
    631      * an appropriate standard for the data represented.
    632      */
    633     STANDARD_UNSPECIFIED = 0 << STANDARD_SHIFT,
    634 
    635     /**
    636      * Primaries:       x       y
    637      *  green           0.300   0.600
    638      *  blue            0.150   0.060
    639      *  red             0.640   0.330
    640      *  white (D65)     0.3127  0.3290
    641      *
    642      * Use the unadjusted KR = 0.2126, KB = 0.0722 luminance interpretation
    643      * for RGB conversion.
    644      */
    645     STANDARD_BT709 = 1 << STANDARD_SHIFT,
    646 
    647     /**
    648      * Primaries:       x       y
    649      *  green           0.290   0.600
    650      *  blue            0.150   0.060
    651      *  red             0.640   0.330
    652      *  white (D65)     0.3127  0.3290
    653      *
    654      *  KR = 0.299, KB = 0.114. This adjusts the luminance interpretation
    655      *  for RGB conversion from the one purely determined by the primaries
    656      *  to minimize the color shift into RGB space that uses BT.709
    657      *  primaries.
    658      */
    659     STANDARD_BT601_625 = 2 << STANDARD_SHIFT,
    660 
    661     /**
    662      * Primaries:       x       y
    663      *  green           0.290   0.600
    664      *  blue            0.150   0.060
    665      *  red             0.640   0.330
    666      *  white (D65)     0.3127  0.3290
    667      *
    668      * Use the unadjusted KR = 0.222, KB = 0.071 luminance interpretation
    669      * for RGB conversion.
    670      */
    671     STANDARD_BT601_625_UNADJUSTED = 3 << STANDARD_SHIFT,
    672 
    673     /**
    674      * Primaries:       x       y
    675      *  green           0.310   0.595
    676      *  blue            0.155   0.070
    677      *  red             0.630   0.340
    678      *  white (D65)     0.3127  0.3290
    679      *
    680      *  KR = 0.299, KB = 0.114. This adjusts the luminance interpretation
    681      *  for RGB conversion from the one purely determined by the primaries
    682      *  to minimize the color shift into RGB space that uses BT.709
    683      *  primaries.
    684      */
    685     STANDARD_BT601_525 = 4 << STANDARD_SHIFT,
    686 
    687     /**
    688      * Primaries:       x       y
    689      *  green           0.310   0.595
    690      *  blue            0.155   0.070
    691      *  red             0.630   0.340
    692      *  white (D65)     0.3127  0.3290
    693      *
    694      * Use the unadjusted KR = 0.212, KB = 0.087 luminance interpretation
    695      * for RGB conversion (as in SMPTE 240M).
    696      */
    697     STANDARD_BT601_525_UNADJUSTED = 5 << STANDARD_SHIFT,
    698 
    699     /**
    700      * Primaries:       x       y
    701      *  green           0.170   0.797
    702      *  blue            0.131   0.046
    703      *  red             0.708   0.292
    704      *  white (D65)     0.3127  0.3290
    705      *
    706      * Use the unadjusted KR = 0.2627, KB = 0.0593 luminance interpretation
    707      * for RGB conversion.
    708      */
    709     STANDARD_BT2020 = 6 << STANDARD_SHIFT,
    710 
    711     /**
    712      * Primaries:       x       y
    713      *  green           0.170   0.797
    714      *  blue            0.131   0.046
    715      *  red             0.708   0.292
    716      *  white (D65)     0.3127  0.3290
    717      *
    718      * Use the unadjusted KR = 0.2627, KB = 0.0593 luminance interpretation
    719      * for RGB conversion using the linear domain.
    720      */
    721     STANDARD_BT2020_CONSTANT_LUMINANCE = 7 << STANDARD_SHIFT,
    722 
    723     /**
    724      * Primaries:       x      y
    725      *  green           0.21   0.71
    726      *  blue            0.14   0.08
    727      *  red             0.67   0.33
    728      *  white (C)       0.310  0.316
    729      *
    730      * Use the unadjusted KR = 0.30, KB = 0.11 luminance interpretation
    731      * for RGB conversion.
    732      */
    733     STANDARD_BT470M = 8 << STANDARD_SHIFT,
    734 
    735     /**
    736      * Primaries:       x       y
    737      *  green           0.243   0.692
    738      *  blue            0.145   0.049
    739      *  red             0.681   0.319
    740      *  white (C)       0.310   0.316
    741      *
    742      * Use the unadjusted KR = 0.254, KB = 0.068 luminance interpretation
    743      * for RGB conversion.
    744      */
    745     STANDARD_FILM = 9 << STANDARD_SHIFT,
    746 
    747     /**
    748      * SMPTE EG 432-1 and SMPTE RP 431-2. (DCI-P3)
    749      * Primaries:       x       y
    750      *  green           0.265   0.690
    751      *  blue            0.150   0.060
    752      *  red             0.680   0.320
    753      *  white (D65)     0.3127  0.3290
    754      */
    755     STANDARD_DCI_P3 = 10 << STANDARD_SHIFT,
    756 
    757     /**
    758      * Adobe RGB
    759      * Primaries:       x       y
    760      *  green           0.210   0.710
    761      *  blue            0.150   0.060
    762      *  red             0.640   0.330
    763      *  white (D65)     0.3127  0.3290
    764      */
    765     STANDARD_ADOBE_RGB = 11 << STANDARD_SHIFT,
    766 
    767 
    768 
    769     TRANSFER_SHIFT = 22,
    770 
    771     /**
    772      * Transfer aspect
    773      *
    774      * Transfer characteristics are the opto-electronic transfer characteristic
    775      * at the source as a function of linear optical intensity (luminance).
    776      *
    777      * For digital signals, E corresponds to the recorded value. Normally, the
    778      * transfer function is applied in RGB space to each of the R, G and B
    779      * components independently. This may result in color shift that can be
    780      * minimized by applying the transfer function in Lab space only for the L
    781      * component. Implementation may apply the transfer function in RGB space
    782      * for all pixel formats if desired.
    783      */
    784 
    785     TRANSFER_MASK = 31 << TRANSFER_SHIFT,  // 0x1F
    786 
    787     /**
    788      * Transfer characteristics are unknown or are determined by the
    789      * application.
    790      *
    791      * Implementations should use the following transfer functions:
    792      *
    793      * For YCbCr formats: use TRANSFER_SMPTE_170M
    794      * For RGB formats: use TRANSFER_SRGB
    795      *
    796      * For all other formats transfer function is undefined, and implementations
    797      * should use an appropriate standard for the data represented.
    798      */
    799     TRANSFER_UNSPECIFIED = 0 << TRANSFER_SHIFT,
    800 
    801     /**
    802      * Transfer characteristic curve:
    803      *  E = L
    804      *      L - luminance of image 0 <= L <= 1 for conventional colorimetry
    805      *      E - corresponding electrical signal
    806      */
    807     TRANSFER_LINEAR = 1 << TRANSFER_SHIFT,
    808 
    809     /**
    810      * Transfer characteristic curve:
    811      *
    812      * E = 1.055 * L^(1/2.4) - 0.055  for 0.0031308 <= L <= 1
    813      *   = 12.92 * L                  for 0 <= L < 0.0031308
    814      *     L - luminance of image 0 <= L <= 1 for conventional colorimetry
    815      *     E - corresponding electrical signal
    816      */
    817     TRANSFER_SRGB = 2 << TRANSFER_SHIFT,
    818 
    819     /**
    820      * BT.601 525, BT.601 625, BT.709, BT.2020
    821      *
    822      * Transfer characteristic curve:
    823      *  E = 1.099 * L ^ 0.45 - 0.099  for 0.018 <= L <= 1
    824      *    = 4.500 * L                 for 0 <= L < 0.018
    825      *      L - luminance of image 0 <= L <= 1 for conventional colorimetry
    826      *      E - corresponding electrical signal
    827      */
    828     TRANSFER_SMPTE_170M = 3 << TRANSFER_SHIFT,
    829 
    830     /**
    831      * Assumed display gamma 2.2.
    832      *
    833      * Transfer characteristic curve:
    834      *  E = L ^ (1/2.2)
    835      *      L - luminance of image 0 <= L <= 1 for conventional colorimetry
    836      *      E - corresponding electrical signal
    837      */
    838     TRANSFER_GAMMA2_2 = 4 << TRANSFER_SHIFT,
    839 
    840     /**
    841      *  display gamma 2.6.
    842      *
    843      * Transfer characteristic curve:
    844      *  E = L ^ (1/2.6)
    845      *      L - luminance of image 0 <= L <= 1 for conventional colorimetry
    846      *      E - corresponding electrical signal
    847      */
    848     TRANSFER_GAMMA2_6 = 5 << TRANSFER_SHIFT,
    849 
    850     /**
    851      *  display gamma 2.8.
    852      *
    853      * Transfer characteristic curve:
    854      *  E = L ^ (1/2.8)
    855      *      L - luminance of image 0 <= L <= 1 for conventional colorimetry
    856      *      E - corresponding electrical signal
    857      */
    858     TRANSFER_GAMMA2_8 = 6 << TRANSFER_SHIFT,
    859 
    860     /**
    861      * SMPTE ST 2084 (Dolby Perceptual Quantizer)
    862      *
    863      * Transfer characteristic curve:
    864      *  E = ((c1 + c2 * L^n) / (1 + c3 * L^n)) ^ m
    865      *  c1 = c3 - c2 + 1 = 3424 / 4096 = 0.8359375
    866      *  c2 = 32 * 2413 / 4096 = 18.8515625
    867      *  c3 = 32 * 2392 / 4096 = 18.6875
    868      *  m = 128 * 2523 / 4096 = 78.84375
    869      *  n = 0.25 * 2610 / 4096 = 0.1593017578125
    870      *      L - luminance of image 0 <= L <= 1 for HDR colorimetry.
    871      *          L = 1 corresponds to 10000 cd/m2
    872      *      E - corresponding electrical signal
    873      */
    874     TRANSFER_ST2084 = 7 << TRANSFER_SHIFT,
    875 
    876     /**
    877      * ARIB STD-B67 Hybrid Log Gamma
    878      *
    879      * Transfer characteristic curve:
    880      *  E = r * L^0.5                 for 0 <= L <= 1
    881      *    = a * ln(L - b) + c         for 1 < L
    882      *  a = 0.17883277
    883      *  b = 0.28466892
    884      *  c = 0.55991073
    885      *  r = 0.5
    886      *      L - luminance of image 0 <= L for HDR colorimetry. L = 1 corresponds
    887      *          to reference white level of 100 cd/m2
    888      *      E - corresponding electrical signal
    889      */
    890     TRANSFER_HLG = 8 << TRANSFER_SHIFT,
    891 
    892     RANGE_SHIFT = 27,
    893 
    894     /**
    895      * Range aspect
    896      *
    897      * Defines the range of values corresponding to the unit range of 0-1.
    898      * This is defined for YCbCr only, but can be expanded to RGB space.
    899      */
    900     RANGE_MASK = 7 << RANGE_SHIFT,  // 0x7
    901 
    902     /**
    903      * Range is unknown or is determined by the application.  Implementations
    904      * shall use the following suggested ranges:
    905      *
    906      * All YCbCr formats: limited range.
    907      * All RGB or RGBA formats (including RAW and Bayer): full range.
    908      * All Y formats: full range
    909      *
    910      * For all other formats range is undefined, and implementations should use
    911      * an appropriate range for the data represented.
    912      */
    913     RANGE_UNSPECIFIED = 0 << RANGE_SHIFT,
    914 
    915     /**
    916      * Full range uses all values for Y, Cb and Cr from
    917      * 0 to 2^b-1, where b is the bit depth of the color format.
    918      */
    919     RANGE_FULL = 1 << RANGE_SHIFT,
    920 
    921     /**
    922      * Limited range uses values 16/256*2^b to 235/256*2^b for Y, and
    923      * 1/16*2^b to 15/16*2^b for Cb, Cr, R, G and B, where b is the bit depth of
    924      * the color format.
    925      *
    926      * E.g. For 8-bit-depth formats:
    927      * Luma (Y) samples should range from 16 to 235, inclusive
    928      * Chroma (Cb, Cr) samples should range from 16 to 240, inclusive
    929      *
    930      * For 10-bit-depth formats:
    931      * Luma (Y) samples should range from 64 to 940, inclusive
    932      * Chroma (Cb, Cr) samples should range from 64 to 960, inclusive
    933      */
    934     RANGE_LIMITED = 2 << RANGE_SHIFT,
    935 
    936     /**
    937      * Extended range is used for scRGB. Intended for use with
    938      * floating point pixel formats. [0.0 - 1.0] is the standard
    939      * sRGB space. Values outside the range 0.0 - 1.0 can encode
    940      * color outside the sRGB gamut.
    941      * Used to blend / merge multiple dataspaces on a single display.
    942      */
    943     RANGE_EXTENDED = 3 << RANGE_SHIFT,
    944 
    945     /**
    946      * Legacy dataspaces
    947      */
    948 
    949     /**
    950      * sRGB linear encoding:
    951      *
    952      * The red, green, and blue components are stored in sRGB space, but
    953      * are linear, not gamma-encoded.
    954      * The RGB primaries and the white point are the same as BT.709.
    955      *
    956      * The values are encoded using the full range ([0,255] for 8-bit) for all
    957      * components.
    958      */
    959     SRGB_LINEAR = 0x200, // deprecated, use V0_SRGB_LINEAR
    960 
    961     V0_SRGB_LINEAR = STANDARD_BT709 | TRANSFER_LINEAR | RANGE_FULL,
    962 
    963 
    964     /**
    965      * scRGB linear encoding:
    966      *
    967      * The red, green, and blue components are stored in extended sRGB space,
    968      * but are linear, not gamma-encoded.
    969      * The RGB primaries and the white point are the same as BT.709.
    970      *
    971      * The values are floating point.
    972      * A pixel value of 1.0, 1.0, 1.0 corresponds to sRGB white (D65) at 80 nits.
    973      * Values beyond the range [0.0 - 1.0] would correspond to other color
    974      * spaces and/or HDR content.
    975      */
    976     V0_SCRGB_LINEAR = STANDARD_BT709 | TRANSFER_LINEAR | RANGE_EXTENDED,
    977 
    978 
    979     /**
    980      * sRGB gamma encoding:
    981      *
    982      * The red, green and blue components are stored in sRGB space, and
    983      * converted to linear space when read, using the SRGB transfer function
    984      * for each of the R, G and B components. When written, the inverse
    985      * transformation is performed.
    986      *
    987      * The alpha component, if present, is always stored in linear space and
    988      * is left unmodified when read or written.
    989      *
    990      * Use full range and BT.709 standard.
    991      */
    992     SRGB = 0x201, // deprecated, use V0_SRGB
    993 
    994     V0_SRGB = STANDARD_BT709 | TRANSFER_SRGB | RANGE_FULL,
    995 
    996 
    997     /**
    998      * scRGB:
    999      *
   1000      * The red, green, and blue components are stored in extended sRGB space,
   1001      * but are linear, not gamma-encoded.
   1002      * The RGB primaries and the white point are the same as BT.709.
   1003      *
   1004      * The values are floating point.
   1005      * A pixel value of 1.0, 1.0, 1.0 corresponds to sRGB white (D65) at 80 nits.
   1006      * Values beyond the range [0.0 - 1.0] would correspond to other color
   1007      * spaces and/or HDR content.
   1008      */
   1009     V0_SCRGB = STANDARD_BT709 | TRANSFER_SRGB | RANGE_EXTENDED,
   1010 
   1011     /**
   1012      * YCbCr Colorspaces
   1013      * -----------------
   1014      *
   1015      * Primaries are given using (x,y) coordinates in the CIE 1931 definition
   1016      * of x and y specified by ISO 11664-1.
   1017      *
   1018      * Transfer characteristics are the opto-electronic transfer characteristic
   1019      * at the source as a function of linear optical intensity (luminance).
   1020      */
   1021 
   1022     /**
   1023      * JPEG File Interchange Format (JFIF)
   1024      *
   1025      * Same model as BT.601-625, but all values (Y, Cb, Cr) range from 0 to 255
   1026      *
   1027      * Use full range, BT.601 transfer and BT.601_625 standard.
   1028      */
   1029     JFIF = 0x101, // deprecated, use V0_JFIF
   1030 
   1031     V0_JFIF = STANDARD_BT601_625 | TRANSFER_SMPTE_170M | RANGE_FULL,
   1032 
   1033     /**
   1034      * ITU-R Recommendation 601 (BT.601) - 625-line
   1035      *
   1036      * Standard-definition television, 625 Lines (PAL)
   1037      *
   1038      * Use limited range, BT.601 transfer and BT.601_625 standard.
   1039      */
   1040     BT601_625 = 0x102, // deprecated, use V0_BT601_625
   1041 
   1042     V0_BT601_625 = STANDARD_BT601_625 | TRANSFER_SMPTE_170M | RANGE_LIMITED,
   1043 
   1044 
   1045     /**
   1046      * ITU-R Recommendation 601 (BT.601) - 525-line
   1047      *
   1048      * Standard-definition television, 525 Lines (NTSC)
   1049      *
   1050      * Use limited range, BT.601 transfer and BT.601_525 standard.
   1051      */
   1052     BT601_525 = 0x103, // deprecated, use V0_BT601_525
   1053 
   1054     V0_BT601_525 = STANDARD_BT601_525 | TRANSFER_SMPTE_170M | RANGE_LIMITED,
   1055 
   1056     /**
   1057      * ITU-R Recommendation 709 (BT.709)
   1058      *
   1059      * High-definition television
   1060      *
   1061      * Use limited range, BT.709 transfer and BT.709 standard.
   1062      */
   1063     BT709 = 0x104, // deprecated, use V0_BT709
   1064 
   1065     V0_BT709 = STANDARD_BT709 | TRANSFER_SMPTE_170M | RANGE_LIMITED,
   1066 
   1067 
   1068     /**
   1069      * SMPTE EG 432-1 and SMPTE RP 431-2.
   1070      *
   1071      * Digital Cinema DCI-P3
   1072      *
   1073      * Use full range, linear transfer and D65 DCI-P3 standard
   1074      */
   1075     DCI_P3_LINEAR = STANDARD_DCI_P3 | TRANSFER_LINEAR | RANGE_FULL,
   1076 
   1077 
   1078     /**
   1079      * SMPTE EG 432-1 and SMPTE RP 431-2.
   1080      *
   1081      * Digital Cinema DCI-P3
   1082      *
   1083      * Use full range, gamma 2.6 transfer and D65 DCI-P3 standard
   1084      * Note: Application is responsible for gamma encoding the data as
   1085      * a 2.6 gamma encoding is not supported in HW.
   1086      */
   1087     DCI_P3 = STANDARD_DCI_P3 | TRANSFER_GAMMA2_6 | RANGE_FULL,
   1088 
   1089 
   1090     /**
   1091      * Display P3
   1092      *
   1093      * Display P3 uses same primaries and white-point as DCI-P3
   1094      * linear transfer function makes this the same as DCI_P3_LINEAR.
   1095      */
   1096     DISPLAY_P3_LINEAR = STANDARD_DCI_P3 | TRANSFER_LINEAR | RANGE_FULL,
   1097 
   1098 
   1099     /**
   1100      * Display P3
   1101      *
   1102      * Use same primaries and white-point as DCI-P3
   1103      * but sRGB transfer function.
   1104      */
   1105     DISPLAY_P3 = STANDARD_DCI_P3 | TRANSFER_SRGB | RANGE_FULL,
   1106 
   1107 
   1108     /**
   1109      * Adobe RGB
   1110      *
   1111      * Use full range, gamma 2.2 transfer and Adobe RGB primaries
   1112      * Note: Application is responsible for gamma encoding the data as
   1113      * a 2.2 gamma encoding is not supported in HW.
   1114      */
   1115     ADOBE_RGB = STANDARD_ADOBE_RGB | TRANSFER_GAMMA2_2 | RANGE_FULL,
   1116 
   1117 
   1118     /**
   1119      * ITU-R Recommendation 2020 (BT.2020)
   1120      *
   1121      * Ultra High-definition television
   1122      *
   1123      * Use full range, linear transfer and BT2020 standard
   1124      */
   1125     BT2020_LINEAR = STANDARD_BT2020 | TRANSFER_LINEAR | RANGE_FULL,
   1126 
   1127 
   1128     /**
   1129      * ITU-R Recommendation 2020 (BT.2020)
   1130      *
   1131      * Ultra High-definition television
   1132      *
   1133      * Use full range, BT.709 transfer and BT2020 standard
   1134      */
   1135     BT2020 = STANDARD_BT2020 | TRANSFER_SMPTE_170M | RANGE_FULL,
   1136 
   1137     /**
   1138      * ITU-R Recommendation 2020 (BT.2020)
   1139      *
   1140      * Ultra High-definition television
   1141      *
   1142      * Use full range, SMPTE 2084 (PQ) transfer and BT2020 standard
   1143      */
   1144     BT2020_PQ = STANDARD_BT2020 | TRANSFER_ST2084 | RANGE_FULL,
   1145 
   1146 
   1147     /**
   1148      * Data spaces for non-color formats
   1149      */
   1150 
   1151     /**
   1152      * The buffer contains depth ranging measurements from a depth camera.
   1153      * This value is valid with formats:
   1154      *    HAL_PIXEL_FORMAT_Y16: 16-bit samples, consisting of a depth measurement
   1155      *       and an associated confidence value. The 3 MSBs of the sample make
   1156      *       up the confidence value, and the low 13 LSBs of the sample make up
   1157      *       the depth measurement.
   1158      *       For the confidence section, 0 means 100% confidence, 1 means 0%
   1159      *       confidence. The mapping to a linear float confidence value between
   1160      *       0.f and 1.f can be obtained with
   1161      *         float confidence = (((depthSample >> 13) - 1) & 0x7) / 7.0f;
   1162      *       The depth measurement can be extracted simply with
   1163      *         uint16_t range = (depthSample & 0x1FFF);
   1164      *    HAL_PIXEL_FORMAT_BLOB: A depth point cloud, as
   1165      *       a variable-length float (x,y,z, confidence) coordinate point list.
   1166      *       The point cloud will be represented with the android_depth_points
   1167      *       structure.
   1168      */
   1169     DEPTH = 0x1000,
   1170 
   1171 
   1172     /**
   1173      * The buffer contains sensor events from sensor direct report.
   1174      * This value is valid with formats:
   1175      *    HAL_PIXEL_FORMAT_BLOB: an array of sensor event structure that forms
   1176      *       a lock free queue. Format of sensor event structure is specified
   1177      *       in Sensors HAL.
   1178      */
   1179     SENSOR = 0x1001
   1180 };
   1181 
   1182 /**
   1183  * Color modes that may be supported by a display.
   1184  *
   1185  * Definitions:
   1186  * Rendering intent generally defines the goal in mapping a source (input)
   1187  * color to a destination device color for a given color mode.
   1188  *
   1189  *  It is important to keep in mind three cases where mapping may be applied:
   1190  *  1. The source gamut is much smaller than the destination (display) gamut
   1191  *  2. The source gamut is much larger than the destination gamut (this will
   1192  *  ordinarily be handled using colorimetric rendering, below)
   1193  *  3. The source and destination gamuts are roughly equal, although not
   1194  *  completely overlapping
   1195  *  Also, a common requirement for mappings is that skin tones should be
   1196  *  preserved, or at least remain natural in appearance.
   1197  *
   1198  *  Colorimetric Rendering Intent (All cases):
   1199  *  Colorimetric indicates that colors should be preserved. In the case
   1200  *  that the source gamut lies wholly within the destination gamut or is
   1201  *  about the same (#1, #3), this will simply mean that no manipulations
   1202  *  (no saturation boost, for example) are applied. In the case where some
   1203  *  source colors lie outside the destination gamut (#2, #3), those will
   1204  *  need to be mapped to colors that are within the destination gamut,
   1205  *  while the already in-gamut colors remain unchanged.
   1206  *
   1207  *  Non-colorimetric transforms can take many forms. There are no hard
   1208  *  rules and it's left to the implementation to define.
   1209  *  Two common intents are described below.
   1210  *
   1211  *  Stretched-Gamut Enhancement Intent (Source < Destination):
   1212  *  When the destination gamut is much larger than the source gamut (#1), the
   1213  *  source primaries may be redefined to reflect the full extent of the
   1214  *  destination space, or to reflect an intermediate gamut.
   1215  *  Skin-tone preservation would likely be applied. An example might be sRGB
   1216  *  input displayed on a DCI-P3 capable device, with skin-tone preservation.
   1217  *
   1218  *  Within-Gamut Enhancement Intent (Source >= Destination):
   1219  *  When the device (destination) gamut is not larger than the source gamut
   1220  *  (#2 or #3), but the appearance of a larger gamut is desired, techniques
   1221  *  such as saturation boost may be applied to the source colors. Skin-tone
   1222  *  preservation may be applied. There is no unique method for within-gamut
   1223  *  enhancement; it would be defined within a flexible color mode.
   1224  *
   1225  */
   1226 @export(name="android_color_mode_t", value_prefix="HAL_COLOR_MODE_")
   1227 enum ColorMode : int32_t {
   1228     /**
   1229      * DEFAULT is the "native" gamut of the display.
   1230      * White Point: Vendor/OEM defined
   1231      * Panel Gamma: Vendor/OEM defined (typically 2.2)
   1232      * Rendering Intent: Vendor/OEM defined (typically 'enhanced')
   1233      */
   1234     NATIVE = 0,
   1235 
   1236     /**
   1237      * STANDARD_BT601_625 corresponds with display
   1238      * settings that implement the ITU-R Recommendation BT.601
   1239      * or Rec 601. Using 625 line version
   1240      * Rendering Intent: Colorimetric
   1241      * Primaries:
   1242      *                  x       y
   1243      *  green           0.290   0.600
   1244      *  blue            0.150   0.060
   1245      *  red             0.640   0.330
   1246      *  white (D65)     0.3127  0.3290
   1247      *
   1248      *  KR = 0.299, KB = 0.114. This adjusts the luminance interpretation
   1249      *  for RGB conversion from the one purely determined by the primaries
   1250      *  to minimize the color shift into RGB space that uses BT.709
   1251      *  primaries.
   1252      *
   1253      * Gamma Correction (GC):
   1254      *
   1255      *  if Vlinear < 0.018
   1256      *    Vnonlinear = 4.500 * Vlinear
   1257      *  else
   1258      *    Vnonlinear = 1.099 * (Vlinear)^(0.45) - 0.099
   1259      */
   1260     STANDARD_BT601_625 = 1,
   1261 
   1262     /**
   1263      * Primaries:
   1264      *                  x       y
   1265      *  green           0.290   0.600
   1266      *  blue            0.150   0.060
   1267      *  red             0.640   0.330
   1268      *  white (D65)     0.3127  0.3290
   1269      *
   1270      *  Use the unadjusted KR = 0.222, KB = 0.071 luminance interpretation
   1271      *  for RGB conversion.
   1272      *
   1273      * Gamma Correction (GC):
   1274      *
   1275      *  if Vlinear < 0.018
   1276      *    Vnonlinear = 4.500 * Vlinear
   1277      *  else
   1278      *    Vnonlinear = 1.099 * (Vlinear)^(0.45) - 0.099
   1279      */
   1280     STANDARD_BT601_625_UNADJUSTED = 2,
   1281 
   1282     /**
   1283      * Primaries:
   1284      *                  x       y
   1285      *  green           0.310   0.595
   1286      *  blue            0.155   0.070
   1287      *  red             0.630   0.340
   1288      *  white (D65)     0.3127  0.3290
   1289      *
   1290      *  KR = 0.299, KB = 0.114. This adjusts the luminance interpretation
   1291      *  for RGB conversion from the one purely determined by the primaries
   1292      *  to minimize the color shift into RGB space that uses BT.709
   1293      *  primaries.
   1294      *
   1295      * Gamma Correction (GC):
   1296      *
   1297      *  if Vlinear < 0.018
   1298      *    Vnonlinear = 4.500 * Vlinear
   1299      *  else
   1300      *    Vnonlinear = 1.099 * (Vlinear)^(0.45) - 0.099
   1301      */
   1302     STANDARD_BT601_525 = 3,
   1303 
   1304     /**
   1305      * Primaries:
   1306      *                  x       y
   1307      *  green           0.310   0.595
   1308      *  blue            0.155   0.070
   1309      *  red             0.630   0.340
   1310      *  white (D65)     0.3127  0.3290
   1311      *
   1312      *  Use the unadjusted KR = 0.212, KB = 0.087 luminance interpretation
   1313      *  for RGB conversion (as in SMPTE 240M).
   1314      *
   1315      * Gamma Correction (GC):
   1316      *
   1317      *  if Vlinear < 0.018
   1318      *    Vnonlinear = 4.500 * Vlinear
   1319      *  else
   1320      *    Vnonlinear = 1.099 * (Vlinear)^(0.45) - 0.099
   1321      */
   1322     STANDARD_BT601_525_UNADJUSTED = 4,
   1323 
   1324     /**
   1325      * REC709 corresponds with display settings that implement
   1326      * the ITU-R Recommendation BT.709 / Rec. 709 for high-definition television.
   1327      * Rendering Intent: Colorimetric
   1328      * Primaries:
   1329      *                  x       y
   1330      *  green           0.300   0.600
   1331      *  blue            0.150   0.060
   1332      *  red             0.640   0.330
   1333      *  white (D65)     0.3127  0.3290
   1334      *
   1335      * HDTV REC709 Inverse Gamma Correction (IGC): V represents normalized
   1336      * (with [0 to 1] range) value of R, G, or B.
   1337      *
   1338      *  if Vnonlinear < 0.081
   1339      *    Vlinear = Vnonlinear / 4.5
   1340      *  else
   1341      *    Vlinear = ((Vnonlinear + 0.099) / 1.099) ^ (1/0.45)
   1342      *
   1343      * HDTV REC709 Gamma Correction (GC):
   1344      *
   1345      *  if Vlinear < 0.018
   1346      *    Vnonlinear = 4.5 * Vlinear
   1347      *  else
   1348      *    Vnonlinear = 1.099 * (Vlinear) ^ 0.45 - 0.099
   1349      */
   1350     STANDARD_BT709 = 5,
   1351 
   1352     /**
   1353      * DCI_P3 corresponds with display settings that implement
   1354      * SMPTE EG 432-1 and SMPTE RP 431-2
   1355      * Rendering Intent: Colorimetric
   1356      * Primaries:
   1357      *                  x       y
   1358      *  green           0.265   0.690
   1359      *  blue            0.150   0.060
   1360      *  red             0.680   0.320
   1361      *  white (D65)     0.3127  0.3290
   1362      *
   1363      * Gamma: 2.6
   1364      */
   1365     DCI_P3 = 6,
   1366 
   1367     /**
   1368      * SRGB corresponds with display settings that implement
   1369      * the sRGB color space. Uses the same primaries as ITU-R Recommendation
   1370      * BT.709
   1371      * Rendering Intent: Colorimetric
   1372      * Primaries:
   1373      *                  x       y
   1374      *  green           0.300   0.600
   1375      *  blue            0.150   0.060
   1376      *  red             0.640   0.330
   1377      *  white (D65)     0.3127  0.3290
   1378      *
   1379      * PC/Internet (sRGB) Inverse Gamma Correction (IGC):
   1380      *
   1381      *  if Vnonlinear <= 0.03928
   1382      *    Vlinear = Vnonlinear / 12.92
   1383      *  else
   1384      *    Vlinear = ((Vnonlinear + 0.055)/1.055) ^ 2.4
   1385      *
   1386      * PC/Internet (sRGB) Gamma Correction (GC):
   1387      *
   1388      *  if Vlinear <= 0.0031308
   1389      *    Vnonlinear = 12.92 * Vlinear
   1390      *  else
   1391      *    Vnonlinear = 1.055 * (Vlinear)^(1/2.4) - 0.055
   1392      */
   1393     SRGB = 7,
   1394 
   1395     /**
   1396      * ADOBE_RGB corresponds with the RGB color space developed
   1397      * by Adobe Systems, Inc. in 1998.
   1398      * Rendering Intent: Colorimetric
   1399      * Primaries:
   1400      *                  x       y
   1401      *  green           0.210   0.710
   1402      *  blue            0.150   0.060
   1403      *  red             0.640   0.330
   1404      *  white (D65)     0.3127  0.3290
   1405      *
   1406      * Gamma: 2.2
   1407      */
   1408     ADOBE_RGB = 8,
   1409 
   1410     /**
   1411      * DISPLAY_P3 is a color space that uses the DCI_P3 primaries,
   1412      * the D65 white point and the SRGB transfer functions.
   1413      * Rendering Intent: Colorimetric
   1414      * Primaries:
   1415      *                  x       y
   1416      *  green           0.265   0.690
   1417      *  blue            0.150   0.060
   1418      *  red             0.680   0.320
   1419      *  white (D65)     0.3127  0.3290
   1420      *
   1421      * PC/Internet (sRGB) Gamma Correction (GC):
   1422      *
   1423      *  if Vlinear <= 0.0030186
   1424      *    Vnonlinear = 12.92 * Vlinear
   1425      *  else
   1426      *    Vnonlinear = 1.055 * (Vlinear)^(1/2.4) - 0.055
   1427      *
   1428      * Note: In most cases sRGB transfer function will be fine.
   1429      */
   1430     DISPLAY_P3 = 9
   1431 };
   1432 
   1433 /**
   1434  * Color transforms that may be applied by hardware composer to the whole
   1435  * display.
   1436  */
   1437 @export(name="android_color_transform_t", value_prefix="HAL_COLOR_TRANSFORM_")
   1438 enum ColorTransform : int32_t {
   1439     /** Applies no transform to the output color */
   1440     IDENTITY = 0,
   1441 
   1442     /** Applies an arbitrary transform defined by a 4x4 affine matrix */
   1443     ARBITRARY_MATRIX = 1,
   1444 
   1445     /**
   1446      * Applies a transform that inverts the value or luminance of the color, but
   1447      * does not modify hue or saturation. */
   1448     VALUE_INVERSE = 2,
   1449 
   1450     /** Applies a transform that maps all colors to shades of gray */
   1451     GRAYSCALE = 3,
   1452 
   1453     /** Applies a transform which corrects for protanopic color blindness */
   1454     CORRECT_PROTANOPIA = 4,
   1455 
   1456     /** Applies a transform which corrects for deuteranopic color blindness */
   1457     CORRECT_DEUTERANOPIA = 5,
   1458 
   1459     /** Applies a transform which corrects for tritanopic color blindness */
   1460     CORRECT_TRITANOPIA = 6
   1461 };
   1462 
   1463 /**
   1464  * Supported HDR formats. Must be kept in sync with equivalents in Display.java.
   1465  */
   1466 @export(name="android_hdr_t", value_prefix="HAL_HDR_")
   1467 enum Hdr : int32_t {
   1468     /** Device supports Dolby Vision HDR */
   1469     DOLBY_VISION = 1,
   1470 
   1471     /** Device supports HDR10 */
   1472     HDR10 = 2,
   1473 
   1474     /** Device supports hybrid log-gamma (ARIB STD-B67) HDR */
   1475     HLG = 3
   1476 };
   1477