/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>   // errno / strerror() are used in the allocation paths below
#include <string.h>  // memset(); these headers may also be pulled in via gr.h
#include <pthread.h> // pthread_mutex_lock()/unlock()

#include <sys/mman.h>

#include "gr.h"
#include "gpu.h"

gpu_context_t::gpu_context_t(Deps& deps, PmemAllocator& pmemAllocator,
        PmemAllocator& pmemAdspAllocator, const private_module_t* module) :
    deps(deps),
    pmemAllocator(pmemAllocator),
    pmemAdspAllocator(pmemAdspAllocator)
{
    // Zero out the alloc_device_t
    memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));

    // Initialize the procs
    common.tag     = HARDWARE_DEVICE_TAG;
    common.version = 0;
    common.module  = const_cast<hw_module_t*>(&module->base.common);
    common.close   = gralloc_close;
    alloc          = gralloc_alloc;
    free           = gralloc_free;
}

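// Carve a buffer out of the (already mmapped) framebuffer. Each buffer takes
// one page-flip slot tracked by m->bufferMask; the resulting handle shares the
// framebuffer fd and records its offset into the mapping. With a single
// buffer, a regular buffer is returned instead and memcpy'ed at post time.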
int gpu_context_t::gralloc_alloc_framebuffer_locked(size_t size, int usage,
        buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);

    // we don't support allocations with both the FB and PMEM_ADSP flags
    if (usage & GRALLOC_USAGE_PRIVATE_PMEM_ADSP) {
        return -EINVAL;
    }

    // allocate the framebuffer
    if (m->framebuffer == NULL) {
        // initialize the framebuffer; it is mapped once and forever.
        int err = deps.mapFrameBufferLocked(m);
        if (err < 0) {
            return err;
        }
    }

    const uint32_t bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    const size_t bufferSize = m->finfo.line_length * m->info.yres;
    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(bufferSize, newUsage, pHandle);
    }

    if (bufferMask >= ((1LU<<numBuffers)-1)) {
        // We ran out of buffers.
        return -ENOMEM;
    }

    // create a "fake" handle for it
    intptr_t vaddr = intptr_t(m->framebuffer->base);
    private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), size,
                                                 private_handle_t::PRIV_FLAGS_USES_PMEM |
                                                 private_handle_t::PRIV_FLAGS_FRAMEBUFFER);

    // find a free slot
    for (uint32_t i=0 ; i<numBuffers ; i++) {
        if ((bufferMask & (1LU<<i)) == 0) {
            m->bufferMask |= (1LU<<i);
            break;
        }
        vaddr += bufferSize;
    }

    hnd->base = vaddr;
    hnd->offset = vaddr - intptr_t(m->framebuffer->base);
    *pHandle = hnd;

    return 0;
}

int gpu_context_t::gralloc_alloc_framebuffer(size_t size, int usage,
        buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
    pthread_mutex_lock(&m->lock);
    int err = gralloc_alloc_framebuffer_locked(size, usage, pHandle);
    pthread_mutex_unlock(&m->lock);
    return err;
}

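// Allocate a non-framebuffer gralloc buffer. HW_TEXTURE/HW_2D usage and the
// PMEM_ADSP flag route the request to one of the pmem allocators; otherwise
// an ashmem region is used. A failed pmem allocation falls back to ashmem
// only when the caller requested no hardware usage at all.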
int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage, buffer_handle_t* pHandle)
{
    int err = 0;
    int flags = 0;

    int fd = -1;
    void* base = 0; // XXX JMG: This should change to just get an address from
                    // the PmemAllocator rather than getting the base & offset separately
    int offset = 0;
    int lockState = 0;

    size = roundUpToPageSize(size);

    if (usage & GRALLOC_USAGE_HW_TEXTURE) {
        // enable pmem in that case, so our software GL can fall back to
        // the copybit module.
        flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
    }

    if (usage & GRALLOC_USAGE_HW_2D) {
        flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
    }

    if (usage & GRALLOC_USAGE_PRIVATE_PMEM_ADSP) {
        flags |= private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP;
        flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM;
    }

    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);

    if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM) != 0 ||
        (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP) != 0) {

        PmemAllocator* pma = 0;

        if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM) != 0) {
          if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP) != 0) {
              LOGE("attempting to allocate a gralloc buffer with both the "
                   "USES_PMEM and USES_PMEM_ADSP flags.  Unsetting the "
                   "USES_PMEM_ADSP flag.");
              flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP;
          }
          pma = &pmemAllocator;
        } else { // (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP) != 0
          pma = &pmemAdspAllocator;
        }

        // PMEM buffers are always mmapped
        lockState |= private_handle_t::LOCK_STATE_MAPPED;

        // Allocate the buffer from pmem
        err = pma->alloc_pmem_buffer(size, usage, &base, &offset, &fd);
        if (err < 0) {
            if (((usage & GRALLOC_USAGE_HW_MASK) == 0) &&
                ((usage & GRALLOC_USAGE_PRIVATE_PMEM_ADSP) == 0)) {
                // the caller didn't request PMEM, so we can try something else
                flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM;
                err = 0;
                goto try_ashmem;
            } else {
                LOGE("couldn't open pmem (%s)", strerror(errno));
            }
        }
    } else {
try_ashmem:
        fd = deps.ashmem_create_region("gralloc-buffer", size);
        if (fd < 0) {
            LOGE("couldn't create ashmem (%s)", strerror(errno));
            err = -errno;
        }
    }

    if (err == 0) {
        private_handle_t* hnd = new private_handle_t(fd, size, flags);
        hnd->offset = offset;
        hnd->base = int(base)+offset;
        hnd->lockState = lockState;
        *pHandle = hnd;
    }

    LOGE_IF(err, "gralloc failed err=%s", strerror(-err));

    return err;
}

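// Round x up to the next multiple of align; align must be a power of two
// (e.g. ALIGN(33, 32) == 64).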
static inline size_t ALIGN(size_t x, size_t align) {
    return (x + align-1) & ~(align-1);
}

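// Compute the buffer size for the requested format from 32-pixel aligned
// dimensions (with format-specific overrides for the YUV cases), then
// allocate either a framebuffer slot (GRALLOC_USAGE_HW_FB) or a regular
// buffer. The stride returned through pStride is in pixels.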
int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
        buffer_handle_t* pHandle, int* pStride) {
    if (!pHandle || !pStride)
        return -EINVAL;

    size_t size, alignedw, alignedh;

    alignedw = ALIGN(w, 32);
    alignedh = ALIGN(h, 32);
    switch (format) {
        case HAL_PIXEL_FORMAT_RGBA_8888:
        case HAL_PIXEL_FORMAT_RGBX_8888:
        case HAL_PIXEL_FORMAT_BGRA_8888:
            size = alignedw * alignedh * 4;
            break;
        case HAL_PIXEL_FORMAT_RGB_888:
            size = alignedw * alignedh * 3;
            break;
        case HAL_PIXEL_FORMAT_RGB_565:
        case HAL_PIXEL_FORMAT_RGBA_5551:
        case HAL_PIXEL_FORMAT_RGBA_4444:
            size = alignedw * alignedh * 2;
            break;

        // adreno formats
        case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO:  // NV21
            size  = ALIGN(alignedw*alignedh, 4096);
            size += ALIGN(2 * ALIGN(w/2, 32) * ALIGN(h/2, 32), 4096);
            break;
        case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:   // NV12
            // The chroma plane is subsampled,
            // but the pitch in bytes is unchanged.
            // The GPU needs 4K alignment, but the video decoder needs 8K.
            alignedw = ALIGN(w, 128);
            size  = ALIGN( alignedw * alignedh, 8192);
            size += ALIGN( alignedw * ALIGN(h/2, 32), 4096);
            break;

        case HAL_PIXEL_FORMAT_YV12:
            if ((w&1) || (h&1)) {
                LOGE("w or h is odd for HAL_PIXEL_FORMAT_YV12");
                return -EINVAL;
            }
            alignedw = ALIGN(w, 16);
            alignedh = h;
            size = alignedw*alignedh +
                    (ALIGN(alignedw/2, 16) * (alignedh/2))*2;
            break;

        default:
            LOGE("unrecognized pixel format: %d", format);
            return -EINVAL;
    }

    if ((ssize_t)size <= 0)
        return -EINVAL;

    int err;
    if (usage & GRALLOC_USAGE_HW_FB) {
        err = gralloc_alloc_framebuffer(size, usage, pHandle);
    } else {
        err = gralloc_alloc_buffer(size, usage, pHandle);
    }

    if (err < 0) {
        return err;
    }

    *pStride = alignedw;
    return 0;
}

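// Release a buffer returned by alloc_impl: framebuffer handles give their
// page-flip slot back to bufferMask, while pmem/ashmem handles free any pmem
// backing and are unmapped via deps.terminateBuffer(). In both cases the
// handle's fd is closed and the handle itself deleted.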
int gpu_context_t::free_impl(private_handle_t const* hnd) {
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
        // free this buffer
        const size_t bufferSize = m->finfo.line_length * m->info.yres;
        int index = (hnd->base - m->framebuffer->base) / bufferSize;
        m->bufferMask &= ~(1<<index);
    } else {
        PmemAllocator* pmem_allocator = 0;
        if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
            pmem_allocator = &pmemAllocator;
        } else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP) {
            pmem_allocator = &pmemAdspAllocator;
        }
        if (pmem_allocator) {
            pmem_allocator->free_pmem_buffer(hnd->size, (void*)hnd->base,
                    hnd->offset, hnd->fd);
        }
        deps.terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
    }

    deps.close(hnd->fd);
    delete hnd; // XXX JMG: move this to the deps
    return 0;
}

/******************************************************************************
 * Static functions
 *****************************************************************************/

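// These are the alloc_device_t entry points installed by the constructor; the
// device pointer they receive is the gpu_context_t itself, so they cast and
// forward to the member implementations above.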
int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
        int usage, buffer_handle_t* pHandle, int* pStride)
{
    if (!dev) {
        return -EINVAL;
    }
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    return gpu->alloc_impl(w, h, format, usage, pHandle, pStride);
}

int gpu_context_t::gralloc_free(alloc_device_t* dev,
                                    buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    return gpu->free_impl(hnd);
}

/*****************************************************************************/

int gpu_context_t::gralloc_close(struct hw_device_t *dev)
{
    gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
    if (ctx) {
        /* TODO: keep a list of all buffer_handle_t created, and free them
         * all here.
         */
        delete ctx;
    }
    return 0;
}


gpu_context_t::Deps::~Deps() {}