      1 /*
      2  * Copyright (C) 2013 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "rsdCore.h"
     18 #include "rsdAllocation.h"
     19 
     20 #include "rsAllocation.h"
     21 
     22 #if !defined(RS_SERVER) && !defined(RS_COMPATIBILITY_LIB)
     23 #include "system/window.h"
     24 #include "ui/Rect.h"
     25 #include "ui/GraphicBufferMapper.h"
     26 #endif
     27 
     28 #ifdef RS_COMPATIBILITY_LIB
     29 #include "rsCompatibilityLib.h"
     30 #else
     31 #include "rsdFrameBufferObj.h"
     32 #include "gui/GLConsumer.h"
     33 #include "gui/CpuConsumer.h"
     34 #include "gui/Surface.h"
     35 #include "hardware/gralloc.h"
     36 
     37 #include <GLES/gl.h>
     38 #include <GLES2/gl2.h>
     39 #include <GLES/glext.h>
     40 #endif
     41 
     42 #ifdef RS_SERVER
     43 // server requires malloc.h for memalign
     44 #include <malloc.h>
     45 #endif
     46 
     47 using namespace android;
     48 using namespace android::renderscript;
     49 
     50 
     51 #ifndef RS_COMPATIBILITY_LIB
     52 const static GLenum gFaceOrder[] = {
     53     GL_TEXTURE_CUBE_MAP_POSITIVE_X,
     54     GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
     55     GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
     56     GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
     57     GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
     58     GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
     59 };
     60 
     61 GLenum rsdTypeToGLType(RsDataType t) {
     62     switch (t) {
     63     case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
     64     case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
     65     case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;
     66 
     67     //case RS_TYPE_FLOAT_16:      return GL_HALF_FLOAT;
     68     case RS_TYPE_FLOAT_32:      return GL_FLOAT;
     69     case RS_TYPE_UNSIGNED_8:    return GL_UNSIGNED_BYTE;
     70     case RS_TYPE_UNSIGNED_16:   return GL_UNSIGNED_SHORT;
     71     case RS_TYPE_SIGNED_8:      return GL_BYTE;
     72     case RS_TYPE_SIGNED_16:     return GL_SHORT;
     73     default:    break;
     74     }
     75     return 0;
     76 }
     77 
     78 GLenum rsdKindToGLFormat(RsDataKind k) {
     79     switch (k) {
     80     case RS_KIND_PIXEL_L: return GL_LUMINANCE;
     81     case RS_KIND_PIXEL_A: return GL_ALPHA;
     82     case RS_KIND_PIXEL_LA: return GL_LUMINANCE_ALPHA;
     83     case RS_KIND_PIXEL_RGB: return GL_RGB;
     84     case RS_KIND_PIXEL_RGBA: return GL_RGBA;
     85     case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
     86     default: break;
     87     }
     88     return 0;
     89 }
     90 #endif
     91 
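         // Returns a pointer to element (xoff, yoff, zoff) of the given mip level
         // and cubemap face, using the per-LOD strides and face offset cached in
         // drvState.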
     92 uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
     93                       uint32_t xoff, uint32_t yoff, uint32_t zoff,
     94                       uint32_t lod, RsAllocationCubemapFace face) {
     95     uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
     96     ptr += face * alloc->mHal.drvState.faceOffset;
     97     ptr += zoff * alloc->mHal.drvState.lod[lod].dimY * alloc->mHal.drvState.lod[lod].stride;
     98     ptr += yoff * alloc->mHal.drvState.lod[lod].stride;
     99     ptr += xoff * alloc->mHal.state.elementSizeBytes;
    100     return ptr;
    101 }
    102 
    103 
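         // Copies a sub-rectangle of host data into the allocation's GL texture
         // (or the selected cubemap face) with glTexSubImage2D.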
    104 static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
    105                             uint32_t xoff, uint32_t yoff, uint32_t lod,
    106                             RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
    107 #ifndef RS_COMPATIBILITY_LIB
    108     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    109 
    110     rsAssert(drv->textureID);
    111     RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    112     RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    113     GLenum t = GL_TEXTURE_2D;
    114     if (alloc->mHal.state.hasFaces) {
    115         t = gFaceOrder[face];
    116     }
    117     RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
    118 #endif
    119 }
    120 
    121 
    122 #ifndef RS_COMPATIBILITY_LIB
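         // Uploads every face and mip level of the allocation to its GL texture:
         // glTexImage2D on the first upload, glTexSubImage2D on later syncs, then
         // optionally regenerates mipmaps.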
    123 static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
    124     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    125 
    126     RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    127     RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    128 
    129     uint32_t faceCount = 1;
    130     if (alloc->mHal.state.hasFaces) {
    131         faceCount = 6;
    132     }
    133 
    134     rsdGLCheckError(rsc, "Upload2DTexture 1 ");
    135     for (uint32_t face = 0; face < faceCount; face ++) {
    136         for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
    137             const uint8_t *p = GetOffsetPtr(alloc, 0, 0, 0, lod, (RsAllocationCubemapFace)face);
    138 
    139             GLenum t = GL_TEXTURE_2D;
    140             if (alloc->mHal.state.hasFaces) {
    141                 t = gFaceOrder[face];
    142             }
    143 
    144             if (isFirstUpload) {
    145                 RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
    146                              alloc->mHal.state.type->getLODDimX(lod),
    147                              alloc->mHal.state.type->getLODDimY(lod),
    148                              0, drv->glFormat, drv->glType, p);
    149             } else {
    150                 RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
    151                                 alloc->mHal.state.type->getLODDimX(lod),
    152                                 alloc->mHal.state.type->getLODDimY(lod),
    153                                 drv->glFormat, drv->glType, p);
    154             }
    155         }
    156     }
    157 
    158     if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
    159         RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
    160     }
    161     rsdGLCheckError(rsc, "Upload2DTexture");
    162 }
    163 #endif
    164 
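         // Lazily creates the texture name and pushes host memory to the GPU.
         // IO_INPUT allocations only get a texture name here; if the allocation is
         // not also used from scripts, the host copy is freed after the upload.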
    165 static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
    166 #ifndef RS_COMPATIBILITY_LIB
    167     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    168 
    169     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
    170         if (!drv->textureID) {
    171             RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
    172         }
    173         return;
    174     }
    175 
    176     if (!drv->glType || !drv->glFormat) {
    177         return;
    178     }
    179 
    180     if (!alloc->mHal.drvState.lod[0].mallocPtr) {
    181         return;
    182     }
    183 
    184     bool isFirstUpload = false;
    185 
    186     if (!drv->textureID) {
    187         RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
    188         isFirstUpload = true;
    189     }
    190 
    191     Upload2DTexture(rsc, alloc, isFirstUpload);
    192 
    193     if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
    194         if (alloc->mHal.drvState.lod[0].mallocPtr) {
    195             free(alloc->mHal.drvState.lod[0].mallocPtr);
    196             alloc->mHal.drvState.lod[0].mallocPtr = NULL;
    197         }
    198     }
    199     rsdGLCheckError(rsc, "UploadToTexture");
    200 #endif
    201 }
    202 
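         // Creates the GL renderbuffer backing a render-target allocation, sized
         // to LOD 0, if one does not already exist.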
    203 static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
    204 #ifndef RS_COMPATIBILITY_LIB
    205     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    206 
    207     if (!drv->glFormat) {
    208         return;
    209     }
    210 
    211     if (!drv->renderTargetID) {
    212         RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);
    213 
    214         if (!drv->renderTargetID) {
    215             // This should generally not happen
    216             ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
    217             rsc->dumpDebug();
    218             return;
    219         }
    220         RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
    221         RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
    222                     alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
    223     }
    224     rsdGLCheckError(rsc, "AllocateRenderTarget");
    225 #endif
    226 }
    227 
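         // Uploads a 1D allocation's contents into its GL buffer object with
         // glBufferData (used for vertex data).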
    228 static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
    229 #ifndef RS_COMPATIBILITY_LIB
    230     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    231 
    232     rsAssert(!alloc->mHal.state.type->getDimY());
    233     rsAssert(!alloc->mHal.state.type->getDimZ());
    234 
    235     //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;
    236 
    237     if (!drv->bufferID) {
    238         RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
    239     }
    240     if (!drv->bufferID) {
    241         ALOGE("Upload to buffer object failed");
    242         drv->uploadDeferred = true;
    243         return;
    244     }
    245     RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
    246     RSD_CALL_GL(glBufferData, drv->glTarget,
    247                 alloc->mHal.state.type->getPackedSizeBytes(),
    248                 alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
    249     RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
    250     rsdGLCheckError(rsc, "UploadToBufferObject");
    251 #endif
    252 }
    253 
    254 
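         // Fills in the chroma plane dimensions, strides and pointers for the
         // given HAL pixel format and returns the extra bytes the chroma planes
         // require beyond the Y plane.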
    255 static size_t DeriveYUVLayout(int yuv, Allocation::Hal::DrvState *state) {
     256     // YUV only supports basic 2D, so we can stash the chroma plane
     257     // pointers in the otherwise unused mipmap levels.
    258     size_t uvSize = 0;
    259     state->lod[1].dimX = state->lod[0].dimX / 2;
    260     state->lod[1].dimY = state->lod[0].dimY / 2;
    261     state->lod[2].dimX = state->lod[0].dimX / 2;
    262     state->lod[2].dimY = state->lod[0].dimY / 2;
    263     state->yuv.shift = 1;
    264     state->yuv.step = 1;
    265     state->lodCount = 3;
    266 
    267 #ifndef RS_SERVER
    268     switch(yuv) {
    269     case HAL_PIXEL_FORMAT_YV12:
    270         state->lod[2].stride = rsRound(state->lod[0].stride >> 1, 16);
    271         state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
    272                 (state->lod[0].stride * state->lod[0].dimY);
    273         uvSize += state->lod[2].stride * state->lod[2].dimY;
    274 
    275         state->lod[1].stride = state->lod[2].stride;
    276         state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) +
    277                 (state->lod[2].stride * state->lod[2].dimY);
    278         uvSize += state->lod[1].stride * state->lod[2].dimY;
    279         break;
    280     case HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
    281         //state->lod[1].dimX = state->lod[0].dimX;
    282         state->lod[1].stride = state->lod[0].stride;
    283         state->lod[2].stride = state->lod[0].stride;
    284         state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
    285                 (state->lod[0].stride * state->lod[0].dimY);
    286         state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) + 1;
    287         uvSize += state->lod[1].stride * state->lod[1].dimY;
    288         state->yuv.step = 2;
    289         break;
    290 #ifndef RS_COMPATIBILITY_LIB
    291     case HAL_PIXEL_FORMAT_YCbCr_420_888:
    292         // This will be filled in by ioReceive()
    293         break;
    294 #endif
    295     default:
    296         rsAssert(0);
    297     }
    298 #endif
    299     return uvSize;
    300 }
    301 
    302 
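         // Fills in per-LOD dimensions, strides and pointers for the allocation
         // from 'type' and the base pointer 'ptr', and returns the total backing
         // size in bytes (all LODs and, for cubemaps, all six faces).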
    303 static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
    304         const Type *type, uint8_t *ptr) {
    305     alloc->mHal.drvState.lod[0].dimX = type->getDimX();
    306     alloc->mHal.drvState.lod[0].dimY = type->getDimY();
    307     alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
    308     alloc->mHal.drvState.lod[0].mallocPtr = 0;
    309     // Stride needs to be 16-byte aligned too!
    310     size_t stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
    311     alloc->mHal.drvState.lod[0].stride = rsRound(stride, 16);
    312     alloc->mHal.drvState.lodCount = type->getLODCount();
    313     alloc->mHal.drvState.faceCount = type->getDimFaces();
    314 
    315     size_t offsets[Allocation::MAX_LOD];
    316     memset(offsets, 0, sizeof(offsets));
    317 
    318     size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
    319             rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
    320     if (alloc->mHal.state.yuv) {
    321         o += DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);
    322 
    323         for (uint32_t ct = 1; ct < alloc->mHal.drvState.lodCount; ct++) {
    324             offsets[ct] = (size_t)alloc->mHal.drvState.lod[ct].mallocPtr;
    325         }
    326     } else if(alloc->mHal.drvState.lodCount > 1) {
    327         uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
    328         uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
    329         uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
    330         for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
    331             alloc->mHal.drvState.lod[lod].dimX = tx;
    332             alloc->mHal.drvState.lod[lod].dimY = ty;
    333             alloc->mHal.drvState.lod[lod].dimZ = tz;
    334             alloc->mHal.drvState.lod[lod].stride =
    335                     rsRound(tx * type->getElementSizeBytes(), 16);
    336             offsets[lod] = o;
    337             o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
    338             if (tx > 1) tx >>= 1;
    339             if (ty > 1) ty >>= 1;
    340             if (tz > 1) tz >>= 1;
    341         }
    342     }
    343 
    344     alloc->mHal.drvState.faceOffset = o;
    345 
    346     alloc->mHal.drvState.lod[0].mallocPtr = ptr;
    347     for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
    348         alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
    349     }
    350 
    351     size_t allocSize = alloc->mHal.drvState.faceOffset;
    352     if(alloc->mHal.drvState.faceCount) {
    353         allocSize *= 6;
    354     }
    355 
    356     return allocSize;
    357 }
    358 
    359 static uint8_t* allocAlignedMemory(size_t allocSize, bool forceZero) {
    360     // We align all allocations to a 16-byte boundary.
    361     uint8_t* ptr = (uint8_t *)memalign(16, allocSize);
    362     if (!ptr) {
    363         return NULL;
    364     }
    365     if (forceZero) {
    366         memset(ptr, 0, allocSize);
    367     }
    368     return ptr;
    369 }
    370 
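         // Driver-side allocation constructor: sizes the allocation, obtains
         // backing memory (driver-allocated, user-provided, or deferred to the IO
         // surface) and records the GL target, format and type.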
    371 bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
    372     DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
    373     if (!drv) {
    374         return false;
    375     }
    376     alloc->mHal.drv = drv;
    377 
    378     // Calculate the object size.
    379     size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), NULL);
    380 
    381     uint8_t * ptr = NULL;
    382     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {
    383 
    384     } else if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
     385         // The backing memory is allocated when the surface is created
     386         // in getSurface
    387     } else if (alloc->mHal.state.userProvidedPtr != NULL) {
    388         // user-provided allocation
     389         // limitations: no faces, no LODs; usage must be SCRIPT | SHARED, optionally plus GRAPHICS_TEXTURE
    390         if (!(alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED) ||
    391               alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED | RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE))) {
    392             ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT | USAGE_SHARED or USAGE_SCRIPT | USAGE_SHARED | USAGE_GRAPHICS_TEXTURE");
    393             return false;
    394         }
    395         if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
    396             ALOGE("User-allocated buffers must not have multiple faces or LODs");
    397             return false;
    398         }
    399 
     400         // Rows must be 16-byte aligned; validate that here, otherwise fall
     401         // back to a separate driver-owned allocation.
    402         if (((alloc->getType()->getDimX() * alloc->getType()->getElement()->getSizeBytes()) % 16) != 0) {
    403             ALOGV("User-backed allocation failed stride requirement, falling back to separate allocation");
    404             drv->useUserProvidedPtr = false;
    405 
    406             ptr = allocAlignedMemory(allocSize, forceZero);
    407             if (!ptr) {
    408                 alloc->mHal.drv = NULL;
    409                 free(drv);
    410                 return false;
    411             }
    412 
    413         } else {
    414             drv->useUserProvidedPtr = true;
    415             ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
    416         }
    417     } else {
    418         ptr = allocAlignedMemory(allocSize, forceZero);
    419         if (!ptr) {
    420             alloc->mHal.drv = NULL;
    421             free(drv);
    422             return false;
    423         }
    424     }
    425     // Build the pointer tables
    426     size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr);
    427     if(allocSize != verifySize) {
    428         rsAssert(!"Size mismatch");
    429     }
    430 
    431 #ifndef RS_SERVER
    432     drv->glTarget = GL_NONE;
    433     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
    434         if (alloc->mHal.state.hasFaces) {
    435             drv->glTarget = GL_TEXTURE_CUBE_MAP;
    436         } else {
    437             drv->glTarget = GL_TEXTURE_2D;
    438         }
    439     } else {
    440         if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
    441             drv->glTarget = GL_ARRAY_BUFFER;
    442         }
    443     }
    444 #endif
    445 
    446 #ifndef RS_COMPATIBILITY_LIB
    447     drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
    448     drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
    449 #else
    450     drv->glType = 0;
    451     drv->glFormat = 0;
    452 #endif
    453 
    454     if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
    455         drv->uploadDeferred = true;
    456     }
    457 
    458 
    459     drv->readBackFBO = NULL;
    460 
    461     // fill out the initial state of the buffer if we couldn't use the user-provided ptr and USAGE_SHARED was accepted
    462     if ((alloc->mHal.state.userProvidedPtr != 0) && (drv->useUserProvidedPtr == false)) {
    463         rsdAllocationData2D(rsc, alloc, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X, alloc->getType()->getDimX(), alloc->getType()->getDimY(), alloc->mHal.state.userProvidedPtr, allocSize, 0);
    464     }
    465 
    466 
    467 #ifdef RS_FIND_OFFSETS
    468     ALOGE("pointer for allocation: %p", alloc);
    469     ALOGE("pointer for allocation.drv: %p", &alloc->mHal.drv);
    470 #endif
    471 
    472 
    473     return true;
    474 }
    475 
    476 void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
    477     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    478 
    479 #ifndef RS_COMPATIBILITY_LIB
    480     if (drv->bufferID) {
    481         // Causes a SW crash....
    482         //ALOGV(" mBufferID %i", mBufferID);
    483         //glDeleteBuffers(1, &mBufferID);
    484         //mBufferID = 0;
    485     }
    486     if (drv->textureID) {
    487         RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
    488         drv->textureID = 0;
    489     }
    490     if (drv->renderTargetID) {
    491         RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
    492         drv->renderTargetID = 0;
    493     }
    494 #endif
    495 
    496     if (alloc->mHal.drvState.lod[0].mallocPtr) {
     497         // don't free user-allocated ptrs or IO_INPUT/IO_OUTPUT buffers
    498         if (!(drv->useUserProvidedPtr) &&
    499             !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) &&
    500             !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
    501                 free(alloc->mHal.drvState.lod[0].mallocPtr);
    502         }
    503         alloc->mHal.drvState.lod[0].mallocPtr = NULL;
    504     }
    505 
    506 #ifndef RS_COMPATIBILITY_LIB
    507     if (drv->readBackFBO != NULL) {
    508         delete drv->readBackFBO;
    509         drv->readBackFBO = NULL;
    510     }
    511 
    512     if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
    513         (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
    514 
    515         DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    516         ANativeWindow *nw = drv->wndSurface;
    517         if (nw) {
    518             GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    519             mapper.unlock(drv->wndBuffer->handle);
    520             int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
    521 
    522             drv->wndSurface = NULL;
    523             native_window_api_disconnect(nw, NATIVE_WINDOW_API_CPU);
    524             nw->decStrong(NULL);
    525         }
    526     }
    527 #endif
    528 
    529     free(drv);
    530     alloc->mHal.drv = NULL;
    531 }
    532 
    533 void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
    534                          const Type *newType, bool zeroNew) {
    535     const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
    536     const uint32_t dimX = newType->getDimX();
    537 
    538     // can't resize Allocations with user-allocated buffers
    539     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
    540         ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
    541         return;
    542     }
    543     void * oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
    544     // Calculate the object size
    545     size_t s = AllocationBuildPointerTable(rsc, alloc, newType, NULL);
    546     uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
    547     // Build the relative pointer tables.
    548     size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
    549     if(s != verifySize) {
    550         rsAssert(!"Size mismatch");
    551     }
    552 
    553 
    554     if (dimX > oldDimX) {
    555         size_t stride = alloc->mHal.state.elementSizeBytes;
    556         memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
    557                  0, stride * (dimX - oldDimX));
    558     }
    559 }
    560 
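         // Reads the rendered contents of the allocation back from its FBO into
         // host memory with glReadPixels, then restores the previously active
         // framebuffer.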
    561 static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
    562 #ifndef RS_COMPATIBILITY_LIB
    563     if (!alloc->getIsScript()) {
    564         return; // nothing to sync
    565     }
    566 
    567     RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    568     RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;
    569 
    570     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    571     if (!drv->textureID && !drv->renderTargetID) {
    572         return; // nothing was rendered here yet, so nothing to sync
    573     }
    574     if (drv->readBackFBO == NULL) {
    575         drv->readBackFBO = new RsdFrameBufferObj();
    576         drv->readBackFBO->setColorTarget(drv, 0);
    577         drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
    578                                         alloc->getType()->getDimY());
    579     }
    580 
    581     // Bind the framebuffer object so we can read back from it
    582     drv->readBackFBO->setActive(rsc);
    583 
    584     // Do the readback
    585     RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
    586                 alloc->mHal.drvState.lod[0].dimY,
    587                 drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);
    588 
    589     // Revert framebuffer to its original
    590     lastFbo->setActive(rsc);
    591 #endif
    592 }
    593 
    594 
    595 void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
    596                          RsAllocationUsageType src) {
    597     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    598 
    599     if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
    600         if(!alloc->getIsRenderTarget()) {
    601             rsc->setError(RS_ERROR_FATAL_DRIVER,
     602                           "Attempting to sync from a render target on a "
     603                           "non-render-target allocation");
    604         } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
     605             rsc->setError(RS_ERROR_FATAL_DRIVER, "Can only sync from an RGBA "
     606                                                  "render target");
    607         } else {
    608             rsdAllocationSyncFromFBO(rsc, alloc);
    609         }
    610         return;
    611     }
    612 
    613     rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT || src == RS_ALLOCATION_USAGE_SHARED);
    614 
    615     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
    616         UploadToTexture(rsc, alloc);
    617     } else {
    618         if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
    619             !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
    620             AllocateRenderTarget(rsc, alloc);
    621         }
    622     }
    623     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
    624         UploadToBufferObject(rsc, alloc);
    625     }
    626 
    627     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
    628 
    629         if (src == RS_ALLOCATION_USAGE_SHARED) {
    630             // just a memory fence for the CPU driver
    631             // vendor drivers probably want to flush any dirty cachelines for
    632             // this particular Allocation
    633             __sync_synchronize();
    634         }
    635     }
    636 
    637     drv->uploadDeferred = false;
    638 }
    639 
    640 void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
    641     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    642     drv->uploadDeferred = true;
    643 }
    644 
    645 #ifndef RS_COMPATIBILITY_LIB
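         // Dequeues the next buffer from the window and maps it, so the
         // allocation's LOD 0 pointer and stride track the gralloc buffer.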
    646 static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    647     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    648 
    649     int32_t r = native_window_dequeue_buffer_and_wait(nw, &drv->wndBuffer);
    650     if (r) {
    651         rsc->setError(RS_ERROR_DRIVER, "Error getting next IO output buffer.");
    652         return false;
    653     }
    654 
    655     // Must lock the whole surface
    656     GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    657     Rect bounds(drv->wndBuffer->width, drv->wndBuffer->height);
    658 
    659     void *dst = NULL;
    660     mapper.lock(drv->wndBuffer->handle,
    661             GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
    662             bounds, &dst);
    663     alloc->mHal.drvState.lod[0].mallocPtr = dst;
    664     alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;
    665     rsAssert((alloc->mHal.drvState.lod[0].stride & 0xf) == 0);
    666 
    667     return true;
    668 }
    669 #endif
    670 
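         // Attaches an IO_OUTPUT allocation to a new ANativeWindow: releases any
         // previously attached surface, then connects to the new one, sets
         // usage/dimensions/format and maps the first buffer.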
    671 void rsdAllocationSetSurface(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    672 #ifndef RS_COMPATIBILITY_LIB
    673     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    674     ANativeWindow *old = drv->wndSurface;
    675 
    676     if (nw) {
    677         nw->incStrong(NULL);
    678     }
    679 
    680     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
    681         //TODO finish support for render target + script
    682         drv->wnd = nw;
    683         return;
    684     }
    685 
    686     // Cleanup old surface if there is one.
    687     if (drv->wndSurface) {
    688         ANativeWindow *old = drv->wndSurface;
    689         GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    690         mapper.unlock(drv->wndBuffer->handle);
    691         old->cancelBuffer(old, drv->wndBuffer, -1);
    692         drv->wndSurface = NULL;
    693 
    694         native_window_api_disconnect(old, NATIVE_WINDOW_API_CPU);
    695         old->decStrong(NULL);
    696     }
    697 
    698     if (nw != NULL) {
    699         int32_t r;
    700         uint32_t flags = 0;
    701 
    702         if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
    703             flags |= GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_OFTEN;
    704         }
    705         if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
    706             flags |= GRALLOC_USAGE_HW_RENDER;
    707         }
    708 
    709         r = native_window_api_connect(nw, NATIVE_WINDOW_API_CPU);
    710         if (r) {
     711             rsc->setError(RS_ERROR_DRIVER, "Error connecting to IO output surface.");
    712             goto error;
    713         }
    714 
    715         r = native_window_set_usage(nw, flags);
    716         if (r) {
    717             rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
    718             goto error;
    719         }
    720 
    721         r = native_window_set_buffers_dimensions(nw, alloc->mHal.drvState.lod[0].dimX,
    722                                                  alloc->mHal.drvState.lod[0].dimY);
    723         if (r) {
    724             rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer dimensions.");
    725             goto error;
    726         }
    727 
    728         int format = 0;
    729         const Element *e = alloc->mHal.state.type->getElement();
    730         rsAssert(e->getType() == RS_TYPE_UNSIGNED_8);
    731         rsAssert(e->getVectorSize() == 4);
    732         rsAssert(e->getKind() == RS_KIND_PIXEL_RGBA);
    733         format = PIXEL_FORMAT_RGBA_8888;
    734 
    735         r = native_window_set_buffers_format(nw, format);
    736         if (r) {
    737             rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer format.");
    738             goto error;
    739         }
    740 
    741         IoGetBuffer(rsc, alloc, nw);
    742         drv->wndSurface = nw;
    743     }
    744 
    745     return;
    746 
    747  error:
    748 
    749     if (nw) {
    750         nw->decStrong(NULL);
    751     }
    752 
    753 
    754 #endif
    755 }
    756 
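         // Queues the currently mapped buffer to the attached surface (or swaps
         // the EGL surface for render targets) and dequeues the next buffer for
         // script access.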
    757 void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
    758 #ifndef RS_COMPATIBILITY_LIB
    759     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    760     ANativeWindow *nw = drv->wndSurface;
    761     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
    762         RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    763         RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
    764         return;
    765     }
    766     if (nw) {
    767         if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
    768             GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    769             mapper.unlock(drv->wndBuffer->handle);
    770             int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
    771             if (r) {
    772                 rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
    773                 return;
    774             }
    775 
    776             IoGetBuffer(rsc, alloc, nw);
    777         }
    778     } else {
    779         rsc->setError(RS_ERROR_DRIVER, "Sent IO buffer with no attached surface.");
    780         return;
    781     }
    782 #endif
    783 }
    784 
    785 void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
    786 #ifndef RS_COMPATIBILITY_LIB
    787     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    788     if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
    789         drv->surfaceTexture->updateTexImage();
    790     }
    791 #endif
    792 }
    793 
    794 
    795 void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
    796                          uint32_t xoff, uint32_t lod, size_t count,
    797                          const void *data, size_t sizeBytes) {
    798     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    799 
    800     const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    801     uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    802     size_t size = count * eSize;
    803     if (ptr != data) {
    804         // Skip the copy if we are the same allocation. This can arise from
    805         // our Bitmap optimization, where we share the same storage.
    806         if (alloc->mHal.state.hasReferences) {
    807             alloc->incRefs(data, count);
    808             alloc->decRefs(ptr, count);
    809         }
    810         memcpy(ptr, data, size);
    811     }
    812     drv->uploadDeferred = true;
    813 }
    814 
    815 void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
    816                          uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
    817                          uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
    818     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    819 
    820     size_t eSize = alloc->mHal.state.elementSizeBytes;
    821     size_t lineSize = eSize * w;
    822     if (!stride) {
    823         stride = lineSize;
    824     }
    825 
    826     if (alloc->mHal.drvState.lod[0].mallocPtr) {
    827         const uint8_t *src = static_cast<const uint8_t *>(data);
    828         uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
    829         if (dst == src) {
    830             // Skip the copy if we are the same allocation. This can arise from
    831             // our Bitmap optimization, where we share the same storage.
    832             drv->uploadDeferred = true;
    833             return;
    834         }
    835 
    836         for (uint32_t line=yoff; line < (yoff+h); line++) {
    837             if (alloc->mHal.state.hasReferences) {
    838                 alloc->incRefs(src, w);
    839                 alloc->decRefs(dst, w);
    840             }
    841             memcpy(dst, src, lineSize);
    842             src += stride;
    843             dst += alloc->mHal.drvState.lod[lod].stride;
    844         }
    845         if (alloc->mHal.state.yuv) {
    846             size_t clineSize = lineSize;
    847             int lod = 1;
    848             int maxLod = 2;
    849             if (alloc->mHal.state.yuv == HAL_PIXEL_FORMAT_YV12) {
    850                 maxLod = 3;
    851                 clineSize >>= 1;
    852             } else if (alloc->mHal.state.yuv == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
    853                 lod = 2;
    854                 maxLod = 3;
    855             }
    856 
    857             while (lod < maxLod) {
    858                 uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
    859 
    860                 for (uint32_t line=(yoff >> 1); line < ((yoff+h)>>1); line++) {
    861                     memcpy(dst, src, clineSize);
    862                     src += alloc->mHal.drvState.lod[lod].stride;
    863                     dst += alloc->mHal.drvState.lod[lod].stride;
    864                 }
    865                 lod++;
    866             }
    867 
    868         }
    869         drv->uploadDeferred = true;
    870     } else {
    871         Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
    872     }
    873 }
    874 
    875 void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
    876                          uint32_t xoff, uint32_t yoff, uint32_t zoff,
    877                          uint32_t lod,
    878                          uint32_t w, uint32_t h, uint32_t d, const void *data,
    879                          size_t sizeBytes, size_t stride) {
    880     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    881 
    882     uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    883     uint32_t lineSize = eSize * w;
    884     if (!stride) {
    885         stride = lineSize;
    886     }
    887 
    888     if (alloc->mHal.drvState.lod[0].mallocPtr) {
    889         const uint8_t *src = static_cast<const uint8_t *>(data);
     890         for (uint32_t z = zoff; z < (zoff + d); z++) {
    891             uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, z, lod,
    892                                         RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    893             if (dst == src) {
    894                 // Skip the copy if we are the same allocation. This can arise from
    895                 // our Bitmap optimization, where we share the same storage.
    896                 drv->uploadDeferred = true;
    897                 return;
    898             }
    899 
    900             for (uint32_t line=yoff; line < (yoff+h); line++) {
    901                 if (alloc->mHal.state.hasReferences) {
    902                     alloc->incRefs(src, w);
    903                     alloc->decRefs(dst, w);
    904                 }
    905                 memcpy(dst, src, lineSize);
    906                 src += stride;
    907                 dst += alloc->mHal.drvState.lod[lod].stride;
    908             }
    909         }
    910         drv->uploadDeferred = true;
    911     }
    912 }
    913 
    914 void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
    915                          uint32_t xoff, uint32_t lod, size_t count,
    916                          void *data, size_t sizeBytes) {
    917     const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    918     const uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    919     if (data != ptr) {
    920         // Skip the copy if we are the same allocation. This can arise from
    921         // our Bitmap optimization, where we share the same storage.
    922         memcpy(data, ptr, count * eSize);
    923     }
    924 }
    925 
    926 void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
    927                                 uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
    928                                 uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
    929     size_t eSize = alloc->mHal.state.elementSizeBytes;
    930     size_t lineSize = eSize * w;
    931     if (!stride) {
    932         stride = lineSize;
    933     }
    934 
    935     if (alloc->mHal.drvState.lod[0].mallocPtr) {
    936         uint8_t *dst = static_cast<uint8_t *>(data);
    937         const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
    938         if (dst == src) {
    939             // Skip the copy if we are the same allocation. This can arise from
    940             // our Bitmap optimization, where we share the same storage.
    941             return;
    942         }
    943 
    944         for (uint32_t line=yoff; line < (yoff+h); line++) {
    945             memcpy(dst, src, lineSize);
    946             dst += stride;
    947             src += alloc->mHal.drvState.lod[lod].stride;
    948         }
    949     } else {
    950         ALOGE("Add code to readback from non-script memory");
    951     }
    952 }
    953 
    954 
    955 void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
    956                          uint32_t xoff, uint32_t yoff, uint32_t zoff,
    957                          uint32_t lod,
    958                          uint32_t w, uint32_t h, uint32_t d, void *data, size_t sizeBytes, size_t stride) {
    959     uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    960     uint32_t lineSize = eSize * w;
    961     if (!stride) {
    962         stride = lineSize;
    963     }
    964 
    965     if (alloc->mHal.drvState.lod[0].mallocPtr) {
    966         uint8_t *dst = static_cast<uint8_t *>(data);
     967         for (uint32_t z = zoff; z < (zoff + d); z++) {
    968             const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, z, lod,
    969                                               RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    970             if (dst == src) {
    971                 // Skip the copy if we are the same allocation. This can arise from
    972                 // our Bitmap optimization, where we share the same storage.
    973                 return;
    974             }
    975 
    976             for (uint32_t line=yoff; line < (yoff+h); line++) {
    977                 memcpy(dst, src, lineSize);
    978                 dst += stride;
    979                 src += alloc->mHal.drvState.lod[lod].stride;
    980             }
    981         }
    982     }
    983 }
    984 
    985 void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
    986                           const android::renderscript::Allocation *alloc) {
    987     return alloc->mHal.drvState.lod[0].mallocPtr;
    988 }
    989 
    990 void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
    991                           const android::renderscript::Allocation *alloc) {
    992 
    993 }
    994 
    995 void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
    996                                const android::renderscript::Allocation *dstAlloc,
    997                                uint32_t dstXoff, uint32_t dstLod, size_t count,
    998                                const android::renderscript::Allocation *srcAlloc,
    999                                uint32_t srcXoff, uint32_t srcLod) {
   1000 }
   1001 
   1002 
   1003 void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
   1004                                       const android::renderscript::Allocation *dstAlloc,
   1005                                       uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
   1006                                       RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
   1007                                       const android::renderscript::Allocation *srcAlloc,
   1008                                       uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
   1009                                       RsAllocationCubemapFace srcFace) {
   1010     size_t elementSize = dstAlloc->getType()->getElementSizeBytes();
   1011     for (uint32_t i = 0; i < h; i ++) {
   1012         uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, 0, dstLod, dstFace);
   1013         uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, 0, srcLod, srcFace);
   1014         memcpy(dstPtr, srcPtr, w * elementSize);
   1015 
   1016         //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
   1017         //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
   1018     }
   1019 }
   1020 
   1021 void rsdAllocationData3D_alloc_script(const android::renderscript::Context *rsc,
   1022                                       const android::renderscript::Allocation *dstAlloc,
   1023                                       uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff, uint32_t dstLod,
   1024                                       uint32_t w, uint32_t h, uint32_t d,
   1025                                       const android::renderscript::Allocation *srcAlloc,
   1026                                       uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff, uint32_t srcLod) {
   1027     uint32_t elementSize = dstAlloc->getType()->getElementSizeBytes();
   1028     for (uint32_t j = 0; j < d; j++) {
   1029         for (uint32_t i = 0; i < h; i ++) {
   1030             uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstZoff + j,
   1031                                            dstLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
   1032             uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcZoff + j,
   1033                                            srcLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
   1034             memcpy(dstPtr, srcPtr, w * elementSize);
   1035 
   1036             //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
   1037             //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
   1038         }
   1039     }
   1040 }
   1041 
   1042 void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
   1043                                const android::renderscript::Allocation *dstAlloc,
   1044                                uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
   1045                                RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
   1046                                const android::renderscript::Allocation *srcAlloc,
   1047                                uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
   1048                                RsAllocationCubemapFace srcFace) {
   1049     if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
   1050         rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
   1051                                              "yet implemented.");
   1052         return;
   1053     }
   1054     rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
   1055                                      dstLod, dstFace, w, h, srcAlloc,
   1056                                      srcXoff, srcYoff, srcLod, srcFace);
   1057 }
   1058 
   1059 void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
   1060                                const android::renderscript::Allocation *dstAlloc,
   1061                                uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
   1062                                uint32_t dstLod,
   1063                                uint32_t w, uint32_t h, uint32_t d,
   1064                                const android::renderscript::Allocation *srcAlloc,
   1065                                uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
   1066                                uint32_t srcLod) {
   1067     if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
   1068         rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
   1069                                              "yet implemented.");
   1070         return;
   1071     }
   1072     rsdAllocationData3D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff, dstZoff,
   1073                                      dstLod, w, h, d, srcAlloc,
   1074                                      srcXoff, srcYoff, srcZoff, srcLod);
   1075 }
   1076 
   1077 void rsdAllocationElementData1D(const Context *rsc, const Allocation *alloc,
   1078                                 uint32_t x,
   1079                                 const void *data, uint32_t cIdx, size_t sizeBytes) {
   1080     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
   1081 
   1082     size_t eSize = alloc->mHal.state.elementSizeBytes;
   1083     uint8_t * ptr = GetOffsetPtr(alloc, x, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
   1084 
   1085     const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
   1086     ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);
   1087 
   1088     if (alloc->mHal.state.hasReferences) {
   1089         e->incRefs(data);
   1090         e->decRefs(ptr);
   1091     }
   1092 
   1093     memcpy(ptr, data, sizeBytes);
   1094     drv->uploadDeferred = true;
   1095 }
   1096 
   1097 void rsdAllocationElementData2D(const Context *rsc, const Allocation *alloc,
   1098                                 uint32_t x, uint32_t y,
   1099                                 const void *data, uint32_t cIdx, size_t sizeBytes) {
   1100     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
   1101 
   1102     size_t eSize = alloc->mHal.state.elementSizeBytes;
   1103     uint8_t * ptr = GetOffsetPtr(alloc, x, y, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
   1104 
   1105     const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
   1106     ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);
   1107 
   1108     if (alloc->mHal.state.hasReferences) {
   1109         e->incRefs(data);
   1110         e->decRefs(ptr);
   1111     }
   1112 
   1113     memcpy(ptr, data, sizeBytes);
   1114     drv->uploadDeferred = true;
   1115 }
   1116 
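         // The mip* helpers below build the next mip level by 2x2 box-filtering
         // the previous level, for 565, 8888 and 8-bit single-channel formats.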
   1117 static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
   1118     uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
   1119     uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
   1120 
   1121     for (uint32_t y=0; y < h; y++) {
   1122         uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
    1123         const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
    1124         const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);
   1125 
   1126         for (uint32_t x=0; x < w; x++) {
   1127             *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
   1128             oPtr ++;
   1129             i1 += 2;
   1130             i2 += 2;
   1131         }
   1132     }
   1133 }
   1134 
   1135 static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
   1136     uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
   1137     uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
   1138 
   1139     for (uint32_t y=0; y < h; y++) {
   1140         uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
   1141         const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
   1142         const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);
   1143 
   1144         for (uint32_t x=0; x < w; x++) {
   1145             *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
   1146             oPtr ++;
   1147             i1 += 2;
   1148             i2 += 2;
   1149         }
   1150     }
   1151 }
   1152 
   1153 static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
   1154     uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
   1155     uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
   1156 
   1157     for (uint32_t y=0; y < h; y++) {
   1158         uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
   1159         const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
   1160         const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);
   1161 
   1162         for (uint32_t x=0; x < w; x++) {
   1163             *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
   1164             oPtr ++;
   1165             i1 += 2;
   1166             i2 += 2;
   1167         }
   1168     }
   1169 }
   1170 
   1171 void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
   1172     if(!alloc->mHal.drvState.lod[0].mallocPtr) {
   1173         return;
   1174     }
   1175     uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
   1176     for (uint32_t face = 0; face < numFaces; face ++) {
   1177         for (uint32_t lod=0; lod < (alloc->getType()->getLODCount() -1); lod++) {
   1178             switch (alloc->getType()->getElement()->getSizeBits()) {
   1179             case 32:
   1180                 mip8888(alloc, lod, (RsAllocationCubemapFace)face);
   1181                 break;
   1182             case 16:
   1183                 mip565(alloc, lod, (RsAllocationCubemapFace)face);
   1184                 break;
   1185             case 8:
   1186                 mip8(alloc, lod, (RsAllocationCubemapFace)face);
   1187                 break;
   1188             }
   1189         }
   1190     }
   1191 }
   1192 
   1193 uint32_t rsdAllocationGrallocBits(const android::renderscript::Context *rsc,
   1194                                   android::renderscript::Allocation *alloc)
   1195 {
   1196     return 0;
   1197 }
   1198 
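         // Stores the allocation pointer in the rs_allocation handle; on 64-bit
         // builds the base pointer, driver struct and LOD 0 stride are cached
         // there as well.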
   1199 void rsdAllocationUpdateCachedObject(const Context *rsc,
   1200                                      const Allocation *alloc,
   1201                                      rs_allocation *obj)
   1202 {
   1203     obj->p = alloc;
   1204 #ifdef __LP64__
   1205     if (alloc != NULL) {
   1206         obj->r = alloc->mHal.drvState.lod[0].mallocPtr;
   1207         obj->v1 = alloc->mHal.drv;
   1208         obj->v2 = (void *)alloc->mHal.drvState.lod[0].stride;
   1209     } else {
   1210         obj->r = NULL;
   1211         obj->v1 = NULL;
   1212         obj->v2 = NULL;
   1213     }
   1214 #endif
   1215 }
   1216 
   1217 
   1218