
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"

#include "nouveau_screen.h"
#include "nouveau_context.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "nouveau_buffer.h"
#include "nouveau_mm.h"

struct nouveau_transfer {
   struct pipe_transfer base;
};

static INLINE struct nouveau_transfer *
nouveau_transfer(struct pipe_transfer *transfer)
{
   return (struct nouveau_transfer *)transfer;
}

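/* Allocate backing storage for the buffer in the requested memory domain.
 * VRAM allocations fall back to GART if the VRAM suballocator fails.
 * Buffers that do not live in GART also keep a system memory shadow copy
 * in buf->data.
 */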
static INLINE boolean
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = buf->base.width0;

   if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
      size = align(size, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return FALSE;
   }
   if (domain != NOUVEAU_BO_GART) {
      if (!buf->data) {
         buf->data = MALLOC(buf->base.width0);
         if (!buf->data)
            return FALSE;
      }
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

   return TRUE;
}

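/* Defer freeing the suballocation until the given fence has signalled, so
 * the memory cannot be reused while the GPU may still be accessing it.
 */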
static INLINE void
release_allocation(struct nouveau_mm_allocation **mm,
                   struct nouveau_fence *fence)
{
   nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
   (*mm) = NULL;
}

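/* Drop the buffer's GPU storage: unreference the bo and schedule the
 * suballocation to be freed once the buffer's last fence has signalled.
 */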
INLINE void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
   nouveau_bo_ref(NULL, &buf->bo);

   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);

   buf->domain = 0;
}

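/* Replace the buffer's GPU storage with a fresh allocation in the given
 * domain; the old contents are not copied over.
 */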
static INLINE boolean
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   return nouveau_buffer_allocate(screen, buf, domain);
}

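/* Release the buffer's GPU storage, its system memory copy (unless it is
 * user-provided memory) and its fence references.
 */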
static void
nouveau_buffer_destroy(struct pipe_screen *pscreen,
                       struct pipe_resource *presource)
{
   struct nv04_resource *res = nv04_resource(presource);

   nouveau_buffer_release_gpu_storage(res);

   if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      FREE(res->data);

   nouveau_fence_ref(NULL, &res->fence);
   nouveau_fence_ref(NULL, &res->fence_wr);

   FREE(res);
}

/* Maybe just migrate to GART right away if we actually need to do this. */
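/* Read back a VRAM buffer into its system memory shadow copy by copying
 * through a GART bounce buffer; mapping the bounce buffer for reading
 * waits for the GPU copy to complete.
 */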
boolean
nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf,
                        unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   assert(buf->domain == NOUVEAU_BO_VRAM);

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size);

   if (nouveau_bo_map(bounce, NOUVEAU_BO_RD, nv->screen->client)) {
      /* don't leak the bounce buffer on failure; the copy may still be in
       * flight, so defer freeing the suballocation until the fence signals
       */
      nouveau_bo_ref(NULL, &bounce);
      if (mm)
         release_allocation(&mm, nv->screen->fence.current);
      return FALSE;
   }
   memcpy(buf->data + start, (uint8_t *)bounce->map + offset, size);

   buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      nouveau_mm_free(mm);
   return TRUE;
}

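/* Write the system memory copy of a VRAM buffer back to the GPU. Small
 * updates (<= 192 bytes) are pushed inline through the command stream if
 * the backend provides push_data/push_cb; larger updates go through a
 * GART bounce buffer and a GPU copy.
 */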
static boolean
nouveau_buffer_upload(struct nouveau_context *nv, struct nv04_resource *buf,
                      unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   if (size <= 192 && (nv->push_data || nv->push_cb)) {
      if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
         nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0,
                     start, size / 4, (const uint32_t *)(buf->data + start));
      else
         nv->push_data(nv, buf->bo, buf->offset + start, buf->domain,
                       size, buf->data + start);
      return TRUE;
   }

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   if (nouveau_bo_map(bounce, 0, nv->screen->client)) {
      /* bail out instead of writing through a NULL map */
      nouveau_bo_ref(NULL, &bounce);
      if (mm)
         nouveau_mm_free(mm);
      return FALSE;
   }
   memcpy((uint8_t *)bounce->map + offset, buf->data + start, size);

   nv->copy_data(nv, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                 bounce, offset, NOUVEAU_BO_GART, size);

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      release_allocation(&mm, nv->screen->fence.current);

   if (start == 0 && size == buf->base.width0)
      buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
   return TRUE;
}

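/* Create a transfer object for a buffer range. For VRAM buffers that may
 * have been written by the GPU, readback into the system memory copy
 * happens here, so that transfer_map can simply return a pointer into it.
 */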
static struct pipe_transfer *
nouveau_buffer_transfer_get(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box)
{
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *xfr = CALLOC_STRUCT(nouveau_transfer);
   if (!xfr)
      return NULL;

   xfr->base.resource = resource;
   xfr->base.box.x = box->x;
   xfr->base.box.width = box->width;
   xfr->base.usage = usage;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & PIPE_TRANSFER_READ) {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING)
            nouveau_buffer_download(nv, buf, 0, buf->base.width0);
      }
   }

   return &xfr->base;
}

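/* Finish a transfer: for write transfers of VRAM buffers, upload the
 * modified range from the system memory copy, and flag vertex/index
 * buffers so that vertex state gets revalidated.
 */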
static void
nouveau_buffer_transfer_destroy(struct pipe_context *pipe,
                                struct pipe_transfer *transfer)
{
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nouveau_context *nv = nouveau_context(pipe);

   if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
      if (buf->domain == NOUVEAU_BO_VRAM) {
         nouveau_buffer_upload(nv, buf, transfer->box.x, transfer->box.width);
      }

      if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
                                                 PIPE_BIND_INDEX_BUFFER)))
         nv->vbo_dirty = TRUE;
   }

   FREE(xfr);
}

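/* Wait for fenced GPU access to the buffer to complete: only writes for
 * read access, both reads and writes for write access.
 */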
static INLINE boolean
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence_wr))
         return FALSE;
   } else {
      if (!buf->fence)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence))
         return FALSE;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return TRUE;
}

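/* Check whether the GPU is still accessing the buffer in a way that would
 * conflict with the requested CPU access, without waiting.
 */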
static INLINE boolean
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ)
      return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
   else
      return (buf->fence && !nouveau_fence_signalled(buf->fence));
}

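/* Map a buffer range for CPU access. Buffers that do not live in GART are
 * backed by a system memory copy and can be returned directly; GART
 * buffers are mapped and, unless the transfer is unsynchronized,
 * synchronized with outstanding GPU work (or rejected if
 * PIPE_TRANSFER_DONTBLOCK is set and the buffer is busy).
 */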
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = xfr->base.box.x;
   uint32_t flags = 0;

   if (buf->domain != NOUVEAU_BO_GART)
      return buf->data + offset;

   if (!buf->mm)
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map(buf->bo, flags, nv->screen->client);
   if (ret)
      return NULL;
   map = (uint8_t *)bo->map + offset;

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
            return NULL;
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   return map;
}

static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
#if 0
   struct nv04_resource *res = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = res->bo;
   unsigned offset = res->offset + transfer->box.x + box->x;

   /* not using non-snoop system memory yet, no need for cflush */
   if (1)
      return;

   /* XXX: maybe need to upload for VRAM buffers here */
#endif
}

static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
}

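/* Return a CPU pointer into a buffer at the given byte offset, downloading
 * GPU-written VRAM contents into the system memory copy first and
 * synchronizing with outstanding GPU access where necessary.
 */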
void *
nouveau_resource_map_offset(struct nouveau_context *nv,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   if ((res->domain == NOUVEAU_BO_VRAM) &&
       (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
      nouveau_buffer_download(nv, res, 0, res->base.width0);

   if ((res->domain != NOUVEAU_BO_GART) ||
       (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   if (res->mm) {
      unsigned rw;
      rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ;
      nouveau_buffer_sync(res, rw);
      if (nouveau_bo_map(res->bo, 0, NULL))
         return NULL;
   } else {
      if (nouveau_bo_map(res->bo, flags, nv->screen->client))
         return NULL;
   }
   return (uint8_t *)res->bo->map + res->offset + offset;
}

const struct u_resource_vtbl nouveau_buffer_vtbl =
{
   u_default_resource_get_handle,        /* get_handle */
   nouveau_buffer_destroy,               /* resource_destroy */
   nouveau_buffer_transfer_get,          /* get_transfer */
   nouveau_buffer_transfer_destroy,      /* transfer_destroy */
   nouveau_buffer_transfer_map,          /* transfer_map */
   nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
   nouveau_buffer_transfer_unmap,        /* transfer_unmap */
   u_default_transfer_inline_write       /* transfer_inline_write */
};

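/* Create a buffer resource, choosing VRAM or GART placement from the bind
 * flags and usage hint, and allocate its initial storage.
 */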
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.bind &
       (screen->vidmem_bindings & screen->sysmem_bindings)) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
      case PIPE_USAGE_STATIC:
         buffer->domain = NOUVEAU_BO_VRAM;
         break;
      case PIPE_USAGE_DYNAMIC:
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NOUVEAU_BO_VRAM;
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }
   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (ret == FALSE)
      goto fail;

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}

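/* Wrap an application-owned pointer in a buffer resource without copying
 * it or allocating GPU storage; the memory is never freed by the driver.
 */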
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
                           unsigned bytes, unsigned bind)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->vtbl = &nouveau_buffer_vtbl;
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;

   return &buffer->base;
}

/* Like download, but for GART buffers. Merge ? */
static INLINE boolean
nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
                          struct nouveau_bo *bo, unsigned offset, unsigned size)
{
   if (!buf->data) {
      buf->data = MALLOC(size);
      if (!buf->data)
         return FALSE;
   }
   if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->screen->client))
      return FALSE;
   memcpy(buf->data, (uint8_t *)bo->map + offset, size);

   return TRUE;
}

/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map(buf->bo, 0, nv->screen->client);
      if (ret)
         return FALSE; /* boolean function, don't return the errno */
      memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size);
      FREE(buf->data);
      buf->data = NULL; /* don't leave a dangling pointer behind */
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
            return FALSE;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0))
         return FALSE;
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}

/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
boolean
nouveau_user_buffer_upload(struct nouveau_context *nv,
                           struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return FALSE;

   ret = nouveau_bo_map(buf->bo, 0, nv->screen->client);
   if (ret)
      return FALSE;
   memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);

   return TRUE;
}


/* Scratch data allocation. */

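/* Allocate a new mappable GART bo for use as scratch space. */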
static INLINE int
nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo,
                         unsigned size)
{
   return nouveau_bo_new(nv->screen->device, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                         4096, size, NULL, pbo);
}

void
nouveau_scratch_runout_release(struct nouveau_context *nv)
{
   if (!nv->scratch.nr_runout)
      return;
   do {
      --nv->scratch.nr_runout;
      nouveau_bo_ref(NULL, &nv->scratch.runout[nv->scratch.nr_runout]);
   } while (nv->scratch.nr_runout);

   FREE(nv->scratch.runout);
   nv->scratch.end = 0;
   nv->scratch.runout = NULL;
}

/* Allocate an extra bo if we can't fit everything we need simultaneously.
 * (Could happen for very large user arrays.)
 */
static INLINE boolean
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
   int ret;
   const unsigned n = nv->scratch.nr_runout++;

   nv->scratch.runout = REALLOC(nv->scratch.runout,
                                (n + 0) * sizeof(*nv->scratch.runout),
                                (n + 1) * sizeof(*nv->scratch.runout));
   nv->scratch.runout[n] = NULL;

   ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout[n], size);
   if (!ret) {
      ret = nouveau_bo_map(nv->scratch.runout[n], 0, NULL);
      if (ret)
         nouveau_bo_ref(NULL, &nv->scratch.runout[--nv->scratch.nr_runout]);
   }
   if (!ret) {
      nv->scratch.current = nv->scratch.runout[n];
      nv->scratch.offset = 0;
      nv->scratch.end = size;
      nv->scratch.map = nv->scratch.current->map;
   }
   return !ret;
}

/* Continue to next scratch buffer, if available (no wrapping, large enough).
 * Allocate it if it has not yet been created.
 */
static INLINE boolean
nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
{
   struct nouveau_bo *bo;
   int ret;
   const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;

   if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
      return FALSE;
   nv->scratch.id = i;

   bo = nv->scratch.bo[i];
   if (!bo) {
      ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
      if (ret)
         return FALSE;
      nv->scratch.bo[i] = bo;
   }
   nv->scratch.current = bo;
   nv->scratch.offset = 0;
   nv->scratch.end = nv->scratch.bo_size;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, nv->screen->client);
   if (!ret)
      nv->scratch.map = bo->map;
   return !ret;
}

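/* Make room for at least min_size bytes of scratch space, preferring the
 * next pre-allocated scratch buffer and falling back to a dedicated
 * runout allocation.
 */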
static boolean
nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
{
   boolean ret;

   ret = nouveau_scratch_next(nv, min_size);
   if (!ret)
      ret = nouveau_scratch_runout(nv, min_size);
   return ret;
}

/* Copy data to a scratch buffer and return address & bo the data resides in. */
uint64_t
nouveau_scratch_data(struct nouveau_context *nv,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **bo)
{
   unsigned bgn = MAX2(base, nv->scratch.offset);
   unsigned end = bgn + size;

   if (end >= nv->scratch.end) {
      end = base + size;
      if (!nouveau_scratch_more(nv, end))
         return 0;
      bgn = base;
   }
   nv->scratch.offset = align(end, 4);

   memcpy(nv->scratch.map + bgn, (const uint8_t *)data + base, size);

   *bo = nv->scratch.current;
   return (*bo)->offset + (bgn - base);
}

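/* Reserve size bytes of scratch space and return a CPU pointer to it,
 * along with the GPU address and the bo it resides in.
 */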
void *
nouveau_scratch_get(struct nouveau_context *nv,
                    unsigned size, uint64_t *gpu_addr, struct nouveau_bo **pbo)
{
   unsigned bgn = nv->scratch.offset;
   unsigned end = nv->scratch.offset + size;

   if (end >= nv->scratch.end) {
      end = size;
      if (!nouveau_scratch_more(nv, end))
         return NULL;
      bgn = 0;
   }
   nv->scratch.offset = align(end, 4);

   *pbo = nv->scratch.current;
   *gpu_addr = nv->scratch.current->offset + bgn;
   return nv->scratch.map + bgn;
}