#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"

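/* State for the CPU-side "push" vertex upload path: vertex data is run
 * through the translate module into a scratch buffer and drawn from there
 * with inline commands, instead of being fetched by the hardware from the
 * application's vertex buffers.
 */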
struct push_context {
   struct nouveau_pushbuf *push;

   struct translate *translate;
   void *dest;
   const void *idxbuf;

   uint32_t vertex_size;
   uint32_t restart_index;
   uint32_t start_instance;
   uint32_t instance_id;

   bool prim_restart;
   bool need_vertex_id;

   struct {
      bool enabled;
      bool value;
      uint8_t width;
      unsigned stride;
      const uint8_t *data;
   } edgeflag;
};

static void nvc0_push_upload_vertex_ids(struct push_context *,
                                        struct nvc0_context *,
                                        const struct pipe_draw_info *);

static void
nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)
{
   ctx->push = nvc0->base.pushbuf;

   ctx->translate = nvc0->vertex->translate;
   ctx->vertex_size = nvc0->vertex->size;
   ctx->instance_id = 0;

   ctx->need_vertex_id =
      nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);

   ctx->edgeflag.value = true;
   ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;

   /* silence warnings */
   ctx->edgeflag.data = NULL;
   ctx->edgeflag.stride = 0;
   ctx->edgeflag.width = 0;
}

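/* Point the translate object at the (mapped) source vertex buffers.
 * index_bias is folded into the buffer pointers of per-vertex (not
 * per-instance) buffers here, so VB_ELEMENT_BASE can stay at zero.
 */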
static inline void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned i;

   for (i = 0; i < nvc0->num_vtxbufs; ++i) {
      const uint8_t *map;
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];

      if (likely(!vb->buffer))
         map = (const uint8_t *)vb->user_buffer;
      else
         map = nouveau_resource_map_offset(&nvc0->base,
            nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);

      if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
         map += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, i, map, vb->stride, ~0);
   }
}

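/* Map the index buffer for CPU access; user-space index buffers are used
 * directly.
 */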
static inline void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
{
   if (nvc0->idxbuf.buffer) {
      struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
      ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
         buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD);
   } else {
      ctx->idxbuf = nvc0->idxbuf.user_buffer;
   }
}

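/* Map the vertex attribute that carries the edge flag so it can be
 * inspected on the CPU while emitting vertices; index_bias is applied the
 * same way as for the vertex buffers above.
 */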
static inline void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer);

   ctx->edgeflag.stride = vb->stride;
   ctx->edgeflag.width = util_format_get_blocksize(ve->src_format);
   if (buf) {
      unsigned offset = vb->buffer_offset + ve->src_offset;
      ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
                           buf, offset, NOUVEAU_BO_RD);
   } else {
      ctx->edgeflag.data = (const uint8_t *)vb->user_buffer + ve->src_offset;
   }

   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}

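/* Return how many of the given indices can be drawn before the primitive
 * restart index is hit (one helper per index size).
 */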
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

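/* Edge flag helpers: ef_value_* read the flag of a single vertex,
 * ef_toggle flips the currently programmed state, and ef_toggle_search_*
 * count how many consecutive vertices keep the current flag value, so the
 * EDGEFLAG method only needs to be emitted where the flag changes.
 */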
static inline bool
ef_value_8(const struct push_context *ctx, uint32_t index)
{
   uint8_t *pf = (uint8_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_value_32(const struct push_context *ctx, uint32_t index)
{
   uint32_t *pf = (uint32_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_toggle(struct push_context *ctx)
{
   ctx->edgeflag.value = !ctx->edgeflag.value;
   return ctx->edgeflag.value;
}

static inline unsigned
ef_toggle_search_i08(struct push_context *ctx, const uint8_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i16(struct push_context *ctx, const uint16_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i32(struct push_context *ctx, const uint32_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_seq(struct push_context *ctx, unsigned start, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, start++) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, start++) == ef; ++i);
   return i;
}

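/* Allocate scratch memory for the translated vertices and point vertex
 * array 0 at it; returns the CPU pointer the translate run writes to.
 */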
static inline void *
nvc0_push_setup_vertex_array(struct nvc0_context *nvc0, const unsigned count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo;
   uint64_t va;
   const unsigned size = count * nvc0->vertex->size;

   void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   PUSH_DATAh(push, va + size - 1);
   PUSH_DATA (push, va + size - 1);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   return dest;
}

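/* Emit a draw from 8-bit indices: translate the vertices into the scratch
 * array, then emit them as contiguous ranges, splitting a range wherever
 * the edge flag toggles and restarting after the primitive restart index.
 */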
static void
disp_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i08(elts, nR, ctx->restart_index);

      translate->run_elts8(translate, elts, nR,
                           ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i08(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

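/* Same as disp_vertices_i08, but for 16-bit indices. */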
static void
disp_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i16(elts, nR, ctx->restart_index);

      translate->run_elts16(translate, elts, nR,
                            ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i16(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

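/* Same as disp_vertices_i08, but for 32-bit indices. */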
static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      translate->run_elts(translate, elts, nR,
                          ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

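/* Non-indexed variant: translate a linear range of vertices and emit it,
 * splitting the range only where the edge flag toggles.
 */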
static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   /* XXX: This will read the data corresponding to the primitive restart index,
    *  maybe we should avoid that ?
    */
   translate->run(translate, start, count,
                  ctx->start_instance, ctx->instance_id, ctx->dest);
   do {
      unsigned nr = count;

      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}


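/* Map gallium PIPE_PRIM_* topologies to the hardware's VERTEX_BEGIN_GL
 * primitive codes; unknown values fall back to POINTS.
 */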
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

static inline unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(PATCHES);
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}

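/* Entry point of the push/translate draw fallback: set up the translate
 * pass, primitive restart and edge flag state, then emit one
 * VERTEX_BEGIN_GL/VERTEX_END_GL pair per instance, uploading the
 * translated vertices into scratch memory for each one.
 */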
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
       * We could also deactivate PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->indexed ? 0xffffffff : info->restart_index);
   } else
   if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->indexed) {
      nvc0_push_map_idxbuf(&ctx, nvc0);
      index_size = nvc0->idxbuf.index_size;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.start_instance = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);


   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->indexed)
      nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}

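/* Widen indices to 32 bit while applying the index bias, for the vertex
 * ID upload below.
 */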
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

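/* The push path re-packs vertices into a scratch array, so the
 * hardware-generated vertex ID would no longer match the original
 * indices. When the vertex shader needs it, upload the real (biased)
 * indices as an extra vertex attribute and use VERTEX_ID_REPLACE to
 * source the vertex ID from that attribute instead.
 */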
static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
                            struct nvc0_context *nvc0,
                            const struct pipe_draw_info *info)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct nouveau_bo *bo;
   uint64_t va;
   uint32_t *data;
   uint32_t format;
   unsigned index_size = nvc0->idxbuf.index_size;
   unsigned i;
   unsigned a = nvc0->vertex->num_elements;

   if (!index_size || info->index_bias)
      index_size = 4;
   data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
                                          info->count * index_size, &va, &bo);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   if (info->indexed) {
      if (!info->index_bias) {
         memcpy(data, ctx->idxbuf, info->count * index_size);
      } else {
         switch (nvc0->idxbuf.index_size) {
         case 1:
            copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         case 2:
            copy_indices_u16(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         default:
            copy_indices_u32(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         }
      }
   } else {
      for (i = 0; i < info->count; ++i)
         data[i] = i + (info->start + info->index_bias);
   }

   format = (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
      NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT;

   switch (index_size) {
   case 1:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8;
      break;
   case 2:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16;
      break;
   default:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32;
      break;
   }

   PUSH_SPACE(push, 12);

   if (unlikely(nvc0->state.instance_elts & 2)) {
      nvc0->state.instance_elts &= ~2;
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
   }

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
   PUSH_DATA (push, format);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
   PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | index_size);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   PUSH_DATAh(push, va + info->count * index_size - 1);
   PUSH_DATA (push, va + info->count * index_size - 1);

#define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
   (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_REPLACE), 1);
   PUSH_DATA (push, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) | 1);
}