/*
 * Copyright 2008 Ben Skeggs
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"

#include "nv50/nv50_context.h"
#include "nv50/nv50_query_hw.h"

#include "nv50/nv50_compute.xml.h"

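/* Re-emit dirty constant buffer bindings for the vertex, geometry and
 * fragment stages. User (inline) constant buffers are only supported in
 * slot 0 and are uploaded through CB_ADDR/CB_DATA; buffer resources are
 * bound via CB_DEF_ADDRESS_HIGH and SET_PROGRAM_CB.
 */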
void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   unsigned s;

   for (s = 0; s < 3; ++s) {
      unsigned p;

      if (s == PIPE_SHADER_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == PIPE_SHADER_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      while (nv50->constbuf_dirty[s]) {
         const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;

         assert(i < NV50_MAX_PIPE_CONSTBUFS);
         nv50->constbuf_dirty[s] &= ~(1 << i);

         if (nv50->constbuf[s][i].user) {
            const unsigned b = NV50_CB_PVP + s;
            unsigned start = 0;
            unsigned words = nv50->constbuf[s][0].size / 4;
            if (i) {
               NOUVEAU_ERR("user constbufs only supported in slot 0\n");
               continue;
            }
            if (!nv50->state.uniform_buffer_bound[s]) {
               nv50->state.uniform_buffer_bound[s] = true;
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
            }
            while (words) {
               unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN);

               PUSH_SPACE(push, nr + 3);
               BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
               PUSH_DATA (push, (start << 8) | b);
               BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
               PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);

               start += nr;
               words -= nr;
            }
         } else {
            struct nv04_resource *res =
               nv04_resource(nv50->constbuf[s][i].u.buf);
            if (res) {
               /* TODO: allocate persistent bindings */
               const unsigned b = s * 16 + i;

               assert(nouveau_resource_mapped_by_gpu(&res->base));

               BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
               PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, (b << 16) |
                          (nv50->constbuf[s][i].size & 0xffff));
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);

               BCTX_REFN(nv50->bufctx_3d, 3D_CB(s, i), res, RD);

               nv50->cb_dirty = 1; /* Force cache flush for UBO. */
               res->cb_bindings[s] |= 1 << i;
            } else {
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (i << 8) | p | 0);
            }
            if (i == 0)
               nv50->state.uniform_buffer_bound[s] = false;
         }
      }
   }
}

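/* Translate the program on first use and make sure its code has been
 * uploaded to the code segment.
 */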
static bool
nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog)
{
   if (!prog->translated) {
      prog->translated = nv50_program_translate(
         prog, nv50->screen->base.device->chipset, &nv50->base.debug);
      if (!prog->translated)
         return false;
   } else
   if (prog->mem)
      return true;

   return nv50_program_upload_code(nv50, prog);
}

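/* Track which stages need thread-local (scratch) storage and keep the TLS
 * buffer referenced on the 3D buffer context while any stage requires it.
 */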
static inline void
nv50_program_update_context_state(struct nv50_context *nv50,
                                  struct nv50_program *prog, int stage)
{
   const unsigned flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;

   if (prog && prog->tls_space) {
      if (nv50->state.new_tls_space)
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TLS);
      if (!nv50->state.tls_required || nv50->state.new_tls_space)
         BCTX_REFN_bo(nv50->bufctx_3d, 3D_TLS, flags, nv50->screen->tls_bo);
      nv50->state.new_tls_space = false;
      nv50->state.tls_required |= 1 << stage;
   } else {
      if (nv50->state.tls_required == (1 << stage))
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TLS);
      nv50->state.tls_required &= ~(1 << stage);
   }
}

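/* Validate the vertex program and emit its attribute masks, register
 * allocation and code entry point.
 */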
void
nv50_vertprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;

   if (!nv50_program_validate(nv50, vp))
      return;
   nv50_program_update_context_state(nv50, vp, 0);

   BEGIN_NV04(push, NV50_3D(VP_ATTR_EN(0)), 2);
   PUSH_DATA (push, vp->vp.attrs[0]);
   PUSH_DATA (push, vp->vp.attrs[1]);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_RESULT), 1);
   PUSH_DATA (push, vp->max_out);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, vp->max_gpr);
   BEGIN_NV04(push, NV50_3D(VP_START_ID), 1);
   PUSH_DATA (push, vp->code_base);
}

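/* Validate the fragment program. The alpha test function is baked into the
 * shader when the current RT0 is not blendable, and the program is
 * re-uploaded when the alpha test function or force_persample_interp
 * setting changes.
 */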
void
nv50_fragprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *fp = nv50->fragprog;
   struct pipe_rasterizer_state *rast = &nv50->rast->pipe;

   if (nv50->zsa && nv50->zsa->pipe.alpha.enabled) {
      struct pipe_framebuffer_state *fb = &nv50->framebuffer;
      bool blendable = fb->nr_cbufs == 0 || !fb->cbufs[0] ||
         nv50->screen->base.base.is_format_supported(
               &nv50->screen->base.base,
               fb->cbufs[0]->format,
               fb->cbufs[0]->texture->target,
               fb->cbufs[0]->texture->nr_samples,
               PIPE_BIND_BLENDABLE);
      /* If we already have alphatest code, we have to keep updating
       * it. However we only have to have different code if the current RT0 is
       * non-blendable. Otherwise we just set it to always pass and use the
       * hardware alpha test.
       */
      if (fp->fp.alphatest || !blendable) {
         uint8_t alphatest = PIPE_FUNC_ALWAYS + 1;
         if (!blendable)
            alphatest = nv50->zsa->pipe.alpha.func + 1;
         if (!fp->fp.alphatest)
            nv50_program_destroy(nv50, fp);
         else if (fp->mem && fp->fp.alphatest != alphatest)
            nouveau_heap_free(&fp->mem);

         fp->fp.alphatest = alphatest;
      }
   } else if (fp->fp.alphatest && fp->fp.alphatest != PIPE_FUNC_ALWAYS + 1) {
      /* Alpha test is disabled but we have a shader where it's filled
       * in. Make sure to reset the function to 'always', otherwise it'll end
       * up discarding fragments incorrectly.
       */
      if (fp->mem)
         nouveau_heap_free(&fp->mem);

      fp->fp.alphatest = PIPE_FUNC_ALWAYS + 1;
   }

   if (fp->fp.force_persample_interp != rast->force_persample_interp) {
      /* Force the program to be reuploaded, which will trigger interp fixups
       * to get applied
       */
      if (fp->mem)
         nouveau_heap_free(&fp->mem);

      fp->fp.force_persample_interp = rast->force_persample_interp;
   }

   if (fp->mem && !(nv50->dirty_3d & (NV50_NEW_3D_FRAGPROG | NV50_NEW_3D_MIN_SAMPLES)))
      return;

   if (!nv50_program_validate(nv50, fp))
      return;
   nv50_program_update_context_state(nv50, fp, 1);

   BEGIN_NV04(push, NV50_3D(FP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, fp->max_gpr);
   BEGIN_NV04(push, NV50_3D(FP_RESULT_COUNT), 1);
   PUSH_DATA (push, fp->max_out);
   BEGIN_NV04(push, NV50_3D(FP_CONTROL), 1);
   PUSH_DATA (push, fp->fp.flags[0]);
   BEGIN_NV04(push, NV50_3D(FP_CTRL_UNK196C), 1);
   PUSH_DATA (push, fp->fp.flags[1]);
   BEGIN_NV04(push, NV50_3D(FP_START_ID), 1);
   PUSH_DATA (push, fp->code_base);

   if (nv50->screen->tesla->oclass >= NVA3_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NVA3_3D_FP_MULTISAMPLE), 1);
      if (nv50->min_samples > 1 || fp->fp.has_samplemask)
         PUSH_DATA(push,
                   NVA3_3D_FP_MULTISAMPLE_FORCE_PER_SAMPLE |
                   (NVA3_3D_FP_MULTISAMPLE_EXPORT_SAMPLE_MASK *
                    fp->fp.has_samplemask));
      else
         PUSH_DATA(push, 0);
   }
}

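/* Validate the geometry program, if present, and emit its register
 * allocation, output primitive type and output vertex count.
 */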
void
nv50_gmtyprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *gp = nv50->gmtyprog;

   if (gp) {
      if (!nv50_program_validate(nv50, gp))
         return;
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_TEMP), 1);
      PUSH_DATA (push, gp->max_gpr);
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_RESULT), 1);
      PUSH_DATA (push, gp->max_out);
      BEGIN_NV04(push, NV50_3D(GP_OUTPUT_PRIMITIVE_TYPE), 1);
      PUSH_DATA (push, gp->gp.prim_type);
      BEGIN_NV04(push, NV50_3D(GP_VERTEX_OUTPUT_COUNT), 1);
      PUSH_DATA (push, gp->gp.vert_count);
      BEGIN_NV04(push, NV50_3D(GP_START_ID), 1);
      PUSH_DATA (push, gp->code_base);

      nv50->state.prim_size = gp->gp.prim_type; /* enum matches vertex count */
   }
   nv50_program_update_context_state(nv50, gp, 2);

   /* GP_ENABLE is updated in linkage validation */
}

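/* Validate the compute program and flush the code/constbuf cache
 * (CODE_CB_FLUSH) so freshly uploaded code is picked up.
 */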
void
nv50_compprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *cp = nv50->compprog;

   if (cp && !nv50_program_validate(nv50, cp))
      return;

   BEGIN_NV04(push, NV50_CP(CODE_CB_FLUSH), 1);
   PUSH_DATA (push, 0);
}

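/* Program point sprite coordinate replacement from the rasterizer state and
 * the fragment program's generic inputs.
 */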
static void
nv50_sprite_coords_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t pntc[8], mode;
   struct nv50_program *fp = nv50->fragprog;
   unsigned i, c;
   unsigned m = (nv50->state.interpolant_ctrl >> 8) & 0xff;

   if (!nv50->rast->pipe.point_quad_rasterization) {
      if (nv50->state.point_sprite) {
         BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
         for (i = 0; i < 8; ++i)
            PUSH_DATA(push, 0);

         nv50->state.point_sprite = false;
      }
      return;
   } else {
      nv50->state.point_sprite = true;
   }

   memset(pntc, 0, sizeof(pntc));

   for (i = 0; i < fp->in_nr; i++) {
      unsigned n = util_bitcount(fp->in[i].mask);

      if (fp->in[i].sn != TGSI_SEMANTIC_GENERIC) {
         m += n;
         continue;
      }
      if (!(nv50->rast->pipe.sprite_coord_enable & (1 << fp->in[i].si))) {
         m += n;
         continue;
      }

      for (c = 0; c < 4; ++c) {
         if (fp->in[i].mask & (1 << c)) {
            pntc[m / 8] |= (c + 1) << ((m % 8) * 4);
            ++m;
         }
      }
   }

   if (nv50->rast->pipe.sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT)
      mode = 0x00;
   else
      mode = 0x10;

   BEGIN_NV04(push, NV50_3D(POINT_SPRITE_CTRL), 1);
   PUSH_DATA (push, mode);

   BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
   PUSH_DATAp(push, pntc, 8);
}

/* Validate state derived from shaders and the rasterizer cso. */
void
nv50_validate_derived_rs(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t color, psize;

   nv50_sprite_coords_validate(nv50);

   if (nv50->state.rasterizer_discard != nv50->rast->pipe.rasterizer_discard) {
      nv50->state.rasterizer_discard = nv50->rast->pipe.rasterizer_discard;
      BEGIN_NV04(push, NV50_3D(RASTERIZE_ENABLE), 1);
      PUSH_DATA (push, !nv50->rast->pipe.rasterizer_discard);
   }

   if (nv50->dirty_3d & NV50_NEW_3D_FRAGPROG)
      return;
   psize = nv50->state.semantic_psize & ~NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;
   color = nv50->state.semantic_color & ~NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (nv50->rast->pipe.clamp_vertex_color)
      color |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (color != nv50->state.semantic_color) {
      nv50->state.semantic_color = color;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_COLOR), 1);
      PUSH_DATA (push, color);
   }

   if (nv50->rast->pipe.point_size_per_vertex)
      psize |= NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;

   if (psize != nv50->state.semantic_psize) {
      nv50->state.semantic_psize = psize;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_PTSZ), 1);
      PUSH_DATA (push, psize);
   }
}

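/* Fill in the result map entries for one FP input vec4: record the hw index
 * of each matching VP output component, keep the default fill value for
 * missing components (the .w component gets the |1 variant), and mark
 * linearly interpolated slots in lin[].
 */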
static int
nv50_vec4_map(uint8_t *map, int mid, uint32_t lin[4],
              struct nv50_varying *in, struct nv50_varying *out)
{
   int c;
   uint8_t mv = out->mask, mf = in->mask, oid = out->hw;

   for (c = 0; c < 4; ++c) {
      if (mf & 1) {
         if (in->linear)
            lin[mid / 32] |= 1 << (mid % 32);
         if (mv & 1)
            map[mid] = oid;
         else
         if (c == 3)
            map[mid] |= 1;
         ++mid;
      }

      oid += mv & 1;
      mf >>= 1;
      mv >>= 1;
   }

   return mid;
}

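/* Build and emit the VP (or GP) result map that routes last-stage outputs to
 * FP inputs: position, clip distances, colors (including two-sided lighting),
 * generic varyings, point size, layer/viewport index, and the stream output
 * map when transform feedback is active.
 */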
void
nv50_fp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->gmtyprog ? nv50->gmtyprog : nv50->vertprog;
   struct nv50_program *fp = nv50->fragprog;
   struct nv50_varying dummy;
   int i, n, c, m;
   uint32_t primid = 0;
   uint32_t layerid = 0;
   uint32_t viewportid = 0;
   uint32_t psiz = 0x000;
   uint32_t interp = fp->fp.interp;
   uint32_t colors = fp->fp.colors;
   uint32_t clpd_nr = util_last_bit(vp->vp.clip_enable | vp->vp.cull_enable);
   uint32_t lin[4];
   uint8_t map[64];
   uint8_t so_map[64];

   if (!(nv50->dirty_3d & (NV50_NEW_3D_VERTPROG |
                           NV50_NEW_3D_FRAGPROG |
                           NV50_NEW_3D_GMTYPROG))) {
      uint8_t bfc, ffc;
      ffc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_FFC0_ID__MASK);
      bfc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_BFC0_ID__MASK)
         >> 8;
      if (nv50->rast->pipe.light_twoside == ((ffc == bfc) ? 0 : 1))
         return;
   }

   memset(lin, 0x00, sizeof(lin));

   /* XXX: in buggy-endian mode, is the first element of map (u32)0x000000xx
    *  or is it the first byte ?
    */
   memset(map, nv50->gmtyprog ? 0x80 : 0x40, sizeof(map));

   dummy.mask = 0xf; /* map all components of HPOS */
   dummy.linear = 0;
   m = nv50_vec4_map(map, 0, lin, &dummy, &vp->out[0]);

   for (c = 0; c < clpd_nr; ++c)
      map[m++] = vp->vp.clpd[c / 4] + (c % 4);

   colors |= m << 8; /* adjust BFC0 id */

   dummy.mask = 0x0;

   /* if light_twoside is active, FFC0_ID == BFC0_ID is invalid */
   if (nv50->rast->pipe.light_twoside) {
      for (i = 0; i < 2; ++i) {
         n = vp->vp.bfc[i];
         if (fp->vp.bfc[i] >= fp->in_nr)
            continue;
         m = nv50_vec4_map(map, m, lin, &fp->in[fp->vp.bfc[i]],
                           (n < vp->out_nr) ? &vp->out[n] : &dummy);
      }
   }
   colors += m - 4; /* adjust FFC0 id */
   interp |= m << 8; /* set map id where 'normal' FP inputs start */

   for (i = 0; i < fp->in_nr; ++i) {
      for (n = 0; n < vp->out_nr; ++n)
         if (vp->out[n].sn == fp->in[i].sn &&
             vp->out[n].si == fp->in[i].si)
            break;
      switch (fp->in[i].sn) {
      case TGSI_SEMANTIC_PRIMID:
         primid = m;
         break;
      case TGSI_SEMANTIC_LAYER:
         layerid = m;
         break;
      case TGSI_SEMANTIC_VIEWPORT_INDEX:
         viewportid = m;
         break;
      }
      m = nv50_vec4_map(map, m, lin,
                        &fp->in[i], (n < vp->out_nr) ? &vp->out[n] : &dummy);
   }

   if (vp->gp.has_layer && !layerid) {
      layerid = m;
      map[m++] = vp->gp.layerid;
   }

   if (vp->gp.has_viewport && !viewportid) {
      viewportid = m;
      map[m++] = vp->gp.viewportid;
   }

   if (nv50->rast->pipe.point_size_per_vertex) {
      psiz = (m << 4) | 1;
      map[m++] = vp->vp.psiz;
   }

   if (nv50->rast->pipe.clamp_vertex_color)
      colors |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (unlikely(vp->so)) {
      /* Slot i in STRMOUT_MAP specifies the offset where slot i in RESULT_MAP
       * gets written.
       *
       * TODO:
       * Inverting vp->so->map (output -> offset) would probably speed this up.
       */
      memset(so_map, 0, sizeof(so_map));
      for (i = 0; i < vp->so->map_size; ++i) {
         if (vp->so->map[i] == 0xff)
            continue;
         for (c = 0; c < m; ++c)
            if (map[c] == vp->so->map[i] && !so_map[c])
               break;
         if (c == m) {
            c = m;
            map[m++] = vp->so->map[i];
         }
         so_map[c] = 0x80 | i;
      }
      for (c = m; c & 3; ++c)
         so_map[c] = 0;
   }

   n = (m + 3) / 4;
   assert(m <= 64);

   if (unlikely(nv50->gmtyprog)) {
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   } else {
      BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
      PUSH_DATA (push, vp->vp.attrs[2] | fp->vp.attrs[2]);

      BEGIN_NV04(push, NV50_3D(SEMANTIC_PRIM_ID), 1);
      PUSH_DATA (push, primid);

      assert(m > 0);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   }

   BEGIN_NV04(push, NV50_3D(GP_VIEWPORT_ID_ENABLE), 5);
   PUSH_DATA (push, vp->gp.has_viewport);
   PUSH_DATA (push, colors);
   PUSH_DATA (push, (clpd_nr << 8) | 4);
   PUSH_DATA (push, layerid);
   PUSH_DATA (push, psiz);

   BEGIN_NV04(push, NV50_3D(SEMANTIC_VIEWPORT), 1);
   PUSH_DATA (push, viewportid);

   BEGIN_NV04(push, NV50_3D(LAYER), 1);
   PUSH_DATA (push, vp->gp.has_layer << 16);

   BEGIN_NV04(push, NV50_3D(FP_INTERPOLANT_CTRL), 1);
   PUSH_DATA (push, interp);

   nv50->state.interpolant_ctrl = interp;

   nv50->state.semantic_color = colors;
   nv50->state.semantic_psize = psiz;

   BEGIN_NV04(push, NV50_3D(NOPERSPECTIVE_BITMAP(0)), 4);
   PUSH_DATAp(push, lin, 4);

   BEGIN_NV04(push, NV50_3D(GP_ENABLE), 1);
   PUSH_DATA (push, nv50->gmtyprog ? 1 : 0);

   if (vp->so) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_MAP(0)), n);
      PUSH_DATAp(push, so_map, n);
   }
}

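/* Build the result map routing VP outputs to GP inputs; GP input components
 * without a matching VP output get the constant defaults (0x40/0x41).
 */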
static int
nv50_vp_gp_mapping(uint8_t *map, int m,
                   struct nv50_program *vp, struct nv50_program *gp)
{
   int i, j, c;

   for (i = 0; i < gp->in_nr; ++i) {
      uint8_t oid = 0, mv = 0, mg = gp->in[i].mask;

      for (j = 0; j < vp->out_nr; ++j) {
         if (vp->out[j].sn == gp->in[i].sn &&
             vp->out[j].si == gp->in[i].si) {
            mv = vp->out[j].mask;
            oid = vp->out[j].hw;
            break;
         }
      }

      for (c = 0; c < 4; ++c, mv >>= 1, mg >>= 1) {
         if (mg & mv & 1)
            map[m++] = oid;
         else
         if (mg & 1)
            map[m++] = (c == 3) ? 0x41 : 0x40;
         oid += mv & 1;
      }
   }
   if (!m)
      map[m++] = 0;
   return m;
}

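/* Emit the VP result map used to feed the geometry program. */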
void
nv50_gp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;
   struct nv50_program *gp = nv50->gmtyprog;
   int m = 0;
   int n;
   uint8_t map[64];

   if (!gp)
      return;
   memset(map, 0, sizeof(map));

   m = nv50_vp_gp_mapping(map, m, vp, gp);

   n = (m + 3) / 4;

   BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
   PUSH_DATA (push, vp->vp.attrs[2] | gp->vp.attrs[2]);

   assert(m > 0);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
   PUSH_DATA (push, m);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
   PUSH_DATAp(push, map, n);
}

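/* Set up transform feedback: bind the target buffers, restore buffer offsets
 * from query results on nva0+ (or reset them to 0), and derive the primitive
 * limit from the buffer sizes on older chips.
 */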
void
nv50_stream_output_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_stream_output_state *so;
   uint32_t ctrl;
   unsigned i;
   unsigned prims = ~0;

   so = nv50->gmtyprog ? nv50->gmtyprog->so : nv50->vertprog->so;

   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 0);
   if (!so || !nv50->num_so_targets) {
      if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
         BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
         PUSH_DATA (push, 0);
      }
      BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
      PUSH_DATA (push, 1);
      return;
   }

   /* previous TFB needs to complete */
   if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   ctrl = so->ctrl;
   if (nv50->screen->base.class_3d >= NVA0_3D_CLASS)
      ctrl |= NVA0_3D_STRMOUT_BUFFERS_CTRL_LIMIT_MODE_OFFSET;

   BEGIN_NV04(push, NV50_3D(STRMOUT_BUFFERS_CTRL), 1);
   PUSH_DATA (push, ctrl);

   for (i = 0; i < nv50->num_so_targets; ++i) {
      struct nv50_so_target *targ = nv50_so_target(nv50->so_target[i]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      const unsigned n = nv50->screen->base.class_3d >= NVA0_3D_CLASS ? 4 : 3;

      if (n == 4 && !targ->clean)
         nv84_hw_query_fifo_wait(push, nv50_query(targ->pq));
      BEGIN_NV04(push, NV50_3D(STRMOUT_ADDRESS_HIGH(i)), n);
      PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, so->num_attribs[i]);
      if (n == 4) {
         PUSH_DATA(push, targ->pipe.buffer_size);
         if (!targ->clean) {
            assert(targ->pq);
            nv50_hw_query_pushbuf_submit(push, NVA0_3D_STRMOUT_OFFSET(i),
                                         nv50_query(targ->pq), 0x4);
         } else {
            BEGIN_NV04(push, NVA0_3D(STRMOUT_OFFSET(i)), 1);
            PUSH_DATA(push, 0);
            targ->clean = false;
         }
      } else {
         const unsigned limit = targ->pipe.buffer_size /
            (so->stride[i] * nv50->state.prim_size);
         prims = MIN2(prims, limit);
      }
      targ->stride = so->stride[i];
      BCTX_REFN(nv50->bufctx_3d, 3D_SO, buf, WR);
   }
   if (prims != ~0) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
      PUSH_DATA (push, prims);
   }
   BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 1);
}
    729