/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "util/u_inlines.h"
#include "pipe/p_defines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_bitmask.h"
#include "translate/translate.h"
#include "tgsi/tgsi_ureg.h"

#include "svga_context.h"
#include "svga_state.h"
#include "svga_cmd.h"
#include "svga_shader.h"
#include "svga_tgsi.h"

#include "svga_hw_reg.h"


/**
 * If we fail to compile a vertex shader we'll use a dummy/fallback shader
 * that simply emits a (0,0,0,1) vertex position.
 */
static const struct tgsi_token *
get_dummy_vertex_shader(void)
{
   static const float zero[4] = { 0.0, 0.0, 0.0, 1.0 };
   struct ureg_program *ureg;
   const struct tgsi_token *tokens;
   struct ureg_src src;
   struct ureg_dst dst;

   ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return NULL;

   dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
   src = ureg_DECL_immediate(ureg, zero, 4);
   ureg_MOV(ureg, dst, src);
   ureg_END(ureg);

   tokens = ureg_get_tokens(ureg, NULL);

   ureg_destroy(ureg);

   return tokens;
}


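/**
 * Translate the vertex shader's TGSI tokens into a device shader variant,
 * using the VGPU10 translation path when available and the VGPU9 path
 * otherwise.
 */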
static struct svga_shader_variant *
translate_vertex_program(struct svga_context *svga,
                         const struct svga_vertex_shader *vs,
                         const struct svga_compile_key *key)
{
   if (svga_have_vgpu10(svga)) {
      return svga_tgsi_vgpu10_translate(svga, &vs->base, key,
                                        PIPE_SHADER_VERTEX);
   }
   else {
      return svga_tgsi_vgpu9_translate(svga, &vs->base, key,
                                       PIPE_SHADER_VERTEX);
   }
}


/**
 * Replace the given shader's instructions with a simple/dummy shader.
 * We use this when normal shader translation fails.
 */
static struct svga_shader_variant *
get_compiled_dummy_vertex_shader(struct svga_context *svga,
                                 struct svga_vertex_shader *vs,
                                 const struct svga_compile_key *key)
{
   const struct tgsi_token *dummy = get_dummy_vertex_shader();
   struct svga_shader_variant *variant;

   if (!dummy) {
      return NULL;
   }

   FREE((void *) vs->base.tokens);
   vs->base.tokens = dummy;

   variant = translate_vertex_program(svga, vs, key);
   return variant;
}


/**
 * Translate the TGSI shader into an svga shader variant, falling back to
 * a dummy shader if translation fails or the result is too large, then
 * define the resulting variant on the device.
 */
static enum pipe_error
compile_vs(struct svga_context *svga,
           struct svga_vertex_shader *vs,
           const struct svga_compile_key *key,
           struct svga_shader_variant **out_variant)
{
   struct svga_shader_variant *variant;
   enum pipe_error ret = PIPE_ERROR;

   variant = translate_vertex_program(svga, vs, key);
   if (variant == NULL) {
      debug_printf("Failed to compile vertex shader,"
                   " using dummy shader instead.\n");
      variant = get_compiled_dummy_vertex_shader(svga, vs, key);
   }
   else if (svga_shader_too_large(svga, variant)) {
      /* too big, use dummy shader */
      debug_printf("Shader too large (%u bytes),"
                   " using dummy shader instead.\n",
                   (unsigned) (variant->nr_tokens
                               * sizeof(variant->tokens[0])));
      /* Free the too-large variant */
      svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_VS, variant);
      /* Use simple pass-through shader instead */
      variant = get_compiled_dummy_vertex_shader(svga, vs, key);
   }

   if (!variant) {
      return PIPE_ERROR;
   }

   ret = svga_define_shader(svga, SVGA3D_SHADERTYPE_VS, variant);
   if (ret != PIPE_OK) {
      svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_VS, variant);
      return ret;
   }

   *out_variant = variant;

   return PIPE_OK;
}


/* Build the vertex shader compile key from current state:
 * SVGA_NEW_NEED_SWTNL, SVGA_NEW_PRESCALE, SVGA_NEW_RAST, SVGA_NEW_FS,
 * SVGA_NEW_VELEMENT, SVGA_NEW_TEXTURE_BINDING, SVGA_NEW_SAMPLER
 */
static void
make_vs_key(struct svga_context *svga, struct svga_compile_key *key)
{
   const enum pipe_shader_type shader = PIPE_SHADER_VERTEX;

   memset(key, 0, sizeof *key);

   if (svga->state.sw.need_swtnl && svga_have_vgpu10(svga)) {
      /* Set both of these flags, to match compile_passthrough_vs() */
      key->vs.passthrough = 1;
      key->vs.undo_viewport = 1;
      return;
   }

   /* SVGA_NEW_PRESCALE */
   key->vs.need_prescale = svga->state.hw_clear.prescale.enabled &&
                           (svga->curr.gs == NULL);

   /* SVGA_NEW_RAST */
   key->vs.allow_psiz = svga->curr.rast->templ.point_size_per_vertex;

   /* SVGA_NEW_FS */
   key->vs.fs_generic_inputs = svga->curr.fs->generic_inputs;

   svga_remap_generics(key->vs.fs_generic_inputs, key->generic_remap_table);

   /* SVGA_NEW_VELEMENT */
   key->vs.adjust_attrib_range = svga->curr.velems->adjust_attrib_range;
   key->vs.adjust_attrib_w_1 = svga->curr.velems->adjust_attrib_w_1;
   key->vs.attrib_is_pure_int = svga->curr.velems->attrib_is_pure_int;
   key->vs.adjust_attrib_itof = svga->curr.velems->adjust_attrib_itof;
   key->vs.adjust_attrib_utof = svga->curr.velems->adjust_attrib_utof;
   key->vs.attrib_is_bgra = svga->curr.velems->attrib_is_bgra;
   key->vs.attrib_puint_to_snorm = svga->curr.velems->attrib_puint_to_snorm;
   key->vs.attrib_puint_to_uscaled = svga->curr.velems->attrib_puint_to_uscaled;
   key->vs.attrib_puint_to_sscaled = svga->curr.velems->attrib_puint_to_sscaled;

   /* SVGA_NEW_TEXTURE_BINDING | SVGA_NEW_SAMPLER */
   svga_init_shader_key_common(svga, shader, key);

   /* SVGA_NEW_RAST */
   key->clip_plane_enable = svga->curr.rast->templ.clip_plane_enable;
}


/**
 * svga_reemit_vs_bindings - Reemit the vertex shader bindings
 */
enum pipe_error
svga_reemit_vs_bindings(struct svga_context *svga)
{
   enum pipe_error ret;
   struct svga_winsys_gb_shader *gbshader = NULL;
   SVGA3dShaderId shaderId = SVGA3D_INVALID_ID;

   assert(svga->rebind.flags.vs);
   assert(svga_have_gb_objects(svga));

   if (svga->state.hw_draw.vs) {
      gbshader = svga->state.hw_draw.vs->gb_shader;
      shaderId = svga->state.hw_draw.vs->id;
   }

   if (!svga_need_to_rebind_resources(svga)) {
      ret = svga->swc->resource_rebind(svga->swc, NULL, gbshader,
                                       SVGA_RELOC_READ);
      goto out;
   }

   if (svga_have_vgpu10(svga))
      ret = SVGA3D_vgpu10_SetShader(svga->swc, SVGA3D_SHADERTYPE_VS,
                                    gbshader, shaderId);
   else
      ret = SVGA3D_SetGBShader(svga->swc, SVGA3D_SHADERTYPE_VS, gbshader);

 out:
   if (ret != PIPE_OK)
      return ret;

   svga->rebind.flags.vs = FALSE;
   return PIPE_OK;
}


/**
 * The current vertex shader is already executed by the 'draw' module,
 * so all we need here is a simple vertex shader that passes through
 * the VS outputs which will be consumed by the fragment shader.
 */
static enum pipe_error
compile_passthrough_vs(struct svga_context *svga,
                       struct svga_vertex_shader *vs,
                       struct svga_fragment_shader *fs,
                       struct svga_shader_variant **out_variant)
{
   struct svga_shader_variant *variant = NULL;
   unsigned num_inputs;
   unsigned i;
   unsigned num_elements;
   struct svga_vertex_shader new_vs;
   struct ureg_src src[PIPE_MAX_SHADER_INPUTS];
   struct ureg_dst dst[PIPE_MAX_SHADER_OUTPUTS];
   struct ureg_program *ureg;
   struct svga_compile_key key;
   enum pipe_error ret;

   assert(svga_have_vgpu10(svga));
   assert(fs);

   num_inputs = fs->base.info.num_inputs;

   ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* draw will always add position */
   dst[0] = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
   src[0] = ureg_DECL_vs_input(ureg, 0);
   num_elements = 1;

   /**
    * The swtnl backend redefines the input layout based on the
    * fragment shader's inputs, so we only need to pass through
    * those inputs that will be consumed by the fragment shader.
    * Note: DX10 requires the number of vertex elements
    * specified in the input layout to be no less than the
    * number of inputs to the vertex shader.
    */
   for (i = 0; i < num_inputs; i++) {
      switch (fs->base.info.input_semantic_name[i]) {
      case TGSI_SEMANTIC_COLOR:
      case TGSI_SEMANTIC_GENERIC:
      case TGSI_SEMANTIC_FOG:
         dst[num_elements] = ureg_DECL_output(ureg,
                                fs->base.info.input_semantic_name[i],
                                fs->base.info.input_semantic_index[i]);
         src[num_elements] = ureg_DECL_vs_input(ureg, num_elements);
         num_elements++;
         break;
      default:
         break;
      }
   }

   for (i = 0; i < num_elements; i++) {
      ureg_MOV(ureg, dst[i], src[i]);
   }

   ureg_END(ureg);

   memset(&new_vs, 0, sizeof(new_vs));
   new_vs.base.tokens = ureg_get_tokens(ureg, NULL);
   tgsi_scan_shader(new_vs.base.tokens, &new_vs.base.info);

   memset(&key, 0, sizeof(key));
   key.vs.undo_viewport = 1;

   ret = compile_vs(svga, &new_vs, &key, &variant);

   /* The local ureg program and its tokens are no longer needed, whether
    * or not compilation succeeded, so free them before checking for error.
    */
   ureg_free_tokens(new_vs.base.tokens);
   ureg_destroy(ureg);

   if (ret != PIPE_OK)
      return ret;

   /* Overwrite the variant key to indicate it's a pass-through VS */
   memset(&variant->key, 0, sizeof(variant->key));
   variant->key.vs.passthrough = 1;
   variant->key.vs.undo_viewport = 1;

   *out_variant = variant;

   return PIPE_OK;
}


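/**
 * Emit the current vertex shader state to the hardware: set up stream
 * output, build the compile key, find or compile a matching shader
 * variant, and bind it if it differs from the currently bound one.
 */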
static enum pipe_error
emit_hw_vs(struct svga_context *svga, unsigned dirty)
{
   struct svga_shader_variant *variant;
   struct svga_vertex_shader *vs = svga->curr.vs;
   struct svga_fragment_shader *fs = svga->curr.fs;
   enum pipe_error ret = PIPE_OK;
   struct svga_compile_key key;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_EMITVS);

   /* If there is an active geometry shader and it has stream output
    * defined, then we skip setting stream output from the vertex shader.
    */
   if (!svga_have_gs_streamout(svga)) {
      /* No GS stream out */
      if (svga_have_vs_streamout(svga)) {
         /* Set VS stream out */
         ret = svga_set_stream_output(svga, vs->base.stream_output);
      }
      else {
         /* turn off stream out */
         ret = svga_set_stream_output(svga, NULL);
      }
      if (ret != PIPE_OK) {
         goto done;
      }
   }

   /* SVGA_NEW_NEED_SWTNL */
   if (svga->state.sw.need_swtnl && !svga_have_vgpu10(svga)) {
      /* No vertex shader is needed */
      variant = NULL;
   }
   else {
      make_vs_key(svga, &key);

      /* See if we already have a VS variant that matches the key */
      variant = svga_search_shader_key(&vs->base, &key);

      if (!variant) {
         /* Create VS variant now */
         if (key.vs.passthrough) {
            ret = compile_passthrough_vs(svga, vs, fs, &variant);
         }
         else {
            ret = compile_vs(svga, vs, &key, &variant);
         }
         if (ret != PIPE_OK)
            goto done;

         /* insert the new variant at head of linked list */
         assert(variant);
         variant->next = vs->base.variants;
         vs->base.variants = variant;
      }
   }

   if (variant != svga->state.hw_draw.vs) {
      /* Bind the new variant */
      if (variant) {
         ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_VS, variant);
         if (ret != PIPE_OK)
            goto done;
         svga->rebind.flags.vs = FALSE;
      }

      svga->dirty |= SVGA_NEW_VS_VARIANT;
      svga->state.hw_draw.vs = variant;
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
   return ret;
}

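/* Tracked state: re-emit the hardware vertex shader whenever any of the
 * state flags listed below is dirtied.
 */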
struct svga_tracked_state svga_hw_vs =
{
   "vertex shader (hwtnl)",
   (SVGA_NEW_VS |
    SVGA_NEW_FS |
    SVGA_NEW_TEXTURE_BINDING |
    SVGA_NEW_SAMPLER |
    SVGA_NEW_RAST |
    SVGA_NEW_PRESCALE |
    SVGA_NEW_VELEMENT |
    SVGA_NEW_NEED_SWTNL),
   emit_hw_vs
};
    427