/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
#include "brw_fs.h"
#include "compiler/glsl_types.h"

using namespace brw;

/* Sample from the MCS surface attached to this multisample texture. */
fs_reg
fs_visitor::emit_mcs_fetch(const fs_reg &coordinate, unsigned components,
                           const fs_reg &texture)
{
   const fs_reg dest = vgrf(glsl_type::uvec4_type);

   fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
   srcs[TEX_LOGICAL_SRC_COORDINATE] = coordinate;
   srcs[TEX_LOGICAL_SRC_SURFACE] = texture;
   srcs[TEX_LOGICAL_SRC_SAMPLER] = texture;
   srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(components);
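   /* The MCS fetch takes only the texel coordinate, no derivatives, so the
    * gradient source carries zero components.
    */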
   srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);

   fs_inst *inst = bld.emit(SHADER_OPCODE_TXF_MCS_LOGICAL, dest, srcs,
                            ARRAY_SIZE(srcs));

   /* We only care about one or two regs of response, but the sampler always
    * writes 4/8.
    */
   inst->size_written = 4 * dest.component_size(inst->exec_size);

   return dest;
}

/**
 * Apply workarounds for Gen6 gather with UINT/SINT
 */
void
fs_visitor::emit_gen6_gather_wa(uint8_t wa, fs_reg dst)
{
   if (!wa)
      return;

   int width = (wa & WA_8BIT) ? 8 : 16;
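   /* The gather result comes back as UNORM, i.e. value / (2^width - 1).
    * For example, with WA_8BIT a texel of 200 is returned as 200/255.0f;
    * the MUL below by 255 recovers the integer 200.
    */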

   for (int i = 0; i < 4; i++) {
      fs_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
      /* Convert from UNORM to UINT */
      bld.MUL(dst_f, dst_f, brw_imm_f((1 << width) - 1));
      bld.MOV(dst, dst_f);

      if (wa & WA_SIGN) {
         /* Reinterpret the UINT value as a signed INT value by
          * shifting the sign bit into place, then shifting back
          * preserving sign.
          */
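         /* For example, with width == 8 a value of 0xff becomes 0xff000000
          * after the SHL and 0xffffffff (-1) after the ASR.
          */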
         bld.SHL(dst, dst, brw_imm_d(32 - width));
         bld.ASR(dst, dst, brw_imm_d(32 - width));
      }

      dst = offset(dst, bld, 1);
   }
}

/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   int reg_width = dispatch_width / 8;

   /* Everyone's favorite color. */
   const float color[4] = { 1.0, 0.0, 1.0, 0.0 };
   for (int i = 0; i < 4; i++) {
      bld.MOV(fs_reg(MRF, 2 + i * reg_width, BRW_REGISTER_TYPE_F),
              brw_imm_f(color[i]));
   }

   fs_inst *write;
   write = bld.emit(FS_OPCODE_FB_WRITE);
   write->eot = true;
   if (devinfo->gen >= 6) {
      write->base_mrf = 2;
      write->mlen = 4 * reg_width;
   } else {
      write->header_size = 2;
      write->base_mrf = 0;
      write->mlen = 2 + 4 * reg_width;
   }

   /* Tell the SF we don't have any inputs.  Gen4-5 require at least one
    * varying to avoid GPU hangs, so set that.
    */
   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(this->prog_data);
   wm_prog_data->num_varying_inputs = devinfo->gen < 6 ? 1 : 0;
   memset(wm_prog_data->urb_setup, -1,
          sizeof(wm_prog_data->urb_setup[0]) * VARYING_SLOT_MAX);

   /* We don't have any uniforms. */
   stage_prog_data->nr_params = 0;
   stage_prog_data->nr_pull_params = 0;
   stage_prog_data->curb_read_length = 0;
   stage_prog_data->dispatch_grf_start_reg = 2;
   wm_prog_data->dispatch_grf_start_reg_2 = 2;
   grf_used = 1; /* Gen4-5 don't allow zero GRF blocks */

   calculate_cfg();
}

/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
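   /* Each varying takes two GRFs of interpolation setup data, two channels
    * per GRF, with the odd channel's coefficients starting at subregister 4.
    * For example, a varying at urb_setup slot 1 with channel == 3 lands in
    * GRF 2*1 + 1 = 3 at subregister offset (3 & 1) * 4 = 4.
    */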
   int regnr = prog_data->urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;

   assert(prog_data->urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   fs_builder abld = bld.annotate("compute pixel centers");
   this->pixel_x = vgrf(glsl_type::uint_type);
   this->pixel_y = vgrf(glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
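   /* brw_imm_v packs eight signed 4-bit immediates, lowest nibble first:
    * 0x10101010 adds {0, 1, 0, 1, ...} to the per-subspan X coordinates
    * from g1 and 0x11001100 adds {0, 0, 1, 1, ...} to the Y coordinates,
    * giving the integer pixel coordinates of each 2x2 subspan.
    */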
   abld.ADD(this->pixel_x,
            fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
            fs_reg(brw_imm_v(0x10101010)));
   abld.ADD(this->pixel_y,
            fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
            fs_reg(brw_imm_v(0x11001100)));

   abld = bld.annotate("compute pixel deltas from v0");

   this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL] =
      vgrf(glsl_type::vec2_type);
   const fs_reg &delta_xy = this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL];
   const fs_reg xstart(negate(brw_vec1_grf(1, 0)));
   const fs_reg ystart(negate(brw_vec1_grf(1, 1)));

   if (devinfo->has_pln && dispatch_width == 16) {
      for (unsigned i = 0; i < 2; i++) {
         abld.half(i).ADD(half(offset(delta_xy, abld, i), 0),
                          half(this->pixel_x, i), xstart);
         abld.half(i).ADD(half(offset(delta_xy, abld, i), 1),
                          half(this->pixel_y, i), ystart);
      }
   } else {
      abld.ADD(offset(delta_xy, abld, 0), this->pixel_x, xstart);
      abld.ADD(offset(delta_xy, abld, 1), this->pixel_y, ystart);
   }

   abld = bld.annotate("compute pos.w and 1/pos.w");
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(FS_OPCODE_LINTERP, wpos_w, delta_xy,
             interp_reg(VARYING_SLOT_POS, 3));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   fs_builder abld = bld.annotate("compute pixel centers");
   if (devinfo->gen >= 8 || dispatch_width == 8) {
      /* The "Register Region Restrictions" page says for BDW (and newer,
       * presumably):
       *
       *     "When destination spans two registers, the source may be one or
       *      two registers. The destination elements must be evenly split
       *      between the two registers."
       *
       * Thus we can do a single add(16) in SIMD8 or an add(32) in SIMD16 to
       * compute our pixel centers.
       */
      fs_reg int_pixel_xy(VGRF, alloc.allocate(dispatch_width / 8),
                          BRW_REGISTER_TYPE_UW);

      const fs_builder dbld = abld.exec_all().group(dispatch_width * 2, 0);
      dbld.ADD(int_pixel_xy,
               fs_reg(stride(suboffset(g1_uw, 4), 1, 4, 0)),
               fs_reg(brw_imm_v(0x11001010)));
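      /* 0x11001010 packs the nibbles {0, 1, 0, 1, 0, 0, 1, 1} (lowest
       * first), so the wide ADD leaves each subspan's four X coordinates
       * followed by its four Y coordinates; FS_OPCODE_PIXEL_X/Y below pick
       * them apart.
       */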

      this->pixel_x = vgrf(glsl_type::float_type);
      this->pixel_y = vgrf(glsl_type::float_type);
      abld.emit(FS_OPCODE_PIXEL_X, this->pixel_x, int_pixel_xy);
      abld.emit(FS_OPCODE_PIXEL_Y, this->pixel_y, int_pixel_xy);
   } else {
      /* The "Register Region Restrictions" page says for SNB, IVB, HSW:
       *
       *     "When destination spans two registers, the source MUST span two
       *      registers."
       *
       * Since the GRF source of the ADD will only read a single register, we
       * must do two separate ADDs in SIMD16.
       */
      fs_reg int_pixel_x = vgrf(glsl_type::uint_type);
      fs_reg int_pixel_y = vgrf(glsl_type::uint_type);
      int_pixel_x.type = BRW_REGISTER_TYPE_UW;
      int_pixel_y.type = BRW_REGISTER_TYPE_UW;
      abld.ADD(int_pixel_x,
               fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
               fs_reg(brw_imm_v(0x10101010)));
      abld.ADD(int_pixel_y,
               fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
               fs_reg(brw_imm_v(0x11001100)));

      /* As of gen6, we can no longer mix float and int sources.  We have
       * to turn the integer pixel centers into floats for their actual
       * use.
       */
      this->pixel_x = vgrf(glsl_type::float_type);
      this->pixel_y = vgrf(glsl_type::float_type);
      abld.MOV(this->pixel_x, int_pixel_x);
      abld.MOV(this->pixel_y, int_pixel_y);
   }

   abld = bld.annotate("compute pos.w");
   this->pixel_w = fs_reg(brw_vec8_grf(payload.source_w_reg, 0));
   this->wpos_w = vgrf(glsl_type::float_type);
   abld.emit(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);

   struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(prog_data);
   uint32_t centroid_modes = wm_prog_data->barycentric_interp_modes &
      (1 << BRW_BARYCENTRIC_PERSPECTIVE_CENTROID |
       1 << BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID);

   for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
      uint8_t reg = payload.barycentric_coord_reg[i];
      this->delta_xy[i] = fs_reg(brw_vec16_grf(reg, 0));

      if (devinfo->needs_unlit_centroid_workaround &&
          (centroid_modes & (1 << i))) {
         /* Get the pixel/sample mask into f0 so that we know which
          * pixels are lit.  Then, for each channel that is unlit,
          * replace the centroid data with non-centroid data.
          */
         bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);

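         /* Each *_CENTROID barycentric mode immediately follows its *_PIXEL
          * counterpart in the enum, so the previous barycentric slot holds
          * the non-centroid data.
          */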
         uint8_t pixel_reg = payload.barycentric_coord_reg[i - 1];

         set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                           bld.half(0).MOV(brw_vec8_grf(reg, 0),
                                           brw_vec8_grf(pixel_reg, 0)));
         set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                           bld.half(0).MOV(brw_vec8_grf(reg + 1, 0),
                                           brw_vec8_grf(pixel_reg + 1, 0)));
         if (dispatch_width == 16) {
            set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                              bld.half(1).MOV(brw_vec8_grf(reg + 2, 0),
                                              brw_vec8_grf(pixel_reg + 2, 0)));
            set_predicate_inv(BRW_PREDICATE_NORMAL, true,
                              bld.half(1).MOV(brw_vec8_grf(reg + 3, 0),
                                              brw_vec8_grf(pixel_reg + 3, 0)));
         }
         assert(dispatch_width != 32); /* not implemented yet */
      }
   }
}

static enum brw_conditional_mod
cond_for_alpha_func(GLenum func)
{
   switch (func) {
      case GL_GREATER:
         return BRW_CONDITIONAL_G;
      case GL_GEQUAL:
         return BRW_CONDITIONAL_GE;
      case GL_LESS:
         return BRW_CONDITIONAL_L;
      case GL_LEQUAL:
         return BRW_CONDITIONAL_LE;
      case GL_EQUAL:
         return BRW_CONDITIONAL_EQ;
      case GL_NOTEQUAL:
         return BRW_CONDITIONAL_NEQ;
      default:
         unreachable("Not reached");
   }
}

/**
 * Alpha test support for when we compile it into the shader instead
 * of using the normal fixed-function alpha test.
 */
void
fs_visitor::emit_alpha_test()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   const fs_builder abld = bld.annotate("Alpha test");

   fs_inst *cmp;
   if (key->alpha_test_func == GL_ALWAYS)
      return;

   if (key->alpha_test_func == GL_NEVER) {
      /* f0.1 = 0 */
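      /* Comparing a register against itself with NEQ can never pass, so
       * every channel of the flag ends up clear.
       */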
      fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                      BRW_REGISTER_TYPE_UW));
      cmp = abld.CMP(bld.null_reg_f(), some_reg, some_reg,
                     BRW_CONDITIONAL_NEQ);
   } else {
      /* RT0 alpha */
      fs_reg color = offset(outputs[0], bld, 3);

      /* f0.1 &= func(color, ref) */
      cmp = abld.CMP(bld.null_reg_f(), color, brw_imm_f(key->alpha_test_ref),
                     cond_for_alpha_func(key->alpha_test_func));
   }
   cmp->predicate = BRW_PREDICATE_NORMAL;
   cmp->flag_subreg = 1;
}

fs_inst *
fs_visitor::emit_single_fb_write(const fs_builder &bld,
                                 fs_reg color0, fs_reg color1,
                                 fs_reg src0_alpha, unsigned components)
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);

   /* Hand over gl_FragDepth or the payload depth. */
   const fs_reg dst_depth = (payload.dest_depth_reg ?
                             fs_reg(brw_vec8_grf(payload.dest_depth_reg, 0)) :
                             fs_reg());
   fs_reg src_depth, src_stencil;

   if (source_depth_to_render_target) {
      if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         src_depth = frag_depth;
      else
         src_depth = fs_reg(brw_vec8_grf(payload.source_depth_reg, 0));
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
      src_stencil = frag_stencil;

   const fs_reg sources[] = {
      color0, color1, src0_alpha, src_depth, dst_depth, src_stencil,
      (prog_data->uses_omask ? sample_mask : fs_reg()),
      brw_imm_ud(components)
   };
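   /* These must stay in FB_WRITE_LOGICAL_SRC_* order; the assert below only
    * sanity-checks the source count against the enum.
    */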
   assert(ARRAY_SIZE(sources) - 1 == FB_WRITE_LOGICAL_SRC_COMPONENTS);
   fs_inst *write = bld.emit(FS_OPCODE_FB_WRITE_LOGICAL, fs_reg(),
                             sources, ARRAY_SIZE(sources));

   if (prog_data->uses_kill) {
      write->predicate = BRW_PREDICATE_NORMAL;
      write->flag_subreg = 1;
   }

   return write;
}

void
fs_visitor::emit_fb_writes()
{
   assert(stage == MESA_SHADER_FRAGMENT);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   fs_inst *inst = NULL;

   if (source_depth_to_render_target && devinfo->gen == 6) {
      /* For outputting oDepth on gen6, SIMD8 writes have to be used.  This
       * would require SIMD8 moves of each half to message regs, e.g. by using
       * the SIMD lowering pass.  Unfortunately this is more difficult than it
       * sounds because the SIMD8 single-source message lacks channel selects
       * for the second and third subspans.
       */
      limit_dispatch_width(8, "Depth writes unsupported in SIMD16+ mode.\n");
   }

   if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL)) {
      /* From the 'Render Target Write message' section of the docs:
       * "Output Stencil is not supported with SIMD16 Render Target Write
       * Messages."
       */
      limit_dispatch_width(8, "gl_FragStencilRefARB unsupported "
                           "in SIMD16+ mode.\n");
   }

   for (int target = 0; target < key->nr_color_regions; target++) {
      /* Skip over outputs that weren't written. */
      if (this->outputs[target].file == BAD_FILE)
         continue;

      const fs_builder abld = bld.annotate(
         ralloc_asprintf(this->mem_ctx, "FB write target %d", target));

      fs_reg src0_alpha;
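      /* replicate_alpha is set when alpha test or alpha-to-coverage needs
       * RT0's alpha value sent along with every render target's write.
       */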
      if (devinfo->gen >= 6 && key->replicate_alpha && target != 0)
         src0_alpha = offset(outputs[0], bld, 3);

      inst = emit_single_fb_write(abld, this->outputs[target],
                                  this->dual_src_output, src0_alpha, 4);
      inst->target = target;
   }

   prog_data->dual_src_blend = (this->dual_src_output.file != BAD_FILE);
   assert(!prog_data->dual_src_blend || key->nr_color_regions == 1);

   if (inst == NULL) {
      /* Even if there's no color buffers enabled, we still need to send
       * alpha out the pipeline to our null renderbuffer to support
       * alpha-testing, alpha-to-coverage, and so on.
       */
      /* FINISHME: Factor out this frequently recurring pattern into a
       * helper function.
       */
      const fs_reg srcs[] = { reg_undef, reg_undef,
                              reg_undef, offset(this->outputs[0], bld, 3) };
      const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 4);
      bld.LOAD_PAYLOAD(tmp, srcs, 4, 0);

      inst = emit_single_fb_write(bld, tmp, reg_undef, reg_undef, 4);
      inst->target = 0;
   }

   inst->eot = true;
}

void
fs_visitor::setup_uniform_clipplane_values()
{
   const struct brw_vs_prog_key *key =
      (const struct brw_vs_prog_key *) this->key;

   if (key->nr_userclip_plane_consts == 0)
      return;

   assert(stage_prog_data->nr_params == uniforms);
   brw_stage_prog_data_add_params(stage_prog_data,
                                  key->nr_userclip_plane_consts * 4);

   for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
      this->userplane[i] = fs_reg(UNIFORM, uniforms);
      for (int j = 0; j < 4; ++j) {
         stage_prog_data->param[uniforms + j] =
            BRW_PARAM_BUILTIN_CLIP_PLANE(i, j);
      }
      uniforms += 4;
   }
}

/**
 * Lower legacy fixed-function and gl_ClipVertex clipping to clip distances.
 *
 * This does nothing if the shader uses gl_ClipDistance or user clipping is
 * disabled altogether.
 */
void fs_visitor::compute_clip_distance()
{
   struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
   const struct brw_vs_prog_key *key =
      (const struct brw_vs_prog_key *) this->key;

   /* Bail unless some sort of legacy clipping is enabled */
   if (key->nr_userclip_plane_consts == 0)
      return;

   /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
    *
    *     "If a linked set of shaders forming the vertex stage contains no
    *     static write to gl_ClipVertex or gl_ClipDistance, but the
    *     application has requested clipping against user clip planes through
    *     the API, then the coordinate written to gl_Position is used for
    *     comparison against the user clip planes."
    *
    * This function is only called if the shader didn't write to
    * gl_ClipDistance.  Accordingly, we use gl_ClipVertex to perform clipping
    * if the user wrote to it; otherwise we use gl_Position.
    */

   gl_varying_slot clip_vertex = VARYING_SLOT_CLIP_VERTEX;
   if (!(vue_prog_data->vue_map.slots_valid & VARYING_BIT_CLIP_VERTEX))
      clip_vertex = VARYING_SLOT_POS;

   /* If the clip vertex isn't written, skip this.  Typically this means
    * the GS will set up clipping. */
   if (outputs[clip_vertex].file == BAD_FILE)
      return;

   setup_uniform_clipplane_values();

   const fs_builder abld = bld.annotate("user clip distances");

   this->outputs[VARYING_SLOT_CLIP_DIST0] = vgrf(glsl_type::vec4_type);
   this->outputs[VARYING_SLOT_CLIP_DIST1] = vgrf(glsl_type::vec4_type);

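   /* Each clip distance is dot(clip_vertex, plane): one MUL for the x term,
    * then three MADs accumulating the y, z and w terms.
    */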
   for (int i = 0; i < key->nr_userclip_plane_consts; i++) {
      fs_reg u = userplane[i];
      const fs_reg output = offset(outputs[VARYING_SLOT_CLIP_DIST0 + i / 4],
                                   bld, i & 3);

      abld.MUL(output, outputs[clip_vertex], u);
      for (int j = 1; j < 4; j++) {
         u.nr = userplane[i].nr + j;
         abld.MAD(output, output, offset(outputs[clip_vertex], bld, j), u);
      }
   }
}

void
fs_visitor::emit_urb_writes(const fs_reg &gs_vertex_count)
{
   int slot, urb_offset, length;
   int starting_urb_offset = 0;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(this->prog_data);
   const struct brw_vs_prog_key *vs_key =
      (const struct brw_vs_prog_key *) this->key;
   const GLbitfield64 psiz_mask =
      VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT | VARYING_BIT_PSIZ;
   const struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
   bool flush;
   fs_reg sources[8];
   fs_reg urb_handle;

   if (stage == MESA_SHADER_TESS_EVAL)
      urb_handle = fs_reg(retype(brw_vec8_grf(4, 0), BRW_REGISTER_TYPE_UD));
   else
      urb_handle = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));

   opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
   int header_size = 1;
   fs_reg per_slot_offsets;

   if (stage == MESA_SHADER_GEOMETRY) {
      const struct brw_gs_prog_data *gs_prog_data =
         brw_gs_prog_data(this->prog_data);

      /* We need to increment the Global Offset to skip over the control data
       * header and the extra "Vertex Count" field (1 HWord) at the beginning
       * of the VUE.  We're counting in OWords, so the units are doubled.
       */
      starting_urb_offset = 2 * gs_prog_data->control_data_header_size_hwords;
      if (gs_prog_data->static_vertex_count == -1)
         starting_urb_offset += 2;

      /* We also need to use per-slot offsets.  The per-slot offset is the
       * Vertex Count.  SIMD8 mode processes 8 different primitives at a
       * time; each may output a different number of vertices.
       */
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT;
      header_size++;

      /* The URB offset is in 128-bit units, so we need to multiply by 2 */
      const int output_vertex_size_owords =
         gs_prog_data->output_vertex_size_hwords * 2;

      if (gs_vertex_count.file == IMM) {
         per_slot_offsets = brw_imm_ud(output_vertex_size_owords *
                                       gs_vertex_count.ud);
      } else {
         per_slot_offsets = vgrf(glsl_type::int_type);
         bld.MUL(per_slot_offsets, gs_vertex_count,
                 brw_imm_ud(output_vertex_size_owords));
      }
   }

   length = 0;
   urb_offset = starting_urb_offset;
   flush = false;

   /* SSO shaders can have VUE slots allocated which are never actually
    * written to, so ignore them when looking for the last (written) slot.
    */
   int last_slot = vue_map->num_slots - 1;
   while (last_slot > 0 &&
          (vue_map->slot_to_varying[last_slot] == BRW_VARYING_SLOT_PAD ||
           outputs[vue_map->slot_to_varying[last_slot]].file == BAD_FILE)) {
      last_slot--;
   }

   bool urb_written = false;
   for (slot = 0; slot < vue_map->num_slots; slot++) {
      int varying = vue_map->slot_to_varying[slot];
      switch (varying) {
      case VARYING_SLOT_PSIZ: {
         /* The point size varying slot is the vue header and is always in the
          * vue map.  But often none of the special varyings that live there
          * are written and in that case we can skip writing to the vue
          * header, provided the corresponding state properly clamps the
          * values further down the pipeline. */
         if ((vue_map->slots_valid & psiz_mask) == 0) {
            assert(length == 0);
            urb_offset++;
            break;
         }

         fs_reg zero(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
         bld.MOV(zero, brw_imm_ud(0u));

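         /* VUE header DWords: 0 is reserved, 1 is the render target array
          * index (layer), 2 is the viewport index and 3 is the point size.
          */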
         sources[length++] = zero;
         if (vue_map->slots_valid & VARYING_BIT_LAYER)
            sources[length++] = this->outputs[VARYING_SLOT_LAYER];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_VIEWPORT)
            sources[length++] = this->outputs[VARYING_SLOT_VIEWPORT];
         else
            sources[length++] = zero;

         if (vue_map->slots_valid & VARYING_BIT_PSIZ)
            sources[length++] = this->outputs[VARYING_SLOT_PSIZ];
         else
            sources[length++] = zero;
         break;
      }
      case BRW_VARYING_SLOT_NDC:
      case VARYING_SLOT_EDGE:
         unreachable("unexpected scalar vs output");
         break;

      default:
         /* gl_Position is always in the vue map, but isn't always written by
          * the shader.  Other varyings (clip distances) get added to the vue
          * map but don't always get written.  In those cases, the
          * corresponding this->outputs[] slot will be invalid and we can
          * skip the urb write for the varying.  If we've already queued up a
          * vue slot for writing we flush a mlen 5 urb write, otherwise we
          * just advance the urb_offset.
          */
         if (varying == BRW_VARYING_SLOT_PAD ||
             this->outputs[varying].file == BAD_FILE) {
            if (length > 0)
               flush = true;
            else
               urb_offset++;
            break;
         }

         if (stage == MESA_SHADER_VERTEX && vs_key->clamp_vertex_color &&
             (varying == VARYING_SLOT_COL0 ||
              varying == VARYING_SLOT_COL1 ||
              varying == VARYING_SLOT_BFC0 ||
              varying == VARYING_SLOT_BFC1)) {
            /* We need to clamp these guys, so do a saturating MOV into a
             * temp register and use that for the payload.
             */
            for (int i = 0; i < 4; i++) {
               fs_reg reg = fs_reg(VGRF, alloc.allocate(1), outputs[varying].type);
               fs_reg src = offset(this->outputs[varying], bld, i);
               set_saturate(true, bld.MOV(reg, src));
               sources[length++] = reg;
            }
         } else {
            for (unsigned i = 0; i < 4; i++)
               sources[length++] = offset(this->outputs[varying], bld, i);
         }
         break;
      }

      const fs_builder abld = bld.annotate("URB write");

      /* If we've queued up 8 registers of payload (2 VUE slots), if this is
       * the last slot, or if we need to flush (see the BAD_FILE varying case
       * above), emit a URB write send now to flush out the data.
       */
      if (length == 8 || (length > 0 && slot == last_slot))
         flush = true;
      if (flush) {
         fs_reg *payload_sources =
            ralloc_array(mem_ctx, fs_reg, length + header_size);
         fs_reg payload = fs_reg(VGRF, alloc.allocate(length + header_size),
                                 BRW_REGISTER_TYPE_F);
         payload_sources[0] = urb_handle;

         if (opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT)
            payload_sources[1] = per_slot_offsets;

         memcpy(&payload_sources[header_size], sources,
                length * sizeof sources[0]);

         abld.LOAD_PAYLOAD(payload, payload_sources, length + header_size,
                           header_size);

         fs_inst *inst = abld.emit(opcode, reg_undef, payload);
         inst->eot = slot == last_slot && stage != MESA_SHADER_GEOMETRY;
         inst->mlen = length + header_size;
         inst->offset = urb_offset;
         urb_offset = starting_urb_offset + slot + 1;
         length = 0;
         flush = false;
         urb_written = true;
      }
   }

   /* If we don't have any valid slots to write, just do a minimal urb write
    * send to terminate the shader.  This includes 1 slot of undefined data,
    * because it's invalid to write 0 data:
    *
    * From the Broadwell PRM, Volume 7: 3D Media GPGPU, Shared Functions -
    * Unified Return Buffer (URB) > URB_SIMD8_Write and URB_SIMD8_Read >
    * Write Data Payload:
    *
    *    "The write data payload can be between 1 and 8 message phases long."
    */
   if (!urb_written) {
      /* For GS, just turn EmitVertex() into a no-op.  We don't want it to
       * end the thread, and emit_gs_thread_end() already emits a SEND with
       * EOT at the end of the program for us.
       */
      if (stage == MESA_SHADER_GEOMETRY)
         return;

      fs_reg payload = fs_reg(VGRF, alloc.allocate(2), BRW_REGISTER_TYPE_UD);
      bld.exec_all().MOV(payload, urb_handle);
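      /* Only the first register (the URB handle) is initialized; the second
       * is the single phase of undefined write data mentioned above.
       */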

      fs_inst *inst = bld.emit(SHADER_OPCODE_URB_WRITE_SIMD8, reg_undef, payload);
      inst->eot = true;
      inst->mlen = 2;
      inst->offset = 1;
      return;
   }
}

void
fs_visitor::emit_cs_terminate()
{
   assert(devinfo->gen >= 7);

   /* We are getting the thread ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   /* We can't directly send from g0, since sends with EOT have to use
    * g112-127.  So, copy it to a virtual register; the register allocator
    * will make sure it uses the appropriate register range.
    */
   struct brw_reg g0 = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD);
   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
   bld.group(8, 0).exec_all().MOV(payload, g0);

   /* Send a message to the thread spawner to terminate the thread. */
   fs_inst *inst = bld.exec_all()
                      .emit(CS_OPCODE_CS_TERMINATE, reg_undef, payload);
   inst->eot = true;
}

void
fs_visitor::emit_barrier()
{
   assert(devinfo->gen >= 7);
   const uint32_t barrier_id_mask =
      devinfo->gen >= 9 ? 0x8f000000u : 0x0f000000u;

   /* We are getting the barrier ID from the compute shader header */
   assert(stage == MESA_SHADER_COMPUTE);

   fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   /* Clear the message payload */
   bld.exec_all().group(8, 0).MOV(payload, brw_imm_ud(0u));

   /* Copy the barrier id from r0.2 to the message payload reg.2 */
   fs_reg r0_2 = fs_reg(retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD));
   bld.exec_all().group(1, 0).AND(component(payload, 2), r0_2,
                                  brw_imm_ud(barrier_id_mask));

   /* Emit a gateway "barrier" message using the payload we set up, followed
    * by a wait instruction.
    */
   bld.exec_all().emit(SHADER_OPCODE_BARRIER, reg_undef, payload);
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       const void *key,
                       struct brw_stage_prog_data *prog_data,
                       struct gl_program *prog,
                       const nir_shader *shader,
                       unsigned dispatch_width,
                       int shader_time_index,
                       const struct brw_vue_map *input_vue_map)
   : backend_shader(compiler, log_data, mem_ctx, shader, prog_data),
     key(key), gs_compile(NULL), prog_data(prog_data), prog(prog),
     input_vue_map(input_vue_map),
     dispatch_width(dispatch_width),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}

fs_visitor::fs_visitor(const struct brw_compiler *compiler, void *log_data,
                       void *mem_ctx,
                       struct brw_gs_compile *c,
                       struct brw_gs_prog_data *prog_data,
                       const nir_shader *shader,
                       int shader_time_index)
   : backend_shader(compiler, log_data, mem_ctx, shader,
                    &prog_data->base.base),
     key(&c->key), gs_compile(c),
     prog_data(&prog_data->base.base), prog(NULL),
     dispatch_width(8),
     shader_time_index(shader_time_index),
     bld(fs_builder(this, dispatch_width).at_end())
{
   init();
}


void
fs_visitor::init()
{
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      key_tex = &((const brw_wm_prog_key *) key)->tex;
      break;
   case MESA_SHADER_VERTEX:
      key_tex = &((const brw_vs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_TESS_CTRL:
      key_tex = &((const brw_tcs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_TESS_EVAL:
      key_tex = &((const brw_tes_prog_key *) key)->tex;
      break;
   case MESA_SHADER_GEOMETRY:
      key_tex = &((const brw_gs_prog_key *) key)->tex;
      break;
   case MESA_SHADER_COMPUTE:
      key_tex = &((const brw_cs_prog_key*) key)->tex;
      break;
   default:
      unreachable("unhandled shader stage");
   }

   this->max_dispatch_width = 32;
   this->prog_data = this->stage_prog_data;

   this->failed = false;

   this->nir_locals = NULL;
   this->nir_ssa_values = NULL;

   memset(&this->payload, 0, sizeof(this->payload));
   this->source_depth_to_render_target = false;
   this->runtime_check_aads_emit = false;
   this->first_non_payload_grf = 0;
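   /* Gen7+ has no MRF file; the top of the GRF space stands in for MRFs, so
    * the allocatable range ends at GEN7_MRF_HACK_START.
    */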
   this->max_grf = devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   this->virtual_grf_start = NULL;
   this->virtual_grf_end = NULL;
   this->live_intervals = NULL;
   this->regs_live_at_ip = NULL;

   this->uniforms = 0;
   this->last_scratch = 0;
   this->pull_constant_loc = NULL;
   this->push_constant_loc = NULL;

   this->promoted_constants = 0;

   this->grf_used = 0;
   this->spilled_any_registers = false;
}

fs_visitor::~fs_visitor()
{
}
    908