/*
 * Copyright 2003 Tungsten Graphics, inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keithw@tungstengraphics.com>
 */

#include "main/glheader.h"
#include "main/context.h"
#include "main/colormac.h"
#include "swrast/s_chan.h"
#include "t_context.h"
#include "t_vertex.h"

#define DBG 0

/* Build and manage clipspace/ndc/window vertices.
 */

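/* Return GL_TRUE if the current vertex layout in 'vtx' matches the layout
 * recorded in the fastpath 'fp': same attribute count, formats, input
 * sizes and vertex offsets, and (when fp->match_strides is set) the same
 * vertex size and input strides.
 */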
static GLboolean match_fastpath( struct tnl_clipspace *vtx,
                                 const struct tnl_clipspace_fastpath *fp)
{
   GLuint j;

   if (vtx->attr_count != fp->attr_count)
      return GL_FALSE;

   for (j = 0; j < vtx->attr_count; j++)
      if (vtx->attr[j].format != fp->attr[j].format ||
          vtx->attr[j].inputsize != fp->attr[j].size ||
          vtx->attr[j].vertoffset != fp->attr[j].offset)
         return GL_FALSE;

   if (fp->match_strides) {
      if (vtx->vertex_size != fp->vertex_size)
         return GL_FALSE;

      for (j = 0; j < vtx->attr_count; j++)
         if (vtx->attr[j].inputstride != fp->attr[j].stride)
            return GL_FALSE;
   }

   return GL_TRUE;
}

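/* Walk the list of registered fastpaths and, if one matches the current
 * layout, install its emit function in vtx->emit.
 */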
static GLboolean search_fastpath_emit( struct tnl_clipspace *vtx )
{
   struct tnl_clipspace_fastpath *fp = vtx->fastpath;

   for ( ; fp ; fp = fp->next) {
      if (match_fastpath(vtx, fp)) {
         vtx->emit = fp->func;
         return GL_TRUE;
      }
   }

   return GL_FALSE;
}

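/* Record the current vertex layout and its emit function at the head of
 * the fastpath list, so that later layouts which match it can reuse the
 * function without regenerating it.
 */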
void _tnl_register_fastpath( struct tnl_clipspace *vtx,
                             GLboolean match_strides )
{
   struct tnl_clipspace_fastpath *fastpath = CALLOC_STRUCT(tnl_clipspace_fastpath);
   GLuint i;

   fastpath->vertex_size = vtx->vertex_size;
   fastpath->attr_count = vtx->attr_count;
   fastpath->match_strides = match_strides;
   fastpath->func = vtx->emit;
   fastpath->attr = (struct tnl_attr_type *)
      malloc(vtx->attr_count * sizeof(fastpath->attr[0]));

   for (i = 0; i < vtx->attr_count; i++) {
      fastpath->attr[i].format = vtx->attr[i].format;
      fastpath->attr[i].stride = vtx->attr[i].inputstride;
      fastpath->attr[i].size = vtx->attr[i].inputsize;
      fastpath->attr[i].offset = vtx->attr[i].vertoffset;
   }

   fastpath->next = vtx->fastpath;
   vtx->fastpath = fastpath;
}



/***********************************************************************
 * Build codegen functions or return generic ones:
 */
static void choose_emit_func( struct gl_context *ctx, GLuint count, GLubyte *dest)
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint attr_count = vtx->attr_count;
   GLuint j;

   for (j = 0; j < attr_count; j++) {
      GLvector4f *vptr = VB->AttribPtr[a[j].attrib];
      a[j].inputstride = vptr->stride;
      a[j].inputsize = vptr->size;
      a[j].emit = a[j].insert[vptr->size - 1]; /* not always used */
   }

   vtx->emit = NULL;

   /* Does this match an existing (hardwired, codegen or known-bad)
    * fastpath?
    */
   if (search_fastpath_emit(vtx)) {
      /* Use this result.  If it is null, then it is already known
       * that the current state will fail for codegen and there is no
       * point trying again.
       */
   }
   else if (vtx->codegen_emit) {
      vtx->codegen_emit(ctx);
   }

   if (!vtx->emit) {
      _tnl_generate_hardwired_emit(ctx);
   }

   /* Otherwise use the generic version:
    */
   if (!vtx->emit)
      vtx->emit = _tnl_generic_emit;

   vtx->emit( ctx, count, dest );
}



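/* Select the interpolation function: the generic one, or the "extras"
 * variant when two-sided lighting or unfilled polygon modes require the
 * additional per-vertex data to be handled as well.
 */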
static void choose_interp_func( struct gl_context *ctx,
                                GLfloat t,
                                GLuint edst, GLuint eout, GLuint ein,
                                GLboolean force_boundary )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);

   if (vtx->need_extras &&
       (ctx->_TriangleCaps & (DD_TRI_LIGHT_TWOSIDE|DD_TRI_UNFILLED))) {
      vtx->interp = _tnl_generic_interp_extras;
   } else {
      vtx->interp = _tnl_generic_interp;
   }

   vtx->interp( ctx, t, edst, eout, ein, force_boundary );
}


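/* As above, but for the provoking-vertex color copy: pick the generic
 * copy_pv function or the "extras" variant depending on current state.
 */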
static void choose_copy_pv_func(  struct gl_context *ctx, GLuint edst, GLuint esrc )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);

   if (vtx->need_extras &&
       (ctx->_TriangleCaps & (DD_TRI_LIGHT_TWOSIDE|DD_TRI_UNFILLED))) {
      vtx->copy_pv = _tnl_generic_copy_pv_extras;
   } else {
      vtx->copy_pv = _tnl_generic_copy_pv;
   }

   vtx->copy_pv( ctx, edst, esrc );
}


/***********************************************************************
 * Public entrypoints, mostly dispatch to the above:
 */


/* Interpolate between two vertices to produce a third:
 */
void _tnl_interp( struct gl_context *ctx,
                  GLfloat t,
                  GLuint edst, GLuint eout, GLuint ein,
                  GLboolean force_boundary )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   vtx->interp( ctx, t, edst, eout, ein, force_boundary );
}

/* Copy colors from one vertex to another:
 */
void _tnl_copy_pv(  struct gl_context *ctx, GLuint edst, GLuint esrc )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   vtx->copy_pv( ctx, edst, esrc );
}


/* Extract a named attribute from a hardware vertex.  Will have to
 * reverse any viewport transformation, swizzling or other conversions
 * which may have been applied:
 */
void _tnl_get_attr( struct gl_context *ctx, const void *vin,
                    GLenum attr, GLfloat *dest )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   const struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint attr_count = vtx->attr_count;
   GLuint j;

   for (j = 0; j < attr_count; j++) {
      if (a[j].attrib == attr) {
         a[j].extract( &a[j], dest, (GLubyte *)vin + a[j].vertoffset );
         return;
      }
   }

   /* Else return the value from ctx->Current.
    */
   if (attr == _TNL_ATTRIB_POINTSIZE) {
      /* If the hardware vertex doesn't have point size then use size from
       * struct gl_context.  XXX this will be wrong if drawing attenuated points!
       */
      dest[0] = ctx->Point.Size;
   }
   else {
      memcpy( dest, ctx->Current.Attrib[attr], 4*sizeof(GLfloat));
   }
}


/* Complementary operation to the above.
 */
void _tnl_set_attr( struct gl_context *ctx, void *vout,
                    GLenum attr, const GLfloat *src )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   const struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint attr_count = vtx->attr_count;
   GLuint j;

   for (j = 0; j < attr_count; j++) {
      if (a[j].attrib == attr) {
         a[j].insert[4-1]( &a[j], (GLubyte *)vout + a[j].vertoffset, src );
         return;
      }
   }
}


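/* Return a pointer to the nr'th hardware vertex in the clipspace
 * vertex buffer.
 */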
void *_tnl_get_vertex( struct gl_context *ctx, GLuint nr )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);

   return vtx->vertex_buf + nr * vtx->vertex_size;
}

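/* State-change notification: when two-sided lighting or polygon
 * fill/unfilled state may have changed, re-choose the interp and copy_pv
 * functions and mark all inputs as needing a rebuild.
 */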
void _tnl_invalidate_vertex_state( struct gl_context *ctx, GLuint new_state )
{
   /* if two-sided lighting changes or filled/unfilled polygon state changes */
   if (new_state & (_NEW_LIGHT | _NEW_POLYGON) ) {
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
      vtx->new_inputs = ~0;
      vtx->interp = choose_interp_func;
      vtx->copy_pv = choose_copy_pv_func;
   }
}

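/* Reset all per-layout function pointers to the "choose" variants so they
 * are re-selected on next use, and flag every input as dirty.
 */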
static void invalidate_funcs( struct tnl_clipspace *vtx )
{
   vtx->emit = choose_emit_func;
   vtx->interp = choose_interp_func;
   vtx->copy_pv = choose_copy_pv_func;
   vtx->new_inputs = ~0;
}

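/* Install a new hardware vertex layout described by 'map' (nr entries).
 * 'vp' optionally points at viewport transform parameters, stored for use
 * during emit.  A nonzero 'unpacked_size' forces an explicit vertex stride
 * with per-attribute offsets taken from the map; otherwise offsets are
 * packed sequentially.  Returns the resulting vertex size in bytes.
 */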
GLuint _tnl_install_attrs( struct gl_context *ctx, const struct tnl_attr_map *map,
                           GLuint nr, const GLfloat *vp,
                           GLuint unpacked_size )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint offset = 0;
   GLuint i, j;

   assert(nr < _TNL_ATTRIB_MAX);
   assert(nr == 0 || map[0].attrib == VERT_ATTRIB_POS);

   vtx->new_inputs = ~0;
   vtx->need_viewport = GL_FALSE;

   if (vp) {
      vtx->need_viewport = GL_TRUE;
   }

   for (j = 0, i = 0; i < nr; i++) {
      const GLuint format = map[i].format;
      if (format == EMIT_PAD) {
         if (DBG)
            printf("%d: pad %d, offset %d\n", i,
                   map[i].offset, offset);

         offset += map[i].offset;

      }
      else {
         GLuint tmpoffset;

         if (unpacked_size)
            tmpoffset = map[i].offset;
         else
            tmpoffset = offset;

         if (vtx->attr_count != j ||
             vtx->attr[j].attrib != map[i].attrib ||
             vtx->attr[j].format != format ||
             vtx->attr[j].vertoffset != tmpoffset) {
            invalidate_funcs(vtx);

            vtx->attr[j].attrib = map[i].attrib;
            vtx->attr[j].format = format;
            vtx->attr[j].vp = vp;
            vtx->attr[j].insert = _tnl_format_info[format].insert;
            vtx->attr[j].extract = _tnl_format_info[format].extract;
            vtx->attr[j].vertattrsize = _tnl_format_info[format].attrsize;
            vtx->attr[j].vertoffset = tmpoffset;
         }


         if (DBG)
            printf("%d: %s, vp %p, offset %d\n", i,
                   _tnl_format_info[format].name, (void *)vp,
                   vtx->attr[j].vertoffset);

         offset += _tnl_format_info[format].attrsize;
         j++;
      }
   }

   vtx->attr_count = j;

   if (unpacked_size)
      vtx->vertex_size = unpacked_size;
   else
      vtx->vertex_size = offset;

   assert(vtx->vertex_size <= vtx->max_vertex_size);
   return vtx->vertex_size;
}



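/* Mark the given VB inputs as changed; the dirty bits accumulate in
 * vtx->new_inputs until the vertices are rebuilt.
 */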
void _tnl_invalidate_vertices( struct gl_context *ctx, GLuint newinputs )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   vtx->new_inputs |= newinputs;
}


/* This event has broader use beyond this file - will move elsewhere
 * and probably invoke a driver callback.
 */
void _tnl_notify_pipeline_output_change( struct gl_context *ctx )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   invalidate_funcs(vtx);
}


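/* Move each attribute's input pointer forward by (diff - 1) input strides.
 * The emit paths leave the pointers one element past the last vertex
 * emitted, so this lands them on the next element to emit in the indexed
 * path below.
 */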
static void adjust_input_ptrs( struct gl_context *ctx, GLint diff)
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint count = vtx->attr_count;
   GLuint j;

   diff -= 1;
   for (j=0; j<count; ++j) {
      register GLvector4f *vptr = VB->AttribPtr[a->attrib];
      (a++)->inputptr += diff*vptr->stride;
   }
}

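/* Point each attribute's input pointer at VB element 'start' and, if a
 * viewport transform was installed, refresh the cached scale/translate
 * vectors from it.
 */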
static void update_input_ptrs( struct gl_context *ctx, GLuint start )
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint count = vtx->attr_count;
   GLuint j;

   for (j = 0; j < count; j++) {
      GLvector4f *vptr = VB->AttribPtr[a[j].attrib];

      if (vtx->emit != choose_emit_func) {
         assert(a[j].inputstride == vptr->stride);
         assert(a[j].inputsize == vptr->size);
      }

      a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride;
   }

   if (a->vp) {
      vtx->vp_scale[0] = a->vp[MAT_SX];
      vtx->vp_scale[1] = a->vp[MAT_SY];
      vtx->vp_scale[2] = a->vp[MAT_SZ];
      vtx->vp_scale[3] = 1.0;
      vtx->vp_xlate[0] = a->vp[MAT_TX];
      vtx->vp_xlate[1] = a->vp[MAT_TY];
      vtx->vp_xlate[2] = a->vp[MAT_TZ];
      vtx->vp_xlate[3] = 0.0;
   }
}


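/* Build hardware vertices for VB elements start..end into the clipspace
 * vertex buffer, starting at the slot corresponding to 'start'.
 */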
void _tnl_build_vertices( struct gl_context *ctx,
                          GLuint start,
                          GLuint end,
                          GLuint newinputs )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   update_input_ptrs( ctx, start );
   vtx->emit( ctx, end - start,
              (GLubyte *)(vtx->vertex_buf +
                          start * vtx->vertex_size));
}

/* Emit VB vertices start..end to dest.  Note that VB vertex at
 * position start will be emitted to dest at position zero.
 */
void *_tnl_emit_vertices_to_buffer( struct gl_context *ctx,
                                    GLuint start,
                                    GLuint end,
                                    void *dest )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);

   update_input_ptrs(ctx, start);
   /* Note: dest should not be adjusted for non-zero 'start' values:
    */
   vtx->emit( ctx, end - start, (GLubyte*) dest );
   return (void *)((GLubyte *)dest + vtx->vertex_size * (end - start));
}

/* Emit indexed VB vertices start..end to dest.  Note that VB vertex at
 * position start will be emitted to dest at position zero.
 */

void *_tnl_emit_indexed_vertices_to_buffer( struct gl_context *ctx,
                                            const GLuint *elts,
                                            GLuint start,
                                            GLuint end,
                                            void *dest )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint oldIndex;
   GLubyte *cdest = dest;

   update_input_ptrs(ctx, oldIndex = elts[start++]);
   vtx->emit( ctx, 1, cdest );
   cdest += vtx->vertex_size;

   for (; start < end; ++start) {
      adjust_input_ptrs(ctx, elts[start] - oldIndex);
      oldIndex = elts[start];
      vtx->emit( ctx, 1, cdest);
      cdest += vtx->vertex_size;
   }

   return (void *) cdest;
}


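/* Set up the clipspace vertex machinery: install an empty attribute
 * layout, (re)allocate a vertex buffer large enough for vb_size vertices
 * of max_vertex_size bytes, and initialize the channel scale factors and
 * identity attribute value.
 */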
void _tnl_init_vertices( struct gl_context *ctx,
                         GLuint vb_size,
                         GLuint max_vertex_size )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);

   _tnl_install_attrs( ctx, NULL, 0, NULL, 0 );

   vtx->need_extras = GL_TRUE;
   if (max_vertex_size > vtx->max_vertex_size) {
      _tnl_free_vertices( ctx );
      vtx->max_vertex_size = max_vertex_size;
      vtx->vertex_buf = (GLubyte *)_mesa_align_calloc(vb_size * max_vertex_size, 32 );
      invalidate_funcs(vtx);
   }

   switch(CHAN_TYPE) {
   case GL_UNSIGNED_BYTE:
      vtx->chan_scale[0] = 255.0;
      vtx->chan_scale[1] = 255.0;
      vtx->chan_scale[2] = 255.0;
      vtx->chan_scale[3] = 255.0;
      break;
   case GL_UNSIGNED_SHORT:
      vtx->chan_scale[0] = 65535.0;
      vtx->chan_scale[1] = 65535.0;
      vtx->chan_scale[2] = 65535.0;
      vtx->chan_scale[3] = 65535.0;
      break;
   default:
      vtx->chan_scale[0] = 1.0;
      vtx->chan_scale[1] = 1.0;
      vtx->chan_scale[2] = 1.0;
      vtx->chan_scale[3] = 1.0;
      break;
   }

   vtx->identity[0] = 0.0;
   vtx->identity[1] = 0.0;
   vtx->identity[2] = 0.0;
   vtx->identity[3] = 1.0;

   vtx->codegen_emit = NULL;

#ifdef USE_SSE_ASM
   if (!_mesa_getenv("MESA_NO_CODEGEN"))
      vtx->codegen_emit = _tnl_generate_sse_emit;
#endif
}


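/* Release the clipspace vertex buffer and all registered fastpaths.
 */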
void _tnl_free_vertices( struct gl_context *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   if (tnl) {
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
      struct tnl_clipspace_fastpath *fp, *tmp;

      if (vtx->vertex_buf) {
         _mesa_align_free(vtx->vertex_buf);
         vtx->vertex_buf = NULL;
      }

      for (fp = vtx->fastpath ; fp ; fp = tmp) {
         tmp = fp->next;
         FREE(fp->attr);

         /* KW: At the moment, fp->func is constrained to be allocated by
          * _mesa_exec_alloc(), as the hardwired fastpaths in
          * t_vertex_generic.c are handled specially.  It would be nice
          * to unify them, but this probably won't change until this
          * module gets another overhaul.
          */
         _mesa_exec_free((void *) fp->func);
         FREE(fp);
      }

      vtx->fastpath = NULL;
   }
}