/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keith (at) tungstengraphics.com>
 */


#include "pipe/p_context.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "draw_context.h"
#include "draw_vs.h"
#include "draw_gs.h"

#if HAVE_LLVM
#include "gallivm/lp_bld_init.h"
#include "gallivm/lp_bld_limits.h"
#include "draw_llvm.h"

static boolean
draw_get_option_use_llvm(void)
{
   static boolean first = TRUE;
   static boolean value;
   if (first) {
      first = FALSE;
      value = debug_get_bool_option("DRAW_USE_LLVM", TRUE);

#ifdef PIPE_ARCH_X86
      util_cpu_detect();
      /* require SSE2 due to LLVM PR6960. */
      if (!util_cpu_caps.has_sse2)
         value = FALSE;
#endif
   }
   return value;
}
#endif


/**
 * Create new draw module context with gallivm state for LLVM JIT.
 */
static struct draw_context *
draw_create_context(struct pipe_context *pipe, boolean try_llvm)
{
   struct draw_context *draw = CALLOC_STRUCT( draw_context );
   if (draw == NULL)
      goto err_out;

#if HAVE_LLVM
   if (try_llvm && draw_get_option_use_llvm()) {
      draw->llvm = draw_llvm_create(draw);
      if (!draw->llvm)
         goto err_destroy;
   }
#endif

   draw->pipe = pipe;

   if (!draw_init(draw))
      goto err_destroy;

   return draw;

err_destroy:
   draw_destroy( draw );
err_out:
   return NULL;
}


/**
 * Create new draw module context, with LLVM JIT.
 */
struct draw_context *
draw_create(struct pipe_context *pipe)
{
   return draw_create_context(pipe, TRUE);
}


/**
 * Create a new draw context, without LLVM JIT.
 */
struct draw_context *
draw_create_no_llvm(struct pipe_context *pipe)
{
   return draw_create_context(pipe, FALSE);
}


boolean draw_init(struct draw_context *draw)
{
   /*
    * Note that several functions compute the clipmask of the predefined
    * formats with hardcoded formulas instead of using these.
    * So modifications here must be reflected there too.
    */
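   /*
    * Each entry below is a clip plane in (a, b, c, d) form; a point in clip
    * space is treated as inside a plane when dot4(plane, position) >= 0.
    * Together the six planes bound -w <= x <= w, -w <= y <= w and
    * -w <= z <= w.
    */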
   ASSIGN_4V( draw->plane[0], -1, 0, 0, 1 );
   ASSIGN_4V( draw->plane[1],  1, 0, 0, 1 );
   ASSIGN_4V( draw->plane[2],  0, -1, 0, 1 );
   ASSIGN_4V( draw->plane[3],  0, 1, 0, 1 );
   ASSIGN_4V( draw->plane[4],  0, 0, 1, 1 ); /* yes these are correct */
   ASSIGN_4V( draw->plane[5],  0, 0, -1, 1 ); /* mesa's a bit wonky */
   draw->clip_xy = TRUE;
   draw->clip_z = TRUE;

   draw->pt.user.planes = (float (*) [DRAW_TOTAL_CLIP_PLANES][4]) &(draw->plane[0]);

   if (!draw_pipeline_init( draw ))
      return FALSE;

   if (!draw_pt_init( draw ))
      return FALSE;

   if (!draw_vs_init( draw ))
      return FALSE;

   if (!draw_gs_init( draw ))
      return FALSE;

   draw->quads_always_flatshade_last = !draw->pipe->screen->get_param(
      draw->pipe->screen, PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION);

   return TRUE;
}


void draw_destroy( struct draw_context *draw )
{
   struct pipe_context *pipe;
   int i, j;

   if (!draw)
      return;

   pipe = draw->pipe;

   /* free any rasterizer CSOs that we may have created.
    */
   for (i = 0; i < 2; i++) {
      for (j = 0; j < 2; j++) {
         if (draw->rasterizer_no_cull[i][j]) {
            pipe->delete_rasterizer_state(pipe, draw->rasterizer_no_cull[i][j]);
         }
      }
   }

   for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
      pipe_resource_reference(&draw->pt.vertex_buffer[i].buffer, NULL);
   }

   /* Not so fast -- we're just borrowing this at the moment.
    *
   if (draw->render)
      draw->render->destroy( draw->render );
   */

   draw_pipeline_destroy( draw );
   draw_pt_destroy( draw );
   draw_vs_destroy( draw );
   draw_gs_destroy( draw );
#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_destroy( draw->llvm );
#endif

   FREE( draw );
}



void draw_flush( struct draw_context *draw )
{
   draw_do_flush( draw, DRAW_FLUSH_BACKEND );
}


/**
 * Specify the Minimum Resolvable Depth factor for polygon offset.
 * This factor potentially depends on the number of Z buffer bits,
 * the rasterization algorithm and the arithmetic performed on Z
 * values between vertex shading and rasterization.  It will vary
 * from one driver to another.
 */
void draw_set_mrd(struct draw_context *draw, double mrd)
{
   draw->mrd = mrd;
}


static void update_clip_flags( struct draw_context *draw )
{
   draw->clip_xy = !draw->driver.bypass_clip_xy;
   draw->guard_band_xy = (!draw->driver.bypass_clip_xy &&
                          draw->driver.guard_band_xy);
   draw->clip_z = (!draw->driver.bypass_clip_z &&
                   draw->rasterizer && draw->rasterizer->depth_clip);
   draw->clip_user = draw->rasterizer &&
                     draw->rasterizer->clip_plane_enable != 0;
}

/**
 * Register new primitive rasterization/rendering state.
 * This causes the drawing pipeline to be rebuilt.
 */
void draw_set_rasterizer_state( struct draw_context *draw,
                                const struct pipe_rasterizer_state *raster,
                                void *rast_handle )
{
   if (!draw->suspend_flushing) {
      draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

      draw->rasterizer = raster;
      draw->rast_handle = rast_handle;
      update_clip_flags(draw);
   }
}

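/* Example (illustrative only -- "ctx" and "cso" are placeholders, not part
 * of this module): a driver typically forwards its rasterizer state from its
 * pipe_context::bind_rasterizer_state() hook, passing both the state
 * template and the CSO handle it created earlier:
 *
 *    draw_set_rasterizer_state(ctx->draw, &cso->templ, cso);
 */
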
/* With a little more work, llvmpipe will be able to turn this off and
 * do its own x/y clipping.
 *
 * Some hardware can turn off clipping altogether - in particular any
 * hardware with a TNL unit can do its own clipping, even if it is
 * relying on the draw module for some other reason.
 */
void draw_set_driver_clipping( struct draw_context *draw,
                               boolean bypass_clip_xy,
                               boolean bypass_clip_z,
                               boolean guard_band_xy)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->driver.bypass_clip_xy = bypass_clip_xy;
   draw->driver.bypass_clip_z = bypass_clip_z;
   draw->driver.guard_band_xy = guard_band_xy;
   update_clip_flags(draw);
}


/**
 * Plug in the primitive rendering/rasterization stage (which is the last
 * stage in the drawing pipeline).
 * This is provided by the device driver.
 */
void draw_set_rasterize_stage( struct draw_context *draw,
                               struct draw_stage *stage )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->pipeline.rasterize = stage;
}


/**
 * Set the draw module's clipping state.
 */
void draw_set_clip_state( struct draw_context *draw,
                          const struct pipe_clip_state *clip )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   memcpy(&draw->plane[6], clip->ucp, sizeof(clip->ucp));
}


/**
 * Set the draw module's viewport state.
 */
void draw_set_viewport_state( struct draw_context *draw,
                              const struct pipe_viewport_state *viewport )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->viewport = *viewport; /* struct copy */
   draw->identity_viewport = (viewport->scale[0] == 1.0f &&
                              viewport->scale[1] == 1.0f &&
                              viewport->scale[2] == 1.0f &&
                              viewport->scale[3] == 1.0f &&
                              viewport->translate[0] == 0.0f &&
                              viewport->translate[1] == 0.0f &&
                              viewport->translate[2] == 0.0f &&
                              viewport->translate[3] == 0.0f);

   draw_vs_set_viewport( draw, viewport );
}



void
draw_set_vertex_buffers(struct draw_context *draw,
                        unsigned count,
                        const struct pipe_vertex_buffer *buffers)
{
   assert(count <= PIPE_MAX_ATTRIBS);

   util_copy_vertex_buffers(draw->pt.vertex_buffer,
                            &draw->pt.nr_vertex_buffers,
                            buffers, count);
}


void
draw_set_vertex_elements(struct draw_context *draw,
                         unsigned count,
                         const struct pipe_vertex_element *elements)
{
   assert(count <= PIPE_MAX_ATTRIBS);

   /* We could improve this by only flushing the frontend and the fetch part
    * of the middle. This would avoid recalculating the emit keys. */
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   memcpy(draw->pt.vertex_element, elements, count * sizeof(elements[0]));
   draw->pt.nr_vertex_elements = count;
}

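/* Example (illustrative sketch; "nr_elements", "nr_buffers" and "mapped" are
 * placeholders, not part of the draw API): a driver built on this module
 * usually mirrors its own vertex state into the calls above and then points
 * the module at the CPU-visible buffer contents before drawing:
 *
 *    draw_set_vertex_elements(draw, nr_elements, elements);
 *    draw_set_vertex_buffers(draw, nr_buffers, buffers);
 *    for (i = 0; i < nr_buffers; i++)
 *       draw_set_mapped_vertex_buffer(draw, i, mapped[i]);
 *
 * where mapped[i] is the mapping of buffers[i].buffer.
 */
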
/**
 * Tell drawing context where to find mapped vertex buffers.
 */
void
draw_set_mapped_vertex_buffer(struct draw_context *draw,
                              unsigned attr, const void *buffer)
{
   draw->pt.user.vbuffer[attr] = buffer;
}


void
draw_set_mapped_constant_buffer(struct draw_context *draw,
                                unsigned shader_type,
                                unsigned slot,
                                const void *buffer,
                                unsigned size )
{
   debug_assert(shader_type == PIPE_SHADER_VERTEX ||
                shader_type == PIPE_SHADER_GEOMETRY);
   debug_assert(slot < PIPE_MAX_CONSTANT_BUFFERS);

   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      draw->pt.user.vs_constants[slot] = buffer;
      draw->pt.user.vs_constants_size[slot] = size;
      draw_vs_set_constants(draw, slot, buffer, size);
      break;
   case PIPE_SHADER_GEOMETRY:
      draw->pt.user.gs_constants[slot] = buffer;
      draw->pt.user.gs_constants_size[slot] = size;
      draw_gs_set_constants(draw, slot, buffer, size);
      break;
   default:
      assert(0 && "invalid shader type in draw_set_mapped_constant_buffer");
   }
}


/**
 * Tells the draw module to draw points with triangles if their size
 * is greater than this threshold.
 */
void
draw_wide_point_threshold(struct draw_context *draw, float threshold)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_point_threshold = threshold;
}


/**
 * Should the draw module handle point->quad conversion for drawing sprites?
 */
void
draw_wide_point_sprites(struct draw_context *draw, boolean draw_sprite)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_point_sprites = draw_sprite;
}


/**
 * Tells the draw module to draw lines with triangles if their width
 * is greater than this threshold.
 */
void
draw_wide_line_threshold(struct draw_context *draw, float threshold)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_line_threshold = roundf(threshold);
}


/**
 * Tells the draw module whether or not to implement line stipple.
 */
void
draw_enable_line_stipple(struct draw_context *draw, boolean enable)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.line_stipple = enable;
}


/**
 * Tells draw module whether to convert points to quads for sprite mode.
 */
void
draw_enable_point_sprites(struct draw_context *draw, boolean enable)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.point_sprite = enable;
}


void
draw_set_force_passthrough( struct draw_context *draw, boolean enable )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->force_passthrough = enable;
}



/**
 * Allocate an extra vertex/geometry shader vertex attribute, if it doesn't
 * exist already.
 *
 * This is used by some of the optional draw module stages such
 * as wide_point which may need to allocate additional generic/texcoord
 * attributes.
 */
int
draw_alloc_extra_vertex_attrib(struct draw_context *draw,
                               uint semantic_name, uint semantic_index)
{
   int slot;
   uint num_outputs;
   uint n;

   slot = draw_find_shader_output(draw, semantic_name, semantic_index);
   if (slot > 0) {
      return slot;
   }

   num_outputs = draw_current_shader_outputs(draw);
   n = draw->extra_shader_outputs.num;

   assert(n < Elements(draw->extra_shader_outputs.semantic_name));

   draw->extra_shader_outputs.semantic_name[n] = semantic_name;
   draw->extra_shader_outputs.semantic_index[n] = semantic_index;
   draw->extra_shader_outputs.slot[n] = num_outputs + n;
   draw->extra_shader_outputs.num++;

   return draw->extra_shader_outputs.slot[n];
}


/**
 * Remove all extra vertex attributes that were allocated with
 * draw_alloc_extra_vertex_attrib().
 */
void
draw_remove_extra_vertex_attribs(struct draw_context *draw)
{
   draw->extra_shader_outputs.num = 0;
}


/**
 * If a geometry shader is present, return its info, else the vertex shader's
 * info.
 */
struct tgsi_shader_info *
draw_get_shader_info(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader) {
      return &draw->gs.geometry_shader->info;
   } else {
      return &draw->vs.vertex_shader->info;
   }
}


/**
 * Ask the draw module for the location/slot of the given vertex attribute in
 * a post-transformed vertex.
 *
 * With this function, drivers that use the draw module should have no reason
 * to track the current vertex/geometry shader.
 *
 * Note that the draw module may sometimes generate vertices with extra
 * attributes (such as texcoords for AA lines).  The driver can call this
 * function to find those attributes.
 *
 * Zero is returned if the attribute is not found since this is
 * a don't care / undefined situation.  Returning -1 would be a bit more
 * work for the drivers.
 */
int
draw_find_shader_output(const struct draw_context *draw,
                        uint semantic_name, uint semantic_index)
{
   const struct tgsi_shader_info *info = draw_get_shader_info(draw);
   uint i;

   for (i = 0; i < info->num_outputs; i++) {
      if (info->output_semantic_name[i] == semantic_name &&
          info->output_semantic_index[i] == semantic_index)
         return i;
   }

   /* Search the extra vertex attributes */
   for (i = 0; i < draw->extra_shader_outputs.num; i++) {
      if (draw->extra_shader_outputs.semantic_name[i] == semantic_name &&
          draw->extra_shader_outputs.semantic_index[i] == semantic_index) {
         return draw->extra_shader_outputs.slot[i];
      }
   }

   return 0;
}


/**
 * Return the total number of shader outputs.  This function is similar to
 * draw_current_shader_outputs() but it also counts any extra
 * vertex/geometry output attributes that may be filled in by some draw
 * stages (such as AA point, AA line).
 *
 * If a geometry shader is present, its outputs are counted;
 * otherwise the vertex shader's outputs are counted.
 */
uint
draw_num_shader_outputs(const struct draw_context *draw)
{
   const struct tgsi_shader_info *info = draw_get_shader_info(draw);
   uint count;

   count = info->num_outputs;
   count += draw->extra_shader_outputs.num;

   return count;
}

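/* Example (illustrative sketch): when building its vertex-emit layout, a
 * driver can look attributes up by TGSI semantic instead of inspecting the
 * bound shaders itself:
 *
 *    int color_slot  = draw_find_shader_output(draw, TGSI_SEMANTIC_COLOR, 0);
 *    uint nr_outputs = draw_num_shader_outputs(draw);
 *
 * keeping in mind that draw_find_shader_output() also returns 0 when the
 * semantic is not present.
 */
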
/**
 * Provide TGSI sampler objects for vertex/geometry shaders that use
 * texture fetches.  This state only needs to be set once per context.
 * This might only be used by software drivers for the time being.
 */
void
draw_texture_samplers(struct draw_context *draw,
                      uint shader,
                      uint num_samplers,
                      struct tgsi_sampler **samplers)
{
   if (shader == PIPE_SHADER_VERTEX) {
      draw->vs.tgsi.num_samplers = num_samplers;
      draw->vs.tgsi.samplers = samplers;
   } else {
      debug_assert(shader == PIPE_SHADER_GEOMETRY);
      draw->gs.tgsi.num_samplers = num_samplers;
      draw->gs.tgsi.samplers = samplers;
   }
}




void draw_set_render( struct draw_context *draw,
                      struct vbuf_render *render )
{
   draw->render = render;
}


/**
 * Tell the draw module where vertex indexes/elements are located, and
 * their size (in bytes).
 *
 * Note: the caller must apply the pipe_index_buffer::offset value to
 * the address.  The draw module doesn't do that.
 */
void
draw_set_indexes(struct draw_context *draw,
                 const void *elements, unsigned elem_size)
{
   assert(elem_size == 0 ||
          elem_size == 1 ||
          elem_size == 2 ||
          elem_size == 4);
   draw->pt.user.elts = elements;
   draw->pt.user.eltSizeIB = elem_size;
}


/* Revamp me please:
 */
void draw_do_flush( struct draw_context *draw, unsigned flags )
{
   if (!draw->suspend_flushing)
   {
      assert(!draw->flushing); /* catch inadvertent recursion */

      draw->flushing = TRUE;

      draw_pipeline_flush( draw, flags );

      draw_pt_flush( draw, flags );

      draw->flushing = FALSE;
   }
}


/**
 * Return the number of output attributes produced by the geometry
 * shader, if present.  If no geometry shader, return the number of
 * outputs from the vertex shader.
 * \sa draw_num_shader_outputs
 */
uint
draw_current_shader_outputs(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.num_gs_outputs;
   return draw->vs.num_vs_outputs;
}


/**
 * Return the index of the shader output which will contain the
 * vertex position.
 */
uint
draw_current_shader_position_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.position_output;
   return draw->vs.position_output;
}


/**
 * Return the index of the shader output which will contain the
 * clip vertex position.
 */
uint
draw_current_shader_clipvertex_output(const struct draw_context *draw)
{
   return draw->vs.clipvertex_output;
}

uint
draw_current_shader_clipdistance_output(const struct draw_context *draw, int index)
{
   return draw->vs.clipdistance_output[index];
}

/**
 * Return a pointer/handle for a driver/CSO rasterizer object which
 * disables culling, stippling, unfilled tris, etc.
 * This is used by some pipeline stages (such as wide_point, aa_line
 * and aa_point) which convert points/lines into triangles.  In those
 * cases we don't want to accidentally cull the triangles.
 *
 * \param scissor  should the rasterizer state enable scissoring?
 * \param flatshade  should the rasterizer state use flat shading?
 * \return  rasterizer CSO handle
 */
void *
draw_get_rasterizer_no_cull( struct draw_context *draw,
                             boolean scissor,
                             boolean flatshade )
{
   if (!draw->rasterizer_no_cull[scissor][flatshade]) {
      /* create now */
      struct pipe_context *pipe = draw->pipe;
      struct pipe_rasterizer_state rast;

      memset(&rast, 0, sizeof(rast));
      rast.scissor = scissor;
      rast.flatshade = flatshade;
      rast.front_ccw = 1;
      rast.gl_rasterization_rules = draw->rasterizer->gl_rasterization_rules;

      draw->rasterizer_no_cull[scissor][flatshade] =
         pipe->create_rasterizer_state(pipe, &rast);
   }
   return draw->rasterizer_no_cull[scissor][flatshade];
}

void
draw_set_mapped_so_targets(struct draw_context *draw,
                           int num_targets,
                           struct draw_so_target *targets[PIPE_MAX_SO_BUFFERS])
{
   int i;

   for (i = 0; i < num_targets; i++)
      draw->so.targets[i] = targets[i];
   for (i = num_targets; i < PIPE_MAX_SO_BUFFERS; i++)
      draw->so.targets[i] = NULL;

   draw->so.num_targets = num_targets;
}

void
draw_set_mapped_so_buffers(struct draw_context *draw,
                           void *buffers[PIPE_MAX_SO_BUFFERS],
                           unsigned num_buffers)
{
}

void
draw_set_so_state(struct draw_context *draw,
                  struct pipe_stream_output_info *state)
{
   memcpy(&draw->so.state,
          state,
          sizeof(struct pipe_stream_output_info));
}

void
draw_set_sampler_views(struct draw_context *draw,
                       unsigned shader_stage,
                       struct pipe_sampler_view **views,
                       unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SAMPLERS);

   for (i = 0; i < num; ++i)
      draw->sampler_views[shader_stage][i] = views[i];
   for (i = num; i < PIPE_MAX_SAMPLERS; ++i)
      draw->sampler_views[shader_stage][i] = NULL;

   draw->num_sampler_views[shader_stage] = num;
}

void
draw_set_samplers(struct draw_context *draw,
                  unsigned shader_stage,
                  struct pipe_sampler_state **samplers,
                  unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SAMPLERS);

   for (i = 0; i < num; ++i)
      draw->samplers[shader_stage][i] = samplers[i];
   for (i = num; i < PIPE_MAX_SAMPLERS; ++i)
      draw->samplers[shader_stage][i] = NULL;

   draw->num_samplers[shader_stage] = num;

#ifdef HAVE_LLVM
   if (draw->llvm && shader_stage == PIPE_SHADER_VERTEX)
      draw_llvm_set_sampler_state(draw);
#endif
}

void
draw_set_mapped_texture(struct draw_context *draw,
                        unsigned shader_stage,
                        unsigned sampler_idx,
                        uint32_t width, uint32_t height, uint32_t depth,
                        uint32_t first_level, uint32_t last_level,
                        uint32_t row_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t img_stride[PIPE_MAX_TEXTURE_LEVELS],
                        const void *data[PIPE_MAX_TEXTURE_LEVELS])
{
   if (shader_stage == PIPE_SHADER_VERTEX) {
#ifdef HAVE_LLVM
      if (draw->llvm)
         draw_llvm_set_mapped_texture(draw,
                                      sampler_idx,
                                      width, height, depth, first_level, last_level,
                                      row_stride, img_stride, data);
#endif
   }
}

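/* Example (illustrative sketch; the width/stride/data variables are
 * placeholders supplied by the driver's own texture layout): a software
 * driver that wants vertex texturing through the LLVM path typically binds
 * its views and samplers and then describes each mapped texture:
 *
 *    draw_set_sampler_views(draw, PIPE_SHADER_VERTEX, views, num_views);
 *    draw_set_samplers(draw, PIPE_SHADER_VERTEX, samplers, num_samplers);
 *    draw_set_mapped_texture(draw, PIPE_SHADER_VERTEX, unit,
 *                            width, height, depth, first_level, last_level,
 *                            row_stride, img_stride, data);
 */
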
/**
 * XXX: Results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS are unreliable
 * because there are two different ways of setting textures, and drivers
 * typically only support one.
 */
int
draw_get_shader_param_no_llvm(unsigned shader, enum pipe_shader_cap param)
{
   switch(shader) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_GEOMETRY:
      return tgsi_exec_get_shader_param(param);
   default:
      return 0;
   }
}

/**
 * XXX: Results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS are unreliable
 * because there are two different ways of setting textures, and drivers
 * typically only support one.
 */
int
draw_get_shader_param(unsigned shader, enum pipe_shader_cap param)
{
#ifdef HAVE_LLVM
   if (draw_get_option_use_llvm()) {
      switch(shader) {
      case PIPE_SHADER_VERTEX:
      case PIPE_SHADER_GEOMETRY:
         return gallivm_get_shader_param(param);
      default:
         return 0;
      }
   }
#endif

   return draw_get_shader_param_no_llvm(shader, param);
}