/**************************************************************************
 *
 * Copyright 2011 Christian König
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include "pipe/p_screen.h"
#include "pipe/p_context.h"

#include "util/u_draw.h"
#include "util/u_sampler.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"

#include "tgsi/tgsi_ureg.h"

#include "vl_defines.h"
#include "vl_types.h"

#include "vl_zscan.h"
#include "vl_vertex_buffers.h"

enum VS_OUTPUT
{
   VS_O_VPOS = 0,
   VS_O_VTEX = 0
};

const int vl_zscan_linear[] =
{
   /* Linear scan pattern */
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16,17,18,19,20,21,22,23,
   24,25,26,27,28,29,30,31,
   32,33,34,35,36,37,38,39,
   40,41,42,43,44,45,46,47,
   48,49,50,51,52,53,54,55,
   56,57,58,59,60,61,62,63
};

const int vl_zscan_normal[] =
{
   /* Zig-Zag scan pattern */
    0, 1, 8,16, 9, 2, 3,10,
   17,24,32,25,18,11, 4, 5,
   12,19,26,33,40,48,41,34,
   27,20,13, 6, 7,14,21,28,
   35,42,49,56,57,50,43,36,
   29,22,15,23,30,37,44,51,
   58,59,52,45,38,31,39,46,
   53,60,61,54,47,55,62,63
};

const int vl_zscan_alternate[] =
{
   /* Alternate scan pattern */
    0, 8,16,24, 1, 9, 2,10,
   17,25,32,40,48,56,57,49,
   41,33,26,18, 3,11, 4,12,
   19,27,34,42,50,58,35,43,
   51,59,20,28, 5,13, 6,14,
   21,29,36,44,52,60,37,45,
   53,61,22,30, 7,15,23,31,
   38,46,54,62,39,47,55,63
};
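/*
 * Note on the scan tables above: entry i gives the raster-order position
 * (x + y * 8) of the i-th coefficient in scan order.  For the zig-zag
 * pattern, for example, vl_zscan_normal[2] == 8, i.e. the third coefficient
 * delivered by the bitstream belongs at block position (x = 0, y = 1).
 * vl_zscan_layout() below inverts this mapping before uploading it, so the
 * layout texture stores, for every block position, the normalized scan
 * address to fetch from the source coefficient texture.
 */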
static void *
create_vert_shader(struct vl_zscan *zscan)
{
   struct ureg_program *shader;

   struct ureg_src scale;
   struct ureg_src vrect, vpos, block_num;

   struct ureg_dst tmp;
   struct ureg_dst o_vpos;
   struct ureg_dst *o_vtex;

   signed i;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   o_vtex = MALLOC(zscan->num_channels * sizeof(struct ureg_dst));

   scale = ureg_imm2f(shader,
      (float)VL_BLOCK_WIDTH / zscan->buffer_width,
      (float)VL_BLOCK_HEIGHT / zscan->buffer_height);

   vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
   vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
   block_num = ureg_DECL_vs_input(shader, VS_I_BLOCK_NUM);

   tmp = ureg_DECL_temporary(shader);

   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);

   for (i = 0; i < zscan->num_channels; ++i)
      o_vtex[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX + i);

   /*
    * o_vpos.xy = (vpos + vrect) * scale
    * o_vpos.zw = 1.0f
    *
    * tmp.xy = InstanceID / blocks_per_line
    * tmp.x = frac(tmp.x)
    * tmp.y = floor(tmp.y)
    *
    * o_vtex.x = vrect.x / blocks_per_line + tmp.x
    * o_vtex.y = vrect.y
    * o_vtex.z = tmp.z * blocks_per_line / blocks_total
    */
   ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), vpos, vrect);
   ureg_MUL(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(tmp), scale);
   ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));

   ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XW), ureg_scalar(block_num, TGSI_SWIZZLE_X),
            ureg_imm1f(shader, 1.0f / zscan->blocks_per_line));

   ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));
   ureg_FLR(shader, ureg_writemask(tmp, TGSI_WRITEMASK_W), ureg_src(tmp));

   for (i = 0; i < zscan->num_channels; ++i) {
      ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y),
               ureg_imm1f(shader, 1.0f / (zscan->blocks_per_line * VL_BLOCK_WIDTH)
                * (i - (signed)zscan->num_channels / 2)));

      ureg_MAD(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_X), vrect,
               ureg_imm1f(shader, 1.0f / zscan->blocks_per_line), ureg_src(tmp));
      ureg_MOV(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_Y), vrect);
      ureg_MOV(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_Z), vpos);
      ureg_MUL(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_W), ureg_src(tmp),
               ureg_imm1f(shader, (float)zscan->blocks_per_line / zscan->blocks_total));
   }

   ureg_release_temporary(shader, tmp);
   ureg_END(shader);

   FREE(o_vtex);

   return ureg_create_shader_and_destroy(shader, zscan->pipe);
}
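/*
 * The fragment shader does the actual reordering.  Sampler unit 0 holds the
 * source coefficients in scan order, unit 1 the scan layout texture and
 * unit 2 the quant matrices.  Each fragment fetches its normalized scan
 * address from the layout texture, uses it as x coordinate into the source
 * texture and scales the result by the matching quant value.  Note that
 * vl_zscan_render() binds the three views starting at &buffer->src, which
 * relies on src, layout and quant being consecutive sampler view pointers
 * in struct vl_zscan_buffer.
 */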
static void *
create_frag_shader(struct vl_zscan *zscan)
{
   struct ureg_program *shader;
   struct ureg_src *vtex;

   struct ureg_src samp_src, samp_scan, samp_quant;

   struct ureg_dst *tmp;
   struct ureg_dst quant, fragment;

   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   vtex = MALLOC(zscan->num_channels * sizeof(struct ureg_src));
   tmp = MALLOC(zscan->num_channels * sizeof(struct ureg_dst));

   for (i = 0; i < zscan->num_channels; ++i)
      vtex[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX + i, TGSI_INTERPOLATE_LINEAR);

   samp_src = ureg_DECL_sampler(shader, 0);
   samp_scan = ureg_DECL_sampler(shader, 1);
   samp_quant = ureg_DECL_sampler(shader, 2);

   for (i = 0; i < zscan->num_channels; ++i)
      tmp[i] = ureg_DECL_temporary(shader);
   quant = ureg_DECL_temporary(shader);

   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * tmp.x = tex(vtex, 1)
    * tmp.y = vtex.z
    * fragment = tex(tmp, 0) * quant
    */
   for (i = 0; i < zscan->num_channels; ++i)
      ureg_TEX(shader, ureg_writemask(tmp[i], TGSI_WRITEMASK_X), TGSI_TEXTURE_2D, vtex[i], samp_scan);

   for (i = 0; i < zscan->num_channels; ++i)
      ureg_MOV(shader, ureg_writemask(tmp[i], TGSI_WRITEMASK_Y), ureg_scalar(vtex[i], TGSI_SWIZZLE_W));

   for (i = 0; i < zscan->num_channels; ++i) {
      ureg_TEX(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D, ureg_src(tmp[i]), samp_src);
      ureg_TEX(shader, ureg_writemask(quant, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, vtex[i], samp_quant);
   }

   ureg_MUL(shader, quant, ureg_src(quant), ureg_imm1f(shader, 16.0f));
   ureg_MUL(shader, fragment, ureg_src(tmp[0]), ureg_src(quant));

   for (i = 0; i < zscan->num_channels; ++i)
      ureg_release_temporary(shader, tmp[i]);
   ureg_END(shader);

   FREE(vtex);
   FREE(tmp);

   return ureg_create_shader_and_destroy(shader, zscan->pipe);
}

static bool
init_shaders(struct vl_zscan *zscan)
{
   assert(zscan);

   zscan->vs = create_vert_shader(zscan);
   if (!zscan->vs)
      goto error_vs;

   zscan->fs = create_frag_shader(zscan);
   if (!zscan->fs)
      goto error_fs;

   return true;

error_fs:
   zscan->pipe->delete_vs_state(zscan->pipe, zscan->vs);

error_vs:
   return false;
}

static void
cleanup_shaders(struct vl_zscan *zscan)
{
   assert(zscan);

   zscan->pipe->delete_vs_state(zscan->pipe, zscan->vs);
   zscan->pipe->delete_fs_state(zscan->pipe, zscan->fs);
}
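/*
 * init_state() creates the fixed pipeline state reused for every draw:
 * a default rasterizer state, a pass-through blend state (blending and
 * logic ops disabled, full RGBA color mask) and one nearest-filtered
 * sampler per bound texture (source coefficients, scan layout and quant
 * matrix).
 */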
static bool
init_state(struct vl_zscan *zscan)
{
   struct pipe_blend_state blend;
   struct pipe_rasterizer_state rs_state;
   struct pipe_sampler_state sampler;
   unsigned i;

   assert(zscan);

   memset(&rs_state, 0, sizeof(rs_state));
   rs_state.gl_rasterization_rules = true;
   rs_state.depth_clip = 1;
   zscan->rs_state = zscan->pipe->create_rasterizer_state(zscan->pipe, &rs_state);
   if (!zscan->rs_state)
      goto error_rs_state;

   memset(&blend, 0, sizeof blend);

   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 0;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   /* Needed to allow color writes to FB, even if blending disabled */
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   zscan->blend = zscan->pipe->create_blend_state(zscan->pipe, &blend);
   if (!zscan->blend)
      goto error_blend;

   for (i = 0; i < 3; ++i) {
      memset(&sampler, 0, sizeof(sampler));
      sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
      sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
      sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
      sampler.compare_func = PIPE_FUNC_ALWAYS;
      sampler.normalized_coords = 1;
      zscan->samplers[i] = zscan->pipe->create_sampler_state(zscan->pipe, &sampler);
      if (!zscan->samplers[i])
         goto error_samplers;
   }

   return true;

   /* tear down in reverse creation order; a failed (NULL) object is never deleted */
error_samplers:
   for (i = 0; i < 2; ++i)
      if (zscan->samplers[i])
         zscan->pipe->delete_sampler_state(zscan->pipe, zscan->samplers[i]);

   zscan->pipe->delete_blend_state(zscan->pipe, zscan->blend);

error_blend:
   zscan->pipe->delete_rasterizer_state(zscan->pipe, zscan->rs_state);

error_rs_state:
   return false;
}

static void
cleanup_state(struct vl_zscan *zscan)
{
   unsigned i;

   assert(zscan);

   for (i = 0; i < 3; ++i)
      zscan->pipe->delete_sampler_state(zscan->pipe, zscan->samplers[i]);

   zscan->pipe->delete_rasterizer_state(zscan->pipe, zscan->rs_state);
   zscan->pipe->delete_blend_state(zscan->pipe, zscan->blend);
}

struct pipe_sampler_view *
vl_zscan_layout(struct pipe_context *pipe, const int layout[64], unsigned blocks_per_line)
{
   const unsigned total_size = blocks_per_line * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;

   int patched_layout[64];

   struct pipe_resource res_tmpl, *res;
   struct pipe_sampler_view sv_tmpl, *sv;
   struct pipe_transfer *buf_transfer;
   unsigned x, y, i, pitch;
   float *f;

   struct pipe_box rect =
   {
      0, 0, 0,
      VL_BLOCK_WIDTH * blocks_per_line,
      VL_BLOCK_HEIGHT,
      1
   };

   assert(pipe && layout && blocks_per_line);

   /* invert the scan table: patched_layout[raster position] = scan index */
   for (i = 0; i < 64; ++i)
      patched_layout[layout[i]] = i;

   memset(&res_tmpl, 0, sizeof(res_tmpl));
   res_tmpl.target = PIPE_TEXTURE_2D;
   res_tmpl.format = PIPE_FORMAT_R32_FLOAT;
   res_tmpl.width0 = VL_BLOCK_WIDTH * blocks_per_line;
   res_tmpl.height0 = VL_BLOCK_HEIGHT;
   res_tmpl.depth0 = 1;
   res_tmpl.array_size = 1;
   res_tmpl.usage = PIPE_USAGE_IMMUTABLE;
   res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;

   res = pipe->screen->resource_create(pipe->screen, &res_tmpl);
   if (!res)
      goto error_resource;

   buf_transfer = pipe->get_transfer
   (
      pipe, res,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
      &rect
   );
   if (!buf_transfer)
      goto error_transfer;

   pitch = buf_transfer->stride / sizeof(float);

   f = pipe->transfer_map(pipe, buf_transfer);
   if (!f)
      goto error_map;

   for (i = 0; i < blocks_per_line; ++i)
      for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
         for (x = 0; x < VL_BLOCK_WIDTH; ++x) {
            float addr = patched_layout[x + y * VL_BLOCK_WIDTH] +
               i * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;

            addr /= total_size;

            f[i * VL_BLOCK_WIDTH + y * pitch + x] = addr;
         }

   pipe->transfer_unmap(pipe, buf_transfer);
   pipe->transfer_destroy(pipe, buf_transfer);

   memset(&sv_tmpl, 0, sizeof(sv_tmpl));
   u_sampler_view_default_template(&sv_tmpl, res, res->format);
   sv = pipe->create_sampler_view(pipe, res, &sv_tmpl);
   pipe_resource_reference(&res, NULL);
   if (!sv)
      /* transfer already destroyed and res unreferenced, nothing left to clean up */
      return NULL;

   return sv;

error_map:
   pipe->transfer_destroy(pipe, buf_transfer);

error_transfer:
   pipe_resource_reference(&res, NULL);

error_resource:
   return NULL;
}

bool
vl_zscan_init(struct vl_zscan *zscan, struct pipe_context *pipe,
              unsigned buffer_width, unsigned buffer_height,
              unsigned blocks_per_line, unsigned blocks_total,
              unsigned num_channels)
{
   assert(zscan && pipe);

   zscan->pipe = pipe;
   zscan->buffer_width = buffer_width;
   zscan->buffer_height = buffer_height;
   zscan->num_channels = num_channels;
   zscan->blocks_per_line = blocks_per_line;
   zscan->blocks_total = blocks_total;

   if (!init_shaders(zscan))
      return false;

   if (!init_state(zscan)) {
      cleanup_shaders(zscan);
      return false;
   }

   return true;
}

void
vl_zscan_cleanup(struct vl_zscan *zscan)
{
   assert(zscan);

   cleanup_shaders(zscan);
   cleanup_state(zscan);
}
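/*
 * Rough usage sketch (illustrative only; the exact call sequence and the
 * src_view/dst_surface/quant_matrix arguments depend on the decoder using
 * this module):
 *
 *    struct vl_zscan zscan;
 *    struct vl_zscan_buffer buf;
 *    struct pipe_sampler_view *layout;
 *
 *    vl_zscan_init(&zscan, pipe, width, height, blocks_per_line, blocks_total, 1);
 *    layout = vl_zscan_layout(pipe, vl_zscan_normal, blocks_per_line);
 *    vl_zscan_init_buffer(&zscan, &buf, src_view, dst_surface);
 *    vl_zscan_set_layout(&buf, layout);
 *    vl_zscan_upload_quant(&zscan, &buf, quant_matrix, true);
 *    ...
 *    vl_zscan_render(&zscan, &buf, num_blocks);   one draw per batch of blocks
 *    ...
 *    vl_zscan_cleanup_buffer(&buf);
 *    vl_zscan_cleanup(&zscan);
 */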
bool
vl_zscan_init_buffer(struct vl_zscan *zscan, struct vl_zscan_buffer *buffer,
                     struct pipe_sampler_view *src, struct pipe_surface *dst)
{
   struct pipe_resource res_tmpl, *res;
   struct pipe_sampler_view sv_tmpl;

   assert(zscan && buffer);

   memset(buffer, 0, sizeof(struct vl_zscan_buffer));

   pipe_sampler_view_reference(&buffer->src, src);

   buffer->viewport.scale[0] = dst->width;
   buffer->viewport.scale[1] = dst->height;
   buffer->viewport.scale[2] = 1;
   buffer->viewport.scale[3] = 1;
   buffer->viewport.translate[0] = 0;
   buffer->viewport.translate[1] = 0;
   buffer->viewport.translate[2] = 0;
   buffer->viewport.translate[3] = 0;

   buffer->fb_state.width = dst->width;
   buffer->fb_state.height = dst->height;
   buffer->fb_state.nr_cbufs = 1;
   pipe_surface_reference(&buffer->fb_state.cbufs[0], dst);

   memset(&res_tmpl, 0, sizeof(res_tmpl));
   res_tmpl.target = PIPE_TEXTURE_3D;
   res_tmpl.format = PIPE_FORMAT_R8_UNORM;
   res_tmpl.width0 = VL_BLOCK_WIDTH * zscan->blocks_per_line;
   res_tmpl.height0 = VL_BLOCK_HEIGHT;
   res_tmpl.depth0 = 2;
   res_tmpl.array_size = 1;
   res_tmpl.usage = PIPE_USAGE_IMMUTABLE;
   res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;

   res = zscan->pipe->screen->resource_create(zscan->pipe->screen, &res_tmpl);
   if (!res)
      return false;

   memset(&sv_tmpl, 0, sizeof(sv_tmpl));
   u_sampler_view_default_template(&sv_tmpl, res, res->format);
   sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = TGSI_SWIZZLE_X;
   buffer->quant = zscan->pipe->create_sampler_view(zscan->pipe, res, &sv_tmpl);
   pipe_resource_reference(&res, NULL);
   if (!buffer->quant)
      return false;

   return true;
}

void
vl_zscan_cleanup_buffer(struct vl_zscan_buffer *buffer)
{
   assert(buffer);

   pipe_sampler_view_reference(&buffer->src, NULL);
   pipe_sampler_view_reference(&buffer->layout, NULL);
   pipe_sampler_view_reference(&buffer->quant, NULL);
   pipe_surface_reference(&buffer->fb_state.cbufs[0], NULL);
}

void
vl_zscan_set_layout(struct vl_zscan_buffer *buffer, struct pipe_sampler_view *layout)
{
   assert(buffer);
   assert(layout);

   pipe_sampler_view_reference(&buffer->layout, layout);
}
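/*
 * The quant matrices live in the small R8_UNORM 3D texture created in
 * vl_zscan_init_buffer(): z = 0 holds the non-intra matrix, z = 1 the intra
 * matrix.  vl_zscan_upload_quant() below fills one slice, repeating the 8x8
 * matrix once per block in a line so it lines up with the layout texture.
 */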
void
vl_zscan_upload_quant(struct vl_zscan *zscan, struct vl_zscan_buffer *buffer,
                      const uint8_t matrix[64], bool intra)
{
   struct pipe_context *pipe;
   struct pipe_transfer *buf_transfer;
   unsigned x, y, i, pitch;
   uint8_t *data;

   struct pipe_box rect =
   {
      0, 0, intra ? 1 : 0,
      VL_BLOCK_WIDTH,
      VL_BLOCK_HEIGHT,
      1
   };

   assert(buffer);
   assert(matrix);

   pipe = zscan->pipe;

   rect.width *= zscan->blocks_per_line;

   buf_transfer = pipe->get_transfer
   (
      pipe, buffer->quant->texture,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
      &rect
   );
   if (!buf_transfer)
      goto error_transfer;

   pitch = buf_transfer->stride;

   data = pipe->transfer_map(pipe, buf_transfer);
   if (!data)
      goto error_map;

   for (i = 0; i < zscan->blocks_per_line; ++i)
      for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
         for (x = 0; x < VL_BLOCK_WIDTH; ++x)
            data[i * VL_BLOCK_WIDTH + y * pitch + x] = matrix[x + y * VL_BLOCK_WIDTH];

   pipe->transfer_unmap(pipe, buf_transfer);

error_map:
   pipe->transfer_destroy(pipe, buf_transfer);

error_transfer:
   return;
}

void
vl_zscan_render(struct vl_zscan *zscan, struct vl_zscan_buffer *buffer, unsigned num_instances)
{
   assert(buffer);

   zscan->pipe->bind_rasterizer_state(zscan->pipe, zscan->rs_state);
   zscan->pipe->bind_blend_state(zscan->pipe, zscan->blend);
   zscan->pipe->bind_fragment_sampler_states(zscan->pipe, 3, zscan->samplers);
   zscan->pipe->set_framebuffer_state(zscan->pipe, &buffer->fb_state);
   zscan->pipe->set_viewport_state(zscan->pipe, &buffer->viewport);
   zscan->pipe->set_fragment_sampler_views(zscan->pipe, 3, &buffer->src);
   zscan->pipe->bind_vs_state(zscan->pipe, zscan->vs);
   zscan->pipe->bind_fs_state(zscan->pipe, zscan->fs);
   util_draw_arrays_instanced(zscan->pipe, PIPE_PRIM_QUADS, 0, 4, 0, num_instances);
}