/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *
 */

#include "draw/draw_context.h"
#include "draw/draw_vertex.h"
#include "draw/draw_pipe.h"
#include "draw/draw_vbuf.h"
#include "draw/draw_private.h"

#include "nv_object.xml.h"
#include "nv30/nv30-40_3d.xml.h"
#include "nv30/nv30_context.h"
#include "nv30/nv30_format.h"

struct nv30_render {
   struct vbuf_render base;
   struct nv30_context *nv30;

   struct pipe_transfer *transfer;
   struct pipe_resource *buffer;
   unsigned offset;
   unsigned length;

   struct vertex_info vertex_info;

   struct nouveau_heap *vertprog;
   uint32_t vtxprog[16][4];
   uint32_t vtxfmt[16];
   uint32_t vtxptr[16];
   uint32_t prim;
};

static inline struct nv30_render *
nv30_render(struct vbuf_render *render)
{
   return (struct nv30_render *)render;
}

static const struct vertex_info *
nv30_render_get_vertex_info(struct vbuf_render *render)
{
   return &nv30_render(render)->vertex_info;
}

static boolean
nv30_render_allocate_vertices(struct vbuf_render *render,
                              ushort vertex_size, ushort nr_vertices)
{
   struct nv30_render *r = nv30_render(render);
   struct nv30_context *nv30 = r->nv30;

   r->length = (uint32_t)vertex_size * (uint32_t)nr_vertices;

   if (r->offset + r->length >= render->max_vertex_buffer_bytes) {
      pipe_resource_reference(&r->buffer, NULL);
      r->buffer = pipe_buffer_create(&nv30->screen->base.base,
                                     PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_STREAM,
                                     render->max_vertex_buffer_bytes);
      if (!r->buffer)
         return false;

      r->offset = 0;
   }

   return true;
}

static void *
nv30_render_map_vertices(struct vbuf_render *render)
{
   struct nv30_render *r = nv30_render(render);
   char *map = pipe_buffer_map_range(
         &r->nv30->base.pipe, r->buffer,
         r->offset, r->length,
         PIPE_TRANSFER_WRITE |
         PIPE_TRANSFER_DISCARD_RANGE,
         &r->transfer);
   assert(map);
   return map;
}

static void
nv30_render_unmap_vertices(struct vbuf_render *render,
                           ushort min_index, ushort max_index)
{
   struct nv30_render *r = nv30_render(render);
   pipe_buffer_unmap(&r->nv30->base.pipe, r->transfer);
   r->transfer = NULL;
}

static void
nv30_render_set_primitive(struct vbuf_render *render, unsigned prim)
{
   struct nv30_render *r = nv30_render(render);

   r->prim = nv30_prim_gl(prim);
}

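/* Indexed drawing for the vbuf backend: bind the temporary vertex buffer for
 * each attribute, then stream the index list inline through the pushbuf,
 * packing two 16-bit indices per 32-bit word (an odd leading index is sent
 * through VB_ELEMENT_U32 first).
 */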
static void
nv30_render_draw_elements(struct vbuf_render *render,
                          const ushort *indices, uint count)
{
   struct nv30_render *r = nv30_render(render);
   struct nv30_context *nv30 = r->nv30;
   struct nouveau_pushbuf *push = nv30->screen->base.pushbuf;
   unsigned i;

   BEGIN_NV04(push, NV30_3D(VTXBUF(0)), r->vertex_info.num_attribs);
   for (i = 0; i < r->vertex_info.num_attribs; i++) {
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP,
                 nv04_resource(r->buffer), r->offset + r->vtxptr[i],
                 NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1);
   }

   if (!nv30_state_validate(nv30, ~0, false))
      return;

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, r->prim);

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *indices++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA(push, (indices[1] << 16) | indices[0]);
         indices += 2;
      }
   }

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
   PUSH_RESET(push, BUFCTX_VTXTMP);
}

static void
nv30_render_draw_arrays(struct vbuf_render *render, unsigned start, uint nr)
{
   struct nv30_render *r = nv30_render(render);
   struct nv30_context *nv30 = r->nv30;
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   unsigned fn = nr >> 8, pn = nr & 0xff;
   unsigned ps = fn + (pn ? 1 : 0);
   unsigned i;

   BEGIN_NV04(push, NV30_3D(VTXBUF(0)), r->vertex_info.num_attribs);
   for (i = 0; i < r->vertex_info.num_attribs; i++) {
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP,
                 nv04_resource(r->buffer), r->offset + r->vtxptr[i],
                 NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1);
   }

   if (!nv30_state_validate(nv30, ~0, false))
      return;

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, r->prim);

   BEGIN_NI04(push, NV30_3D(VB_VERTEX_BATCH), ps);
   while (fn--) {
      PUSH_DATA (push, 0xff000000 | start);
      start += 256;
   }

   if (pn)
      PUSH_DATA (push, ((pn - 1) << 24) | start);

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
   PUSH_RESET(push, BUFCTX_VTXTMP);
}

static void
nv30_render_release_vertices(struct vbuf_render *render)
{
   struct nv30_render *r = nv30_render(render);
   r->offset += r->length;
}

static const struct {
   unsigned emit;
   unsigned vp30;
   unsigned vp40;
   unsigned ow40;
} vroute [] = {
   [TGSI_SEMANTIC_POSITION] = { EMIT_4F, 0, 0, 0x00000000 },
   [TGSI_SEMANTIC_COLOR   ] = { EMIT_4F, 3, 1, 0x00000001 },
   [TGSI_SEMANTIC_BCOLOR  ] = { EMIT_4F, 1, 3, 0x00000004 },
   [TGSI_SEMANTIC_FOG     ] = { EMIT_4F, 5, 5, 0x00000010 },
   [TGSI_SEMANTIC_PSIZE   ] = { EMIT_1F_PSIZE, 6, 6, 0x00000020 },
   [TGSI_SEMANTIC_TEXCOORD] = { EMIT_4F, 8, 7, 0x00004000 },
};

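/* Route a single vertex program output to a hardware result slot.  Generic
 * varyings are matched against the fragment program's texcoord usage, a
 * passthrough instruction for this attribute is written into r->vtxprog,
 * and *idx is rewritten to the result bit that nv30_render_validate
 * accumulates into vp_results.
 */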
static bool
vroute_add(struct nv30_render *r, uint attrib, uint sem, uint *idx)
{
   struct nv30_screen *screen = r->nv30->screen;
   struct nv30_fragprog *fp = r->nv30->fragprog.program;
   struct vertex_info *vinfo = &r->vertex_info;
   enum pipe_format format;
   uint emit = EMIT_OMIT;
   uint result = *idx;

   if (sem == TGSI_SEMANTIC_GENERIC) {
      uint num_texcoords = (screen->eng3d->oclass < NV40_3D_CLASS) ? 8 : 10;
      for (result = 0; result < num_texcoords; result++) {
         if (fp->texcoord[result] == *idx + 8) {
            sem = TGSI_SEMANTIC_TEXCOORD;
            emit = vroute[sem].emit;
            break;
         }
      }
   } else {
      emit = vroute[sem].emit;
   }

   if (emit == EMIT_OMIT)
      return false;

   draw_emit_vertex_attr(vinfo, emit, attrib);
   format = draw_translate_vinfo_format(emit);

   r->vtxfmt[attrib] = nv30_vtxfmt(&screen->base.base, format)->hw;
   r->vtxptr[attrib] = vinfo->size;
   vinfo->size += draw_translate_vinfo_size(emit);

   if (screen->eng3d->oclass < NV40_3D_CLASS) {
      r->vtxprog[attrib][0] = 0x001f38d8;
      r->vtxprog[attrib][1] = 0x0080001b | (attrib << 9);
      r->vtxprog[attrib][2] = 0x0836106c;
      r->vtxprog[attrib][3] = 0x2000f800 | (result + vroute[sem].vp30) << 2;
   } else {
      r->vtxprog[attrib][0] = 0x401f9c6c;
      r->vtxprog[attrib][1] = 0x0040000d | (attrib << 8);
      r->vtxprog[attrib][2] = 0x8106c083;
      r->vtxprog[attrib][3] = 0x6041ff80 | (result + vroute[sem].vp40) << 2;
   }

   if (result < 8)
      *idx = vroute[sem].ow40 << result;
   else {
      assert(sem == TGSI_SEMANTIC_TEXCOORD);
      *idx = 0x00001000 << (result - 8);
   }
   return true;
}

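/* Set up the swtnl fallback state: allocate exec space for the passthrough
 * vertex program, build routing for every vertex program output (plus any
 * point sprite coordinates replaced by the rasterizer), upload the generated
 * program, and emit the vertex formats and identity viewport/depth-range
 * state used for draw's pre-transformed vertices.
 */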
static bool
nv30_render_validate(struct nv30_context *nv30)
{
   struct nv30_render *r = nv30_render(nv30->draw->render);
   struct nv30_rasterizer_stateobj *rast = nv30->rast;
   struct pipe_screen *pscreen = &nv30->screen->base.base;
   struct nouveau_pushbuf *push = nv30->screen->base.pushbuf;
   struct nouveau_object *eng3d = nv30->screen->eng3d;
   struct nv30_vertprog *vp = nv30->vertprog.program;
   struct vertex_info *vinfo = &r->vertex_info;
   unsigned vp_attribs = 0;
   unsigned vp_results = 0;
   unsigned attrib = 0;
   unsigned pntc;
   int i;

   if (!r->vertprog) {
      struct nouveau_heap *heap = nv30_screen(pscreen)->vp_exec_heap;
      if (nouveau_heap_alloc(heap, 16, &r->vertprog, &r->vertprog)) {
         while (heap->next && heap->size < 16) {
            struct nouveau_heap **evict = heap->next->priv;
            nouveau_heap_free(evict);
         }

         if (nouveau_heap_alloc(heap, 16, &r->vertprog, &r->vertprog))
            return false;
      }
   }

   vinfo->num_attribs = 0;
   vinfo->size = 0;

   /* setup routing for all necessary vp outputs */
   for (i = 0; i < vp->info.num_outputs && attrib < 16; i++) {
      uint semantic = vp->info.output_semantic_name[i];
      uint index = vp->info.output_semantic_index[i];
      if (vroute_add(r, attrib, semantic, &index)) {
         vp_attribs |= (1 << attrib++);
         vp_results |= index;
      }
   }

   /* setup routing for replaced point coords not written by vp */
   if (rast && rast->pipe.point_quad_rasterization)
      pntc = rast->pipe.sprite_coord_enable & 0x000002ff;
   else
      pntc = 0;

   while (pntc && attrib < 16) {
      uint index = ffs(pntc) - 1; pntc &= ~(1 << index);
      if (vroute_add(r, attrib, TGSI_SEMANTIC_TEXCOORD, &index)) {
         vp_attribs |= (1 << attrib++);
         vp_results |= index;
      }
   }

   /* modify vertex format for correct stride, and stub out unused ones */
   BEGIN_NV04(push, NV30_3D(VP_UPLOAD_FROM_ID), 1);
   PUSH_DATA (push, r->vertprog->start);
   r->vtxprog[attrib - 1][3] |= 1;
   for (i = 0; i < attrib; i++) {
      BEGIN_NV04(push, NV30_3D(VP_UPLOAD_INST(0)), 4);
      PUSH_DATAp(push, r->vtxprog[i], 4);
      r->vtxfmt[i] |= vinfo->size << 8;
   }
   for (; i < 16; i++)
      r->vtxfmt[i] = NV30_3D_VTXFMT_TYPE_V32_FLOAT;

   BEGIN_NV04(push, NV30_3D(VIEWPORT_TRANSLATE_X), 8);
   PUSH_DATAf(push, 0.0);
   PUSH_DATAf(push, 0.0);
   PUSH_DATAf(push, 0.0);
   PUSH_DATAf(push, 0.0);
   PUSH_DATAf(push, 1.0);
   PUSH_DATAf(push, 1.0);
   PUSH_DATAf(push, 1.0);
   PUSH_DATAf(push, 1.0);
   BEGIN_NV04(push, NV30_3D(DEPTH_RANGE_NEAR), 2);
   PUSH_DATAf(push, 0.0);
   PUSH_DATAf(push, 1.0);
   BEGIN_NV04(push, NV30_3D(VIEWPORT_HORIZ), 2);
   PUSH_DATA (push, nv30->framebuffer.width << 16);
   PUSH_DATA (push, nv30->framebuffer.height << 16);

   BEGIN_NV04(push, NV30_3D(VTXFMT(0)), 16);
   PUSH_DATAp(push, r->vtxfmt, 16);

   BEGIN_NV04(push, NV30_3D(VP_START_FROM_ID), 1);
   PUSH_DATA (push, r->vertprog->start);
   BEGIN_NV04(push, NV30_3D(ENGINE), 1);
   PUSH_DATA (push, 0x00000103);
   if (eng3d->oclass >= NV40_3D_CLASS) {
      BEGIN_NV04(push, NV40_3D(VP_ATTRIB_EN), 2);
      PUSH_DATA (push, vp_attribs);
      PUSH_DATA (push, vp_results);
   }

   vinfo->size /= 4;
   return true;
}

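/* Entry point for draws routed through the draw module: map the bound
 * vertex/index buffers, push any dirty state into draw's copy, then let
 * draw_vbo() feed the nv30_render backend above.
 */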
void
nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct draw_context *draw = nv30->draw;
   struct pipe_transfer *transfer[PIPE_MAX_ATTRIBS] = {NULL};
   struct pipe_transfer *transferi = NULL;
   int i;

   nv30_render_validate(nv30);

   if (nv30->draw_dirty & NV30_NEW_VIEWPORT)
      draw_set_viewport_states(draw, 0, 1, &nv30->viewport);
   if (nv30->draw_dirty & NV30_NEW_RASTERIZER)
      draw_set_rasterizer_state(draw, &nv30->rast->pipe, NULL);
   if (nv30->draw_dirty & NV30_NEW_CLIP)
      draw_set_clip_state(draw, &nv30->clip);
   if (nv30->draw_dirty & NV30_NEW_ARRAYS) {
      draw_set_vertex_buffers(draw, 0, nv30->num_vtxbufs, nv30->vtxbuf);
      draw_set_vertex_elements(draw, nv30->vertex->num_elements, nv30->vertex->pipe);
   }
   if (nv30->draw_dirty & NV30_NEW_FRAGPROG) {
      struct nv30_fragprog *fp = nv30->fragprog.program;
      if (!fp->draw)
         fp->draw = draw_create_fragment_shader(draw, &fp->pipe);
      draw_bind_fragment_shader(draw, fp->draw);
   }
   if (nv30->draw_dirty & NV30_NEW_VERTPROG) {
      struct nv30_vertprog *vp = nv30->vertprog.program;
      if (!vp->draw)
         vp->draw = draw_create_vertex_shader(draw, &vp->pipe);
      draw_bind_vertex_shader(draw, vp->draw);
   }
   if (nv30->draw_dirty & NV30_NEW_VERTCONST) {
      if (nv30->vertprog.constbuf) {
         void *map = nv04_resource(nv30->vertprog.constbuf)->data;
         draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0,
                                         map, nv30->vertprog.constbuf_nr * 16);
      } else {
         draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, NULL, 0);
      }
   }

   for (i = 0; i < nv30->num_vtxbufs; i++) {
      const void *map = nv30->vtxbuf[i].user_buffer;
      if (!map) {
         if (nv30->vtxbuf[i].buffer)
            map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer,
                                  PIPE_TRANSFER_UNSYNCHRONIZED |
                                  PIPE_TRANSFER_READ, &transfer[i]);
      }
      draw_set_mapped_vertex_buffer(draw, i, map, ~0);
   }

   if (info->indexed) {
      const void *map = nv30->idxbuf.user_buffer;
      if (!map)
         map = pipe_buffer_map(pipe, nv30->idxbuf.buffer,
                               PIPE_TRANSFER_UNSYNCHRONIZED |
                               PIPE_TRANSFER_READ, &transferi);
      draw_set_indexes(draw,
                       (ubyte *) map + nv30->idxbuf.offset,
                       nv30->idxbuf.index_size, ~0);
   } else {
      draw_set_indexes(draw, NULL, 0, 0);
   }

   draw_vbo(draw, info);
   draw_flush(draw);

   if (info->indexed && transferi)
      pipe_buffer_unmap(pipe, transferi);
   for (i = 0; i < nv30->num_vtxbufs; i++)
      if (transfer[i])
         pipe_buffer_unmap(pipe, transfer[i]);

   nv30->draw_dirty = 0;
   nv30_state_release(nv30);
}

static void
nv30_render_destroy(struct vbuf_render *render)
{
   struct nv30_render *r = nv30_render(render);

   if (r->transfer)
      pipe_buffer_unmap(&r->nv30->base.pipe, r->transfer);
   pipe_resource_reference(&r->buffer, NULL);
   nouveau_heap_free(&r->vertprog);
   FREE(render);
}

static struct vbuf_render *
nv30_render_create(struct nv30_context *nv30)
{
   struct nv30_render *r = CALLOC_STRUCT(nv30_render);
   if (!r)
      return NULL;

   r->nv30 = nv30;
   r->offset = 1 * 1024 * 1024;

   r->base.max_indices = 16 * 1024;
   r->base.max_vertex_buffer_bytes = r->offset;

   r->base.get_vertex_info = nv30_render_get_vertex_info;
   r->base.allocate_vertices = nv30_render_allocate_vertices;
   r->base.map_vertices = nv30_render_map_vertices;
   r->base.unmap_vertices = nv30_render_unmap_vertices;
   r->base.set_primitive = nv30_render_set_primitive;
   r->base.draw_elements = nv30_render_draw_elements;
   r->base.draw_arrays = nv30_render_draw_arrays;
   r->base.release_vertices = nv30_render_release_vertices;
   r->base.destroy = nv30_render_destroy;
   return &r->base;
}

void
nv30_draw_init(struct pipe_context *pipe)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct vbuf_render *render;
   struct draw_context *draw;
   struct draw_stage *stage;

   draw = draw_create(pipe);
   if (!draw)
      return;

   render = nv30_render_create(nv30);
   if (!render) {
      draw_destroy(draw);
      return;
   }

   stage = draw_vbuf_stage(draw, render);
   if (!stage) {
      render->destroy(render);
      draw_destroy(draw);
      return;
   }

   draw_set_render(draw, render);
   draw_set_rasterize_stage(draw, stage);
   draw_wide_line_threshold(draw, 10000000.f);
   draw_wide_point_threshold(draw, 10000000.f);
   draw_wide_point_sprites(draw, true);
   nv30->draw = draw;
}