/* Resource transfer (CPU <-> GPU copy) paths for nvc0 (Fermi) and nve4
 * (Kepler) using the M2MF, P2MF and dedicated COPY engines.
 * NOTE(review): the original file's formatting was mangled; whitespace has
 * been reconstructed here, code tokens are unchanged.
 */

#include "util/u_format.h"

#include "nvc0/nvc0_context.h"

/* Per-map bookkeeping. rect[0] describes the miptree side of the copy,
 * rect[1] the linear staging buffer (when not mapping directly). */
struct nvc0_transfer {
   struct pipe_transfer base;
   struct nv50_m2mf_rect rect[2];
   uint32_t nblocksx; /* width of the mapped region, in blocks */
   uint16_t nblocksy; /* height of the mapped region, in blocks */
   uint16_t nlayers;  /* box->depth: number of layers/slices transferred */
};

/* Copy a rectangle of blocks between two surfaces with the Fermi M2MF
 * engine. Either side may be tiled (memtype set on the bo) or linear; for
 * tiled surfaces the tiling parameters and per-chunk x/y positions are
 * programmed explicitly, for linear ones x/y are folded into the byte
 * offset up front. The copy is split into chunks of at most 2047 lines
 * (apparent hardware limit on the line count — TODO confirm).
 */
static void
nvc0_m2mf_transfer_rect(struct nvc0_context *nvc0,
                        const struct nv50_m2mf_rect *dst,
                        const struct nv50_m2mf_rect *src,
                        uint32_t nblocksx, uint32_t nblocksy)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bufctx *bctx = nvc0->bufctx;
   const int cpp = dst->cpp;
   uint32_t src_ofst = src->base;
   uint32_t dst_ofst = dst->base;
   uint32_t height = nblocksy;
   uint32_t sy = src->y;
   uint32_t dy = dst->y;
   uint32_t exec = (1 << 20); /* base EXEC flags; LINEAR_IN/OUT may be or'ed in below */

   assert(dst->cpp == src->cpp);

   nouveau_bufctx_refn(bctx, 0, src->bo, src->domain | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst->bo, dst->domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   if (nouveau_bo_memtype(src->bo)) {
      /* Tiled source: program its tiling layout once. */
      BEGIN_NVC0(push, NVC0_M2MF(TILING_MODE_IN), 5);
      PUSH_DATA (push, src->tile_mode);
      PUSH_DATA (push, src->width * cpp);
      PUSH_DATA (push, src->height);
      PUSH_DATA (push, src->depth);
      PUSH_DATA (push, src->z);
   } else {
      /* Linear source: fold x/y into the starting offset. */
      src_ofst += src->y * src->pitch + src->x * cpp;

      BEGIN_NVC0(push, NVC0_M2MF(PITCH_IN), 1);
      PUSH_DATA (push, src->width * cpp);

      exec |= NVC0_M2MF_EXEC_LINEAR_IN;
   }

   if (nouveau_bo_memtype(dst->bo)) {
      /* Tiled destination: program its tiling layout once. */
      BEGIN_NVC0(push, NVC0_M2MF(TILING_MODE_OUT), 5);
      PUSH_DATA (push, dst->tile_mode);
      PUSH_DATA (push, dst->width * cpp);
      PUSH_DATA (push, dst->height);
      PUSH_DATA (push, dst->depth);
      PUSH_DATA (push, dst->z);
   } else {
      /* Linear destination: fold x/y into the starting offset. */
      dst_ofst += dst->y * dst->pitch + dst->x * cpp;

      BEGIN_NVC0(push, NVC0_M2MF(PITCH_OUT), 1);
      PUSH_DATA (push, dst->width * cpp);

      exec |= NVC0_M2MF_EXEC_LINEAR_OUT;
   }

   /* Submit in chunks of up to 2047 lines, advancing either the tiled y
    * position or the linear byte offset for each side per chunk. */
   while (height) {
      int line_count = height > 2047 ? 2047 : height;

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_IN_HIGH), 2);
      PUSH_DATAh(push, src->bo->offset + src_ofst);
      PUSH_DATA (push, src->bo->offset + src_ofst);

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->bo->offset + dst_ofst);
      PUSH_DATA (push, dst->bo->offset + dst_ofst);

      if (!(exec & NVC0_M2MF_EXEC_LINEAR_IN)) {
         /* Tiled source: position is given in x/y, not via the offset. */
         BEGIN_NVC0(push, NVC0_M2MF(TILING_POSITION_IN_X), 2);
         PUSH_DATA (push, src->x * cpp);
         PUSH_DATA (push, sy);
      } else {
         src_ofst += line_count * src->pitch;
      }
      if (!(exec & NVC0_M2MF_EXEC_LINEAR_OUT)) {
         BEGIN_NVC0(push, NVC0_M2MF(TILING_POSITION_OUT_X), 2);
         PUSH_DATA (push, dst->x * cpp);
         PUSH_DATA (push, dy);
      } else {
         dst_ofst += line_count * dst->pitch;
      }

      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, nblocksx * cpp);
      PUSH_DATA (push, line_count);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, exec);

      height -= line_count;
      sy += line_count;
      dy += line_count;
   }

   nouveau_bufctx_reset(bctx, 0);
}

/* Kepler variant of the rectangle copy, using the dedicated COPY engine
 * (SUBC_COPY) instead of M2MF. Unlike the Fermi path this submits the
 * whole rectangle in a single launch.
 */
static void
nve4_m2mf_transfer_rect(struct nvc0_context *nvc0,
                        const struct nv50_m2mf_rect *dst,
                        const struct nv50_m2mf_rect *src,
                        uint32_t nblocksx, uint32_t nblocksy)
{
   /* cpp -> component size (cs, bytes) and component count (nc) for the
    * COPY engine's swizzle/remap unit, indexed by bytes per block.
    * Entries left zero are unsupported block sizes (guarded by the assert
    * below). */
   static const struct {
      int cs;
      int nc;
   } cpbs[] = {
      [ 1] = { 1, 1 },
      [ 2] = { 1, 2 },
      [ 3] = { 1, 3 },
      [ 4] = { 1, 4 },
      [ 6] = { 2, 3 },
      [ 8] = { 2, 4 },
      [ 9] = { 3, 3 },
      [12] = { 3, 4 },
      [16] = { 4, 4 },
   };
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bufctx *bctx = nvc0->bufctx;
   uint32_t exec;
   uint32_t src_base = src->base;
   uint32_t dst_base = dst->base;

   assert(dst->cpp < ARRAY_SIZE(cpbs) && cpbs[dst->cpp].cs);
   assert(dst->cpp == src->cpp);

   nouveau_bufctx_refn(bctx, 0, dst->bo, dst->domain | NOUVEAU_BO_WR);
   nouveau_bufctx_refn(bctx, 0, src->bo, src->domain | NOUVEAU_BO_RD);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   exec = 0x400 /* REMAP_ENABLE */ | 0x200 /* 2D_ENABLE */ | 0x6 /* UNK */;

   /* Identity component remap: DST_{X,Y,Z,W} = SRC_{X,Y,Z,W}. */
   BEGIN_NVC0(push, SUBC_COPY(0x0708), 1);
   PUSH_DATA (push, (cpbs[dst->cpp].nc - 1) << 24 |
                    (cpbs[src->cpp].nc - 1) << 20 |
                    (cpbs[src->cpp].cs - 1) << 16 |
                    3 << 12 /* DST_W = SRC_W */ |
                    2 <<  8 /* DST_Z = SRC_Z */ |
                    1 <<  4 /* DST_Y = SRC_Y */ |
                    0 <<  0 /* DST_X = SRC_X */);

   if (nouveau_bo_memtype(dst->bo)) {
      /* Tiled destination: layout + position registers. */
      BEGIN_NVC0(push, SUBC_COPY(0x070c), 6);
      PUSH_DATA (push, 0x1000 | dst->tile_mode);
      PUSH_DATA (push, dst->width);
      PUSH_DATA (push, dst->height);
      PUSH_DATA (push, dst->depth);
      PUSH_DATA (push, dst->z);
      PUSH_DATA (push, (dst->y << 16) | dst->x);
   } else {
      /* Linear destination: fold x/y into the base offset; no z slices. */
      assert(!dst->z);
      dst_base += dst->y * dst->pitch + dst->x * dst->cpp;
      exec |= 0x100; /* DST_MODE_2D_LINEAR */
   }

   if (nouveau_bo_memtype(src->bo)) {
      BEGIN_NVC0(push, SUBC_COPY(0x0728), 6);
      PUSH_DATA (push, 0x1000 | src->tile_mode);
      PUSH_DATA (push, src->width);
      PUSH_DATA (push, src->height);
      PUSH_DATA (push, src->depth);
      PUSH_DATA (push, src->z);
      PUSH_DATA (push, (src->y << 16) | src->x);
   } else {
      assert(!src->z);
      src_base += src->y * src->pitch + src->x * src->cpp;
      exec |= 0x080; /* SRC_MODE_2D_LINEAR */
   }

   /* Addresses, pitches and extents, then a single launch. */
   BEGIN_NVC0(push, SUBC_COPY(0x0400), 8);
   PUSH_DATAh(push, src->bo->offset + src_base);
   PUSH_DATA (push, src->bo->offset + src_base);
   PUSH_DATAh(push, dst->bo->offset + dst_base);
   PUSH_DATA (push, dst->bo->offset + dst_base);
   PUSH_DATA (push, src->pitch);
   PUSH_DATA (push, dst->pitch);
   PUSH_DATA (push, nblocksx);
   PUSH_DATA (push, nblocksy);

   BEGIN_NVC0(push, SUBC_COPY(0x0300), 1);
   PUSH_DATA (push, exec);

   nouveau_bufctx_reset(bctx, 0);
}

/* Upload `size` bytes of `data` into `dst` at `offset` by embedding the
 * data inline in the push buffer (M2MF DATA method). Used for small CPU ->
 * GPU uploads. Size is rounded up to a whole number of dwords; chunks are
 * limited by the maximum method packet length.
 */
void
nvc0_m2mf_push_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned offset, unsigned domain,
                      unsigned size, const void *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv->pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = (size + 3) / 4; /* dwords, rounding up */

   nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nvc0->bufctx);
   nouveau_pushbuf_validate(push);

   while (count) {
      unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      /* nr data dwords + 9 dwords of setup methods per chunk. */
      if (!PUSH_SPACE(push, nr + 9))
         break;

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      /* Last chunk may be short: clamp the byte length to what's left. */
      PUSH_DATA (push, MIN2(size, nr * 4));
      PUSH_DATA (push, 1);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, 0x100111);

      /* must not be interrupted (trap on QUERY fence, 0x50 works however) */
      BEGIN_NIC0(push, NVC0_M2MF(DATA), nr);
      PUSH_DATAp(push, src, nr);

      count -= nr;
      src += nr;
      offset += nr * 4;
      size -= nr * 4;
   }

   nouveau_bufctx_reset(nvc0->bufctx, 0);
}

/* Kepler equivalent of nvc0_m2mf_push_linear, using the P2MF upload
 * methods; the data follows the UPLOAD_EXEC method in the same packet,
 * hence the nr + 1 packet length and the one-smaller chunk size.
 */
void
nve4_p2mf_push_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned offset, unsigned domain,
                      unsigned size, const void *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv->pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = (size + 3) / 4; /* dwords, rounding up */

   nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nvc0->bufctx);
   nouveau_pushbuf_validate(push);

   while (count) {
      /* One slot of the packet is taken by the EXEC dword itself. */
      unsigned nr = MIN2(count, (NV04_PFIFO_MAX_PACKET_LEN - 1));

      if (!PUSH_SPACE(push, nr + 10))
         break;

      BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, MIN2(size, nr * 4));
      PUSH_DATA (push, 1);
      /* must not be interrupted (trap on QUERY fence, 0x50 works however) */
      BEGIN_1IC0(push, NVE4_P2MF(UPLOAD_EXEC), nr + 1);
      PUSH_DATA (push, 0x1001);
      PUSH_DATAp(push, src, nr);

      count -= nr;
      src += nr;
      offset += nr * 4;
      size -= nr * 4;
   }

   nouveau_bufctx_reset(nvc0->bufctx, 0);
}

/* GPU-side linear buffer-to-buffer copy via M2MF, in chunks of up to
 * 128 KiB (1 << 17 bytes) per EXEC.
 */
static void
nvc0_m2mf_copy_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
                      struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
                      unsigned size)
{
   struct nouveau_pushbuf *push = nv->pushbuf;
   struct nouveau_bufctx *bctx = nvc0_context(&nv->pipe)->bufctx;

   nouveau_bufctx_refn(bctx, 0, src, srcdom | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst, dstdom | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   while (size) {
      unsigned bytes = MIN2(size, 1 << 17);

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->offset + dstoff);
      PUSH_DATA (push, dst->offset + dstoff);
      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_IN_HIGH), 2);
      PUSH_DATAh(push, src->offset + srcoff);
      PUSH_DATA (push, src->offset + srcoff);
      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, bytes);
      PUSH_DATA (push, 1);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, NVC0_M2MF_EXEC_QUERY_SHORT |
                       NVC0_M2MF_EXEC_LINEAR_IN | NVC0_M2MF_EXEC_LINEAR_OUT);

      srcoff += bytes;
      dstoff += bytes;
      size -= bytes;
   }

   nouveau_bufctx_reset(bctx, 0);
}

/* Kepler linear buffer-to-buffer copy via the COPY engine: a single
 * launch, no chunking.
 */
static void
nve4_m2mf_copy_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
                      struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
                      unsigned size)
{
   struct nouveau_pushbuf *push = nv->pushbuf;
   struct nouveau_bufctx *bctx = nvc0_context(&nv->pipe)->bufctx;

   nouveau_bufctx_refn(bctx, 0, src, srcdom | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst, dstdom | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   BEGIN_NVC0(push, SUBC_COPY(0x0400), 4);
   PUSH_DATAh(push, src->offset + srcoff);
   PUSH_DATA (push, src->offset + srcoff);
   PUSH_DATAh(push, dst->offset + dstoff);
   PUSH_DATA (push, dst->offset + dstoff);
   BEGIN_NVC0(push, SUBC_COPY(0x0418), 1);
   PUSH_DATA (push, size);
   BEGIN_NVC0(push, SUBC_COPY(0x0300), 1);
   PUSH_DATA (push, 0x186); /* launch; 1D/linear mode — TODO confirm bits */

   nouveau_bufctx_reset(bctx, 0);
}


/* A miptree can be mapped directly (no staging copy) only if it lives
 * outside VRAM, is a staging resource, and is stored linearly.
 */
static inline bool
nvc0_mt_transfer_can_map_directly(struct nv50_miptree *mt)
{
   if (mt->base.domain == NOUVEAU_BO_VRAM)
      return false;
   if (mt->base.base.usage != PIPE_USAGE_STAGING)
      return false;
   return !nouveau_bo_memtype(mt->base.bo);
}

/* Wait until the miptree's bo is safe to access with the given transfer
 * usage. Without a memory-manager allocation (mm), wait on the bo itself;
 * otherwise wait on the resource's fences: all work for writes, only
 * pending writes for reads. Returns true on success.
 */
static inline bool
nvc0_mt_sync(struct nvc0_context *nvc0, struct nv50_miptree *mt, unsigned usage)
{
   if (!mt->base.mm) {
      uint32_t access = (usage & PIPE_TRANSFER_WRITE) ?
         NOUVEAU_BO_WR : NOUVEAU_BO_RD;
      return !nouveau_bo_wait(mt->base.bo, access, nvc0->base.client);
   }
   if (usage & PIPE_TRANSFER_WRITE)
      return !mt->base.fence || nouveau_fence_wait(mt->base.fence, &nvc0->base.debug);
   return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr, &nvc0->base.debug);
}

/* pipe_context::transfer_map for miptrees.
 *
 * Two paths:
 *  - direct map: linear staging resources are synced and mapped in place;
 *  - staging copy: otherwise a GART bo sized for the box is allocated,
 *    and for reads the relevant layers are copied into it with the
 *    per-generation m2mf_copy_rect hook before mapping it.
 * Returns the CPU pointer and fills *ptransfer, or NULL on failure.
 */
void *
nvc0_miptree_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *res,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nouveau_device *dev = nvc0->screen->base.device;
   struct nv50_miptree *mt = nv50_miptree(res);
   struct nvc0_transfer *tx;
   uint32_t size;
   int ret;
   unsigned flags = 0;

   if (nvc0_mt_transfer_can_map_directly(mt)) {
      /* Try the direct path; fall back to staging unless the caller
       * demanded a direct map. */
      ret = !nvc0_mt_sync(nvc0, mt, usage);
      if (!ret)
         ret = nouveau_bo_map(mt->base.bo, 0, NULL);
      if (ret &&
          (usage & PIPE_TRANSFER_MAP_DIRECTLY))
         return NULL;
      if (!ret)
         usage |= PIPE_TRANSFER_MAP_DIRECTLY;
   } else
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return NULL;

   tx = CALLOC_STRUCT(nvc0_transfer);
   if (!tx)
      return NULL;

   pipe_resource_reference(&tx->base.resource, res);

   tx->base.level = level;
   tx->base.usage = usage;
   tx->base.box = *box;

   if (util_format_is_plain(res->format)) {
      /* Plain formats: account for multisample expansion. */
      tx->nblocksx = box->width << mt->ms_x;
      tx->nblocksy = box->height << mt->ms_y;
   } else {
      tx->nblocksx = util_format_get_nblocksx(res->format, box->width);
      tx->nblocksy = util_format_get_nblocksy(res->format, box->height);
   }
   tx->nlayers = box->depth;

   if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      tx->base.stride = mt->level[level].pitch;
      tx->base.layer_stride = mt->layer_stride;
      uint32_t offset = box->y * tx->base.stride +
         util_format_get_stride(res->format, box->x);
      /* Array textures use layer_stride per layer; true 3D textures need
       * the z-slice offset helper. */
      if (!mt->layout_3d)
         offset += mt->layer_stride * box->z;
      else
         offset += nvc0_mt_zslice_offset(mt, level, box->z);
      *ptransfer = &tx->base;
      return mt->base.bo->map + mt->base.offset + offset;
   }

   /* Staging path: tightly packed layout in a GART bo. */
   tx->base.stride = tx->nblocksx * util_format_get_blocksize(res->format);
   tx->base.layer_stride = tx->nblocksy * tx->base.stride;

   nv50_m2mf_rect_setup(&tx->rect[0], res, level, box->x, box->y, box->z);

   size = tx->base.layer_stride;

   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                        size * tx->nlayers, NULL, &tx->rect[1].bo);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      FREE(tx);
      return NULL;
   }

   tx->rect[1].cpp = tx->rect[0].cpp;
   tx->rect[1].width = tx->nblocksx;
   tx->rect[1].height = tx->nblocksy;
   tx->rect[1].depth = 1;
   tx->rect[1].pitch = tx->base.stride;
   tx->rect[1].domain = NOUVEAU_BO_GART;

   if (usage & PIPE_TRANSFER_READ) {
      /* Copy each requested layer into the staging bo, then restore
       * rect[0]'s position for the write-back in unmap. */
      unsigned base = tx->rect[0].base;
      unsigned z = tx->rect[0].z;
      unsigned i;
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[1], &tx->rect[0],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += size;
      }
      tx->rect[0].z = z;
      tx->rect[0].base = base;
      tx->rect[1].base = 0;
   }

   if (tx->rect[1].bo->map) {
      /* Already mapped (persistent map) — no extra map call needed. */
      *ptransfer = &tx->base;
      return tx->rect[1].bo->map;
   }

   if (usage & PIPE_TRANSFER_READ)
      flags = NOUVEAU_BO_RD;
   if (usage & PIPE_TRANSFER_WRITE)
      flags |= NOUVEAU_BO_WR;

   ret = nouveau_bo_map(tx->rect[1].bo, flags, nvc0->screen->base.client);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
      FREE(tx);
      return NULL;
   }

   *ptransfer = &tx->base;
   return tx->rect[1].bo->map;
}

/* pipe_context::transfer_unmap for miptrees. For write transfers that
 * went through staging, copy each layer back into the miptree; the
 * staging bo is then freed from a fence callback so the GPU copies can
 * finish first.
 */
void
nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *transfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nvc0_transfer *tx = (struct nvc0_transfer *)transfer;
   struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
   unsigned i;

   if (tx->base.usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      /* Direct maps have no staging bo and nothing to copy back. */
      pipe_resource_reference(&transfer->resource, NULL);

      FREE(tx);
      return;
   }

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[0], &tx->rect[1],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += tx->nblocksy * tx->base.stride;
      }
      NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_wr, 1);

      /* Allow the copies above to finish executing before freeing the source */
      nouveau_fence_work(nvc0->screen->base.fence.current,
                         nouveau_fence_unref_bo, tx->rect[1].bo);
   } else {
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
   }
   if (tx->base.usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_rd, 1);

   pipe_resource_reference(&transfer->resource, NULL);

   FREE(tx);
}

/* This happens rather often with DTD9/st. */
/* Update a buffer that may currently be bound as a constant buffer. If a
 * binding fully covers the updated range, go through the 3D engine's
 * CB_POS upload path (nvc0_cb_bo_push) so the constbuf stays coherent;
 * otherwise fall back to the generic push_data upload.
 */
static void
nvc0_cb_push(struct nouveau_context *nv,
             struct nv04_resource *res,
             unsigned offset, unsigned words, const uint32_t *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nvc0_constbuf *cb = NULL;
   int s;

   /* Go through all the constbuf binding points of this buffer and try to
    * find one which contains the region to be updated.
    */
   for (s = 0; s < 6 && !cb; s++) {
      uint16_t bindings = res->cb_bindings[s];
      while (bindings) {
         int i = ffs(bindings) - 1;
         uint32_t cb_offset = nvc0->constbuf[s][i].offset;

         bindings &= ~(1 << i);
         if (cb_offset <= offset &&
             cb_offset + nvc0->constbuf[s][i].size >= offset + words * 4) {
            cb = &nvc0->constbuf[s][i];
            break;
         }
      }
   }

   if (cb) {
      nvc0_cb_bo_push(nv, res->bo, res->domain,
                      res->offset + cb->offset, cb->size,
                      offset - cb->offset, words, data);
   } else {
      nv->push_data(nv, res->bo, res->offset + offset, res->domain,
                    words * 4, data);
   }
}

/* Upload data into a constant-buffer range through the 3D engine
 * (CB_SIZE/CB_ADDRESS + CB_POS), chunked by the maximum packet length.
 * `offset` is relative to `base`, must be dword-aligned, and the range
 * must fit within `size` (after alignment to 0x100).
 */
void
nvc0_cb_bo_push(struct nouveau_context *nv,
                struct nouveau_bo *bo, unsigned domain,
                unsigned base, unsigned size,
                unsigned offset, unsigned words, const uint32_t *data)
{
   struct nouveau_pushbuf *push = nv->pushbuf;

   NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_count, 1);
   NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_bytes, words * 4);

   assert(!(offset & 3));
   size = align(size, 0x100);

   assert(offset < size);
   assert(offset + words * 4 <= size);

   BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
   PUSH_DATA (push, size);
   PUSH_DATAh(push, bo->offset + base);
   PUSH_DATA (push, bo->offset + base);

   while (words) {
      /* One packet slot is used by the CB_POS dword itself. */
      unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN - 1);

      PUSH_SPACE(push, nr + 2);
      PUSH_REFN (push, bo, NOUVEAU_BO_WR | domain);
      BEGIN_1IC0(push, NVC0_3D(CB_POS), nr + 1);
      PUSH_DATA (push, offset);
      PUSH_DATAp(push, data, nr);

      words -= nr;
      data += nr;
      offset += nr * 4;
   }
}

/* Select the per-generation transfer entry points: Kepler (>= NVE4) uses
 * the COPY/P2MF engines, Fermi uses M2MF for everything.
 */
void
nvc0_init_transfer_functions(struct nvc0_context *nvc0)
{
   if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
      nvc0->m2mf_copy_rect = nve4_m2mf_transfer_rect;
      nvc0->base.copy_data = nve4_m2mf_copy_linear;
      nvc0->base.push_data = nve4_p2mf_push_linear;
   } else {
      nvc0->m2mf_copy_rect = nvc0_m2mf_transfer_rect;
      nvc0->base.copy_data = nvc0_m2mf_copy_linear;
      nvc0->base.push_data = nvc0_m2mf_push_linear;
   }
   nvc0->base.push_cb = nvc0_cb_push;
}