/**********************************************************
 * Copyright 2009-2015 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

/**
 * @file
 *
 * Wrappers for DRM ioctl functionality used by the rest of the vmw
 * drm winsys.
 *
 * Based on svgaicd_escape.c
 */


#include "svga_cmd.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "svgadump/svga_dump.h"
#include "state_tracker/drm_driver.h"
#include "vmw_screen.h"
#include "vmw_context.h"
#include "vmw_fence.h"
#include "xf86drm.h"
#include "vmwgfx_drm.h"
#include "svga3d_caps.h"
#include "svga3d_reg.h"

#include "os/os_mman.h"

#include <errno.h>
#include <unistd.h>

#define VMW_MAX_DEFAULT_TEXTURE_SIZE   (128 * 1024 * 1024)
#define VMW_FENCE_TIMEOUT_SECONDS 60

struct vmw_region
{
   uint32_t handle;
   uint64_t map_handle;
   void *data;
   uint32_t map_count;
   int drm_fd;
   uint32_t size;
};

uint32_t
vmw_region_size(struct vmw_region *region)
{
   return region->size;
}

uint32
vmw_ioctl_context_create(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_context_arg c_arg;
   int ret;

   VMW_FUNC;

   ret = drmCommandRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_CONTEXT,
                        &c_arg, sizeof(c_arg));

   if (ret)
      return -1;

   vmw_printf("Context id is %d\n", c_arg.cid);
   return c_arg.cid;
}

uint32
vmw_ioctl_extended_context_create(struct vmw_winsys_screen *vws,
                                  boolean vgpu10)
{
   union drm_vmw_extended_context_arg c_arg;
   int ret;

   VMW_FUNC;
   memset(&c_arg, 0, sizeof(c_arg));
   c_arg.req = (vgpu10 ?
                drm_vmw_context_vgpu10 : drm_vmw_context_legacy);
   ret = drmCommandWriteRead(vws->ioctl.drm_fd,
                             DRM_VMW_CREATE_EXTENDED_CONTEXT,
                             &c_arg, sizeof(c_arg));

   if (ret)
      return -1;

   vmw_printf("Context id is %d\n", c_arg.rep.cid);
   return c_arg.rep.cid;
}

void
vmw_ioctl_context_destroy(struct vmw_winsys_screen *vws, uint32 cid)
{
   struct drm_vmw_context_arg c_arg;

   VMW_FUNC;

   memset(&c_arg, 0, sizeof(c_arg));
   c_arg.cid = cid;

   (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_CONTEXT,
                         &c_arg, sizeof(c_arg));

}

uint32
vmw_ioctl_surface_create(struct vmw_winsys_screen *vws,
                         SVGA3dSurfaceFlags flags,
                         SVGA3dSurfaceFormat format,
                         unsigned usage,
                         SVGA3dSize size,
                         uint32_t numFaces, uint32_t numMipLevels,
                         unsigned sampleCount)
{
   union drm_vmw_surface_create_arg s_arg;
   struct drm_vmw_surface_create_req *req = &s_arg.req;
   struct drm_vmw_surface_arg *rep = &s_arg.rep;
   struct drm_vmw_size sizes[DRM_VMW_MAX_SURFACE_FACES*
                             DRM_VMW_MAX_MIP_LEVELS];
   struct drm_vmw_size *cur_size;
   uint32_t iFace;
   uint32_t iMipLevel;
   int ret;

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);

   memset(&s_arg, 0, sizeof(s_arg));
   req->flags = (uint32_t) flags;
   req->scanout = !!(usage & SVGA_SURFACE_USAGE_SCANOUT);
   req->format = (uint32_t) format;
   req->shareable = !!(usage & SVGA_SURFACE_USAGE_SHARED);

   assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
          DRM_VMW_MAX_MIP_LEVELS);
   cur_size = sizes;
   for (iFace = 0; iFace < numFaces; ++iFace) {
      SVGA3dSize mipSize = size;

      req->mip_levels[iFace] = numMipLevels;
      for (iMipLevel = 0; iMipLevel < numMipLevels; ++iMipLevel) {
         cur_size->width = mipSize.width;
         cur_size->height = mipSize.height;
         cur_size->depth = mipSize.depth;
         mipSize.width = MAX2(mipSize.width >> 1, 1);
         mipSize.height = MAX2(mipSize.height >> 1, 1);
         mipSize.depth = MAX2(mipSize.depth >> 1, 1);
         cur_size++;
      }
   }
   for (iFace = numFaces; iFace < SVGA3D_MAX_SURFACE_FACES; ++iFace) {
      req->mip_levels[iFace] = 0;
   }

   req->size_addr = (unsigned long)&sizes;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SURFACE,
                             &s_arg, sizeof(s_arg));

   if (ret)
      return -1;

   vmw_printf("Surface id is %d\n", rep->sid);

   return rep->sid;
}


uint32
vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
                            SVGA3dSurfaceFlags flags,
                            SVGA3dSurfaceFormat format,
                            unsigned usage,
                            SVGA3dSize size,
                            uint32_t numFaces,
                            uint32_t numMipLevels,
                            unsigned sampleCount,
                            uint32_t buffer_handle,
                            struct vmw_region **p_region)
{
   union drm_vmw_gb_surface_create_arg s_arg;
   struct drm_vmw_gb_surface_create_req *req = &s_arg.req;
   struct drm_vmw_gb_surface_create_rep *rep = &s_arg.rep;
   struct vmw_region *region = NULL;
   int ret;

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);

   if (p_region) {
      region = CALLOC_STRUCT(vmw_region);
      if (!region)
         return SVGA3D_INVALID_ID;
   }

   memset(&s_arg, 0, sizeof(s_arg));
   req->svga3d_flags = (uint32_t) flags;
   if (usage & SVGA_SURFACE_USAGE_SCANOUT)
      req->drm_surface_flags |= drm_vmw_surface_flag_scanout;
   req->format = (uint32_t) format;
   if (usage & SVGA_SURFACE_USAGE_SHARED)
      req->drm_surface_flags |= drm_vmw_surface_flag_shareable;
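   /*
    * Presumably this asks the kernel to allocate a backing buffer for the
    * surface when no existing buffer handle is supplied (the handle, if
    * any, is filled in further below).
    */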
   req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
   req->base_size.width = size.width;
   req->base_size.height = size.height;
   req->base_size.depth = size.depth;
   req->mip_levels = numMipLevels;
   req->multisample_count = 0;
   req->autogen_filter = SVGA3D_TEX_FILTER_NONE;

   if (vws->base.have_vgpu10) {
      req->array_size = numFaces;
      req->multisample_count = sampleCount;
   } else {
      assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
             DRM_VMW_MAX_MIP_LEVELS);
      req->array_size = 0;
   }

   if (buffer_handle)
      req->buffer_handle = buffer_handle;
   else
      req->buffer_handle = SVGA3D_INVALID_ID;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_CREATE,
                             &s_arg, sizeof(s_arg));

   if (ret)
      goto out_fail_create;

   if (p_region) {
      region->handle = rep->buffer_handle;
      region->map_handle = rep->buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->backup_size;
      *p_region = region;
   }

   vmw_printf("Surface id is %d\n", rep->handle);
   return rep->handle;

out_fail_create:
   FREE(region);
   return SVGA3D_INVALID_ID;
}

/**
 * vmw_ioctl_surface_req - Fill in a struct surface_req
 *
 * @vws: Winsys screen
 * @whandle: Surface handle
 * @req: The struct surface req to fill in
 * @needs_unref: This call takes a kernel surface reference that needs to
 * be unreferenced.
 *
 * Returns 0 on success, negative error type otherwise.
 * Fills in the surface_req structure according to handle type and kernel
 * capabilities.
 */
static int
vmw_ioctl_surface_req(const struct vmw_winsys_screen *vws,
                      const struct winsys_handle *whandle,
                      struct drm_vmw_surface_arg *req,
                      boolean *needs_unref)
{
   int ret;

   switch(whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
   case DRM_API_HANDLE_TYPE_KMS:
      *needs_unref = FALSE;
      req->handle_type = DRM_VMW_HANDLE_LEGACY;
      req->sid = whandle->handle;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      if (!vws->ioctl.have_drm_2_6) {
         uint32_t handle;

         ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle, &handle);
         if (ret) {
            vmw_error("Failed to get handle from prime fd %d.\n",
                      (int) whandle->handle);
            return -EINVAL;
         }

         *needs_unref = TRUE;
         req->handle_type = DRM_VMW_HANDLE_LEGACY;
         req->sid = handle;
      } else {
         *needs_unref = FALSE;
         req->handle_type = DRM_VMW_HANDLE_PRIME;
         req->sid = whandle->handle;
      }
      break;
   default:
      vmw_error("Attempt to import unsupported handle type %d.\n",
                whandle->type);
      return -EINVAL;
   }

   return 0;
}

/**
 * vmw_ioctl_gb_surface_ref - Put a reference on a guest-backed surface and
 * get surface information
 *
 * @vws: Screen to register the reference on
 * @handle: Kernel handle of the guest-backed surface
 * @flags: flags used when the surface was created
 * @format: Format used when the surface was created
 * @numMipLevels: Number of mipmap levels of the surface
 * @p_region: On successful return points to a newly allocated
 * struct vmw_region holding a reference to the surface backup buffer.
 *
 * Returns 0 on success, a system error on failure.
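 *
 * Illustrative (hypothetical) usage sketch; the caller-side variable names
 * below are examples only:
 *
 *    SVGA3dSurfaceFlags flags;
 *    SVGA3dSurfaceFormat format;
 *    uint32_t num_mips, handle;
 *    struct vmw_region *region;
 *
 *    if (vmw_ioctl_gb_surface_ref(vws, whandle, &flags, &format,
 *                                 &num_mips, &handle, &region) == 0) {
 *       ...use the surface, then drop the references with
 *       vmw_ioctl_region_destroy(region) and
 *       vmw_ioctl_surface_destroy(vws, handle)...
 *    }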
 */
int
vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen *vws,
                         const struct winsys_handle *whandle,
                         SVGA3dSurfaceFlags *flags,
                         SVGA3dSurfaceFormat *format,
                         uint32_t *numMipLevels,
                         uint32_t *handle,
                         struct vmw_region **p_region)
{
   union drm_vmw_gb_surface_reference_arg s_arg;
   struct drm_vmw_surface_arg *req = &s_arg.req;
   struct drm_vmw_gb_surface_ref_rep *rep = &s_arg.rep;
   struct vmw_region *region = NULL;
   boolean needs_unref = FALSE;
   int ret;

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);

   assert(p_region != NULL);
   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      return -ENOMEM;

   memset(&s_arg, 0, sizeof(s_arg));
   ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
   if (ret)
      goto out_fail_req;

   *handle = req->sid;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF,
                             &s_arg, sizeof(s_arg));

   if (ret)
      goto out_fail_ref;

   region->handle = rep->crep.buffer_handle;
   region->map_handle = rep->crep.buffer_map_handle;
   region->drm_fd = vws->ioctl.drm_fd;
   region->size = rep->crep.backup_size;
   *p_region = region;

   *handle = rep->crep.handle;
   *flags = rep->creq.svga3d_flags;
   *format = rep->creq.format;
   *numMipLevels = rep->creq.mip_levels;

   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);

   return 0;
out_fail_ref:
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);
out_fail_req:
   FREE(region);
   return ret;
}

void
vmw_ioctl_surface_destroy(struct vmw_winsys_screen *vws, uint32 sid)
{
   struct drm_vmw_surface_arg s_arg;

   VMW_FUNC;

   memset(&s_arg, 0, sizeof(s_arg));
   s_arg.sid = sid;

   (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SURFACE,
                         &s_arg, sizeof(s_arg));
}

void
vmw_ioctl_command(struct vmw_winsys_screen *vws, int32_t cid,
                  uint32_t throttle_us, void *commands, uint32_t size,
                  struct pipe_fence_handle **pfence)
{
   struct drm_vmw_execbuf_arg arg;
   struct drm_vmw_fence_rep rep;
   int ret;
   int argsize;

#ifdef DEBUG
   {
      static boolean firsttime = TRUE;
      static boolean debug = FALSE;
      static boolean skip = FALSE;
      if (firsttime) {
         debug = debug_get_bool_option("SVGA_DUMP_CMD", FALSE);
         skip = debug_get_bool_option("SVGA_SKIP_CMD", FALSE);
      }
      if (debug) {
         VMW_FUNC;
         svga_dump_commands(commands, size);
      }
      firsttime = FALSE;
      if (skip) {
         size = 0;
      }
   }
#endif

   memset(&arg, 0, sizeof(arg));
   memset(&rep, 0, sizeof(rep));

   rep.error = -EFAULT;
   if (pfence)
      arg.fence_rep = (unsigned long)&rep;
   arg.commands = (unsigned long)commands;
   arg.command_size = size;
   arg.throttle_us = throttle_us;
   arg.version = vws->ioctl.drm_execbuf_version;
   arg.context_handle = (vws->base.have_vgpu10 ? cid : SVGA3D_INVALID_ID);

   /* In DRM_VMW_EXECBUF_VERSION 1, the drm_vmw_execbuf_arg structure ends with
    * the flags field. The structure size sent to drmCommandWrite must match
    * the drm_execbuf_version. Otherwise, an invalid value will be returned.
    */
   argsize = vws->ioctl.drm_execbuf_version > 1 ?
      sizeof(arg) : offsetof(struct drm_vmw_execbuf_arg, context_handle);
   do {
      ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg, argsize);
   } while(ret == -ERESTART);
   if (ret) {
      vmw_error("%s error %s.\n", __FUNCTION__, strerror(-ret));
      abort();
   }

   if (rep.error) {

      /*
       * Kernel has already synced, or caller requested no fence.
       */
      if (pfence)
         *pfence = NULL;
   } else {
      if (pfence) {
         vmw_fences_signal(vws->fence_ops, rep.passed_seqno, rep.seqno,
                           TRUE);

         *pfence = vmw_fence_create(vws->fence_ops, rep.handle,
                                    rep.seqno, rep.mask);
         if (*pfence == NULL) {
            /*
             * Fence creation failed. Need to sync.
             */
            (void) vmw_ioctl_fence_finish(vws, rep.handle, rep.mask);
            vmw_ioctl_fence_unref(vws, rep.handle);
         }
      }
   }
}


struct vmw_region *
vmw_ioctl_region_create(struct vmw_winsys_screen *vws, uint32_t size)
{
   struct vmw_region *region;
   union drm_vmw_alloc_dmabuf_arg arg;
   struct drm_vmw_alloc_dmabuf_req *req = &arg.req;
   struct drm_vmw_dmabuf_rep *rep = &arg.rep;
   int ret;

   vmw_printf("%s: size = %u\n", __FUNCTION__, size);

   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      goto out_err1;

   memset(&arg, 0, sizeof(arg));
   req->size = size;
   do {
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_ALLOC_DMABUF, &arg,
                                sizeof(arg));
   } while (ret == -ERESTART);

   if (ret) {
      vmw_error("IOCTL failed %d: %s\n", ret, strerror(-ret));
      goto out_err1;
   }

   region->data = NULL;
   region->handle = rep->handle;
   region->map_handle = rep->map_handle;
   region->map_count = 0;
   region->size = size;
   region->drm_fd = vws->ioctl.drm_fd;

   vmw_printf(" gmrId = %u, offset = %u\n",
              region->ptr.gmrId, region->ptr.offset);

   return region;

out_err1:
   FREE(region);
   return NULL;
}

void
vmw_ioctl_region_destroy(struct vmw_region *region)
{
   struct drm_vmw_unref_dmabuf_arg arg;

   vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
              region->ptr.gmrId, region->ptr.offset);

   if (region->data) {
      os_munmap(region->data, region->size);
      region->data = NULL;
   }

   memset(&arg, 0, sizeof(arg));
   arg.handle = region->handle;
   drmCommandWrite(region->drm_fd, DRM_VMW_UNREF_DMABUF, &arg, sizeof(arg));

   FREE(region);
}

SVGAGuestPtr
vmw_ioctl_region_ptr(struct vmw_region *region)
{
   SVGAGuestPtr ptr = {region->handle, 0};
   return ptr;
}

void *
vmw_ioctl_region_map(struct vmw_region *region)
{
   void *map;

   vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
              region->ptr.gmrId, region->ptr.offset);

   if (region->data == NULL) {
      map = os_mmap(NULL, region->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    region->drm_fd, region->map_handle);
      if (map == MAP_FAILED) {
         vmw_error("%s: Map failed.\n", __FUNCTION__);
         return NULL;
      }

      region->data = map;
   }

   ++region->map_count;

   return region->data;
}

void
vmw_ioctl_region_unmap(struct vmw_region *region)
{
   vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
              region->ptr.gmrId, region->ptr.offset);
   --region->map_count;
}

/**
 * vmw_ioctl_syncforcpu - Synchronize a buffer object for CPU usage
 *
 * @region: Pointer to a struct vmw_region representing the buffer object.
 * @dont_block: Don't wait for GPU idle, but rather return -EBUSY if the
 * GPU is busy with the buffer object.
 * @readonly: Hint that the CPU access is read-only.
 * @allow_cs: Allow concurrent command submission while the buffer is
 * synchronized for CPU. If FALSE, command submissions referencing the
 * buffer will block until a corresponding call to vmw_ioctl_releasefromcpu.
 *
 * This function idles any GPU activities touching the buffer and blocks
 * command submission of commands referencing the buffer, even from
 * other processes.
 */
int
vmw_ioctl_syncforcpu(struct vmw_region *region,
                     boolean dont_block,
                     boolean readonly,
                     boolean allow_cs)
{
   struct drm_vmw_synccpu_arg arg;

   memset(&arg, 0, sizeof(arg));
   arg.op = drm_vmw_synccpu_grab;
   arg.handle = region->handle;
   arg.flags = drm_vmw_synccpu_read;
   if (!readonly)
      arg.flags |= drm_vmw_synccpu_write;
   if (dont_block)
      arg.flags |= drm_vmw_synccpu_dontblock;
   if (allow_cs)
      arg.flags |= drm_vmw_synccpu_allow_cs;

   return drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

/**
 * vmw_ioctl_releasefromcpu - Undo a previous syncforcpu.
 *
 * @region: Pointer to a struct vmw_region representing the buffer object.
 * @readonly: Should hold the same value as the matching syncforcpu call.
 * @allow_cs: Should hold the same value as the matching syncforcpu call.
 */
void
vmw_ioctl_releasefromcpu(struct vmw_region *region,
                         boolean readonly,
                         boolean allow_cs)
{
   struct drm_vmw_synccpu_arg arg;

   memset(&arg, 0, sizeof(arg));
   arg.op = drm_vmw_synccpu_release;
   arg.handle = region->handle;
   arg.flags = drm_vmw_synccpu_read;
   if (!readonly)
      arg.flags |= drm_vmw_synccpu_write;
   if (allow_cs)
      arg.flags |= drm_vmw_synccpu_allow_cs;

   (void) drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

void
vmw_ioctl_fence_unref(struct vmw_winsys_screen *vws,
                      uint32_t handle)
{
   struct drm_vmw_fence_arg arg;
   int ret;

   memset(&arg, 0, sizeof(arg));
   arg.handle = handle;

   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_FENCE_UNREF,
                         &arg, sizeof(arg));
   if (ret != 0)
      vmw_error("%s Failed\n", __FUNCTION__);
}

static inline uint32_t
vmw_drm_fence_flags(uint32_t flags)
{
   uint32_t dflags = 0;

   if (flags & SVGA_FENCE_FLAG_EXEC)
      dflags |= DRM_VMW_FENCE_FLAG_EXEC;
   if (flags & SVGA_FENCE_FLAG_QUERY)
      dflags |= DRM_VMW_FENCE_FLAG_QUERY;

   return dflags;
}


int
vmw_ioctl_fence_signalled(struct vmw_winsys_screen *vws,
                          uint32_t handle,
                          uint32_t flags)
{
   struct drm_vmw_fence_signaled_arg arg;
   uint32_t vflags = vmw_drm_fence_flags(flags);
   int ret;

   memset(&arg, 0, sizeof(arg));
   arg.handle = handle;
   arg.flags = vflags;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_SIGNALED,
                             &arg, sizeof(arg));

   if (ret != 0)
      return ret;

   vmw_fences_signal(vws->fence_ops, arg.passed_seqno, 0, FALSE);

   return (arg.signaled) ?
      0 : -1;
}



int
vmw_ioctl_fence_finish(struct vmw_winsys_screen *vws,
                       uint32_t handle,
                       uint32_t flags)
{
   struct drm_vmw_fence_wait_arg arg;
   uint32_t vflags = vmw_drm_fence_flags(flags);
   int ret;

   memset(&arg, 0, sizeof(arg));

   arg.handle = handle;
   arg.timeout_us = VMW_FENCE_TIMEOUT_SECONDS*1000000;
   arg.lazy = 0;
   arg.flags = vflags;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_WAIT,
                             &arg, sizeof(arg));

   if (ret != 0)
      vmw_error("%s Failed\n", __FUNCTION__);

   return 0;
}

uint32
vmw_ioctl_shader_create(struct vmw_winsys_screen *vws,
                        SVGA3dShaderType type,
                        uint32 code_len)
{
   struct drm_vmw_shader_create_arg sh_arg;
   int ret;

   VMW_FUNC;

   memset(&sh_arg, 0, sizeof(sh_arg));

   sh_arg.size = code_len;
   sh_arg.buffer_handle = SVGA3D_INVALID_ID;
   sh_arg.shader_handle = SVGA3D_INVALID_ID;
   switch (type) {
   case SVGA3D_SHADERTYPE_VS:
      sh_arg.shader_type = drm_vmw_shader_type_vs;
      break;
   case SVGA3D_SHADERTYPE_PS:
      sh_arg.shader_type = drm_vmw_shader_type_ps;
      break;
   default:
      assert(!"Invalid shader type.");
      break;
   }

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SHADER,
                             &sh_arg, sizeof(sh_arg));

   if (ret)
      return SVGA3D_INVALID_ID;

   return sh_arg.shader_handle;
}

void
vmw_ioctl_shader_destroy(struct vmw_winsys_screen *vws, uint32 shid)
{
   struct drm_vmw_shader_arg sh_arg;

   VMW_FUNC;

   memset(&sh_arg, 0, sizeof(sh_arg));
   sh_arg.handle = shid;

   (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SHADER,
                         &sh_arg, sizeof(sh_arg));

}

static int
vmw_ioctl_parse_caps(struct vmw_winsys_screen *vws,
                     const uint32_t *cap_buffer)
{
   int i;

   if (vws->base.have_gb_objects) {
      for (i = 0; i < vws->ioctl.num_cap_3d; ++i) {
         vws->ioctl.cap_3d[i].has_cap = TRUE;
         vws->ioctl.cap_3d[i].result.u = cap_buffer[i];
      }
      return 0;
   } else {
      const uint32 *capsBlock;
      const SVGA3dCapsRecord *capsRecord = NULL;
      uint32 offset;
      const SVGA3dCapPair *capArray;
      int numCaps, index;

      /*
       * Search linearly through the caps block records for the specified type.
       */
      capsBlock = cap_buffer;
      for (offset = 0; capsBlock[offset] != 0; offset += capsBlock[offset]) {
         const SVGA3dCapsRecord *record;
         assert(offset < SVGA_FIFO_3D_CAPS_SIZE);
         record = (const SVGA3dCapsRecord *) (capsBlock + offset);
         if ((record->header.type >= SVGA3DCAPS_RECORD_DEVCAPS_MIN) &&
             (record->header.type <= SVGA3DCAPS_RECORD_DEVCAPS_MAX) &&
             (!capsRecord || (record->header.type > capsRecord->header.type))) {
            capsRecord = record;
         }
      }

      if (!capsRecord)
         return -1;

      /*
       * Calculate the number of caps from the size of the record.
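       * The record length is expressed in uint32 units and includes the
       * record header, so, assuming the usual two-uint32 header, a record
       * of length 32 describes (32 * 4 - 8) / 8 = 15 cap pairs.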
       */
      capArray = (const SVGA3dCapPair *) capsRecord->data;
      numCaps = (int) ((capsRecord->header.length * sizeof(uint32) -
                        sizeof capsRecord->header) / (2 * sizeof(uint32)));

      for (i = 0; i < numCaps; i++) {
         index = capArray[i][0];
         if (index < vws->ioctl.num_cap_3d) {
            vws->ioctl.cap_3d[index].has_cap = TRUE;
            vws->ioctl.cap_3d[index].result.u = capArray[i][1];
         } else {
            debug_printf("Unknown devcaps seen: %d\n", index);
         }
      }
   }
   return 0;
}

boolean
vmw_ioctl_init(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_getparam_arg gp_arg;
   struct drm_vmw_get_3d_cap_arg cap_arg;
   unsigned int size;
   int ret;
   uint32_t *cap_buffer;
   drmVersionPtr version;
   boolean drm_gb_capable;
   boolean have_drm_2_5;

   VMW_FUNC;

   version = drmGetVersion(vws->ioctl.drm_fd);
   if (!version)
      goto out_no_version;

   have_drm_2_5 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 4);
   vws->ioctl.have_drm_2_6 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 5);
   vws->ioctl.have_drm_2_9 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 8);

   vws->ioctl.drm_execbuf_version = vws->ioctl.have_drm_2_9 ? 2 : 1;

   drm_gb_capable = have_drm_2_5;

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_3D;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret || gp_arg.value == 0) {
      vmw_error("No 3D enabled (%i, %s).\n", ret, strerror(-ret));
      goto out_no_3d;
   }

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_FIFO_HW_VERSION;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret) {
      vmw_error("Failed to get fifo hw version (%i, %s).\n",
                ret, strerror(-ret));
      goto out_no_3d;
   }
   vws->ioctl.hwversion = gp_arg.value;

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_HW_CAPS;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret)
      vws->base.have_gb_objects = FALSE;
   else
      vws->base.have_gb_objects =
         !!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS);

   if (vws->base.have_gb_objects && !drm_gb_capable)
      goto out_no_3d;

   vws->base.have_vgpu10 = FALSE;
   if (vws->base.have_gb_objects) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret)
         size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
      else
         size = gp_arg.value;

      if (vws->base.have_gb_objects)
         vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
      else
         vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;


      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret) {
         /* Just guess a large enough value. */
         vws->ioctl.max_mob_memory = 256*1024*1024;
      } else {
         vws->ioctl.max_mob_memory = gp_arg.value;
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));

      if (ret || gp_arg.value == 0) {
         vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;
      } else {
         vws->ioctl.max_texture_size = gp_arg.value;
      }

      /* Never early flush surfaces, mobs do accounting. */
      vws->ioctl.max_surface_memory = -1;

      if (vws->ioctl.have_drm_2_9) {

         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_VGPU10;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            const char *vgpu10_val;

            debug_printf("Have VGPU10 interface and hardware.\n");
            vws->base.have_vgpu10 = TRUE;
            vgpu10_val = getenv("SVGA_VGPU10");
            if (vgpu10_val && strcmp(vgpu10_val, "0") == 0) {
               debug_printf("Disabling VGPU10 interface.\n");
               vws->base.have_vgpu10 = FALSE;
            } else {
               debug_printf("Enabling VGPU10 interface.\n");
            }
         }
      }
   } else {
      vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY;
      if (have_drm_2_5)
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
      if (!have_drm_2_5 || ret) {
         /* Just guess a large enough value, around 800 MB. */
         vws->ioctl.max_surface_memory = 0x30000000;
      } else {
         vws->ioctl.max_surface_memory = gp_arg.value;
      }

      vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;

      size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
   }

   debug_printf("VGPU10 interface is %s.\n",
                vws->base.have_vgpu10 ? "on" : "off");

   cap_buffer = calloc(1, size);
   if (!cap_buffer) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_3d;
   }

   vws->ioctl.cap_3d = calloc(vws->ioctl.num_cap_3d,
                              sizeof(*vws->ioctl.cap_3d));
   if (!vws->ioctl.cap_3d) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_caparray;
   }

   memset(&cap_arg, 0, sizeof(cap_arg));
   cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer);
   cap_arg.max_size = size;

   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP,
                         &cap_arg, sizeof(cap_arg));

   if (ret) {
      debug_printf("Failed to get 3D capabilities"
                   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   ret = vmw_ioctl_parse_caps(vws, cap_buffer);
   if (ret) {
      debug_printf("Failed to parse 3D capabilities"
                   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   if (((version->version_major == 2 && version->version_minor >= 10)
       || version->version_major > 2) && vws->base.have_vgpu10) {

      /* Support for these commands didn't make it into vmwgfx kernel
       * modules before 2.10.
       */
1031 */ 1032 vws->base.have_generate_mipmap_cmd = TRUE; 1033 vws->base.have_set_predication_cmd = TRUE; 1034 } 1035 1036 free(cap_buffer); 1037 drmFreeVersion(version); 1038 vmw_printf("%s OK\n", __FUNCTION__); 1039 return TRUE; 1040 out_no_caps: 1041 free(vws->ioctl.cap_3d); 1042 out_no_caparray: 1043 free(cap_buffer); 1044 out_no_3d: 1045 drmFreeVersion(version); 1046 out_no_version: 1047 vws->ioctl.num_cap_3d = 0; 1048 debug_printf("%s Failed\n", __FUNCTION__); 1049 return FALSE; 1050 } 1051 1052 1053 1054 void 1055 vmw_ioctl_cleanup(struct vmw_winsys_screen *vws) 1056 { 1057 VMW_FUNC; 1058 } 1059