/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file vc4_simulator.c
 *
 * Implements VC4 simulation on top of a non-VC4 GEM fd.
 *
 * This file's goal is to emulate the VC4 ioctls' behavior in the kernel on
 * top of the simpenrose software simulator. Generally, VC4 driver BOs have a
 * GEM-side copy of their contents and a simulator-side memory area that the
 * GEM contents get copied into during simulation. Once simulation is done,
 * the simulator's data is copied back out to the GEM BOs, so that rendering
 * appears on the screen as if actual hardware rendering had been done.
 *
 * One of the limitations of this code is that we shouldn't really need a
 * GEM-side BO for non-window-system BOs. However, we do need unique BO
 * handles for each of our GEM BOs so that this file can look up its state
 * from the handle passed in at submit ioctl time (also, a couple of places
 * outside of this file still call ioctls directly on the fd).
 *
 * Another limitation is that BO import doesn't work unless the underlying
 * window system's BO size matches what VC4 is going to use, which of course
 * doesn't work out in practice. This means that for now, only DRI3 (VC4
 * makes the winsys BOs) is supported, not DRI2 (window system makes the
 * winsys BOs).
 */
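
/*
 * As a rough, simplified sketch of the flow described above (see
 * vc4_simulator_flush() below for the real code):
 *
 *     vc4_simulator_flush()
 *        -> vc4_simulator_pin_bos()    copy GEM BO contents into sim memory
 *        -> vc4_cl_validate()          validate/relocate the command lists
 *        -> simpenrose_do_binning()    run the binner on the bin CL
 *        -> simpenrose_do_rendering()  run the renderer on the render CL
 *        -> vc4_simulator_unpin_bos()  copy sim memory back out to GEM BOs
 */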

#ifdef USE_VC4_SIMULATOR

#include <sys/mman.h>
#include "xf86drm.h"
#include "util/u_memory.h"
#include "util/u_mm.h"
#include "util/ralloc.h"

#include "vc4_screen.h"
#include "vc4_context.h"
#include "kernel/vc4_drv.h"
#include "vc4_simulator_validate.h"
#include "simpenrose/simpenrose.h"

/** Global (across GEM fds) state for the simulator */
static struct vc4_simulator_state {
        mtx_t mutex;

        void *mem;
        ssize_t mem_size;
        struct mem_block *heap;
        struct mem_block *overflow;

        /** Mapping from GEM fd to struct vc4_simulator_file * */
        struct hash_table *fd_map;

        int refcount;
} sim_state = {
        .mutex = _MTX_INITIALIZER_NP,
};

/** Per-GEM-fd state for the simulator. */
struct vc4_simulator_file {
        int fd;

        /* This is weird -- we make a "vc4_device" per file, even though on
         * the kernel side this is a global. We do this so that kernel code
         * calling us for BO allocation can get to our screen.
         */
        struct drm_device dev;

        /** Mapping from GEM handle to struct vc4_simulator_bo * */
        struct hash_table *bo_map;
};

/** Wrapper for drm_vc4_bo tracking the simulator-specific state. */
struct vc4_simulator_bo {
        struct drm_vc4_bo base;
        struct vc4_simulator_file *file;

        /** Area for this BO within sim_state->mem */
        struct mem_block *block;
        void *winsys_map;
        uint32_t winsys_stride;

        int handle;
};

static void *
int_to_key(int key)
{
        return (void *)(uintptr_t)key;
}

static struct vc4_simulator_file *
vc4_get_simulator_file_for_fd(int fd)
{
        /* Keyed by fd + 1, so an fd of 0 never produces a NULL key. */
        struct hash_entry *entry = _mesa_hash_table_search(sim_state.fd_map,
                                                           int_to_key(fd + 1));
        return entry ? entry->data : NULL;
}

/* A marker placed just after each BO, then checked after rendering to make
 * sure it's still there.
 */
#define BO_SENTINEL 0xfedcba98

#define PAGE_ALIGN2 12

/**
 * Allocates space in simulator memory and returns a tracking struct for it
 * that also contains the drm_gem_cma_object struct.
 */
static struct vc4_simulator_bo *
vc4_create_simulator_bo(int fd, int handle, unsigned size)
{
        struct vc4_simulator_file *file = vc4_get_simulator_file_for_fd(fd);
        struct vc4_simulator_bo *sim_bo = rzalloc(file,
                                                  struct vc4_simulator_bo);
        struct drm_vc4_bo *bo = &sim_bo->base;
        struct drm_gem_cma_object *obj = &bo->base;
        size = align(size, 4096);

        sim_bo->file = file;
        sim_bo->handle = handle;

        mtx_lock(&sim_state.mutex);
        sim_bo->block = u_mmAllocMem(sim_state.heap, size + 4, PAGE_ALIGN2, 0);
        mtx_unlock(&sim_state.mutex);
        assert(sim_bo->block);

        obj->base.size = size;
        obj->base.dev = &file->dev;
        obj->vaddr = sim_state.mem + sim_bo->block->ofs;
        obj->paddr = simpenrose_hw_addr(obj->vaddr);

        *(uint32_t *)(obj->vaddr + size) = BO_SENTINEL;

        /* A handle of 0 is used for vc4_gem.c internal allocations that
         * don't need to go in the lookup table.
         */
        if (handle != 0) {
                mtx_lock(&sim_state.mutex);
                _mesa_hash_table_insert(file->bo_map, int_to_key(handle), bo);
                mtx_unlock(&sim_state.mutex);
        }

        return sim_bo;
}

static void
vc4_free_simulator_bo(struct vc4_simulator_bo *sim_bo)
{
        struct vc4_simulator_file *sim_file = sim_bo->file;
        struct drm_vc4_bo *bo = &sim_bo->base;
        struct drm_gem_cma_object *obj = &bo->base;

        if (sim_bo->winsys_map)
                munmap(sim_bo->winsys_map, obj->base.size);

        mtx_lock(&sim_state.mutex);
        u_mmFreeMem(sim_bo->block);
        if (sim_bo->handle) {
                struct hash_entry *entry =
                        _mesa_hash_table_search(sim_file->bo_map,
                                                int_to_key(sim_bo->handle));
                _mesa_hash_table_remove(sim_file->bo_map, entry);
        }
        mtx_unlock(&sim_state.mutex);
        ralloc_free(sim_bo);
}

static struct vc4_simulator_bo *
vc4_get_simulator_bo(struct vc4_simulator_file *file, int gem_handle)
{
        mtx_lock(&sim_state.mutex);
        struct hash_entry *entry =
                _mesa_hash_table_search(file->bo_map, int_to_key(gem_handle));
        mtx_unlock(&sim_state.mutex);

        return entry ? entry->data : NULL;
}
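
/**
 * Allocation entrypoint for the validation code imported from the kernel
 * (which is why it keeps the kernel's drm_gem_cma_create() name). It wraps
 * vc4_create_simulator_bo() with a handle of 0, so the BO stays out of the
 * per-file handle lookup table.
 */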
struct drm_gem_cma_object *
drm_gem_cma_create(struct drm_device *dev, size_t size)
{
        struct vc4_screen *screen = dev->screen;
        struct vc4_simulator_bo *sim_bo = vc4_create_simulator_bo(screen->fd,
                                                                  0, size);
        return &sim_bo->base.base;
}

static int
vc4_simulator_pin_bos(struct drm_device *dev, struct vc4_job *job,
                      struct vc4_exec_info *exec)
{
        int fd = dev->screen->fd;
        struct vc4_simulator_file *file = vc4_get_simulator_file_for_fd(fd);
        struct drm_vc4_submit_cl *args = exec->args;
        struct vc4_bo **bos = job->bo_pointers.base;

        exec->bo_count = args->bo_handle_count;
        exec->bo = calloc(exec->bo_count, sizeof(void *));
        for (int i = 0; i < exec->bo_count; i++) {
                struct vc4_bo *bo = bos[i];
                struct vc4_simulator_bo *sim_bo =
                        vc4_get_simulator_bo(file, bo->handle);
                struct drm_vc4_bo *drm_bo = &sim_bo->base;
                struct drm_gem_cma_object *obj = &drm_bo->base;

                drm_bo->bo = bo;
#if 0
                fprintf(stderr, "bo hindex %d: %s\n", i, bo->name);
#endif

                vc4_bo_map(bo);
                memcpy(obj->vaddr, bo->map, bo->size);

                exec->bo[i] = obj;

                /* The kernel does this validation at shader create ioctl
                 * time.
                 */
                if (strcmp(bo->name, "code") == 0) {
                        drm_bo->validated_shader = vc4_validate_shader(obj);
                        if (!drm_bo->validated_shader)
                                abort();
                }
        }
        return 0;
}

static int
vc4_simulator_unpin_bos(struct vc4_exec_info *exec)
{
        for (int i = 0; i < exec->bo_count; i++) {
                struct drm_gem_cma_object *obj = exec->bo[i];
                struct drm_vc4_bo *drm_bo = to_vc4_bo(&obj->base);
                struct vc4_bo *bo = drm_bo->bo;

                assert(*(uint32_t *)(obj->vaddr +
                                     obj->base.size) == BO_SENTINEL);
                memcpy(bo->map, obj->vaddr, bo->size);

                if (drm_bo->validated_shader) {
                        free(drm_bo->validated_shader->texture_samples);
                        free(drm_bo->validated_shader);
                }
        }

        free(exec->bo);

        return 0;
}
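
/**
 * When the VC4_DEBUG_DUMP flag is set, writes the job out to a
 * "vc4-dri-%d.dump" file: a version word, a drm_vc4_get_hang_state header,
 * one drm_vc4_get_hang_state_bo record per BO (plus the overflow area), and
 * then the raw contents of each of those areas, for offline inspection.
 */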
static void
vc4_dump_to_file(struct vc4_exec_info *exec)
{
        static int dumpno = 0;
        struct drm_vc4_get_hang_state *state;
        struct drm_vc4_get_hang_state_bo *bo_state;
        unsigned int dump_version = 0;

        if (!(vc4_debug & VC4_DEBUG_DUMP))
                return;

        state = calloc(1, sizeof(*state));

        int unref_count = 0;
        list_for_each_entry_safe(struct drm_vc4_bo, bo, &exec->unref_list,
                                 unref_head) {
                unref_count++;
        }

        /* Add one more for the overflow area that isn't wrapped in a BO. */
        state->bo_count = exec->bo_count + unref_count + 1;
        bo_state = calloc(state->bo_count, sizeof(*bo_state));

        char *filename = NULL;
        asprintf(&filename, "vc4-dri-%d.dump", dumpno++);
        FILE *f = fopen(filename, "w+");
        if (!f) {
                fprintf(stderr, "Couldn't open %s: %s", filename,
                        strerror(errno));
                return;
        }

        fwrite(&dump_version, sizeof(dump_version), 1, f);

        state->ct0ca = exec->ct0ca;
        state->ct0ea = exec->ct0ea;
        state->ct1ca = exec->ct1ca;
        state->ct1ea = exec->ct1ea;
        state->start_bin = exec->ct0ca;
        state->start_render = exec->ct1ca;
        fwrite(state, sizeof(*state), 1, f);

        int i;
        for (i = 0; i < exec->bo_count; i++) {
                struct drm_gem_cma_object *cma_bo = exec->bo[i];
                bo_state[i].handle = i; /* Not used by the parser. */
                bo_state[i].paddr = cma_bo->paddr;
                bo_state[i].size = cma_bo->base.size;
        }

        list_for_each_entry_safe(struct drm_vc4_bo, bo, &exec->unref_list,
                                 unref_head) {
                struct drm_gem_cma_object *cma_bo = &bo->base;
                bo_state[i].handle = 0;
                bo_state[i].paddr = cma_bo->paddr;
                bo_state[i].size = cma_bo->base.size;
                i++;
        }

        /* Add the static overflow memory area. */
        bo_state[i].handle = exec->bo_count;
        bo_state[i].paddr = sim_state.overflow->ofs;
        bo_state[i].size = sim_state.overflow->size;
        i++;

        fwrite(bo_state, sizeof(*bo_state), state->bo_count, f);

        for (int i = 0; i < exec->bo_count; i++) {
                struct drm_gem_cma_object *cma_bo = exec->bo[i];
                fwrite(cma_bo->vaddr, cma_bo->base.size, 1, f);
        }

        list_for_each_entry_safe(struct drm_vc4_bo, bo, &exec->unref_list,
                                 unref_head) {
                struct drm_gem_cma_object *cma_bo = &bo->base;
                fwrite(cma_bo->vaddr, cma_bo->base.size, 1, f);
        }

        void *overflow = calloc(1, sim_state.overflow->size);
        fwrite(overflow, 1, sim_state.overflow->size, f);
        free(overflow);

        free(state);
        free(bo_state);
        fclose(f);
}
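
/**
 * Simulated SUBMIT_CL: copies the job's BO contents into simulator memory,
 * validates and relocates the command lists, runs binning and rendering on
 * simpenrose, and then copies the results back out to the GEM BOs (and to
 * the winsys BO for the color buffer, if one is mapped).
 */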
int
vc4_simulator_flush(struct vc4_context *vc4,
                    struct drm_vc4_submit_cl *args, struct vc4_job *job)
{
        struct vc4_screen *screen = vc4->screen;
        int fd = screen->fd;
        struct vc4_simulator_file *file = vc4_get_simulator_file_for_fd(fd);
        struct vc4_surface *csurf = vc4_surface(vc4->framebuffer.cbufs[0]);
        struct vc4_resource *ctex =
                csurf ? vc4_resource(csurf->base.texture) : NULL;
        struct vc4_simulator_bo *csim_bo =
                ctex ? vc4_get_simulator_bo(file, ctex->bo->handle) : NULL;
        uint32_t winsys_stride = ctex ? csim_bo->winsys_stride : 0;
        uint32_t sim_stride = ctex ? ctex->slices[0].stride : 0;
        uint32_t row_len = MIN2(sim_stride, winsys_stride);
        struct vc4_exec_info exec;
        struct drm_device *dev = &file->dev;
        int ret;

        memset(&exec, 0, sizeof(exec));
        list_inithead(&exec.unref_list);

        if (ctex && csim_bo->winsys_map) {
#if 0
                fprintf(stderr, "%dx%d %d %d %d\n",
                        ctex->base.b.width0, ctex->base.b.height0,
                        winsys_stride,
                        sim_stride,
                        ctex->bo->size);
#endif

                for (int y = 0; y < ctex->base.b.height0; y++) {
                        memcpy(ctex->bo->map + y * sim_stride,
                               csim_bo->winsys_map + y * winsys_stride,
                               row_len);
                }
        }

        exec.args = args;

        ret = vc4_simulator_pin_bos(dev, job, &exec);
        if (ret)
                return ret;

        ret = vc4_cl_validate(dev, &exec);
        if (ret)
                return ret;

        if (vc4_debug & VC4_DEBUG_CL) {
                fprintf(stderr, "RCL:\n");
                vc4_dump_cl(sim_state.mem + exec.ct1ca,
                            exec.ct1ea - exec.ct1ca, true);
        }

        vc4_dump_to_file(&exec);

        if (exec.ct0ca != exec.ct0ea) {
                int bfc = simpenrose_do_binning(exec.ct0ca, exec.ct0ea);
                if (bfc != 1) {
                        fprintf(stderr, "Binning returned %d flushes, should be 1.\n",
                                bfc);
                        fprintf(stderr, "Relocated binning command list:\n");
                        vc4_dump_cl(sim_state.mem + exec.ct0ca,
                                    exec.ct0ea - exec.ct0ca, false);
                        abort();
                }
        }
        int rfc = simpenrose_do_rendering(exec.ct1ca, exec.ct1ea);
        if (rfc != 1) {
                fprintf(stderr, "Rendering returned %d frames, should be 1.\n",
                        rfc);
                fprintf(stderr, "Relocated render command list:\n");
                vc4_dump_cl(sim_state.mem + exec.ct1ca,
                            exec.ct1ea - exec.ct1ca, true);
                abort();
        }

        ret = vc4_simulator_unpin_bos(&exec);
        if (ret)
                return ret;

        list_for_each_entry_safe(struct drm_vc4_bo, bo, &exec.unref_list,
                                 unref_head) {
                struct vc4_simulator_bo *sim_bo =
                        (struct vc4_simulator_bo *)bo;
                struct drm_gem_cma_object *obj = &sim_bo->base.base;
                list_del(&bo->unref_head);
                assert(*(uint32_t *)(obj->vaddr + obj->base.size) ==
                       BO_SENTINEL);
                vc4_free_simulator_bo(sim_bo);
        }

        if (ctex && csim_bo->winsys_map) {
                for (int y = 0; y < ctex->base.b.height0; y++) {
                        memcpy(csim_bo->winsys_map + y * winsys_stride,
                               ctex->bo->map + y * sim_stride,
                               row_len);
                }
        }

        return 0;
}

/**
 * Map the underlying GEM object from the real hardware GEM handle.
 */
static void *
vc4_simulator_map_winsys_bo(int fd, struct vc4_simulator_bo *sim_bo)
{
        struct drm_vc4_bo *bo = &sim_bo->base;
        struct drm_gem_cma_object *obj = &bo->base;
        int ret;
        void *map;

        struct drm_mode_map_dumb map_dumb = {
                .handle = sim_bo->handle,
        };
        ret = drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
        if (ret != 0) {
                fprintf(stderr, "map ioctl failure\n");
                abort();
        }

        map = mmap(NULL, obj->base.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, map_dumb.offset);
        if (map == MAP_FAILED) {
                fprintf(stderr,
                        "mmap of bo %d (offset 0x%016llx, size %d) failed\n",
                        sim_bo->handle, (long long)map_dumb.offset,
                        (int)obj->base.size);
                abort();
        }

        return map;
}

/**
 * Do fixups after a BO has been opened from a handle.
 *
 * This could be done at DRM_IOCTL_GEM_OPEN/DRM_IOCTL_GEM_PRIME_FD_TO_HANDLE
 * time, but we're still using drmPrimeFDToHandle() so we have this helper to
 * be called afterward instead.
 */
void vc4_simulator_open_from_handle(int fd, uint32_t winsys_stride,
                                    int handle, uint32_t size)
{
        struct vc4_simulator_bo *sim_bo =
                vc4_create_simulator_bo(fd, handle, size);

        sim_bo->winsys_stride = winsys_stride;
        sim_bo->winsys_map = vc4_simulator_map_winsys_bo(fd, sim_bo);
}

/**
 * Simulated ioctl(fd, DRM_VC4_CREATE_BO) implementation.
 *
 * Making a VC4 BO is just a matter of making a corresponding BO on the host.
 */
static int
vc4_simulator_create_bo_ioctl(int fd, struct drm_vc4_create_bo *args)
{
        int ret;
        /* Size the dumb buffer as a 128-byte-wide, 8bpp image so that it
         * covers at least args->size bytes.
         */
        struct drm_mode_create_dumb create = {
                .width = 128,
                .bpp = 8,
                .height = (args->size + 127) / 128,
        };

        ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
        assert(create.size >= args->size);

        args->handle = create.handle;

        vc4_create_simulator_bo(fd, create.handle, args->size);

        return ret;
}

/**
 * Simulated ioctl(fd, DRM_VC4_CREATE_SHADER_BO) implementation.
 *
 * In simulation we defer shader validation until exec time. Just make a
 * host BO and memcpy the contents in.
 */
static int
vc4_simulator_create_shader_bo_ioctl(int fd,
                                     struct drm_vc4_create_shader_bo *args)
{
        int ret;
        struct drm_mode_create_dumb create = {
                .width = 128,
                .bpp = 8,
                .height = (args->size + 127) / 128,
        };

        ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
        if (ret)
                return ret;
        assert(create.size >= args->size);

        args->handle = create.handle;

        vc4_create_simulator_bo(fd, create.handle, args->size);

        struct drm_mode_map_dumb map = {
                .handle = create.handle
        };
        ret = drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
        if (ret)
                return ret;

        void *shader = mmap(NULL, args->size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, map.offset);
        memcpy(shader, (void *)(uintptr_t)args->data, args->size);
        munmap(shader, args->size);

        return 0;
}

/**
 * Simulated ioctl(fd, DRM_VC4_MMAP_BO) implementation.
 *
 * We just pass this straight through to dumb mmap.
 */
static int
vc4_simulator_mmap_bo_ioctl(int fd, struct drm_vc4_mmap_bo *args)
{
        int ret;
        struct drm_mode_map_dumb map = {
                .handle = args->handle,
        };

        ret = drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
        args->offset = map.offset;

        return ret;
}

static int
vc4_simulator_gem_close_ioctl(int fd, struct drm_gem_close *args)
{
        /* Free the simulator's internal tracking. */
        struct vc4_simulator_file *file = vc4_get_simulator_file_for_fd(fd);
        struct vc4_simulator_bo *sim_bo = vc4_get_simulator_bo(file,
                                                               args->handle);

        vc4_free_simulator_bo(sim_bo);

        /* Pass the call on down. */
        return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, args);
}
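
/**
 * Simulated ioctl(fd, DRM_VC4_GET_PARAM) implementation.
 *
 * Reports a fixed set of capabilities and V3D identity registers for the
 * simulated hardware, rather than querying the host kernel.
 */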
static int
vc4_simulator_get_param_ioctl(int fd, struct drm_vc4_get_param *args)
{
        switch (args->param) {
        case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
        case DRM_VC4_PARAM_SUPPORTS_ETC1:
        case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
                args->value = true;
                return 0;

        case DRM_VC4_PARAM_V3D_IDENT0:
                args->value = 0x02000000;
                return 0;

        case DRM_VC4_PARAM_V3D_IDENT1:
                args->value = 0x00000001;
                return 0;

        default:
                fprintf(stderr, "Unknown DRM_IOCTL_VC4_GET_PARAM(%lld)\n",
                        (long long)args->param);
                abort();
        }
}

int
vc4_simulator_ioctl(int fd, unsigned long request, void *args)
{
        switch (request) {
        case DRM_IOCTL_VC4_CREATE_BO:
                return vc4_simulator_create_bo_ioctl(fd, args);
        case DRM_IOCTL_VC4_CREATE_SHADER_BO:
                return vc4_simulator_create_shader_bo_ioctl(fd, args);
        case DRM_IOCTL_VC4_MMAP_BO:
                return vc4_simulator_mmap_bo_ioctl(fd, args);

        case DRM_IOCTL_VC4_WAIT_BO:
        case DRM_IOCTL_VC4_WAIT_SEQNO:
                /* We do all of the vc4 rendering synchronously, so we just
                 * return immediately on the wait ioctls. This ignores any
                 * native rendering to the host BO, so it does mean we race on
                 * front buffer rendering.
                 */
                return 0;

        case DRM_IOCTL_VC4_GET_PARAM:
                return vc4_simulator_get_param_ioctl(fd, args);

        case DRM_IOCTL_GEM_CLOSE:
                return vc4_simulator_gem_close_ioctl(fd, args);

        case DRM_IOCTL_GEM_OPEN:
        case DRM_IOCTL_GEM_FLINK:
                return drmIoctl(fd, request, args);
        default:
                fprintf(stderr, "Unknown ioctl 0x%08x\n", (int)request);
                abort();
        }
}
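
/* A minimal sketch of how a driver built with USE_VC4_SIMULATOR might route
 * its kernel calls through vc4_simulator_ioctl() instead of the real fd; the
 * vc4_ioctl() wrapper below is illustrative, not necessarily the exact
 * helper the driver uses:
 *
 *     static inline int
 *     vc4_ioctl(int fd, unsigned long request, void *arg)
 *     {
 *     #ifdef USE_VC4_SIMULATOR
 *             return vc4_simulator_ioctl(fd, request, arg);
 *     #else
 *             return drmIoctl(fd, request, arg);
 *     #endif
 *     }
 */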

static void
vc4_simulator_init_global(void)
{
        mtx_lock(&sim_state.mutex);
        if (sim_state.refcount++) {
                mtx_unlock(&sim_state.mutex);
                return;
        }

        sim_state.mem_size = 256 * 1024 * 1024;
        sim_state.mem = calloc(sim_state.mem_size, 1);
        if (!sim_state.mem)
                abort();
        sim_state.heap = u_mmInit(0, sim_state.mem_size);

        /* We supply our own memory so that we can have more aperture
         * available (256MB instead of simpenrose's default 64MB).
         */
        simpenrose_init_hardware_supply_mem(sim_state.mem, sim_state.mem_size);

        /* Carve out low memory for tile allocation overflow. The kernel
         * should be automatically handling overflow memory setup on real
         * hardware, but for simulation we just get one shot to set up enough
         * overflow memory before execution. This overflow mem will be used
         * up over the whole lifetime of simpenrose (not reused on each
         * flush), so it had better be big.
         */
        sim_state.overflow = u_mmAllocMem(sim_state.heap, 32 * 1024 * 1024,
                                          PAGE_ALIGN2, 0);
        simpenrose_supply_overflow_mem(sim_state.overflow->ofs,
                                       sim_state.overflow->size);

        mtx_unlock(&sim_state.mutex);

        sim_state.fd_map =
                _mesa_hash_table_create(NULL,
                                        _mesa_hash_pointer,
                                        _mesa_key_pointer_equal);
}

void
vc4_simulator_init(struct vc4_screen *screen)
{
        vc4_simulator_init_global();

        screen->sim_file = rzalloc(screen, struct vc4_simulator_file);

        screen->sim_file->bo_map =
                _mesa_hash_table_create(screen->sim_file,
                                        _mesa_hash_pointer,
                                        _mesa_key_pointer_equal);

        mtx_lock(&sim_state.mutex);
        _mesa_hash_table_insert(sim_state.fd_map, int_to_key(screen->fd + 1),
                                screen->sim_file);
        mtx_unlock(&sim_state.mutex);

        screen->sim_file->dev.screen = screen;
}

void
vc4_simulator_destroy(struct vc4_screen *screen)
{
        mtx_lock(&sim_state.mutex);
        if (!--sim_state.refcount) {
                _mesa_hash_table_destroy(sim_state.fd_map, NULL);
                u_mmDestroy(sim_state.heap);
                free(sim_state.mem);
                /* No memsetting it, because it contains the mutex. */
        }
        mtx_unlock(&sim_state.mutex);
}

#endif /* USE_VC4_SIMULATOR */