
Lines matching refs: bo (libdrm, amdgpu/amdgpu_bo.c; the number at the start of each matched line is its line number in the source file)

56 drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
59 pthread_mutex_lock(&bo->dev->bo_table_mutex);
60 util_hash_table_remove(bo->dev->bo_handles,
61 (void*)(uintptr_t)bo->handle);
62 if (bo->flink_name) {
63 util_hash_table_remove(bo->dev->bo_flink_names,
64 (void*)(uintptr_t)bo->flink_name);
66 pthread_mutex_unlock(&bo->dev->bo_table_mutex);
69 if (bo->cpu_map_count > 0) {
70 bo->cpu_map_count = 1;
71 amdgpu_bo_cpu_unmap(bo);
74 amdgpu_close_kms_handle(bo->dev, bo->handle);
75 pthread_mutex_destroy(&bo->cpu_access_mutex);
76 free(bo);
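
The teardown above (lines 56-76) is the internal path: the BO is unhooked from the device's handle and flink-name tables under bo_table_mutex, a leftover CPU mapping is forced down by resetting cpu_map_count to 1 before calling amdgpu_bo_cpu_unmap, and the kernel handle is closed. Applications never call it directly; they drop their reference through the public amdgpu_bo_free(), which frees the wrapper only when the refcount set up below reaches zero. A minimal caller-side sketch (release_bo is a hypothetical helper):

#include <amdgpu.h>

/* Drop our reference; amdgpu_bo_free_internal() above runs only when
 * the refcount, shared with any imports of the same handle, hits zero. */
static void release_bo(amdgpu_bo_handle bo)
{
        if (bo)
                amdgpu_bo_free(bo);
}
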
83 struct amdgpu_bo *bo;
92 bo = calloc(1, sizeof(struct amdgpu_bo));
93 if (!bo)
96 atomic_set(&bo->refcount, 1);
97 bo->dev = dev;
98 bo->alloc_size = alloc_buffer->alloc_size;
112 free(bo);
116 bo->handle = args.out.handle;
118 pthread_mutex_init(&bo->cpu_access_mutex, NULL);
120 *buf_handle = bo;
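
Allocation (lines 83-120) calloc's the wrapper, starts the refcount at 1, and records the handle the kernel returns from the GEM create ioctl (the drmCommandWriteRead call itself sits among the unmatched lines). A minimal sketch driving it through the public amdgpu_bo_alloc(); the render-node path is an assumption and error handling is abbreviated:

#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

int main(void)
{
        uint32_t major, minor;
        amdgpu_device_handle dev;
        amdgpu_bo_handle bo;
        struct amdgpu_bo_alloc_request req = {
                .alloc_size     = 4096,
                .phys_alignment = 4096,
                .preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
        };
        int fd = open("/dev/dri/renderD128", O_RDWR); /* node path varies */

        if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
                return 1;
        if (amdgpu_bo_alloc(dev, &req, &bo))
                return 1;

        amdgpu_bo_free(bo);
        amdgpu_device_deinitialize(dev);
        close(fd);
        return 0;
}

Build against the libdrm_amdgpu pkg-config module, e.g. cc alloc.c $(pkg-config --cflags --libs libdrm_amdgpu).
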
124 int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
129 args.handle = bo->handle;
142 return drmCommandWriteRead(bo->dev->fd,
147 int amdgpu_bo_query_info(amdgpu_bo_handle bo,
155 /* Validate the BO passed in */
156 if (!bo->handle)
160 metadata.handle = bo->handle;
163 r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
173 gem_op.handle = bo->handle;
177 r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
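
amdgpu_bo_set_metadata (lines 124-142) is a thin wrapper over DRM_AMDGPU_GEM_METADATA, and amdgpu_bo_query_info (lines 147-177) combines that ioctl with DRM_AMDGPU_GEM_OP to return the metadata together with the allocation parameters. A small sketch pairing the two; tag_and_query is a hypothetical helper and the tiling_info encoding is ASIC-specific:

#include <string.h>
#include <amdgpu.h>

/* Attach metadata to a BO and read it back through the two wrappers
 * shown above.  'bo' comes from amdgpu_bo_alloc(). */
static int tag_and_query(amdgpu_bo_handle bo)
{
        struct amdgpu_bo_metadata meta;
        struct amdgpu_bo_info info;
        int r;

        memset(&meta, 0, sizeof(meta));
        meta.tiling_info = 0;                   /* 0 = linear, by convention */
        r = amdgpu_bo_set_metadata(bo, &meta);  /* DRM_AMDGPU_GEM_METADATA */
        if (r)
                return r;

        memset(&info, 0, sizeof(info));
        r = amdgpu_bo_query_info(bo, &info);    /* GEM_METADATA + GEM_OP */
        if (r)
                return r;
        return info.metadata.tiling_info == meta.tiling_info ? 0 : -1;
}
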
198 static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
200 pthread_mutex_lock(&bo->dev->bo_table_mutex);
201 util_hash_table_set(bo->dev->bo_handles,
202 (void*)(uintptr_t)bo->handle, bo);
203 pthread_mutex_unlock(&bo->dev->bo_table_mutex);
206 static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
213 fd = bo->dev->fd;
214 handle = bo->handle;
215 if (bo->flink_name)
219 if (bo->dev->flink_fd != bo->dev->fd) {
220 r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
223 r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
228 fd = bo->dev->flink_fd;
237 bo->flink_name = flink.name;
239 if (bo->dev->flink_fd != bo->dev->fd) {
242 drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
245 pthread_mutex_lock(&bo->dev->bo_table_mutex);
246 util_hash_table_set(bo->dev->bo_flink_names,
247 (void*)(uintptr_t)bo->flink_name,
248 bo);
249 pthread_mutex_unlock(&bo->dev->bo_table_mutex);
254 int amdgpu_bo_export(amdgpu_bo_handle bo,
262 r = amdgpu_bo_export_flink(bo);
266 *shared_handle = bo->flink_name;
270 amdgpu_add_handle_to_table(bo);
271 *shared_handle = bo->handle;
275 amdgpu_add_handle_to_table(bo);
276 return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
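
amdgpu_bo_export (lines 254-276) picks one of three paths: a GEM flink name (via amdgpu_bo_export_flink above, which detours through drmPrimeHandleToFD/drmPrimeFDToHandle when the flink fd differs from the render fd), the raw KMS handle, or a dma-buf fd; the first two also register the handle in the device table so a later import finds the same BO. A sketch exercising the three handle types (export_all is a hypothetical helper):

#include <stdint.h>
#include <amdgpu.h>

/* Export the same BO three ways; which one you need depends on the
 * consumer: flink for legacy sharing, a KMS handle within one device,
 * a dma-buf fd for cross-device or cross-process sharing. */
static int export_all(amdgpu_bo_handle bo)
{
        uint32_t flink_name, kms_handle, dmabuf_fd;
        int r;

        r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_gem_flink_name,
                             &flink_name);
        if (r)
                return r;
        r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_kms, &kms_handle);
        if (r)
                return r;
        /* For dma-buf the uint32_t out-parameter carries a file descriptor. */
        return amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd,
                                &dmabuf_fd);
}
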
288 struct amdgpu_bo *bo = NULL;
293 /* We must maintain a list of pairs <handle, bo>, so that we always
324 bo = util_hash_table_get(dev->bo_flink_names,
329 bo = util_hash_table_get(dev->bo_handles,
343 if (bo) {
347 atomic_inc(&bo->refcount);
349 output->buf_handle = bo;
350 output->alloc_size = bo->alloc_size;
354 bo = calloc(1, sizeof(struct amdgpu_bo));
355 if (!bo) {
369 free(bo);
374 bo->handle = open_arg.handle;
376 r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
378 free(bo);
382 r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);
387 free(bo);
392 bo->flink_name = shared_handle;
393 bo->alloc_size = open_arg.size;
395 (void*)(uintptr_t)bo->flink_name, bo);
399 bo->handle = shared_handle;
400 bo->alloc_size = dma_buf_size;
408 atomic_set(&bo->refcount, 1);
409 bo->dev = dev;
410 pthread_mutex_init(&bo->cpu_access_mutex, NULL);
412 util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
415 output->buf_handle = bo;
416 output->alloc_size = bo->alloc_size;
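
Import (lines 288-416) first looks the shared handle up in bo_flink_names or bo_handles, which is why the tables exist at all: re-importing a known buffer just takes another reference (atomic_inc) and returns the existing wrapper. Only on a miss does it go through DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle and insert the new BO into the table. A minimal consumer-side sketch for the dma-buf case (import_dmabuf is a hypothetical helper):

#include <stdint.h>
#include <amdgpu.h>

/* Import a dma-buf fd received from another process or device.  If this
 * device already tracks the underlying buffer, the code above bumps the
 * refcount instead of creating a second wrapper. */
static int import_dmabuf(amdgpu_device_handle dev, int dmabuf_fd,
                         amdgpu_bo_handle *bo, uint64_t *size)
{
        struct amdgpu_bo_import_result res;
        int r = amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
                                 (uint32_t)dmabuf_fd, &res);
        if (r)
                return r;
        *bo = res.buf_handle;
        *size = res.alloc_size;
        return 0;
}
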
427 int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
433 pthread_mutex_lock(&bo->cpu_access_mutex);
435 if (bo->cpu_ptr) {
437 assert(bo->cpu_map_count > 0);
438 bo->cpu_map_count++;
439 *cpu = bo->cpu_ptr;
440 pthread_mutex_unlock(&bo->cpu_access_mutex);
444 assert(bo->cpu_map_count == 0);
450 args.in.handle = bo->handle;
452 r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
455 pthread_mutex_unlock(&bo->cpu_access_mutex);
460 ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
461 bo->dev->fd, args.out.addr_ptr);
463 pthread_mutex_unlock(&bo->cpu_access_mutex);
467 bo->cpu_ptr = ptr;
468 bo->cpu_map_count = 1;
469 pthread_mutex_unlock(&bo->cpu_access_mutex);
475 int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
479 pthread_mutex_lock(&bo->cpu_access_mutex);
480 assert(bo->cpu_map_count >= 0);
482 if (bo->cpu_map_count == 0) {
484 pthread_mutex_unlock(&bo->cpu_access_mutex);
488 bo->cpu_map_count--;
489 if (bo->cpu_map_count > 0) {
491 pthread_mutex_unlock(&bo->cpu_access_mutex);
495 r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
496 bo->cpu_ptr = NULL;
497 pthread_mutex_unlock(&bo->cpu_access_mutex);
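
The map/unmap pair (lines 427-497) keeps a per-BO count under cpu_access_mutex: the first map issues DRM_AMDGPU_GEM_MMAP to obtain an mmap offset and drm_mmap()s it, later maps just bump the count and return the cached cpu_ptr, and unmap only munmaps once the count falls back to zero. A short round-trip sketch covering both calls (fill_bo is a hypothetical helper):

#include <stdint.h>
#include <string.h>
#include <amdgpu.h>

/* Write a pattern through a CPU mapping.  Nested maps are cheap: a
 * second amdgpu_bo_cpu_map() returns the cached pointer, as above. */
static int fill_bo(amdgpu_bo_handle bo, uint64_t size)
{
        void *cpu;
        int r = amdgpu_bo_cpu_map(bo, &cpu); /* GEM_MMAP + mmap on first call */
        if (r)
                return r;
        memset(cpu, 0xa5, (size_t)size);
        return amdgpu_bo_cpu_unmap(bo);      /* munmap when count reaches 0 */
}
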
509 int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
517 args.in.handle = bo->handle;
520 r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
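
amdgpu_bo_wait_for_idle (lines 509-520) forwards to DRM_AMDGPU_GEM_WAIT_IDLE with a timeout in nanoseconds. A sketch, assuming the libdrm signature with a bool busy out-parameter (wait_idle is a hypothetical helper):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <amdgpu.h>

/* Block up to 1 s until the GPU is done with the buffer; 'busy' reports
 * whether the timeout expired with work still pending. */
static int wait_idle(amdgpu_bo_handle bo)
{
        bool busy = false;
        int r = amdgpu_bo_wait_for_idle(bo, 1000000000ull, &busy);
        return r ? r : (busy ? -EBUSY : 0);
}
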
538 struct amdgpu_bo *bo;
550 bo = calloc(1, sizeof(struct amdgpu_bo));
551 if (!bo)
554 atomic_set(&bo->refcount, 1);
555 bo->dev = dev;
556 bo->alloc_size = size;
557 bo->handle = args.handle;
559 *buf_handle = bo;
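
The fragment at lines 538-559 is the userptr path (amdgpu_create_bo_from_user_mem): existing page-aligned application memory is wrapped in a BO, with the handle in args.handle coming back from the kernel. A sketch, assuming a 4 KiB page size (wrap_user_mem is a hypothetical helper):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <amdgpu.h>

/* Wrap freshly allocated, page-aligned memory as a GPU-accessible
 * userptr BO.  The kernel rejects unaligned ranges. */
static int wrap_user_mem(amdgpu_device_handle dev, uint64_t size,
                         void **cpu, amdgpu_bo_handle *bo)
{
        if (posix_memalign(cpu, 4096, size))
                return -ENOMEM;
        return amdgpu_create_bo_from_user_mem(dev, *cpu, size, bo);
}
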
678 int amdgpu_bo_va_op(amdgpu_bo_handle bo,
685 amdgpu_device_handle dev = bo->dev;
693 va.handle = bo->handle;
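
Finally, amdgpu_bo_va_op (lines 678-693) manages GPU virtual-address mappings for the BO through the VA ioctl. A sketch that reserves a VA range and maps the whole buffer read/write; amdgpu_va_range_alloc is libdrm's VA range allocator (present in newer libdrm releases) and map_gpu_va is a hypothetical helper:

#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Reserve a GPU VA range, then map the whole BO at it, readable and
 * writeable.  Undo with AMDGPU_VA_OP_UNMAP before freeing the BO. */
static int map_gpu_va(amdgpu_device_handle dev, amdgpu_bo_handle bo,
                      uint64_t size, uint64_t *va, amdgpu_va_handle *range)
{
        int r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
                                      size, 4096, 0, va, range, 0);
        if (r)
                return r;
        return amdgpu_bo_va_op(bo, 0, size, *va,
                               AMDGPU_VM_PAGE_READABLE |
                               AMDGPU_VM_PAGE_WRITEABLE,
                               AMDGPU_VA_OP_MAP);
}
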