/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"

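/* Drop a kernel GEM handle by issuing the generic DRM_IOCTL_GEM_CLOSE ioctl
 * on the device fd; any error from the ioctl is intentionally ignored here. */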
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
				     uint32_t handle)
{
	struct drm_gem_close args = {};

	args.handle = handle;
	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

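/* Allocate a new buffer object in GTT and/or VRAM via DRM_AMDGPU_GEM_CREATE.
 * The request must name at least one of the two heaps; size, alignment and
 * placement flags are passed through to the kernel unchanged.
 *
 * A minimal usage sketch (error handling omitted, assuming an already
 * initialized amdgpu_device_handle "dev"):
 *
 *	struct amdgpu_bo_alloc_request req = {0};
 *	amdgpu_bo_handle bo;
 *
 *	req.alloc_size = 4096;
 *	req.phys_alignment = 4096;
 *	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
 *	if (!amdgpu_bo_alloc(dev, &req, &bo))
 *		... use bo, then release it with amdgpu_bo_free(bo) ...
 */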
int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    amdgpu_bo_handle *buf_handle)
{
	struct amdgpu_bo *bo;
	union drm_amdgpu_gem_create args;
	unsigned heap = alloc_buffer->preferred_heap;
	int r = 0;

	/* It's an error if the heap is not specified */
	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
		return -EINVAL;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = alloc_buffer->alloc_size;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;

	/* Set the placement. */
	args.in.domains = heap;
	args.in.domain_flags = alloc_buffer->flags;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r) {
		free(bo);
		return r;
	}

	bo->handle = args.out.handle;

	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	*buf_handle = bo;
	return 0;
}

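/* Attach UMD-private metadata and tiling info to a buffer object through the
 * DRM_AMDGPU_GEM_METADATA ioctl; the opaque blob may not exceed the size of
 * args.data.data as defined by the kernel ABI. */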
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
			   struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}

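/* Query allocation parameters and previously set metadata for a buffer
 * object. Combines DRM_AMDGPU_GEM_METADATA (GET) with the
 * AMDGPU_GEM_OP_GET_GEM_CREATE_INFO operation of DRM_AMDGPU_GEM_OP. */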
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
			 struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Validate the BO passed in */
	if (!bo->handle)
		return -EINVAL;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;

	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}

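/* Record the bo in the per-device handle hash table so that a later import of
 * the same KMS handle or dma-buf returns this amdgpu_bo instead of a new one. */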
static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_handles,
			    (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

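/* Create a global GEM flink name for the bo. If the device uses a separate
 * flink fd (e.g. a render node paired with a primary node), the handle is
 * first converted through a dma-buf fd so the flink ioctl can be issued on
 * the flink fd; the temporary handle on that fd is closed again afterwards. */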
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;

	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}
	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd) {
		struct drm_gem_close args = {};
		args.handle = handle;
		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
	}

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_flink_names,
			    (void*)(uintptr_t)bo->flink_name,
			    bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return 0;
}

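/* Export a buffer object as a flink name, a KMS (GEM) handle, or a dma-buf
 * fd. For KMS and dma-buf exports the bo is also remembered in the handle
 * table so a matching amdgpu_bo_import() returns the same amdgpu_bo.
 *
 * Sketch of a dma-buf export (error handling omitted):
 *
 *	uint32_t fd_out;
 *	if (!amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &fd_out))
 *		... pass (int)fd_out to another process or API ...
 */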
int amdgpu_bo_export(amdgpu_bo_handle bo,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
		amdgpu_add_handle_to_table(bo);
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		amdgpu_add_handle_to_table(bo);
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
					  DRM_CLOEXEC | DRM_RDWR,
					  (int*)shared_handle);
	}
	return -EINVAL;
}

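/* Import a buffer object from a flink name or a dma-buf fd. The per-device
 * hash tables guarantee that importing the same underlying buffer twice
 * returns the same amdgpu_bo with its reference count bumped; importing a
 * raw KMS handle is not supported and returns -EPERM. */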
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	int r;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		uint32_t handle;
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r) {
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			pthread_mutex_unlock(&dev->bo_table_mutex);
			amdgpu_close_kms_handle(dev, handle);
			return -errno;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = util_hash_table_get(dev->bo_flink_names,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = util_hash_table_get(dev->bo_handles,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
		/* Importing a KMS handle is not allowed. */
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EPERM;

	default:
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EINVAL;
	}

	if (bo) {
		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);
		pthread_mutex_unlock(&dev->bo_table_mutex);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);
		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
			amdgpu_close_kms_handle(dev, shared_handle);
		}
		return -ENOMEM;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r) {
			free(bo);
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		bo->handle = open_arg.handle;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

			close(dma_fd);

			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
		}
		bo->flink_name = shared_handle;
		bo->alloc_size = open_arg.size;
		util_hash_table_set(dev->bo_flink_names,
				    (void*)(uintptr_t)bo->flink_name, bo);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo->handle = shared_handle;
		bo->alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&dev->bo_table_mutex);

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	return 0;
}

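/* Drop one reference to a buffer object. When the last reference goes away
 * the bo is removed from the handle tables, any remaining CPU mapping is torn
 * down, the kernel GEM handle is closed and the wrapper is freed. */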
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	struct amdgpu_device *dev;
	struct amdgpu_bo *bo = buf_handle;

	assert(bo != NULL);
	dev = bo->dev;
	pthread_mutex_lock(&dev->bo_table_mutex);

	if (update_references(&bo->refcount, NULL)) {
		/* Remove the buffer from the hash tables. */
		util_hash_table_remove(dev->bo_handles,
					(void*)(uintptr_t)bo->handle);

		if (bo->flink_name) {
			util_hash_table_remove(dev->bo_flink_names,
						(void*)(uintptr_t)bo->flink_name);
		}

		/* Release CPU access. */
		if (bo->cpu_map_count > 0) {
			bo->cpu_map_count = 1;
			amdgpu_bo_cpu_unmap(bo);
		}

		amdgpu_close_kms_handle(dev, bo->handle);
		pthread_mutex_destroy(&bo->cpu_access_mutex);
		free(bo);
	}

	pthread_mutex_unlock(&dev->bo_table_mutex);
	return 0;
}

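/* Map a buffer object into the CPU address space. The mapping is reference
 * counted per bo: the first call asks the kernel for the mmap offset via
 * DRM_AMDGPU_GEM_MMAP and maps the whole allocation read/write; subsequent
 * calls just return the cached pointer and bump cpu_map_count. */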
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}

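/* Undo one amdgpu_bo_cpu_map(). Only when the map count drops to zero is the
 * mapping actually removed with drm_munmap(); calling this on an unmapped bo
 * returns -EINVAL. */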
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EINVAL;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}

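/* Report the recommended allocation alignments, taken from the cached kernel
 * device info: the PTE fragment size for local (VRAM) allocations and the
 * GART page size for remote (GTT) ones. */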
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}

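/* Wait until the GPU is done with a buffer object, for at most timeout_ns
 * nanoseconds. On success *busy reports whether the buffer was still in use
 * when the DRM_AMDGPU_GEM_WAIT_IDLE ioctl returned. */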
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
			    uint64_t timeout_ns,
			    bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}

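/* Wrap an existing anonymous CPU allocation in a buffer object using the
 * DRM_AMDGPU_GEM_USERPTR ioctl. The memory must stay valid for the lifetime
 * of the bo; the ANONONLY, REGISTER and VALIDATE flags are always set here so
 * the kernel pins and validates the pages itself.
 *
 * A minimal sketch (assuming "dev" is initialized and the caller provides a
 * page-aligned allocation):
 *
 *	void *ptr;
 *	amdgpu_bo_handle bo;
 *
 *	posix_memalign(&ptr, getpagesize(), 64 * 1024);
 *	if (!amdgpu_create_bo_from_user_mem(dev, ptr, 64 * 1024, &bo))
 *		... use bo, then amdgpu_bo_free(bo) and free(ptr) ...
 */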
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
				    void *cpu,
				    uint64_t size,
				    amdgpu_bo_handle *buf_handle)
{
	int r;
	struct amdgpu_bo *bo;
	struct drm_amdgpu_gem_userptr args;

	args.addr = (uintptr_t)cpu;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
		AMDGPU_GEM_USERPTR_VALIDATE;
	args.size = size;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		return r;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = args.handle;

	*buf_handle = bo;

	return r;
}

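/* Build a kernel-side BO list from an array of buffer objects and optional
 * per-buffer priorities, for later use in command submission. On success
 * *result owns the kernel list handle and must be released with
 * amdgpu_bo_list_destroy(). */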
int amdgpu_bo_list_create(amdgpu_device_handle dev,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios,
			  amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	*result = malloc(sizeof(struct amdgpu_bo_list));
	if (!*result) {
		free(list);
		return -ENOMEM;
	}

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	if (r) {
		free(*result);
		return r;
	}

	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
	return 0;
}

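/* Ask the kernel to destroy a BO list and, if that succeeds, free the
 * user-space wrapper as well. */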
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	if (!r)
		free(list);

	return r;
}

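/* Replace the contents of an existing BO list with a new set of buffers and
 * priorities, using AMDGPU_BO_LIST_OP_UPDATE on the same kernel handle. */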
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}

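/* Map or unmap a buffer object in the GPU virtual address space with the
 * default READABLE | WRITEABLE | EXECUTABLE page protection. The size is
 * rounded up to the CPU page size; note that the flags argument is not
 * forwarded to the kernel here. For finer control use amdgpu_bo_va_op_raw().
 *
 * A typical mapping call might look like (bo_size and va chosen and reserved
 * by the caller):
 *
 *	amdgpu_bo_va_op(bo, 0, bo_size, va, 0, AMDGPU_VA_OP_MAP);
 */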
int amdgpu_bo_va_op(amdgpu_bo_handle bo,
		     uint64_t offset,
		     uint64_t size,
		     uint64_t addr,
		     uint64_t flags,
		     uint32_t ops)
{
	amdgpu_device_handle dev = bo->dev;

	size = ALIGN(size, getpagesize());

	return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
				   AMDGPU_VM_PAGE_READABLE |
				   AMDGPU_VM_PAGE_WRITEABLE |
				   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

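/* Lower-level VA operation: the caller supplies the exact VM page flags and
 * one of the MAP, UNMAP, REPLACE or CLEAR operations, which is passed
 * straight to the DRM_AMDGPU_GEM_VA ioctl. bo may be NULL for operations
 * that do not reference a buffer (handle 0 is used in that case). */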
int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
			amdgpu_bo_handle bo,
			uint64_t offset,
			uint64_t size,
			uint64_t addr,
			uint64_t flags,
			uint32_t ops)
{
	struct drm_amdgpu_gem_va va;
	int r;

	if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
	    ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
		return -EINVAL;

	memset(&va, 0, sizeof(va));
	va.handle = bo ? bo->handle : 0;
	va.operation = ops;
	va.flags = flags;
	va.va_address = addr;
	va.offset_in_bo = offset;
	va.map_size = size;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

	return r;
}