/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

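/* table_lock serializes access to the per-device handle and name hash
 * tables (and, via fd_bo_cache_free(), the bo-cache bucket lists); the
 * helpers below marked "call w/ table_lock held" expect the caller to
 * hold it.
 */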
drm_private pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
drm_private void bo_del(struct fd_bo *bo);

/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct fd_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: */
	drmHashInsert(bo->dev->name_table, name, bo);
}

/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo * lookup_bo(void *tbl, uint32_t key)
{
	struct fd_bo *bo = NULL;
	if (!drmHashLookup(tbl, key, (void **)&bo)) {
		/* found, incr refcnt and return: */
		bo = fd_bo_ref(bo);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}
	return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo * bo_from_handle(struct fd_device *dev,
		uint32_t size, uint32_t handle)
{
	struct fd_bo *bo;

	bo = dev->funcs->bo_from_handle(dev, size, handle);
	if (!bo) {
		struct drm_gem_close req = {
				.handle = handle,
		};
		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		return NULL;
	}
	bo->dev = fd_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourself into the handle table: */
	drmHashInsert(dev->handle_table, handle, bo);
	return bo;
}

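/* Allocate a new buffer object of at least 'size' bytes.  The bucket cache
 * is tried first (which may round 'size' up); otherwise a new GEM handle is
 * requested from the backend and wrapped in an fd_bo.  Returns NULL on
 * failure.
 */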
struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = NULL;
	uint32_t handle;
	int ret;

	bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
	if (bo)
		return bo;

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	if (bo)
		bo->bo_reuse = TRUE;
	pthread_mutex_unlock(&table_lock);

	VG_BO_ALLOC(bo);

	return bo;
}

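/* Wrap an existing GEM handle in an fd_bo.  If the handle is already known
 * the existing bo is returned with its refcount bumped, so callers always
 * get back the same fd_bo for a given handle.
 */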
struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

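/* Import a dma-buf fd via PRIME.  If the fd resolves to an already-known
 * GEM handle, the existing bo is returned; otherwise the buffer size is
 * discovered with lseek(SEEK_END) and a new fd_bo is created.
 */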
struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

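/* Import a buffer by its flink (GEM global) name.  Both the name table and
 * the handle table are consulted before creating a new fd_bo, so repeated
 * opens of the same name share one bo.
 */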
struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo) {
		set_name(bo, name);
		VG_BO_ALLOC(bo);
	}

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

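/* Get the GPU virtual address of the buffer (resolved by the backend).
 * fd_bo_put_iova() is the nominal counterpart; it is currently a no-op.
 */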
uint64_t fd_bo_get_iova(struct fd_bo *bo)
{
	return bo->funcs->iova(bo);
}

void fd_bo_put_iova(struct fd_bo *bo)
{
	/* currently a no-op */
}

struct fd_bo * fd_bo_ref(struct fd_bo *bo)
{
	atomic_inc(&bo->refcnt);
	return bo;
}

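/* Drop a reference.  When the last reference goes away, a bo that came from
 * fd_bo_new() (bo_reuse set) is returned to the bucket cache if possible;
 * otherwise it is destroyed via bo_del().
 */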
void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	if (bo->bo_reuse && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;

	bo_del(bo);
	fd_device_del_locked(dev);
out:
	pthread_mutex_unlock(&table_lock);
}

/* Called under table_lock */
drm_private void bo_del(struct fd_bo *bo)
{
	VG_BO_FREE(bo);

	if (bo->map)
		drm_munmap(bo->map, bo->size);

	/* TODO probably bo's in bucket list get removed from
	 * handle table??
	 */

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		drmHashDelete(bo->dev->handle_table, bo->handle);
		if (bo->name)
			drmHashDelete(bo->dev->name_table, bo->name);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	bo->funcs->destroy(bo);
}

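/* Export (or return the already-cached) flink name for the buffer.  Once a
 * global name exists, bo_reuse is cleared so the buffer is not recycled
 * through the bucket cache.
 */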
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
		bo->bo_reuse = FALSE;
	}

	*name = bo->name;

	return 0;
}

uint32_t fd_bo_handle(struct fd_bo *bo)
{
	return bo->handle;
}

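/* Export the buffer as a dma-buf fd via PRIME.  As with flink names,
 * bo_reuse is cleared so an exported buffer is not recycled through the
 * bucket cache.  Returns the new fd, or a negative error code.
 */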
int fd_bo_dmabuf(struct fd_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
			&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	bo->bo_reuse = FALSE;

	return prime_fd;
}

uint32_t fd_bo_size(struct fd_bo *bo)
{
	return bo->size;
}

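/* Lazily create (and cache) a CPU mapping of the buffer.  The backend
 * supplies the mmap offset; the mapping is writable and shared, and is
 * torn down in bo_del().  Returns NULL on failure.
 */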
void * fd_bo_map(struct fd_bo *bo)
{
	if (!bo->map) {
		uint64_t offset;
		int ret;

		ret = bo->funcs->offset(bo, &offset);
		if (ret) {
			return NULL;
		}

		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				bo->dev->fd, offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}
	return bo->map;
}

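/* Synchronize CPU access: wait (or check, depending on 'op') until the GPU
 * is done with the buffer before the CPU reads/writes it.  'op' is expected
 * to be a mask of the DRM_FREEDRENO_PREP_* flags from freedreno_drmif.h.
 */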
/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	return bo->funcs->cpu_prep(bo, pipe, op);
}

void fd_bo_cpu_fini(struct fd_bo *bo)
{
	bo->funcs->cpu_fini(bo);
}

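/* Typical lifecycle of a bo, as a rough sketch (illustrative only; it
 * assumes fd_device_new()/fd_pipe_new() from elsewhere in libdrm_freedreno
 * and the DRM_FREEDRENO_*/FD_PIPE_* names from freedreno_drmif.h):
 *
 *   struct fd_device *dev = fd_device_new(drm_fd);
 *   struct fd_pipe *pipe = fd_pipe_new(dev, FD_PIPE_3D);
 *   struct fd_bo *bo = fd_bo_new(dev, 4096, DRM_FREEDRENO_GEM_TYPE_KMEM);
 *   uint32_t *ptr = fd_bo_map(bo);
 *
 *   fd_bo_cpu_prep(bo, pipe, DRM_FREEDRENO_PREP_WRITE);
 *   ptr[0] = 0xdeadbeef;            // CPU writes through the mapping
 *   fd_bo_cpu_fini(bo);
 *
 *   fd_bo_del(bo);                  // may just return the bo to the cache
 */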
#if !HAVE_FREEDRENO_KGSL
struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
{
	return NULL;
}
#endif