/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void bo_del(struct fd_bo *bo);

/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct fd_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: */
	drmHashInsert(bo->dev->name_table, name, bo);
}

/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo * lookup_bo(void *tbl, uint32_t key)
{
	struct fd_bo *bo = NULL;
	if (!drmHashLookup(tbl, key, (void **)&bo)) {
		/* found, incr refcnt and return: */
		bo = fd_bo_ref(bo);
	}
	return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo * bo_from_handle(struct fd_device *dev,
		uint32_t size, uint32_t handle)
{
	struct fd_bo *bo;

	bo = dev->funcs->bo_from_handle(dev, size, handle);
	if (!bo) {
		struct drm_gem_close req = {
				.handle = handle,
		};
		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		return NULL;
	}
	bo->dev = fd_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourself into the handle table: */
	drmHashInsert(dev->handle_table, handle, bo);
	return bo;
}

/* Frees older cached buffers.  Called under table_lock */
void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
{
	int i;

	if (dev->time == time)
		return;

	for (i = 0; i < dev->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
		struct fd_bo *bo;

		while (!LIST_IS_EMPTY(&bucket->list)) {
			bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

			/* keep things in cache for at least 1 second: */
			if (time && ((time - bo->free_time) <= 1))
				break;

			list_del(&bo->list);
			bo_del(bo);
		}
	}

	dev->time = time;
}

static struct fd_bo_bucket * get_bucket(struct fd_device *dev, uint32_t size)
{
	int i;

	/* hmm, this is what intel does, but I suppose we could calculate our
	 * way to the correct bucket size rather than looping..
	 */
	for (i = 0; i < dev->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

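/* Illustrative aside, not part of the library: the linear scan in
 * get_bucket() above could in principle be replaced by computing the
 * bucket index directly, as the comment there suggests.  A minimal
 * sketch, assuming a hypothetical layout with one bucket per
 * power-of-two page count (the real bucket layout is defined at device
 * creation time, which is why the layout-independent scan is used):
 *
 *   static int bucket_index(uint32_t size)
 *   {
 *      uint32_t pages = ALIGN(size, 4096) / 4096;
 *      int idx = 0;
 *      while ((1u << idx) < pages)
 *         idx++;
 *      return idx;
 *   }
 */
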
static int is_idle(struct fd_bo *bo)
{
	return fd_bo_cpu_prep(bo, NULL,
			DRM_FREEDRENO_PREP_READ |
			DRM_FREEDRENO_PREP_WRITE |
			DRM_FREEDRENO_PREP_NOSYNC) == 0;
}

static struct fd_bo *find_in_bucket(struct fd_device *dev,
		struct fd_bo_bucket *bucket, uint32_t flags)
{
	struct fd_bo *bo = NULL;

	/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
	 * skip the busy check.. if it is only going to be a render target
	 * then we probably don't need to stall..
	 *
	 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
	 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
	 */
	pthread_mutex_lock(&table_lock);
	while (!LIST_IS_EMPTY(&bucket->list)) {
		bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
		if (0 /* TODO: if madvise tells us bo is gone... */) {
			list_del(&bo->list);
			bo_del(bo);
			bo = NULL;
			continue;
		}
		/* TODO check for compatible flags? */
		if (is_idle(bo)) {
			list_del(&bo->list);
			break;
		}
		bo = NULL;
		break;
	}
	pthread_mutex_unlock(&table_lock);

	return bo;
}


drm_public struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = NULL;
	struct fd_bo_bucket *bucket;
	uint32_t handle;
	int ret;

	size = ALIGN(size, 4096);
	bucket = get_bucket(dev, size);

	/* see if we can be green and recycle: */
	if (bucket) {
		size = bucket->size;
		bo = find_in_bucket(dev, bucket, flags);
		if (bo) {
			atomic_set(&bo->refcnt, 1);
			fd_device_ref(bo->dev);
			return bo;
		}
	}

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	bo->bo_reuse = 1;
	pthread_mutex_unlock(&table_lock);

	return bo;
}

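/* Illustrative usage sketch, not part of the library: allocate a buffer,
 * map it for CPU access, and drop the reference.  fd_bo_del() decides
 * transparently whether the buffer is freed or recycled through the
 * bucket cache.  DRM_FREEDRENO_GEM_TYPE_KMEM is just an example flag
 * from freedreno_drmif.h:
 *
 *   struct fd_bo *bo = fd_bo_new(dev, 4096, DRM_FREEDRENO_GEM_TYPE_KMEM);
 *   if (bo) {
 *      uint32_t *ptr = fd_bo_map(bo);
 *      if (ptr)
 *         ptr[0] = 0xdeadbeef;
 *      fd_bo_del(bo);
 *   }
 */
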
drm_public struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, size, handle);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

drm_public struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	struct drm_prime_handle req = {
			.fd = fd,
	};
	int ret, size;

	ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
	if (ret) {
		return NULL;
	}

	/* hmm, would be nice if we had a way to figure out the size.. */
	size = 0;

	return fd_bo_from_handle(dev, req.handle, size);
}

drm_public struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo)
		set_name(bo, name);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

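/* Illustrative sketch, not part of the library: sharing a buffer between
 * two processes via a flink name.  The exporting side publishes a global
 * name with fd_bo_get_name() (further below), the importing side opens it
 * with fd_bo_from_name().  How the 32-bit name travels between the two
 * processes (send_name_to_peer() here) is application-defined:
 *
 *   // exporter:
 *   uint32_t name;
 *   if (!fd_bo_get_name(bo, &name))
 *      send_name_to_peer(name);
 *
 *   // importer:
 *   struct fd_bo *imported = fd_bo_from_name(dev, name);
 */
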
drm_public struct fd_bo * fd_bo_ref(struct fd_bo *bo)
{
	atomic_inc(&bo->refcnt);
	return bo;
}

drm_public void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	if (bo->fd) {
		close(bo->fd);
		bo->fd = 0;
	}

	pthread_mutex_lock(&table_lock);

	if (bo->bo_reuse) {
		struct fd_bo_bucket *bucket = get_bucket(dev, bo->size);

		/* see if we can be green and recycle: */
		if (bucket) {
			struct timespec time;

			clock_gettime(CLOCK_MONOTONIC, &time);

			bo->free_time = time.tv_sec;
			list_addtail(&bo->list, &bucket->list);
			fd_cleanup_bo_cache(dev, time.tv_sec);

			/* bo's in the bucket cache don't have a ref and
			 * don't hold a ref to the dev:
			 */

			goto out;
		}
	}

	bo_del(bo);
out:
	fd_device_del_locked(dev);
	pthread_mutex_unlock(&table_lock);
}

/* Called under table_lock */
static void bo_del(struct fd_bo *bo)
{
	if (bo->map)
		drm_munmap(bo->map, bo->size);

	/* TODO probably bo's in bucket list get removed from
	 * handle table??
	 */

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		drmHashDelete(bo->dev->handle_table, bo->handle);
		if (bo->name)
			drmHashDelete(bo->dev->name_table, bo->name);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	bo->funcs->destroy(bo);
}

drm_public int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
	}

	*name = bo->name;

	return 0;
}

drm_public uint32_t fd_bo_handle(struct fd_bo *bo)
{
	return bo->handle;
}

drm_public int fd_bo_dmabuf(struct fd_bo *bo)
{
	if (!bo->fd) {
		struct drm_prime_handle req = {
				.handle = bo->handle,
				.flags = DRM_CLOEXEC,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
		if (ret) {
			return ret;
		}

		bo->fd = req.fd;
	}
	return dup(bo->fd);
}

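/* Illustrative sketch, not part of the library: exporting a buffer as a
 * dma-buf fd and importing it again elsewhere.  fd_bo_dmabuf() returns a
 * dup'd fd owned by the caller (so the caller closes it when done), and
 * fd_bo_from_dmabuf() above currently imports with size 0:
 *
 *   int prime_fd = fd_bo_dmabuf(bo);
 *   if (prime_fd >= 0) {
 *      struct fd_bo *imported = fd_bo_from_dmabuf(dev, prime_fd);
 *      ...
 *      close(prime_fd);
 *   }
 */
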
drm_public uint32_t fd_bo_size(struct fd_bo *bo)
{
	return bo->size;
}

drm_public void * fd_bo_map(struct fd_bo *bo)
{
	if (!bo->map) {
		uint64_t offset;
		int ret;

		ret = bo->funcs->offset(bo, &offset);
		if (ret) {
			return NULL;
		}

		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				bo->dev->fd, offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}
	return bo->map;
}

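/* Illustrative sketch, not part of the library: CPU access to a buffer the
 * GPU may still be using.  fd_bo_cpu_prep() (below) waits until the buffer
 * is safe to access for the requested operation (or fails without blocking
 * when DRM_FREEDRENO_PREP_NOSYNC is passed, as is_idle() above relies on),
 * and fd_bo_cpu_fini() ends the access window:
 *
 *   void *ptr = fd_bo_map(bo);
 *   if (ptr && !fd_bo_cpu_prep(bo, pipe, DRM_FREEDRENO_PREP_WRITE)) {
 *      memcpy(ptr, data, size);
 *      fd_bo_cpu_fini(bo);
 *   }
 */
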
/* a bit odd to take the pipe as an arg, but it's a quirk of kgsl.. */
drm_public int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	return bo->funcs->cpu_prep(bo, pipe, op);
}

drm_public void fd_bo_cpu_fini(struct fd_bo *bo)
{
	bo->funcs->cpu_fini(bo);
}