/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void bo_del(struct fd_bo *bo);

/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct fd_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: */
	drmHashInsert(bo->dev->name_table, name, bo);
}

/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo * lookup_bo(void *tbl, uint32_t key)
{
	struct fd_bo *bo = NULL;
	if (!drmHashLookup(tbl, key, (void **)&bo)) {
		/* found, incr refcnt and return: */
		bo = fd_bo_ref(bo);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}
	return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo * bo_from_handle(struct fd_device *dev,
		uint32_t size, uint32_t handle)
{
	struct fd_bo *bo;

	bo = dev->funcs->bo_from_handle(dev, size, handle);
	if (!bo) {
		struct drm_gem_close req = {
				.handle = handle,
		};
		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		return NULL;
	}
	bo->dev = fd_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourself into the handle table: */
	drmHashInsert(dev->handle_table, handle, bo);
	return bo;
}

/* Frees older cached buffers.  Called under table_lock */
drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
{
	int i;

	if (dev->time == time)
		return;

	for (i = 0; i < dev->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
		struct fd_bo *bo;

		while (!LIST_IS_EMPTY(&bucket->list)) {
			bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

			/* keep things in cache for at least 1 second: */
			if (time && ((time - bo->free_time) <= 1))
				break;

			list_del(&bo->list);
			bo_del(bo);
		}
	}

	dev->time = time;
}
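
/* Illustrative sketch (not part of the original file): because the age check
 * above reads "if (time && ...)", passing a time of 0 skips it, so every bo
 * queued in the bucket cache is freed regardless of age.  A device-teardown
 * path could flush the cache that way; the helper name below is hypothetical.
 */
static void example_flush_bo_cache(struct fd_device *dev)
{
	pthread_mutex_lock(&table_lock);
	fd_cleanup_bo_cache(dev, 0);    /* time==0: drop all cached bo's */
	pthread_mutex_unlock(&table_lock);
}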

static struct fd_bo_bucket * get_bucket(struct fd_device *dev, uint32_t size)
{
	int i;

	/* hmm, this is what intel does, but I suppose we could calculate our
	 * way to the correct bucket size rather than looping..
	 */
	for (i = 0; i < dev->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}
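
/* Sketch of the non-looping lookup hinted at in the comment above (not part of
 * the original file): cache_bucket[] is ordered by ascending size, so the
 * smallest bucket with bucket->size >= size can also be found with a binary
 * search.  Probably only worth it if num_buckets ever grows large; shown here
 * purely as an illustration, and the helper name is hypothetical.
 */
static struct fd_bo_bucket * get_bucket_bsearch(struct fd_device *dev, uint32_t size)
{
	int lo = 0, hi = (int)dev->num_buckets - 1, found = -1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		if (dev->cache_bucket[mid].size >= size) {
			found = mid;       /* candidate; keep looking for a smaller one */
			hi = mid - 1;
		} else {
			lo = mid + 1;
		}
	}

	return (found < 0) ? NULL : &dev->cache_bucket[found];
}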

static int is_idle(struct fd_bo *bo)
{
	return fd_bo_cpu_prep(bo, NULL,
			DRM_FREEDRENO_PREP_READ |
			DRM_FREEDRENO_PREP_WRITE |
			DRM_FREEDRENO_PREP_NOSYNC) == 0;
}

static struct fd_bo *find_in_bucket(struct fd_device *dev,
		struct fd_bo_bucket *bucket, uint32_t flags)
{
	struct fd_bo *bo = NULL;

	/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
	 * skip the busy check.. if it is only going to be a render target
	 * then we probably don't need to stall..
	 *
	 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
	 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
	 */
	pthread_mutex_lock(&table_lock);
	while (!LIST_IS_EMPTY(&bucket->list)) {
		bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
		if (0 /* TODO: if madvise tells us bo is gone... */) {
			list_del(&bo->list);
			bo_del(bo);
			bo = NULL;
			continue;
		}
		/* TODO check for compatible flags? */
		if (is_idle(bo)) {
			list_del(&bo->list);
			break;
		}
		bo = NULL;
		break;
	}
	pthread_mutex_unlock(&table_lock);

	return bo;
}

struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = NULL;
	struct fd_bo_bucket *bucket;
	uint32_t handle;
	int ret;

	size = ALIGN(size, 4096);
	bucket = get_bucket(dev, size);

	/* see if we can be green and recycle: */
	if (bucket) {
		size = bucket->size;
		bo = find_in_bucket(dev, bucket, flags);
		if (bo) {
			atomic_set(&bo->refcnt, 1);
			fd_device_ref(bo->dev);
			return bo;
		}
	}

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	if (bo)
		bo->bo_reuse = 1;
	pthread_mutex_unlock(&table_lock);

	return bo;
}
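
/* Illustrative usage sketch (not part of the original file): a typical
 * allocate/fill/release cycle with the public API above.  The size, flags and
 * helper name are just placeholders; error handling is kept minimal.
 */
static int example_alloc_and_fill(struct fd_device *dev)
{
	struct fd_bo *bo = fd_bo_new(dev, 0x1000, DRM_FREEDRENO_GEM_TYPE_KMEM);
	void *map;

	if (!bo)
		return -1;

	map = fd_bo_map(bo);            /* mapping is created lazily on first use */
	if (map)
		memset(map, 0, fd_bo_size(bo));

	fd_bo_del(bo);                  /* may land in the bucket cache for reuse */

	return map ? 0 : -1;
}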

struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, size, handle);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
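
/* Illustrative sketch (not part of the original file): importing a dma-buf fd
 * that was exported elsewhere (another process or another driver) and mapping
 * it for CPU access.  The helper name is hypothetical; the fd is assumed to
 * come from outside.
 */
static struct fd_bo * example_import_dmabuf(struct fd_device *dev, int dmabuf_fd)
{
	struct fd_bo *bo = fd_bo_from_dmabuf(dev, dmabuf_fd);

	if (!bo)
		return NULL;

	/* from here on the imported bo behaves like any other bo: */
	if (!fd_bo_map(bo)) {
		fd_bo_del(bo);
		return NULL;
	}

	return bo;
}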

struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo)
		set_name(bo, name);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

struct fd_bo * fd_bo_ref(struct fd_bo *bo)
{
	atomic_inc(&bo->refcnt);
	return bo;
}

void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	if (bo->bo_reuse) {
		struct fd_bo_bucket *bucket = get_bucket(dev, bo->size);

		/* see if we can be green and recycle: */
		if (bucket) {
			struct timespec time;

			clock_gettime(CLOCK_MONOTONIC, &time);

			bo->free_time = time.tv_sec;
			list_addtail(&bo->list, &bucket->list);
			fd_cleanup_bo_cache(dev, time.tv_sec);

			/* bo's in the bucket cache don't have a ref and
			 * don't hold a ref to the dev:
			 */

			goto out;
		}
	}

	bo_del(bo);
out:
	fd_device_del_locked(dev);
	pthread_mutex_unlock(&table_lock);
}

/* Called under table_lock */
static void bo_del(struct fd_bo *bo)
{
	if (bo->map)
		drm_munmap(bo->map, bo->size);

	/* TODO probably bo's in bucket list get removed from
	 * handle table??
	 */

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		drmHashDelete(bo->dev->handle_table, bo->handle);
		if (bo->name)
			drmHashDelete(bo->dev->name_table, bo->name);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	bo->funcs->destroy(bo);
}

int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
		bo->bo_reuse = 0;
	}

	*name = bo->name;

	return 0;
}
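
/* Illustrative sketch (not part of the original file): flink names are the
 * legacy way to share a bo between processes.  The exporter asks for a global
 * name with fd_bo_get_name() and hands it to the importer over some IPC
 * channel; the importer recreates a bo from it as below.  The helper name is
 * hypothetical.
 */
static struct fd_bo * example_open_shared_bo(struct fd_device *dev, uint32_t flink_name)
{
	/* in the exporting process, the name would have come from:
	 *   uint32_t flink_name;
	 *   fd_bo_get_name(exported_bo, &flink_name);   // also disables bucket reuse
	 */
	struct fd_bo *bo = fd_bo_from_name(dev, flink_name);

	if (bo && !fd_bo_map(bo)) {
		fd_bo_del(bo);
		bo = NULL;
	}

	return bo;
}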

uint32_t fd_bo_handle(struct fd_bo *bo)
{
	return bo->handle;
}

int fd_bo_dmabuf(struct fd_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
			&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	bo->bo_reuse = 0;

	return prime_fd;
}
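
/* Illustrative sketch (not part of the original file): allocating a buffer and
 * exporting it as a dma-buf fd, the modern counterpart to flink sharing.  The
 * returned fd could be passed over a unix socket to another process and
 * re-imported there with fd_bo_from_dmabuf().  Names here are hypothetical.
 */
static int example_alloc_and_export(struct fd_device *dev, uint32_t size,
		struct fd_bo **bo_out)
{
	struct fd_bo *bo = fd_bo_new(dev, size, DRM_FREEDRENO_GEM_TYPE_KMEM);
	int prime_fd;

	if (!bo)
		return -1;

	prime_fd = fd_bo_dmabuf(bo);    /* also disables bucket reuse */
	if (prime_fd < 0) {
		fd_bo_del(bo);
		return -1;
	}

	*bo_out = bo;                   /* caller keeps the bo and owns prime_fd */

	return prime_fd;
}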

uint32_t fd_bo_size(struct fd_bo *bo)
{
	return bo->size;
}

void * fd_bo_map(struct fd_bo *bo)
{
	if (!bo->map) {
		uint64_t offset;
		int ret;

		ret = bo->funcs->offset(bo, &offset);
		if (ret) {
			return NULL;
		}

		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				bo->dev->fd, offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}
	return bo->map;
}

/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	return bo->funcs->cpu_prep(bo, pipe, op);
}

void fd_bo_cpu_fini(struct fd_bo *bo)
{
	bo->funcs->cpu_fini(bo);
}
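
/* Illustrative sketch (not part of the original file): the expected CPU access
 * pattern around a mapping.  fd_bo_cpu_prep() waits (or with NOSYNC merely
 * checks) until the GPU is done with the bo, and fd_bo_cpu_fini() marks the
 * CPU access as finished.  The helper name and where the pipe comes from are
 * left to the caller.
 */
static int example_cpu_write(struct fd_bo *bo, struct fd_pipe *pipe,
		const void *data, uint32_t len)
{
	void *map;
	int ret;

	if (len > fd_bo_size(bo))
		return -1;

	ret = fd_bo_cpu_prep(bo, pipe, DRM_FREEDRENO_PREP_WRITE);
	if (ret)
		return ret;

	map = fd_bo_map(bo);
	if (map)
		memcpy(map, data, len);

	fd_bo_cpu_fini(bo);

	return map ? 0 : -1;
}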