Home | History | Annotate | Download | only in nouveau
      1 /*
      2  * Copyright 2007 Nouveau Project
      3  *
      4  * Permission is hereby granted, free of charge, to any person obtaining a
      5  * copy of this software and associated documentation files (the "Software"),
      6  * to deal in the Software without restriction, including without limitation
      7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8  * and/or sell copies of the Software, and to permit persons to whom the
      9  * Software is furnished to do so, subject to the following conditions:
     10  *
     11  * The above copyright notice and this permission notice shall be included in
     12  * all copies or substantial portions of the Software.
     13  *
     14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17  * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
     18  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
     19  * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     20  * SOFTWARE.
     21  */
     22 
     23 #ifdef HAVE_CONFIG_H
     24 #include <config.h>
     25 #endif
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <sys/ioctl.h>
#include <sys/mman.h>
     33 
     34 #include "nouveau_private.h"
     35 
/* Per-device buffer-object initialisation.  Nothing is needed with the
 * GEM-based interface, so this always succeeds. */
int
nouveau_bo_init(struct nouveau_device *dev)
{
	return 0;
}
     41 
/* Per-device buffer-object teardown; intentionally a no-op (nothing was
 * set up in nouveau_bo_init()). */
void
nouveau_bo_takedown(struct nouveau_device *dev)
{
}
     46 
     47 static int
     48 nouveau_bo_info(struct nouveau_bo_priv *nvbo, struct drm_nouveau_gem_info *arg)
     49 {
     50 	nvbo->handle = nvbo->base.handle = arg->handle;
     51 	nvbo->domain = arg->domain;
     52 	nvbo->size = arg->size;
     53 	nvbo->offset = arg->offset;
     54 	nvbo->map_handle = arg->map_handle;
     55 	nvbo->base.tile_mode = arg->tile_mode;
     56 	nvbo->base.tile_flags = arg->tile_flags;
     57 	return 0;
     58 }
     59 
     60 static int
     61 nouveau_bo_allocated(struct nouveau_bo_priv *nvbo)
     62 {
     63 	if (nvbo->sysmem || nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN))
     64 		return 1;
     65 	return 0;
     66 }
     67 
     68 static int
     69 nouveau_bo_ualloc(struct nouveau_bo_priv *nvbo)
     70 {
     71 	if (nvbo->user || nvbo->sysmem) {
     72 		assert(nvbo->sysmem);
     73 		return 0;
     74 	}
     75 
     76 	nvbo->sysmem = malloc(nvbo->size);
     77 	if (!nvbo->sysmem)
     78 		return -ENOMEM;
     79 
     80 	return 0;
     81 }
     82 
     83 static void
     84 nouveau_bo_ufree(struct nouveau_bo_priv *nvbo)
     85 {
     86 	if (nvbo->sysmem) {
     87 		if (!nvbo->user)
     88 			free(nvbo->sysmem);
     89 		nvbo->sysmem = NULL;
     90 	}
     91 }
     92 
     93 static void
     94 nouveau_bo_kfree(struct nouveau_bo_priv *nvbo)
     95 {
     96 	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
     97 	struct drm_gem_close req;
     98 
     99 	if (!nvbo->handle)
    100 		return;
    101 
    102 	if (nvbo->map) {
    103 		munmap(nvbo->map, nvbo->size);
    104 		nvbo->map = NULL;
    105 	}
    106 
    107 	req.handle = nvbo->handle;
    108 	nvbo->handle = 0;
    109 	ioctl(nvdev->fd, DRM_IOCTL_GEM_CLOSE, &req);
    110 }
    111 
    112 static int
    113 nouveau_bo_kalloc(struct nouveau_bo_priv *nvbo, struct nouveau_channel *chan)
    114 {
    115 	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
    116 	struct drm_nouveau_gem_new req;
    117 	struct drm_nouveau_gem_info *info = &req.info;
    118 	int ret;
    119 
    120 	if (nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN))
    121 		return 0;
    122 
    123 	req.channel_hint = chan ? chan->id : 0;
    124 	req.align = nvbo->align;
    125 
    126 
    127 	info->size = nvbo->size;
    128 	info->domain = 0;
    129 
    130 	if (nvbo->flags & NOUVEAU_BO_VRAM)
    131 		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM;
    132 	if (nvbo->flags & NOUVEAU_BO_GART)
    133 		info->domain |= NOUVEAU_GEM_DOMAIN_GART;
    134 	if (!info->domain) {
    135 		info->domain |= (NOUVEAU_GEM_DOMAIN_VRAM |
    136 				 NOUVEAU_GEM_DOMAIN_GART);
    137 	}
    138 
    139 	if (nvbo->flags & NOUVEAU_BO_MAP)
    140 		info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;
    141 
    142 	info->tile_mode = nvbo->base.tile_mode;
    143 	info->tile_flags = nvbo->base.tile_flags;
    144 
    145 	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_NEW,
    146 				  &req, sizeof(req));
    147 	if (ret)
    148 		return ret;
    149 
    150 	nouveau_bo_info(nvbo, &req.info);
    151 	return 0;
    152 }
    153 
    154 static int
    155 nouveau_bo_kmap(struct nouveau_bo_priv *nvbo)
    156 {
    157 	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
    158 
    159 	if (nvbo->map)
    160 		return 0;
    161 
    162 	if (!nvbo->map_handle)
    163 		return -EINVAL;
    164 
    165 	nvbo->map = mmap(0, nvbo->size, PROT_READ | PROT_WRITE,
    166 			 MAP_SHARED, nvdev->fd, nvbo->map_handle);
    167 	if (nvbo->map == MAP_FAILED) {
    168 		nvbo->map = NULL;
    169 		return -errno;
    170 	}
    171 
    172 	return 0;
    173 }
    174 
    175 int
    176 nouveau_bo_new_tile(struct nouveau_device *dev, uint32_t flags, int align,
    177 		    int size, uint32_t tile_mode, uint32_t tile_flags,
    178 		    struct nouveau_bo **bo)
    179 {
    180 	struct nouveau_bo_priv *nvbo;
    181 	int ret;
    182 
    183 	if (!dev || !bo || *bo)
    184 		return -EINVAL;
    185 
    186 	nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
    187 	if (!nvbo)
    188 		return -ENOMEM;
    189 	nvbo->base.device = dev;
    190 	nvbo->base.size = size;
    191 	nvbo->base.tile_mode = tile_mode;
    192 	nvbo->base.tile_flags = tile_flags;
    193 
    194 	nvbo->refcount = 1;
    195 	/* Don't set NOUVEAU_BO_PIN here, or nouveau_bo_allocated() will
    196 	 * decided the buffer's already allocated when it's not.  The
    197 	 * call to nouveau_bo_pin() later will set this flag.
    198 	 */
    199 	nvbo->flags = (flags & ~NOUVEAU_BO_PIN);
    200 	nvbo->size = size;
    201 	nvbo->align = align;
    202 
    203 	if (flags & NOUVEAU_BO_PIN) {
    204 		ret = nouveau_bo_pin((void *)nvbo, nvbo->flags);
    205 		if (ret) {
    206 			nouveau_bo_ref(NULL, (void *)nvbo);
    207 			return ret;
    208 		}
    209 	}
    210 
    211 	*bo = &nvbo->base;
    212 	return 0;
    213 }
    214 
    215 int
    216 nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
    217 	       int size, struct nouveau_bo **bo)
    218 {
    219 	uint32_t tile_flags = 0;
    220 
    221 	if (flags & NOUVEAU_BO_TILED) {
    222 		if (flags & NOUVEAU_BO_ZTILE)
    223 			tile_flags = 0x2800;
    224 		else
    225 			tile_flags = 0x7000;
    226 	}
    227 
    228 	return nouveau_bo_new_tile(dev, flags, align, size, 0, tile_flags, bo);
    229 }
    230 
    231 int
    232 nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
    233 		struct nouveau_bo **bo)
    234 {
    235 	struct nouveau_bo_priv *nvbo;
    236 	int ret;
    237 
    238 	ret = nouveau_bo_new(dev, NOUVEAU_BO_MAP, 0, size, bo);
    239 	if (ret)
    240 		return ret;
    241 	nvbo = nouveau_bo(*bo);
    242 
    243 	nvbo->sysmem = ptr;
    244 	nvbo->user = 1;
    245 	return 0;
    246 }
    247 
    248 int
    249 nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
    250 		struct nouveau_bo **bo)
    251 {
    252 	struct nouveau_device_priv *nvdev = nouveau_device(dev);
    253 	struct drm_nouveau_gem_info req;
    254 	struct nouveau_bo_priv *nvbo;
    255 	int ret;
    256 
    257 	ret = nouveau_bo_new(dev, 0, 0, 0, bo);
    258 	if (ret)
    259 		return ret;
    260 	nvbo = nouveau_bo(*bo);
    261 
    262 	req.handle = handle;
    263 	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_INFO,
    264 				  &req, sizeof(req));
    265 	if (ret) {
    266 		nouveau_bo_ref(NULL, bo);
    267 		return ret;
    268 	}
    269 
    270 	nouveau_bo_info(nvbo, &req);
    271 	nvbo->base.size = nvbo->size;
    272 	return 0;
    273 }
    274 
    275 int
    276 nouveau_bo_handle_get(struct nouveau_bo *bo, uint32_t *handle)
    277 {
    278 	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
    279 	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
    280 	int ret;
    281 
    282 	if (!bo || !handle)
    283 		return -EINVAL;
    284 
    285 	if (!nvbo->global_handle) {
    286 		struct drm_gem_flink req;
    287 
    288 		ret = nouveau_bo_kalloc(nvbo, NULL);
    289 		if (ret)
    290 			return ret;
    291 
    292 		req.handle = nvbo->handle;
    293 		ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_FLINK, &req);
    294 		if (ret) {
    295 			nouveau_bo_kfree(nvbo);
    296 			return ret;
    297 		}
    298 
    299 		nvbo->global_handle = req.name;
    300 	}
    301 
    302 	*handle = nvbo->global_handle;
    303 	return 0;
    304 }
    305 
    306 int
    307 nouveau_bo_handle_ref(struct nouveau_device *dev, uint32_t handle,
    308 		      struct nouveau_bo **bo)
    309 {
    310 	struct nouveau_device_priv *nvdev = nouveau_device(dev);
    311 	struct nouveau_bo_priv *nvbo;
    312 	struct drm_gem_open req;
    313 	int ret;
    314 
    315 	req.name = handle;
    316 	ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_OPEN, &req);
    317 	if (ret) {
    318 		nouveau_bo_ref(NULL, bo);
    319 		return ret;
    320 	}
    321 
    322 	ret = nouveau_bo_wrap(dev, req.handle, bo);
    323 	if (ret) {
    324 		nouveau_bo_ref(NULL, bo);
    325 		return ret;
    326 	}
    327 
    328 	nvbo = nouveau_bo(*bo);
    329 	nvbo->base.handle = nvbo->handle;
    330 	return 0;
    331 }
    332 
    333 static void
    334 nouveau_bo_del(struct nouveau_bo **bo)
    335 {
    336 	struct nouveau_bo_priv *nvbo;
    337 
    338 	if (!bo || !*bo)
    339 		return;
    340 	nvbo = nouveau_bo(*bo);
    341 	*bo = NULL;
    342 
    343 	if (--nvbo->refcount)
    344 		return;
    345 
    346 	if (nvbo->pending) {
    347 		nvbo->pending = NULL;
    348 		nouveau_pushbuf_flush(nvbo->pending_channel, 0);
    349 	}
    350 
    351 	nouveau_bo_ufree(nvbo);
    352 	nouveau_bo_kfree(nvbo);
    353 	free(nvbo);
    354 }
    355 
    356 int
    357 nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pbo)
    358 {
    359 	if (!pbo)
    360 		return -EINVAL;
    361 
    362 	if (ref)
    363 		nouveau_bo(ref)->refcount++;
    364 
    365 	if (*pbo)
    366 		nouveau_bo_del(pbo);
    367 
    368 	*pbo = ref;
    369 	return 0;
    370 }
    371 
    372 static int
    373 nouveau_bo_wait(struct nouveau_bo *bo, int cpu_write, int no_wait, int no_block)
    374 {
    375 	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
    376 	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
    377 	struct drm_nouveau_gem_cpu_prep req;
    378 	int ret;
    379 
    380 	if (!nvbo->global_handle && !nvbo->write_marker && !cpu_write)
    381 		return 0;
    382 
    383 	if (nvbo->pending &&
    384 	    (nvbo->pending->write_domains || cpu_write)) {
    385 		nvbo->pending = NULL;
    386 		nouveau_pushbuf_flush(nvbo->pending_channel, 0);
    387 	}
    388 
    389 	req.handle = nvbo->handle;
    390 	req.flags = 0;
    391 	if (cpu_write)
    392 		req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
    393 	if (no_wait)
    394 		req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;
    395 	if (no_block)
    396 		req.flags |= NOUVEAU_GEM_CPU_PREP_NOBLOCK;
    397 
    398 	do {
    399 		ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_PREP,
    400 				      &req, sizeof(req));
    401 	} while (ret == -EAGAIN);
    402 	if (ret)
    403 		return ret;
    404 
    405 	if (ret == 0)
    406 		nvbo->write_marker = 0;
    407 	return 0;
    408 }
    409 
    410 int
    411 nouveau_bo_map_range(struct nouveau_bo *bo, uint32_t delta, uint32_t size,
    412 		     uint32_t flags)
    413 {
    414 	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
    415 	int ret;
    416 
    417 	if (!nvbo || bo->map)
    418 		return -EINVAL;
    419 
    420 	if (!nouveau_bo_allocated(nvbo)) {
    421 		if (nvbo->flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)) {
    422 			ret = nouveau_bo_kalloc(nvbo, NULL);
    423 			if (ret)
    424 				return ret;
    425 		}
    426 
    427 		if (!nouveau_bo_allocated(nvbo)) {
    428 			ret = nouveau_bo_ualloc(nvbo);
    429 			if (ret)
    430 				return ret;
    431 		}
    432 	}
    433 
    434 	if (nvbo->sysmem) {
    435 		bo->map = (char *)nvbo->sysmem + delta;
    436 	} else {
    437 		ret = nouveau_bo_kmap(nvbo);
    438 		if (ret)
    439 			return ret;
    440 
    441 		if (!(flags & NOUVEAU_BO_NOSYNC)) {
    442 			ret = nouveau_bo_wait(bo, (flags & NOUVEAU_BO_WR),
    443 					      (flags & NOUVEAU_BO_NOWAIT), 0);
    444 			if (ret)
    445 				return ret;
    446 		}
    447 
    448 		bo->map = (char *)nvbo->map + delta;
    449 	}
    450 
    451 	return 0;
    452 }
    453 
/* Flush CPU writes in the given range toward the GPU.  Intentionally a
 * no-op here — nothing is required for this backend's mappings. */
void
nouveau_bo_map_flush(struct nouveau_bo *bo, uint32_t delta, uint32_t size)
{
}
    458 
/* Map the entire buffer for CPU access; thin wrapper around
 * nouveau_bo_map_range() with delta 0 and the BO's full size. */
int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
{
	return nouveau_bo_map_range(bo, 0, bo->size, flags);
}
    464 
    465 void
    466 nouveau_bo_unmap(struct nouveau_bo *bo)
    467 {
    468 	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
    469 
    470 	if (bo->map && !nvbo->sysmem) {
    471 		struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
    472 		struct drm_nouveau_gem_cpu_fini req;
    473 
    474 		req.handle = nvbo->handle;
    475 		drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_FINI,
    476 				&req, sizeof(req));
    477 	}
    478 
    479 	bo->map = NULL;
    480 }
    481 
/* Pin the BO at a fixed GPU address.  Creates the kernel object first if
 * needed (placement taken from <flags> when the BO had none), then issues
 * the PIN ioctl and mirrors the resulting domain/offset into the public
 * struct.  Returns 0 on success (or if already pinned), or a negative
 * errno. */
int
nouveau_bo_pin(struct nouveau_bo *bo, uint32_t flags)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_pin req;
	int ret;

	if (nvbo->pinned)
		return 0;

	/* Ensure we have a kernel object...  A BO created with no
	 * placement flags adopts the caller's, which must then name at
	 * least one memory domain. */
	if (!nvbo->flags) {
		if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
			return -EINVAL;
		nvbo->flags = flags;
	}

	if (!nvbo->handle) {
		ret = nouveau_bo_kalloc(nvbo, NULL);
		if (ret)
			return ret;
	}

	/* Now force it to stay put :) */
	req.handle = nvbo->handle;
	req.domain = 0;
	if (nvbo->flags & NOUVEAU_BO_VRAM)
		req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (nvbo->flags & NOUVEAU_BO_GART)
		req.domain |= NOUVEAU_GEM_DOMAIN_GART;

	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_PIN, &req,
				  sizeof(struct drm_nouveau_gem_pin));
	if (ret)
		return ret;
	nvbo->offset = req.offset;
	nvbo->domain = req.domain;
	nvbo->pinned = 1;
	/* The PIN flag marks the BO as backed; see nouveau_bo_allocated(). */
	nvbo->flags |= NOUVEAU_BO_PIN;

	/* Fill in public nouveau_bo members */
	if (nvbo->domain & NOUVEAU_GEM_DOMAIN_VRAM)
		bo->flags = NOUVEAU_BO_VRAM;
	if (nvbo->domain & NOUVEAU_GEM_DOMAIN_GART)
		bo->flags = NOUVEAU_BO_GART;
	bo->offset = nvbo->offset;

	return 0;
}
    532 
    533 void
    534 nouveau_bo_unpin(struct nouveau_bo *bo)
    535 {
    536 	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
    537 	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
    538 	struct drm_nouveau_gem_unpin req;
    539 
    540 	if (!nvbo->pinned)
    541 		return;
    542 
    543 	req.handle = nvbo->handle;
    544 	drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_UNPIN, &req, sizeof(req));
    545 
    546 	nvbo->pinned = bo->offset = bo->flags = 0;
    547 }
    548 
/* Non-blocking busy check: probes GPU use of the BO via nouveau_bo_wait()
 * with both no_wait and no_block set; <access> & NOUVEAU_BO_WR asks about
 * write access.  Returns 0 when access is possible now, negative errno
 * otherwise. */
int
nouveau_bo_busy(struct nouveau_bo *bo, uint32_t access)
{
	return nouveau_bo_wait(bo, (access & NOUVEAU_BO_WR), 1, 1);
}
    554 
/* Register <bo> with <chan>'s pushbuf, returning its relocation entry.
 * Reuses an existing entry if the BO is already pending on the pushbuf.
 * Creates the kernel object on first GPU use, migrating any malloc'd
 * contents into it.  Returns NULL on allocation/map failure or when the
 * pushbuf's buffer table is full. */
struct drm_nouveau_gem_pushbuf_bo *
nouveau_bo_emit_buffer(struct nouveau_channel *chan, struct nouveau_bo *bo)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_pushbuf_bo *pbbo;
	struct nouveau_bo *ref = NULL;
	int ret;

	/* Already on this pushbuf: hand back the existing entry. */
	if (nvbo->pending)
		return nvbo->pending;

	if (!nvbo->handle) {
		ret = nouveau_bo_kalloc(nvbo, chan);
		if (ret)
			return NULL;

		if (nvbo->sysmem) {
			/* Copy the system-memory contents into the fresh
			 * kernel object.  sysmem is temporarily NULLed so
			 * nouveau_bo_map() maps the kernel object instead of
			 * short-circuiting to the system-memory pointer. */
			void *sysmem_tmp = nvbo->sysmem;

			nvbo->sysmem = NULL;
			ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
			if (ret)
				return NULL;
			nvbo->sysmem = sysmem_tmp;

			memcpy(bo->map, nvbo->sysmem, nvbo->base.size);
			nouveau_bo_ufree(nvbo);
			nouveau_bo_unmap(bo);
		}
	}

	if (nvpb->nr_buffers >= NOUVEAU_GEM_MAX_BUFFERS)
		return NULL;
	pbbo = nvpb->buffers + nvpb->nr_buffers++;
	nvbo->pending = pbbo;
	nvbo->pending_channel = chan;

	/* The pushbuf entry holds a BO reference (stashed in user_priv)
	 * until the pushbuf is flushed. */
	nouveau_bo_ref(bo, &ref);
	pbbo->user_priv = (uint64_t)(unsigned long)ref;
	pbbo->handle = nvbo->handle;
	pbbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;
	pbbo->read_domains = 0;
	pbbo->write_domains = 0;
	/* Presumed placement lets the kernel skip relocations when the BO
	 * hasn't moved since last submission. */
	pbbo->presumed_domain = nvbo->domain;
	pbbo->presumed_offset = nvbo->offset;
	pbbo->presumed_ok = 1;
	return pbbo;
}
    604