/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

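/* Report the GPU virtual address range available to userspace.  Only the
 * general range is supported; the bounds come straight from the kernel's
 * device info.
 */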
int amdgpu_va_range_query(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range type,
			  uint64_t *start, uint64_t *end)
{
	if (type != amdgpu_gpu_va_range_general)
		return -EINVAL;

	*start = dev->dev_info.virtual_address_offset;
	*end = dev->dev_info.virtual_address_max;
	return 0;
}

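/* Initialize a VA manager so that the whole [start, max) span is available as
 * a single free hole.
 */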
drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
				   uint64_t max, uint64_t alignment)
{
	struct amdgpu_bo_va_hole *n;

	mgr->va_max = max;
	mgr->va_alignment = alignment;

	list_inithead(&mgr->va_holes);
	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
	pthread_mutex_lock(&mgr->bo_va_mutex);
	n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
	if (n) {
		/* Seed the free list with one hole covering the whole range. */
		n->size = mgr->va_max - start;
		n->offset = start;
		list_add(&n->list, &mgr->va_holes);
	}
	pthread_mutex_unlock(&mgr->bo_va_mutex);
}

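/* Free any remaining holes and destroy the manager's mutex. */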
drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
{
	struct amdgpu_bo_va_hole *hole, *tmp;
	LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
		list_del(&hole->list);
		free(hole);
	}
	pthread_mutex_destroy(&mgr->bo_va_mutex);
}

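/* Carve "size" bytes out of the free-hole list, honouring "alignment" and,
 * when non-zero, the exact "base_required" address.  The list is kept sorted
 * by decreasing offset, so the reverse walk tries the lowest addresses first.
 * Returns the allocated offset, or AMDGPU_INVALID_VA_ADDRESS if nothing fits.
 */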
static drm_private uint64_t
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
		     uint64_t alignment, uint64_t base_required)
{
	struct amdgpu_bo_va_hole *hole, *n;
	uint64_t offset = 0, waste = 0;

	alignment = MAX2(alignment, mgr->va_alignment);
	size = ALIGN(size, mgr->va_alignment);

	if (base_required % alignment)
		return AMDGPU_INVALID_VA_ADDRESS;

	pthread_mutex_lock(&mgr->bo_va_mutex);
	LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
		if (base_required) {
			/* The hole must contain [base_required, base_required + size). */
			if (hole->offset > base_required ||
			    (hole->offset + hole->size) < (base_required + size))
				continue;
			waste = base_required - hole->offset;
			offset = base_required;
		} else {
			/* Round the hole start up to the requested alignment. */
			offset = hole->offset;
			waste = offset % alignment;
			waste = waste ? alignment - waste : 0;
			offset += waste;
			if (offset >= (hole->offset + hole->size))
				continue;
		}
		if (!waste && hole->size == size) {
			/* Exact fit: consume the whole hole. */
			offset = hole->offset;
			list_del(&hole->list);
			free(hole);
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
		if ((hole->size - waste) > size) {
			if (waste) {
				/* Keep the alignment waste as its own hole; if
				 * the allocation fails that range is lost.
				 */
				n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
				if (n) {
					n->size = waste;
					n->offset = hole->offset;
					list_add(&n->list, &hole->list);
				}
			}
			hole->size -= (size + waste);
			hole->offset += size + waste;
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
		if ((hole->size - waste) == size) {
			/* Only the alignment waste remains in the hole. */
			hole->size = waste;
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
	}

	pthread_mutex_unlock(&mgr->bo_va_mutex);
	return AMDGPU_INVALID_VA_ADDRESS;
}

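/* Return [va, va + size) to the free-hole list, merging it with the
 * neighbouring holes when they are adjacent.
 */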
static drm_private void
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
{
	struct amdgpu_bo_va_hole *hole, *next;

	if (va == AMDGPU_INVALID_VA_ADDRESS)
		return;

	size = ALIGN(size, mgr->va_alignment);

	pthread_mutex_lock(&mgr->bo_va_mutex);
	hole = container_of(&mgr->va_holes, hole, list);
	LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
		if (next->offset < va)
			break;
		hole = next;
	}

	/* "hole" now points at the hole directly above va (or the list head if
	 * there is none), "next" at the hole directly below it, if any.
	 */
	if (&hole->list != &mgr->va_holes) {
		/* Grow upper hole if it's adjacent */
		if (hole->offset == (va + size)) {
			hole->offset = va;
			hole->size += size;
			/* Merge lower hole if it's adjacent */
			if (next != hole &&
			    &next->list != &mgr->va_holes &&
			    (next->offset + next->size) == va) {
				next->size += hole->size;
				list_del(&hole->list);
				free(hole);
			}
			goto out;
		}
	}

	/* Grow lower hole if it's adjacent */
	if (next != hole && &next->list != &mgr->va_holes &&
	    (next->offset + next->size) == va) {
		next->size += size;
		goto out;
	}

	/* FIXME: on allocation failure we just lose virtual address space;
	 * maybe print a warning.
	 */
	next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
	if (next) {
		next->size = size;
		next->offset = va;
		list_add(&next->list, &hole->list);
	}

out:
	pthread_mutex_unlock(&mgr->bo_va_mutex);
}

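/* Allocate a VA range from the manager selected by "flags" (high/low,
 * 32-bit/full).  Requests that do not ask for a 32-bit address fall back to
 * the matching 32-bit manager when the preferred range is exhausted.
 */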
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range va_range_type,
			  uint64_t size,
			  uint64_t va_base_alignment,
			  uint64_t va_base_required,
			  uint64_t *va_base_allocated,
			  amdgpu_va_handle *va_range_handle,
			  uint64_t flags)
{
	struct amdgpu_bo_va_mgr *vamgr;

	/* Clear the flag when the high VA manager is not initialized */
	if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
		flags &= ~AMDGPU_VA_RANGE_HIGH;

	if (flags & AMDGPU_VA_RANGE_HIGH) {
		if (flags & AMDGPU_VA_RANGE_32_BIT)
			vamgr = &dev->vamgr_high_32;
		else
			vamgr = &dev->vamgr_high;
	} else {
		if (flags & AMDGPU_VA_RANGE_32_BIT)
			vamgr = &dev->vamgr_32;
		else
			vamgr = &dev->vamgr;
	}

	va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
	size = ALIGN(size, vamgr->va_alignment);

	*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
					va_base_alignment, va_base_required);

	if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
	    (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
		/* fallback to 32bit address */
		if (flags & AMDGPU_VA_RANGE_HIGH)
			vamgr = &dev->vamgr_high_32;
		else
			vamgr = &dev->vamgr_32;
		*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
					va_base_alignment, va_base_required);
	}

	if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
		struct amdgpu_va *va;
		va = calloc(1, sizeof(struct amdgpu_va));
		if (!va) {
			amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
			return -ENOMEM;
		}
		va->dev = dev;
		va->address = *va_base_allocated;
		va->size = size;
		va->range = va_range_type;
		va->vamgr = vamgr;
		*va_range_handle = va;
	} else {
		return -EINVAL;
	}

	return 0;
}

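/* Release a range obtained from amdgpu_va_range_alloc().  A NULL (or empty)
 * handle is a no-op.
 */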
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
{
	if (!va_range_handle || !va_range_handle->address)
		return 0;

	amdgpu_vamgr_free_va(va_range_handle->vamgr,
			va_range_handle->address,
			va_range_handle->size);
	free(va_range_handle);
	return 0;
}
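
/*
 * Example (illustrative sketch, not part of this library): typical use of the
 * public VA API together with amdgpu_bo_va_op().  "dev", "buf_handle" and
 * "alloc_size" are assumed to come from amdgpu_device_initialize() and
 * amdgpu_bo_alloc(); error handling is abbreviated.
 *
 *	uint64_t va;
 *	amdgpu_va_handle va_handle;
 *	int r;
 *
 *	r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *				  alloc_size, 0, 0, &va, &va_handle, 0);
 *	if (r)
 *		return r;
 *
 *	r = amdgpu_bo_va_op(buf_handle, 0, alloc_size, va, 0, AMDGPU_VA_OP_MAP);
 *	if (r)
 *		goto err;
 *
 *	... use the buffer at GPU address "va" ...
 *
 *	amdgpu_bo_va_op(buf_handle, 0, alloc_size, va, 0, AMDGPU_VA_OP_UNMAP);
 * err:
 *	amdgpu_va_range_free(va_handle);
 */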