/*
 * Copyright 2016 Red Hat.
 * Copyright 2016 Bas Nieuwenhuizen
 *
 * based on amdgpu winsys.
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdio.h>
#include <string.h>
#include <pthread.h>

#include "radv_amdgpu_bo.h"

#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <inttypes.h>

/* Unmap the buffer from its GPU VA range, release the VA reservation and
 * the BO handle, and drop it from the global BO list when debug_all_bos
 * tracking is enabled. */
static void radv_amdgpu_winsys_bo_destroy(struct radeon_winsys_bo *_bo)
{
   struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

   if (bo->ws->debug_all_bos) {
      pthread_mutex_lock(&bo->ws->global_bo_list_lock);
      LIST_DEL(&bo->global_list_item);
      bo->ws->num_buffers--;
      pthread_mutex_unlock(&bo->ws->global_bo_list_lock);
   }
   amdgpu_bo_va_op(bo->bo, 0, bo->size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);
   FREE(bo);
}

static void radv_amdgpu_add_buffer_to_global_list(struct radv_amdgpu_winsys_bo *bo)
{
   struct radv_amdgpu_winsys *ws = bo->ws;

   if (bo->ws->debug_all_bos) {
      pthread_mutex_lock(&ws->global_bo_list_lock);
      LIST_ADDTAIL(&bo->global_list_item, &ws->global_bo_list);
      ws->num_buffers++;
      pthread_mutex_unlock(&ws->global_bo_list_lock);
   }
}

static struct radeon_winsys_bo *
radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws,
                             uint64_t size,
                             unsigned alignment,
                             enum radeon_bo_domain initial_domain,
                             unsigned flags)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   struct radv_amdgpu_winsys_bo *bo;
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   amdgpu_va_handle va_handle;
   int r;

   bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   request.alloc_size = size;
   request.phys_alignment = alignment;

   /* Translate the generic winsys domains and flags into their
    * amdgpu_bo_alloc_request equivalents. */
   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
      goto error_bo_alloc;
   }

   /* Reserve a virtual address range and map the new buffer into it. */
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   bo->bo = buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->size = size;
   bo->is_shared = false;
   bo->ws = ws;
   radv_amdgpu_add_buffer_to_global_list(bo);
   return (struct radeon_winsys_bo *)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}

static uint64_t radv_amdgpu_winsys_bo_get_va(struct radeon_winsys_bo *_bo)
{
   struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
   return bo->va;
}

static void *
radv_amdgpu_winsys_bo_map(struct radeon_winsys_bo *_bo)
{
   struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
   int ret;
   void *data;

   ret = amdgpu_bo_cpu_map(bo->bo, &data);
   if (ret)
      return NULL;
   return data;
}

static void
radv_amdgpu_winsys_bo_unmap(struct radeon_winsys_bo *_bo)
{
   struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
   amdgpu_bo_cpu_unmap(bo->bo);
}

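/*
 * Import a buffer shared via a dma-buf file descriptor: turn the fd into
 * an amdgpu BO handle, query its size and preferred heap, then reserve a
 * 1 MiB-aligned GPU VA range and map the buffer into it, mirroring the
 * VA setup done for local allocations above.
 */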
static struct radeon_winsys_bo *
radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws,
                              int fd, unsigned *stride,
                              unsigned *offset)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   struct radv_amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;
   enum amdgpu_bo_handle_type type = amdgpu_bo_handle_type_dma_buf_fd;
   struct amdgpu_bo_import_result result = {0};
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   r = amdgpu_bo_import(ws->dev, type, fd, &result);
   if (r)
      goto error;

   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0,
                       AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   bo->bo = result.buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial;
   bo->size = result.alloc_size;
   bo->is_shared = true;
   /* Track imported buffers like locally created ones;
    * radv_amdgpu_winsys_bo_destroy dereferences bo->ws unconditionally,
    * so leaving it NULL here would crash on destroy. */
   bo->ws = ws;
   radv_amdgpu_add_buffer_to_global_list(bo);
   return (struct radeon_winsys_bo *)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool
radv_amdgpu_winsys_get_fd(struct radeon_winsys *_ws,
                          struct radeon_winsys_bo *_bo,
                          int *fd)
{
   struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
   enum amdgpu_bo_handle_type type = amdgpu_bo_handle_type_dma_buf_fd;
   int r;
   uint32_t handle;

   r = amdgpu_bo_export(bo->bo, type, &handle);
   if (r)
      return false;

   /* For the dma_buf_fd handle type the exported handle is the fd. */
   *fd = (int)handle;
   bo->is_shared = true;
   return true;
}

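/*
 * Reverse mapping of a tile-split value in bytes to the hardware encoding
 * used by AMDGPU_TILING_SET(TILE_SPLIT, ...): 64 -> 0, 128 -> 1, ...,
 * 4096 -> 6, i.e. log2(bytes / 64). Unrecognized values fall through to
 * the 1024-byte encoding.
 */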
static unsigned radv_eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64: return 0;
   case 128: return 1;
   case 256: return 2;
   case 512: return 3;
   default:
   case 1024: return 4;
   case 2048: return 5;
   case 4096: return 6;
   }
}

static void
radv_amdgpu_winsys_bo_set_metadata(struct radeon_winsys_bo *_bo,
                                   struct radeon_bo_metadata *md)
{
   struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
   struct amdgpu_bo_metadata metadata = {0};
   uint32_t tiling_flags = 0;

   if (md->macrotile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
   else if (md->microtile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
   else
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

   tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->pipe_config);
   tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->bankw));
   tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->bankh));
   if (md->tile_split)
      tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, radv_eg_tile_split_rev(md->tile_split));
   tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->mtilea));
   tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->num_banks) - 1);

   if (md->scanout)
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
   else
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}

void radv_amdgpu_bo_init_functions(struct radv_amdgpu_winsys *ws)
{
   ws->base.buffer_create = radv_amdgpu_winsys_bo_create;
   ws->base.buffer_destroy = radv_amdgpu_winsys_bo_destroy;
   ws->base.buffer_get_va = radv_amdgpu_winsys_bo_get_va;
   ws->base.buffer_map = radv_amdgpu_winsys_bo_map;
   ws->base.buffer_unmap = radv_amdgpu_winsys_bo_unmap;
   ws->base.buffer_from_fd = radv_amdgpu_winsys_bo_from_fd;
   ws->base.buffer_get_fd = radv_amdgpu_winsys_get_fd;
   ws->base.buffer_set_metadata = radv_amdgpu_winsys_bo_set_metadata;
}
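
/*
 * Usage sketch (hypothetical caller, not part of this file): once the
 * table above is filled in, a consumer holding a radv_amdgpu_winsys goes
 * through the generic radeon_winsys vtable rather than calling the static
 * functions here directly, e.g.:
 *
 *    struct radeon_winsys_bo *bo =
 *       ws->base.buffer_create(&ws->base, 4096, 4096,
 *                              RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
 *    void *ptr = ws->base.buffer_map(bo);
 *    ...fill the mapping...
 *    ws->base.buffer_unmap(bo);
 *    ws->base.buffer_destroy(bo);
 */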