/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _AMDGPU_INTERNAL_H_
#define _AMDGPU_INTERNAL_H_

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <pthread.h>

#include "libdrm_macros.h"
#include "xf86atomic.h"
#include "amdgpu.h"
#include "util_double_list.h"

#define AMDGPU_CS_MAX_RINGS 8
/*
 * Do not use the macros below unless y is a power of two,
 * e.g. ROUND_UP(13, 8) == 16 and ROUND_DOWN(13, 8) == 8.
 */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define ROUND_UP(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))

#define AMDGPU_INVALID_VA_ADDRESS 0xffffffffffffffff

struct amdgpu_bo_va_hole {
        struct list_head list;
        uint64_t offset;
        uint64_t size;
};

struct amdgpu_bo_va_mgr {
        /* the start virtual address */
        uint64_t va_offset;
        uint64_t va_max;
        struct list_head va_holes;
        pthread_mutex_t bo_va_mutex;
        uint32_t va_alignment;
};

struct amdgpu_va {
        amdgpu_device_handle dev;
        uint64_t address;
        uint64_t size;
        enum amdgpu_gpu_va_range range;
        struct amdgpu_bo_va_mgr *vamgr;
};

struct amdgpu_device {
        atomic_t refcount;
        int fd;
        int flink_fd;
        unsigned major_version;
        unsigned minor_version;

        /** List of buffer handles. Protected by bo_table_mutex. */
        struct util_hash_table *bo_handles;
        /** List of buffer GEM flink names. Protected by bo_table_mutex. */
        struct util_hash_table *bo_flink_names;
        /** This protects all hash tables. */
        pthread_mutex_t bo_table_mutex;
        struct drm_amdgpu_info_device dev_info;
        struct amdgpu_gpu_info info;
        /** The global VA manager for the whole virtual address space */
        struct amdgpu_bo_va_mgr *vamgr;
        /** The VA manager for the 32-bit address space */
        struct amdgpu_bo_va_mgr *vamgr_32;
};

struct amdgpu_bo {
        atomic_t refcount;
        struct amdgpu_device *dev;

        uint64_t alloc_size;

        uint32_t handle;
        uint32_t flink_name;

        pthread_mutex_t cpu_access_mutex;
        void *cpu_ptr;
        int cpu_map_count;
};

struct amdgpu_bo_list {
        struct amdgpu_device *dev;

        uint32_t handle;
};

struct amdgpu_context {
        struct amdgpu_device *dev;
        /* context id */
        uint32_t id;
};
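/*
 * Informal usage sketch for the VA-manager helpers declared below.
 * This is illustrative only, not a call sequence mandated by this
 * header: `size` and `alignment` are placeholder variables, and
 * passing 0 as base_required is assumed to mean "no fixed base".
 *
 *    uint64_t va = amdgpu_vamgr_find_va(dev->vamgr, size, alignment, 0);
 *    if (va == AMDGPU_INVALID_VA_ADDRESS)
 *            return -ENOMEM;   // no hole in the VA space was large enough
 *    // ... map a BO into [va, va + size) ...
 *    amdgpu_vamgr_free_va(dev->vamgr, va, size);
 */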
/**
 * Functions.
 */

drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo);

drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
                                   uint64_t max, uint64_t alignment);

drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);

drm_private uint64_t
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
                     uint64_t alignment, uint64_t base_required);

drm_private void
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size);

drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);

drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout);

/**
 * Inline functions.
 */

/**
 * Increment src and decrement dst as if we were updating references
 * for an assignment between two object pointers.
 *
 * \return true if dst's reference count drops to zero
 */
static inline bool update_references(atomic_t *dst, atomic_t *src)
{
        if (dst != src) {
                /* bump src first */
                if (src) {
                        assert(atomic_read(src) > 0);
                        atomic_inc(src);
                }
                if (dst) {
                        assert(atomic_read(dst) > 0);
                        return atomic_dec_and_test(dst);
                }
        }
        return false;
}

/**
 * Assignment between two amdgpu_bo pointers with reference counting.
 *
 * Usage:
 *    struct amdgpu_bo *dst = ... , *src = ...;
 *
 *    dst = src;
 *    // No reference counting. Only use this when you need to move
 *    // a reference from one pointer to another.
 *
 *    amdgpu_bo_reference(&dst, src);
 *    // Reference counters are updated. dst is decremented and src is
 *    // incremented. dst is freed if its reference counter is 0.
 */
static inline void amdgpu_bo_reference(struct amdgpu_bo **dst,
                                       struct amdgpu_bo *src)
{
        if (update_references(&(*dst)->refcount, &src->refcount))
                amdgpu_bo_free_internal(*dst);
        *dst = src;
}

#endif