/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _AMDGPU_INTERNAL_H_
#define _AMDGPU_INTERNAL_H_

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <pthread.h>

#include "libdrm_macros.h"
#include "xf86atomic.h"
#include "amdgpu.h"
#include "util_double_list.h"

#define AMDGPU_CS_MAX_RINGS 8
/* Do not use the macros below unless y is a power of two. */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define ROUND_UP(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))

#define AMDGPU_INVALID_VA_ADDRESS	0xffffffffffffffff
#define AMDGPU_NULL_SUBMIT_SEQ		0

struct amdgpu_bo_va_hole {
	struct list_head list;
	uint64_t offset;
	uint64_t size;
};

struct amdgpu_bo_va_mgr {
	/* the start virtual address */
	uint64_t va_offset;
	uint64_t va_max;
	struct list_head va_holes;
	pthread_mutex_t bo_va_mutex;
	uint32_t va_alignment;
};

struct amdgpu_va {
	amdgpu_device_handle dev;
	uint64_t address;
	uint64_t size;
	enum amdgpu_gpu_va_range range;
	struct amdgpu_bo_va_mgr *vamgr;
};

struct amdgpu_device {
	atomic_t refcount;
	int fd;
	int flink_fd;
	unsigned major_version;
	unsigned minor_version;

	/** List of buffer handles. Protected by bo_table_mutex. */
	struct util_hash_table *bo_handles;
	/** List of buffer GEM flink names. Protected by bo_table_mutex. */
	struct util_hash_table *bo_flink_names;
	/** This protects all hash tables. */
	pthread_mutex_t bo_table_mutex;
	struct drm_amdgpu_info_device dev_info;
	struct amdgpu_gpu_info info;
	/** The global VA manager for the whole virtual address space */
	struct amdgpu_bo_va_mgr *vamgr;
	/** The VA manager for the 32bit address space */
	struct amdgpu_bo_va_mgr *vamgr_32;
};

struct amdgpu_bo {
	atomic_t refcount;
	struct amdgpu_device *dev;

	uint64_t alloc_size;

	uint32_t handle;
	uint32_t flink_name;

	pthread_mutex_t cpu_access_mutex;
	void *cpu_ptr;
	int cpu_map_count;
};

struct amdgpu_bo_list {
	struct amdgpu_device *dev;

	uint32_t handle;
};

struct amdgpu_context {
	struct amdgpu_device *dev;
	/** Mutex for accessing fences and for keeping command
	 *  submissions in order. */
	pthread_mutex_t sequence_mutex;
	/* context id */
	uint32_t id;
	uint64_t last_seq[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
	struct list_head sem_list[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
};

/**
 * Structure describing a software semaphore based on the scheduler.
 */
struct amdgpu_semaphore {
	atomic_t refcount;
	struct list_head list;
	struct amdgpu_cs_fence signal_fence;
};

/**
 * Functions.
 */

drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo);

drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
				   uint64_t max, uint64_t alignment);

drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);

drm_private uint64_t
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
		     uint64_t alignment, uint64_t base_required);

drm_private void
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size);

drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);

drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout);

/**
 * Inline functions.
 */

/**
 * Increment src and decrement dst as if we were updating references
 * for an assignment between two object pointers.
 *
 * \return true if dst's reference count reached 0
 */
static inline bool update_references(atomic_t *dst, atomic_t *src)
{
	if (dst != src) {
		/* bump src first */
		if (src) {
			assert(atomic_read(src) > 0);
			atomic_inc(src);
		}
		if (dst) {
			assert(atomic_read(dst) > 0);
			return atomic_dec_and_test(dst);
		}
	}
	return false;
}

/**
 * Assignment between two amdgpu_bo pointers with reference counting.
 *
 * Usage:
 *    struct amdgpu_bo *dst = ..., *src = ...;
 *
 *    dst = src;
 *    // No reference counting. Only use this when you need to move
 *    // a reference from one pointer to another.
 *
 *    amdgpu_bo_reference(&dst, src);
 *    // Reference counters are updated. dst is decremented and src is
 *    // incremented. dst is freed if its reference counter is 0.
 */
static inline void amdgpu_bo_reference(struct amdgpu_bo **dst,
				       struct amdgpu_bo *src)
{
	if (update_references(&(*dst)->refcount, &src->refcount))
		amdgpu_bo_free_internal(*dst);
	*dst = src;
}

#endif
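
/*
 * Illustrative sketch, not part of this header: a hypothetical helper
 * (vamgr_usage_example) showing how the rounding macros and the VA-manager
 * declarations above fit together. It assumes amdgpu_vamgr_find_va()
 * reports failure as AMDGPU_INVALID_VA_ADDRESS; the block is kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static void vamgr_usage_example(struct amdgpu_bo_va_mgr *mgr)
{
	/* The rounding macros require a power-of-two y, e.g.
	 * ROUND_UP(13, 8) == 16 and ROUND_DOWN(13, 8) == 8. */
	uint64_t size = ROUND_UP(4096 + 13, 4096); /* == 8192 */

	/* Carve an aligned range out of the manager's hole list
	 * (base_required == 0 means any base address is acceptable)... */
	uint64_t va = amdgpu_vamgr_find_va(mgr, size, 4096, 0);
	if (va == AMDGPU_INVALID_VA_ADDRESS)
		return;

	/* ...and hand it back so adjacent holes can be merged again. */
	amdgpu_vamgr_free_va(mgr, va, size);
}
#endif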