
Lines Matching refs:mgr

47 drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
50 mgr->va_offset = start;
51 mgr->va_max = max;
52 mgr->va_alignment = alignment;
54 list_inithead(&mgr->va_holes);
55 pthread_mutex_init(&mgr->bo_va_mutex, NULL);
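For context, these matches come from libdrm's amdgpu virtual-address range manager: init (lines 47-55) seeds a bump allocator that hands out addresses from va_offset upward, with freed ranges tracked in the va_holes list. Below is a minimal sketch of the state those lines initialize; the field layout is an assumption inferred from the matched lines (the real definitions live in amdgpu_internal.h and may differ in exact types):

    /* Hypothetical layout inferred from the matches above; the list_head
     * helpers (list_inithead, list_add, ...) are libdrm's, from
     * util_double_list.h. */
    #include <stdint.h>
    #include <pthread.h>
    #include "util_double_list.h"

    struct amdgpu_bo_va_hole {
        struct list_head list;   /* linkage in mgr->va_holes */
        uint64_t offset;         /* start of the free range */
        uint64_t size;           /* length of the free range */
    };

    struct amdgpu_bo_va_mgr {
        uint64_t va_offset;          /* current top of allocations */
        uint64_t va_max;             /* end of the managed range */
        struct list_head va_holes;   /* freed ranges below va_offset */
        pthread_mutex_t bo_va_mutex; /* guards va_offset and va_holes */
        uint64_t va_alignment;       /* minimum allocation alignment */
    };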
58 drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
61 LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
65 pthread_mutex_destroy(&mgr->bo_va_mutex);
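The teardown at lines 58-65 just drains the hole list and destroys the mutex; the SAFE iterator is needed because each node is freed while walking. A sketch of the elided loop body, assuming the structs above:

    struct amdgpu_bo_va_hole *hole, *tmp;

    LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
        list_del(&hole->list);  /* unlink before freeing the node */
        free(hole);
    }
    pthread_mutex_destroy(&mgr->bo_va_mutex);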
69 amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
75 alignment = MAX2(alignment, mgr->va_alignment);
76 size = ALIGN(size, mgr->va_alignment);
81 pthread_mutex_lock(&mgr->bo_va_mutex);
84 LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
104 pthread_mutex_unlock(&mgr->bo_va_mutex);
116 pthread_mutex_unlock(&mgr->bo_va_mutex);
121 pthread_mutex_unlock(&mgr->bo_va_mutex);
127 if (base_required < mgr->va_offset) {
128 pthread_mutex_unlock(&mgr->bo_va_mutex);
131 offset = mgr->va_offset;
132 waste = base_required - mgr->va_offset;
134 offset = mgr->va_offset;
139 if (offset + waste + size > mgr->va_max) {
140 pthread_mutex_unlock(&mgr->bo_va_mutex);
148 list_add(&n->list, &mgr->va_holes);
152 mgr->va_offset += size + waste;
153 pthread_mutex_unlock(&mgr->bo_va_mutex);
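Reading the find_va matches together: the request is padded to the manager's alignment (lines 75-76), the hole list is scanned first so a sufficiently large hole can be carved up and reused (lines 84-121, where the unlocks mark the early-return paths), and only when no hole fits does the allocator bump va_offset, rejecting a base_required below the current top and recording any alignment or base gap as a new hole (lines 127-153). A simplified sketch of that fallback path, assuming the structs above; the AMDGPU_INVALID_VA_ADDRESS failure sentinel is borrowed from libdrm's amdgpu_internal.h:

    /* Fallback bump allocation when no existing hole fits (sketch;
     * the hole-scan above it is elided). */
    uint64_t offset, waste;
    struct amdgpu_bo_va_hole *n;

    if (base_required) {
        if (base_required < mgr->va_offset) {
            /* Requested fixed address is already below the top. */
            pthread_mutex_unlock(&mgr->bo_va_mutex);
            return AMDGPU_INVALID_VA_ADDRESS;
        }
        offset = mgr->va_offset;
        waste = base_required - mgr->va_offset;
    } else {
        offset = mgr->va_offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
    }

    if (offset + waste + size > mgr->va_max) {
        pthread_mutex_unlock(&mgr->bo_va_mutex);
        return AMDGPU_INVALID_VA_ADDRESS;  /* managed range exhausted */
    }

    if (waste) {
        /* Keep the gap reusable as a hole instead of leaking it. */
        n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &mgr->va_holes);
    }

    offset += waste;
    mgr->va_offset += size + waste;
    pthread_mutex_unlock(&mgr->bo_va_mutex);
    return offset;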
158 amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
165 size = ALIGN(size, mgr->va_alignment);
167 pthread_mutex_lock(&mgr->bo_va_mutex);
168 if ((va + size) == mgr->va_offset) {
169 mgr->va_offset = va;
171 if (!LIST_IS_EMPTY(&mgr->va_holes)) {
172 hole = container_of(mgr->va_holes.next, hole, list);
174 mgr->va_offset = hole->offset;
182 hole = container_of(&mgr->va_holes, hole, list);
183 LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
189 if (&hole->list != &mgr->va_holes) {
196 && &next->list != &mgr->va_holes
207 if (next != hole && &next->list != &mgr->va_holes &&
224 pthread_mutex_unlock(&mgr->bo_va_mutex);
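The free path (lines 158-224) is the mirror image: a range that ends exactly at va_offset is returned to the top of the heap, absorbing the uppermost hole if it then touches the new top (lines 168-174); any other range is inserted into the hole list, which is kept sorted by descending offset, and coalesced with an adjacent upper and/or lower neighbor (lines 182-207) so free space does not fragment. A condensed sketch of that merge logic, assuming the structs above (locking and allocation-failure handling elided); note libdrm's container_of takes a sample pointer, which is why the matched lines pass hole as the second argument:

    struct amdgpu_bo_va_hole *hole, *next;

    if ((va + size) == mgr->va_offset) {
        mgr->va_offset = va;  /* give the range back to the top */
        if (!LIST_IS_EMPTY(&mgr->va_holes)) {
            hole = container_of(mgr->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                /* Uppermost hole now reaches the top: absorb it too. */
                mgr->va_offset = hole->offset;
                list_del(&hole->list);
                free(hole);
            }
        }
    } else {
        /* Walk the descending-sorted list to the insertion point:
         * hole ends up as the upper neighbor, next as the lower. */
        hole = container_of(&mgr->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &mgr->va_holes && hole->offset == (va + size)) {
            /* Grow the upper hole downward over the freed range. */
            hole->offset = va;
            hole->size += size;
            /* If it now touches the lower hole, merge the two. */
            if (next != hole && &next->list != &mgr->va_holes &&
                (next->offset + next->size) == va) {
                next->size += hole->size;
                list_del(&hole->list);
                free(hole);
            }
        } else if (next != hole && &next->list != &mgr->va_holes &&
                   (next->offset + next->size) == va) {
            next->size += size;  /* grow the lower hole upward */
        } else {
            /* No adjacent hole: insert a fresh one at this point. */
            next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }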