/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <assert.h>

#include "freedreno_ringbuffer.h"
#include "kgsl_priv.h"


/* because kgsl tries to validate the gpuaddr on kernel side in ISSUEIBCMDS,
 * we can't use normal gem bo's for ringbuffer.. someday the kernel part
 * needs to be reworked into a single sane drm driver :-/
 */
struct kgsl_rb_bo {
	struct kgsl_pipe *pipe;
	void *hostptr;
	uint32_t gpuaddr;
	uint32_t size;
};

struct kgsl_ringbuffer {
	struct fd_ringbuffer base;
	struct kgsl_rb_bo *bo;
};

static inline struct kgsl_ringbuffer * to_kgsl_ringbuffer(struct fd_ringbuffer *x)
{
	return (struct kgsl_ringbuffer *)x;
}

static void kgsl_rb_bo_del(struct kgsl_rb_bo *bo)
{
	struct kgsl_sharedmem_free req = {
			.gpuaddr = bo->gpuaddr,
	};
	int ret;

	/* tolerate a partially constructed bo from the error paths in
	 * kgsl_rb_bo_new():
	 */
	if (bo->hostptr)
		drm_munmap(bo->hostptr, bo->size);

	if (bo->gpuaddr) {
		ret = ioctl(bo->pipe->fd, IOCTL_KGSL_SHAREDMEM_FREE, &req);
		if (ret) {
			ERROR_MSG("sharedmem free failed: %s", strerror(errno));
		}
	}

	free(bo);
}

static struct kgsl_rb_bo * kgsl_rb_bo_new(struct kgsl_pipe *pipe, uint32_t size)
{
	struct kgsl_rb_bo *bo;
	struct kgsl_gpumem_alloc req = {
			.size = ALIGN(size, 4096),
			.flags = KGSL_MEMFLAGS_GPUREADONLY,
	};
	int ret;

	bo = calloc(1, sizeof(*bo));
	if (!bo) {
		ERROR_MSG("allocation failed");
		return NULL;
	}
	ret = ioctl(pipe->fd, IOCTL_KGSL_GPUMEM_ALLOC, &req);
	if (ret) {
		ERROR_MSG("gpumem allocation failed: %s", strerror(errno));
		goto fail;
	}

	bo->pipe = pipe;
	bo->gpuaddr = req.gpuaddr;
	bo->size = size;
	bo->hostptr = drm_mmap(NULL, size, PROT_WRITE|PROT_READ,
			MAP_SHARED, pipe->fd, req.gpuaddr);
	if (bo->hostptr == MAP_FAILED) {
		ERROR_MSG("mmap failed: %s", strerror(errno));
		bo->hostptr = NULL;
		goto fail;
	}

	return bo;
fail:
	if (bo)
		kgsl_rb_bo_del(bo);
	return NULL;
}

static void * kgsl_ringbuffer_hostptr(struct fd_ringbuffer *ring)
{
	struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
	return kgsl_ring->bo->hostptr;
}

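/* Submit the dwords between last_start and ring->cur: since kgsl does no
 * kernel-side relocation, the ring contents are already final, so we just
 * point a single IB descriptor at the buffer and hand it to
 * IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS.  The 2D (z180) path below works around
 * quirks in the kernel's z180_cmdstream_issueibcmds().
 */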
static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
		int in_fence_fd, int *out_fence_fd)
{
	struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
	struct kgsl_pipe *kgsl_pipe = to_kgsl_pipe(ring->pipe);
	uint32_t offset = (uint8_t *)last_start - (uint8_t *)ring->start;
	struct kgsl_ibdesc ibdesc = {
			.gpuaddr = kgsl_ring->bo->gpuaddr + offset,
			.hostptr = last_start,
			.sizedwords = ring->cur - last_start,
	};
	struct kgsl_ringbuffer_issueibcmds req = {
			.drawctxt_id = kgsl_pipe->drawctxt_id,
			.ibdesc_addr = (unsigned long)&ibdesc,
			.numibs = 1,
			.flags = KGSL_CONTEXT_SUBMIT_IB_LIST,
	};
	int ret;

	assert(in_fence_fd == -1);
	assert(out_fence_fd == NULL);

	kgsl_pipe_pre_submit(kgsl_pipe);

	/* z180_cmdstream_issueibcmds() is made of fail: */
	if (ring->pipe->id == FD_PIPE_2D) {
		/* fix up size field in last cmd packet */
		uint32_t last_size = (uint32_t)(ring->cur - last_start);
		/* 5 is length of first packet, 2 for the two 7f000000's */
		last_start[2] = last_size - (5 + 2);
		ibdesc.gpuaddr = kgsl_ring->bo->gpuaddr;
		ibdesc.hostptr = kgsl_ring->bo->hostptr;
		ibdesc.sizedwords = 0x145;
		/* cast via uintptr_t to avoid truncation warnings on 64-bit: */
		req.timestamp = (uint32_t)(uintptr_t)kgsl_ring->bo->hostptr;
	}

	do {
		ret = ioctl(kgsl_pipe->fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &req);
	} while ((ret == -1) && ((errno == EINTR) || (errno == EAGAIN)));
	if (ret)
		ERROR_MSG("issueibcmds failed! %d (%s)", ret, strerror(errno));

	ring->last_timestamp = req.timestamp;
	ring->last_start = ring->cur;

	kgsl_pipe_post_submit(kgsl_pipe, req.timestamp);

	return ret;
}

/* kgsl has no kernel-side relocation, so resolve the bo's gpu address
 * directly into the ring, and record the bo with the pipe so it is
 * accounted for when the submit is flushed:
 */
static void kgsl_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
		const struct fd_reloc *r)
{
	struct kgsl_bo *kgsl_bo = to_kgsl_bo(r->bo);
	uint32_t addr = kgsl_bo_gpuaddr(kgsl_bo, r->offset);
	assert(addr);
	if (r->shift < 0)
		addr >>= -r->shift;
	else
		addr <<= r->shift;
	(*ring->cur++) = addr | r->or;
	kgsl_pipe_add_submit(to_kgsl_pipe(ring->pipe), kgsl_bo);
}

/* emit the gpu address of another ringbuffer; the kgsl backend only
 * supports a single command buffer per ring, hence cmd_idx must be zero:
 */
static uint32_t kgsl_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
		struct fd_ringbuffer *target, uint32_t cmd_idx,
		uint32_t submit_offset, uint32_t size)
{
	struct kgsl_ringbuffer *target_ring = to_kgsl_ringbuffer(target);
	assert(cmd_idx == 0);
	(*ring->cur++) = target_ring->bo->gpuaddr + submit_offset;
	return size;
}

static void kgsl_ringbuffer_destroy(struct fd_ringbuffer *ring)
{
	struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
	/* wait for the last submit to retire before freeing the backing bo: */
	if (ring->last_timestamp)
		fd_pipe_wait(ring->pipe, ring->last_timestamp);
	if (kgsl_ring->bo)
		kgsl_rb_bo_del(kgsl_ring->bo);
	free(kgsl_ring);
}

static const struct fd_ringbuffer_funcs funcs = {
		.hostptr = kgsl_ringbuffer_hostptr,
		.flush = kgsl_ringbuffer_flush,
		.emit_reloc = kgsl_ringbuffer_emit_reloc,
		.emit_reloc_ring = kgsl_ringbuffer_emit_reloc_ring,
		.destroy = kgsl_ringbuffer_destroy,
};

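/* Construct a kgsl ringbuffer: the generic fd_ringbuffer bookkeeping plus a
 * GPU-readonly shared-memory bo (see kgsl_rb_bo_new() above) that holds the
 * actual command stream.
 */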
drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
		uint32_t size)
{
	struct kgsl_ringbuffer *kgsl_ring;
	struct fd_ringbuffer *ring = NULL;

	kgsl_ring = calloc(1, sizeof(*kgsl_ring));
	if (!kgsl_ring) {
		ERROR_MSG("allocation failed");
		goto fail;
	}

	ring = &kgsl_ring->base;
	ring->funcs = &funcs;
	ring->size = size;

	kgsl_ring->bo = kgsl_rb_bo_new(to_kgsl_pipe(pipe), size);
	if (!kgsl_ring->bo) {
		ERROR_MSG("ringbuffer allocation failed");
		goto fail;
	}

	return ring;
fail:
	if (ring)
		fd_ringbuffer_del(ring);
	return NULL;
}
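
/* Usage sketch (not part of this file; assumes the usual public
 * libdrm_freedreno entry points): this constructor is not called directly,
 * it is reached through the generic ringbuffer API, roughly:
 *
 *   struct fd_device *dev = fd_device_new(drm_fd);
 *   struct fd_pipe *pipe = fd_pipe_new(dev, FD_PIPE_3D);
 *   struct fd_ringbuffer *ring = fd_ringbuffer_new(pipe, 0x1000);
 *   ...emit packets into ring...
 *   fd_ringbuffer_flush(ring);
 *   fd_ringbuffer_del(ring);
 */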