/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <assert.h>
#include <inttypes.h>

#include "freedreno_ringbuffer.h"
#include "msm_priv.h"

/* represents a single cmd buffer in the submit ioctl.  Each cmd buffer has
 * a backing bo, and a reloc table.
 */
struct msm_cmd {
	struct list_head list;

	struct fd_ringbuffer *ring;
	struct fd_bo *ring_bo;

	/* relocs table: */
	struct drm_msm_gem_submit_reloc *relocs;
	uint32_t nr_relocs, max_relocs;

	uint32_t size;
};

struct msm_ringbuffer {
	struct fd_ringbuffer base;

	/* submit ioctl related tables:
	 * Note that bos and cmds are tracked by the parent ringbuffer, since
	 * that is global to the submit ioctl call.  The relocs table is tracked
	 * per cmd-buffer.
	 */
	struct {
		/* bos table: */
		struct drm_msm_gem_submit_bo *bos;
		uint32_t nr_bos, max_bos;

		/* cmds table: */
		struct drm_msm_gem_submit_cmd *cmds;
		uint32_t nr_cmds, max_cmds;
	} submit;

	/* should have matching entries in submit.bos: */
	/* Note: only tracked in the parent ringbuffer */
	struct fd_bo **bos;
	uint32_t nr_bos, max_bos;

	/* should have matching entries in submit.cmds: */
	struct msm_cmd **cmds;
	uint32_t nr_cmds, max_cmds;

	/* List of physical cmdstream buffers (msm_cmd) associated with this
	 * logical fd_ringbuffer.
	 *
	 * Note that this is different from msm_ringbuffer::cmds (which shadows
	 * msm_ringbuffer::submit::cmds for tracking submit ioctl related state,
	 * is tracked *only* in the parent ringbuffer, and only holds "completed"
	 * cmd buffers, ie. ones whose size is already known, added via get_cmd()).
	 */
	struct list_head cmd_list;

	int is_growable;
	unsigned cmd_count;

	unsigned seqno;

	/* maps fd_bo to idx: */
	void *bo_table;
};

static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
{
	return (struct msm_ringbuffer *)x;
}

#define INIT_SIZE 0x1000

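/* idx_lock protects the per-bo idx/current_ring_seqno cache used by
 * bo2idx(); table_lock (defined elsewhere in the library) is held here
 * around the ring bo cache.
 */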
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
drm_private extern pthread_mutex_t table_lock;

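/* Return a ring bo to the device's ring_cache; if the cache does not
 * accept it (non-zero return), really delete the bo:
 */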
static void ring_bo_del(struct fd_device *dev, struct fd_bo *bo)
{
	int ret;

	pthread_mutex_lock(&table_lock);
	ret = fd_bo_cache_free(&to_msm_device(dev)->ring_cache, bo);
	pthread_mutex_unlock(&table_lock);

	if (ret == 0)
		return;

	fd_bo_del(bo);
}

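/* Allocate the backing bo for a cmd buffer, preferring the ring_cache.
 * Freshly allocated ring bos are flagged to stay out of the normal bo
 * cache (they are recycled via ring_bo_del() instead):
 */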
static struct fd_bo * ring_bo_new(struct fd_device *dev, uint32_t size)
{
	struct fd_bo *bo;

	bo = fd_bo_cache_alloc(&to_msm_device(dev)->ring_cache, &size, 0);
	if (bo)
		return bo;

	bo = fd_bo_new(dev, size, 0);
	if (!bo)
		return NULL;

	/* keep ringbuffer bos out of the normal bo cache: */
	bo->bo_reuse = FALSE;

	return bo;
}

static void ring_cmd_del(struct msm_cmd *cmd)
{
	if (cmd->ring_bo)
		ring_bo_del(cmd->ring->pipe->dev, cmd->ring_bo);
	list_del(&cmd->list);
	to_msm_ringbuffer(cmd->ring)->cmd_count--;
	free(cmd->relocs);
	free(cmd);
}

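/* Allocate a new cmd buffer (and its backing bo) and append it to the
 * ring's cmd_list:
 */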
static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct msm_cmd *cmd = calloc(1, sizeof(*cmd));

	if (!cmd)
		return NULL;

	cmd->ring = ring;
	cmd->ring_bo = ring_bo_new(ring->pipe->dev, size);
	if (!cmd->ring_bo)
		goto fail;

	list_addtail(&cmd->list, &msm_ring->cmd_list);
	msm_ring->cmd_count++;

	return cmd;

fail:
	/* nothing to unwind beyond the allocation itself: the cmd has no
	 * ring_bo yet and was never added to cmd_list (so ring_cmd_del(),
	 * which does list_del(), must not be used here):
	 */
	free(cmd);
	return NULL;
}

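/* Grow a table so it can hold at least nr+1 entries of sz bytes each,
 * doubling its capacity (or bumping it to nr+5 when doubling would not
 * be enough):
 */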
static void *grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz)
{
	if ((nr + 1) > *max) {
		if ((*max * 2) < (nr + 1))
			*max = nr + 5;
		else
			*max = *max * 2;
		ptr = realloc(ptr, *max * sz);
	}
	return ptr;
}

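/* APPEND(x, name) grows the x->name table (if needed) and reserves one
 * more slot, evaluating to the index of that slot, e.g. roughly:
 *
 *   uint32_t idx = APPEND(cmd, relocs);
 *   cmd->relocs[idx] = ...;
 *
 * (Relies on the gcc/clang statement-expression extension.)
 */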
#define APPEND(x, name) ({ \
	(x)->name = grow((x)->name, (x)->nr_ ## name, &(x)->max_ ## name, sizeof((x)->name[0])); \
	(x)->nr_ ## name ++; \
})

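/* The cmd buffer currently being written is always the last entry on
 * cmd_list:
 */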
static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
	return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
}

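/* Append an entry to both the submit.bos table (what the kernel sees)
 * and the parallel bos table (which holds the fd_bo references).  The
 * two are grown in lockstep, so they share the same index:
 */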
static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	uint32_t idx;

	idx = APPEND(&msm_ring->submit, bos);
	idx = APPEND(msm_ring, bos);

	msm_ring->submit.bos[idx].flags = 0;
	msm_ring->submit.bos[idx].handle = bo->handle;
	msm_ring->submit.bos[idx].presumed = to_msm_bo(bo)->presumed;

	msm_ring->bos[idx] = fd_bo_ref(bo);

	return idx;
}

/* add (if needed) bo, return idx: */
static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t flags)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct msm_bo *msm_bo = to_msm_bo(bo);
	uint32_t idx;
	pthread_mutex_lock(&idx_lock);
	if (msm_bo->current_ring_seqno == msm_ring->seqno) {
		idx = msm_bo->idx;
	} else {
		void *val;

		if (!msm_ring->bo_table)
			msm_ring->bo_table = drmHashCreate();

		if (!drmHashLookup(msm_ring->bo_table, bo->handle, &val)) {
			/* found */
			idx = (uint32_t)(uintptr_t)val;
		} else {
			idx = append_bo(ring, bo);
			val = (void *)(uintptr_t)idx;
			drmHashInsert(msm_ring->bo_table, bo->handle, val);
		}
		msm_bo->current_ring_seqno = msm_ring->seqno;
		msm_bo->idx = idx;
	}
	pthread_mutex_unlock(&idx_lock);
	if (flags & FD_RELOC_READ)
		msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_READ;
	if (flags & FD_RELOC_WRITE)
		msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_WRITE;
	return idx;
}

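/* Check that the cmd's submit_idx still refers to the given bo: */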
static int check_cmd_bo(struct fd_ringbuffer *ring,
		struct drm_msm_gem_submit_cmd *cmd, struct fd_bo *bo)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	return msm_ring->submit.bos[cmd->submit_idx].handle == bo->handle;
}

/* Ensure that submit has corresponding entry in cmds table for the
 * target cmdstream buffer:
 */
static void get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
		uint32_t submit_offset, uint32_t size, uint32_t type)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct drm_msm_gem_submit_cmd *cmd;
	uint32_t i;

	/* figure out if we already have a cmd buf: */
	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
		cmd = &msm_ring->submit.cmds[i];
		if ((cmd->submit_offset == submit_offset) &&
				(cmd->size == size) &&
				(cmd->type == type) &&
				check_cmd_bo(ring, cmd, target_cmd->ring_bo))
			return;
	}

	/* create cmd buf if not: */
	i = APPEND(&msm_ring->submit, cmds);
	APPEND(msm_ring, cmds);
	msm_ring->cmds[i] = target_cmd;
	cmd = &msm_ring->submit.cmds[i];
	cmd->type = type;
	cmd->submit_idx = bo2idx(ring, target_cmd->ring_bo, FD_RELOC_READ);
	cmd->submit_offset = submit_offset;
	cmd->size = size;
	cmd->pad = 0;

	target_cmd->size = size;
}

static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring)
{
	return fd_bo_map(current_cmd(ring)->ring_bo);
}

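/* Find the index of the first reloc (at or after 'start') with
 * submit_offset >= offset:
 */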
static uint32_t find_next_reloc_idx(struct msm_cmd *msm_cmd,
		uint32_t start, uint32_t offset)
{
	uint32_t i;

	/* a binary search would be more clever.. */
	for (i = start; i < msm_cmd->nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc *reloc = &msm_cmd->relocs[i];
		if (reloc->submit_offset >= offset)
			return i;
	}

	return i;
}

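/* Delete all cmd buffers on the ring's cmd_list: */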
static void delete_cmds(struct msm_ringbuffer *msm_ring)
{
	struct msm_cmd *cmd, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &msm_ring->cmd_list, list) {
		ring_cmd_del(cmd);
	}
}

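/* Reset the submit-tracking state after a flush (or explicit reset):
 * drop the bo references, clear the per-cmd reloc counts and empty the
 * bos/cmds tables.  Growable rings also free their cmd buffers; otherwise
 * the single cmd buffer is kept and only its reloc count is reset:
 */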
static void flush_reset(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	unsigned i;

	for (i = 0; i < msm_ring->nr_bos; i++) {
		struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
		msm_bo->current_ring_seqno = 0;
		fd_bo_del(&msm_bo->base);
	}

	/* for each of the cmd buffers, clear their relocs: */
	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
		struct msm_cmd *target_cmd = msm_ring->cmds[i];
		target_cmd->nr_relocs = 0;
	}

	msm_ring->submit.nr_cmds = 0;
	msm_ring->submit.nr_bos = 0;
	msm_ring->nr_cmds = 0;
	msm_ring->nr_bos = 0;

	if (msm_ring->bo_table) {
		drmHashDestroy(msm_ring->bo_table);
		msm_ring->bo_table = NULL;
	}

	if (msm_ring->is_growable) {
		delete_cmds(msm_ring);
	} else {
		/* in old mode, just reset the # of relocs: */
		current_cmd(ring)->nr_relocs = 0;
	}
}

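/* Close out the current cmd buffer: compute its offset/size relative to
 * last_start and register it in the parent ring's submit.cmds table, as
 * an IB target when this is a child ring, else as the main cmd buffer:
 */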
static void finalize_current_cmd(struct fd_ringbuffer *ring, uint32_t *last_start)
{
	uint32_t submit_offset, size, type;
	struct fd_ringbuffer *parent;

	if (ring->parent) {
		parent = ring->parent;
		type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
	} else {
		parent = ring;
		type = MSM_SUBMIT_CMD_BUF;
	}

	submit_offset = offset_bytes(last_start, ring->start);
	size = offset_bytes(ring->cur, last_start);

	get_cmd(parent, current_cmd(ring), submit_offset, size, type);
}

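/* Dump the submit tables for debugging (used when the submit ioctl fails): */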
static void dump_submit(struct msm_ringbuffer *msm_ring)
{
	uint32_t i, j;

	for (i = 0; i < msm_ring->submit.nr_bos; i++) {
		struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
		ERROR_MSG("  bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
	}
	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
		struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
		ERROR_MSG("  cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
				i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
		for (j = 0; j < cmd->nr_relocs; j++) {
			struct drm_msm_gem_submit_reloc *r = &relocs[j];
			ERROR_MSG("    reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
					", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
					r->reloc_idx, r->reloc_offset);
		}
	}
}

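/* Flush: finalize the current cmd buffer, point the submit request at the
 * bos/cmds tables, fix up each cmd's reloc slice and call the
 * DRM_MSM_GEM_SUBMIT ioctl.  Optional in/out fence fds map to the
 * MSM_SUBMIT_FENCE_FD_IN/OUT flags:
 */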
static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
		int in_fence_fd, int *out_fence_fd)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct drm_msm_gem_submit req = {
			.flags = to_msm_pipe(ring->pipe)->pipe,
			.queueid = to_msm_pipe(ring->pipe)->queue_id,
	};
	uint32_t i;
	int ret;

	if (in_fence_fd != -1) {
		req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
		req.fence_fd = in_fence_fd;
	}

	if (out_fence_fd) {
		req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
	}

	finalize_current_cmd(ring, last_start);

	/* needs to be after finalize_current_cmd()/get_cmd(), which can
	 * (re)allocate the bos/cmds tables:
	 */
	req.bos = VOID2U64(msm_ring->submit.bos);
	req.nr_bos = msm_ring->submit.nr_bos;
	req.cmds = VOID2U64(msm_ring->submit.cmds);
	req.nr_cmds = msm_ring->submit.nr_cmds;

	/* for each of the cmds, fix up their relocs: */
	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
		struct msm_cmd *msm_cmd = msm_ring->cmds[i];
		uint32_t a = find_next_reloc_idx(msm_cmd, 0, cmd->submit_offset);
		uint32_t b = find_next_reloc_idx(msm_cmd, a, cmd->submit_offset + cmd->size);
		cmd->relocs = VOID2U64(&msm_cmd->relocs[a]);
		cmd->nr_relocs = (b > a) ? b - a : 0;
	}

	DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);

	ret = drmCommandWriteRead(ring->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
			&req, sizeof(req));
	if (ret) {
		ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
		dump_submit(msm_ring);
	} else {
		/* update timestamp on all rings associated with submit: */
		for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
			struct msm_cmd *msm_cmd = msm_ring->cmds[i];
			msm_cmd->ring->last_timestamp = req.fence;
		}

		if (out_fence_fd) {
			*out_fence_fd = req.fence_fd;
		}
	}

	flush_reset(ring);

	return ret;
}

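/* Grow a (growable) ring: finalize what has been written so far and start
 * a fresh cmd buffer of the requested size:
 */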
static void msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
{
	assert(to_msm_ringbuffer(ring)->is_growable);
	finalize_current_cmd(ring, ring->last_start);
	ring_cmd_new(ring, size);
}

static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
{
	flush_reset(ring);
}

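/* Emit a reloc at the current write pointer.  The dword written now is a
 * best guess based on the bo's presumed address:
 *
 *   value = (presumed >> -shift) | or    (shift <  0)
 *   value = (presumed <<  shift) | or    (shift >= 0)
 *
 * and a reloc entry is recorded so the kernel can patch in the real
 * address at submit time.  On gpu_id >= 500 (64-bit addressing) a second
 * dword/reloc is emitted for the upper 32 bits:
 */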
static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
		const struct fd_reloc *r)
{
	struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
	struct msm_bo *msm_bo = to_msm_bo(r->bo);
	struct drm_msm_gem_submit_reloc *reloc;
	struct msm_cmd *cmd = current_cmd(ring);
	uint32_t idx = APPEND(cmd, relocs);
	uint32_t addr;

	reloc = &cmd->relocs[idx];

	reloc->reloc_idx = bo2idx(parent, r->bo, r->flags);
	reloc->reloc_offset = r->offset;
	reloc->or = r->or;
	reloc->shift = r->shift;
	reloc->submit_offset = offset_bytes(ring->cur, ring->start);

	addr = msm_bo->presumed;
	if (reloc->shift < 0)
		addr >>= -reloc->shift;
	else
		addr <<= reloc->shift;
	(*ring->cur++) = addr | r->or;

	if (ring->pipe->gpu_id >= 500) {
		struct drm_msm_gem_submit_reloc *reloc_hi;

		/* NOTE: grab reloc_idx *before* APPEND() since that could
		 * realloc() meaning that 'reloc' ptr is no longer valid:
		 */
		uint32_t reloc_idx = reloc->reloc_idx;

		idx = APPEND(cmd, relocs);

		reloc_hi = &cmd->relocs[idx];

		reloc_hi->reloc_idx = reloc_idx;
		reloc_hi->reloc_offset = r->offset;
		reloc_hi->or = r->orhi;
		reloc_hi->shift = r->shift - 32;
		reloc_hi->submit_offset = offset_bytes(ring->cur, ring->start);

		addr = msm_bo->presumed >> 32;
		if (reloc_hi->shift < 0)
			addr >>= -reloc_hi->shift;
		else
			addr <<= reloc_hi->shift;
		(*ring->cur++) = addr | r->orhi;
	}
}

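/* Emit a reloc pointing at the cmd_idx'th cmd buffer of a target (child)
 * ring, ie. an IB.  Returns the size of that cmd buffer (the recorded
 * size for already-finalized cmd buffers, else the caller-supplied size):
 */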
static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
		struct fd_ringbuffer *target, uint32_t cmd_idx,
		uint32_t submit_offset, uint32_t size)
{
	struct msm_cmd *cmd = NULL;
	uint32_t idx = 0;

	LIST_FOR_EACH_ENTRY(cmd, &to_msm_ringbuffer(target)->cmd_list, list) {
		if (idx == cmd_idx)
			break;
		idx++;
	}

	assert(cmd && (idx == cmd_idx));

	if (idx < (to_msm_ringbuffer(target)->cmd_count - 1)) {
		/* All but the last cmd buffer are fully "baked" (ie. have already
		 * been through get_cmd() and added to the cmds table), and the
		 * size passed in (which is computed from the last cmd buffer)
		 * does not apply to them, so use the recorded size instead:
		 */
		size = cmd->size;
	} else {
		get_cmd(ring, cmd, submit_offset, size, MSM_SUBMIT_CMD_IB_TARGET_BUF);
	}

	msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
		.bo = cmd->ring_bo,
		.flags = FD_RELOC_READ,
		.offset = submit_offset,
	});

	return size;
}

static uint32_t msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
{
	return to_msm_ringbuffer(ring)->cmd_count;
}

static void msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

	flush_reset(ring);
	delete_cmds(msm_ring);

	free(msm_ring->submit.cmds);
	free(msm_ring->submit.bos);
	free(msm_ring->bos);
	free(msm_ring->cmds);
	free(msm_ring);
}

static const struct fd_ringbuffer_funcs funcs = {
		.hostptr = msm_ringbuffer_hostptr,
		.flush = msm_ringbuffer_flush,
		.grow = msm_ringbuffer_grow,
		.reset = msm_ringbuffer_reset,
		.emit_reloc = msm_ringbuffer_emit_reloc,
		.emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
		.cmd_count = msm_ringbuffer_cmd_count,
		.destroy = msm_ringbuffer_destroy,
};

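/* Create a new ringbuffer.  A size of zero requests a growable ring
 * (requires FD_VERSION_UNLIMITED_CMDS), starting at INIT_SIZE:
 */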
drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
		uint32_t size)
{
	struct msm_ringbuffer *msm_ring;
	struct fd_ringbuffer *ring;

	msm_ring = calloc(1, sizeof(*msm_ring));
	if (!msm_ring) {
		ERROR_MSG("allocation failed");
		return NULL;
	}

	if (size == 0) {
		assert(pipe->dev->version >= FD_VERSION_UNLIMITED_CMDS);
		size = INIT_SIZE;
		msm_ring->is_growable = TRUE;
	}

	list_inithead(&msm_ring->cmd_list);
	msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;

	ring = &msm_ring->base;
	ring->funcs = &funcs;
	ring->size = size;
	ring->pipe = pipe;   /* needed in ring_cmd_new() */

	ring_cmd_new(ring, size);

	return ring;
}
    615