/**************************************************************************
 *
 * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 * Copyright 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include "wsbm_pool.h"
#include "wsbm_fencemgr.h"
#include "wsbm_manager.h"
#include "wsbm_mm.h"
#include "wsbm_priv.h"

/*
 * Malloced memory must be aligned to 16 bytes, since that's what
 * the DMA bitblt requires.
 */

#define WSBM_USER_ALIGN_ADD 16
#define WSBM_USER_ALIGN_SYSMEM(_val) \
    ((void *)(((unsigned long) (_val) + 15) & ~15))

struct _WsbmUserBuffer
{
    struct _WsbmBufStorage buf;
    struct _WsbmKernelBuf kBuf;

    /* Protected by the pool mutex */

    struct _WsbmListHead lru;
    struct _WsbmListHead delayed;

    /* Protected by the buffer mutex */

    unsigned long size;
    unsigned long alignment;

    struct _WsbmCond event;
    uint32_t proposedPlacement;
    uint32_t newFenceType;

    void *map;
    void *sysmem;
    int unFenced;
    struct _WsbmFenceObject *fence;
    struct _WsbmMMNode *node;

    struct _WsbmAtomic writers;
};

struct _WsbmUserPool
{
    /*
     * Constant after initialization.
     */

    struct _WsbmBufferPool pool;
    unsigned long agpOffset;
    unsigned long agpMap;
    unsigned long agpSize;
    unsigned long vramOffset;
    unsigned long vramMap;
    unsigned long vramSize;
    struct _WsbmMutex mutex;
    struct _WsbmListHead delayed;
    struct _WsbmListHead vramLRU;
    struct _WsbmListHead agpLRU;
    struct _WsbmMM vramMM;
    struct _WsbmMM agpMM;
    uint32_t (*fenceTypes) (uint64_t);
};

static inline struct _WsbmUserPool *
userPool(struct _WsbmUserBuffer *buf)
{
    return containerOf(buf->buf.pool, struct _WsbmUserPool, pool);
}

static inline struct _WsbmUserBuffer *
userBuf(struct _WsbmBufStorage *buf)
{
    return containerOf(buf, struct _WsbmUserBuffer, buf);
}

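/*
 * Wait for all GPU activity on a buffer to finish. Called with the buffer
 * mutex held; the mutex is temporarily dropped around the blocking fence
 * wait so that other threads can make progress.
 */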
static void
waitIdleLocked(struct _WsbmBufStorage *buf, int lazy)
{
    struct _WsbmUserBuffer *vBuf = userBuf(buf);

    while (vBuf->unFenced || vBuf->fence != NULL) {
        if (vBuf->unFenced)
            WSBM_COND_WAIT(&vBuf->event, &buf->mutex);

        if (vBuf->fence != NULL) {
            if (!wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask)) {
                struct _WsbmFenceObject *fence =
                    wsbmFenceReference(vBuf->fence);

                WSBM_MUTEX_UNLOCK(&buf->mutex);
                (void)wsbmFenceFinish(fence, vBuf->kBuf.fence_type_mask,
                                      lazy);
                WSBM_MUTEX_LOCK(&buf->mutex);

                if (vBuf->fence == fence)
                    wsbmFenceUnreference(&vBuf->fence);

                wsbmFenceUnreference(&fence);
            } else {
                wsbmFenceUnreference(&vBuf->fence);
            }
        }
    }
}

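/*
 * Pool entry point: block until the buffer is idle. Takes and releases the
 * buffer mutex around waitIdleLocked().
 */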
static int
pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
{
    WSBM_MUTEX_LOCK(&buf->mutex);
    waitIdleLocked(buf, lazy);
    WSBM_MUTEX_UNLOCK(&buf->mutex);

    return 0;
}

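/*
 * Evict the least recently used buffer on @lru to a freshly malloced
 * system-memory copy. Called with the pool mutex held; the pool mutex is
 * briefly dropped so the buffer mutex can be taken first. Returns -ENOMEM
 * if the list is empty or the system-memory copy cannot be allocated.
 */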
static int
evict_lru(struct _WsbmListHead *lru)
{
    struct _WsbmUserBuffer *vBuf;
    struct _WsbmUserPool *p;
    struct _WsbmListHead *list = lru->next;
    int err;

    if (list == lru) {
        return -ENOMEM;
    }

    vBuf = WSBMLISTENTRY(list, struct _WsbmUserBuffer, lru);
    p = userPool(vBuf);
    WSBM_MUTEX_UNLOCK(&p->mutex);
    WSBM_MUTEX_LOCK(&vBuf->buf.mutex);
    WSBM_MUTEX_LOCK(&p->mutex);

    vBuf->sysmem = malloc(vBuf->size + WSBM_USER_ALIGN_ADD);

    if (!vBuf->sysmem) {
        err = -ENOMEM;
        goto out_unlock;
    }

    if (vBuf->fence) {
        (void)wsbmFenceFinish(vBuf->fence, vBuf->kBuf.fence_type_mask, 0);
        wsbmFenceUnreference(&vBuf->fence);
    }

    memcpy(WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem), vBuf->map, vBuf->size);
    WSBMLISTDELINIT(&vBuf->lru);
    vBuf->kBuf.placement = WSBM_PL_FLAG_SYSTEM;
    vBuf->map = WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem);

    /*
     * FIXME: Free memory.
     */

    err = 0;
  out_unlock:
    WSBM_MUTEX_UNLOCK(&vBuf->buf.mutex);
    return err;
}

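/*
 * Create a new buffer. Placement is tried in the order TT (AGP), VRAM and
 * finally malloced system memory, unless WSBM_PL_FLAG_NO_EVICT without
 * WSBM_PL_FLAG_SYSTEM forbids the system-memory fallback.
 */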
static struct _WsbmBufStorage *
pool_create(struct _WsbmBufferPool *pool,
            unsigned long size, uint32_t placement, unsigned alignment)
{
    struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);
    struct _WsbmUserBuffer *vBuf = calloc(1, sizeof(*vBuf));

    if (!vBuf)
        return NULL;

    wsbmBufStorageInit(&vBuf->buf, pool);
    vBuf->sysmem = NULL;
    vBuf->proposedPlacement = placement;
    vBuf->size = size;
    vBuf->alignment = alignment;

    WSBMINITLISTHEAD(&vBuf->lru);
    WSBMINITLISTHEAD(&vBuf->delayed);
    WSBM_MUTEX_LOCK(&p->mutex);

    if (placement & WSBM_PL_FLAG_TT) {
        vBuf->node = wsbmMMSearchFree(&p->agpMM, size, alignment, 1);
        if (vBuf->node)
            vBuf->node = wsbmMMGetBlock(vBuf->node, size, alignment);

        if (vBuf->node) {
            vBuf->kBuf.placement = WSBM_PL_FLAG_TT;
            vBuf->kBuf.gpuOffset = p->agpOffset + vBuf->node->start;
            vBuf->map = (void *)(p->agpMap + vBuf->node->start);
            WSBMLISTADDTAIL(&vBuf->lru, &p->agpLRU);
            goto have_mem;
        }
    }

    if (placement & WSBM_PL_FLAG_VRAM) {
        vBuf->node = wsbmMMSearchFree(&p->vramMM, size, alignment, 1);
        if (vBuf->node)
            vBuf->node = wsbmMMGetBlock(vBuf->node, size, alignment);

        if (vBuf->node) {
            vBuf->kBuf.placement = WSBM_PL_FLAG_VRAM;
            vBuf->kBuf.gpuOffset = p->vramOffset + vBuf->node->start;
            vBuf->map = (void *)(p->vramMap + vBuf->node->start);
            WSBMLISTADDTAIL(&vBuf->lru, &p->vramLRU);
            goto have_mem;
        }
    }

    if ((placement & WSBM_PL_FLAG_NO_EVICT)
        && !(placement & WSBM_PL_FLAG_SYSTEM)) {
        WSBM_MUTEX_UNLOCK(&p->mutex);
        goto out_err;
    }

    vBuf->sysmem = malloc(size + WSBM_USER_ALIGN_ADD);
    vBuf->kBuf.placement = WSBM_PL_FLAG_SYSTEM;
    vBuf->map = WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem);

  have_mem:

    WSBM_MUTEX_UNLOCK(&p->mutex);
    if (vBuf->sysmem != NULL
        || (!(vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM)))
        return &vBuf->buf;
  out_err:
    free(vBuf);
    return NULL;
}

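/*
 * Validate a buffer for GPU use. If the buffer already sits in an
 * acceptable memory region nothing is moved; otherwise it is evicted to
 * system memory and then copied into the first region (TT, then VRAM)
 * where space can be found or made by evicting other buffers.
 */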
static int
pool_validate(struct _WsbmBufStorage *buf, uint64_t set_flags,
              uint64_t clr_flags)
{
    struct _WsbmUserBuffer *vBuf = userBuf(buf);
    struct _WsbmUserPool *p = userPool(vBuf);
    int err = -ENOMEM;

    WSBM_MUTEX_LOCK(&buf->mutex);

    while (wsbmAtomicRead(&vBuf->writers) != 0)
        WSBM_COND_WAIT(&vBuf->event, &buf->mutex);

    vBuf->unFenced = 1;

    WSBM_MUTEX_LOCK(&p->mutex);
    WSBMLISTDELINIT(&vBuf->lru);

    vBuf->proposedPlacement =
        (vBuf->proposedPlacement | set_flags) & ~clr_flags;

    if ((vBuf->proposedPlacement & vBuf->kBuf.placement & WSBM_PL_MASK_MEM) ==
        vBuf->kBuf.placement) {
        err = 0;
        goto have_mem;
    }

    /*
     * We're moving to another memory region, so evict first and we'll
     * do a sw copy to the other region.
     */

    if (!(vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM)) {
        struct _WsbmListHead tmpLRU;

        WSBMINITLISTHEAD(&tmpLRU);
        WSBMLISTADDTAIL(&tmpLRU, &vBuf->lru);
        err = evict_lru(&tmpLRU);
        if (err)
            goto have_mem;
    }

    if (vBuf->proposedPlacement & WSBM_PL_FLAG_TT) {
        do {
            vBuf->node =
                wsbmMMSearchFree(&p->agpMM, vBuf->size, vBuf->alignment, 1);
            if (vBuf->node)
                vBuf->node =
                    wsbmMMGetBlock(vBuf->node, vBuf->size, vBuf->alignment);

            if (vBuf->node) {
                err = 0;
                vBuf->kBuf.placement = WSBM_PL_FLAG_TT;
                vBuf->kBuf.gpuOffset = p->agpOffset + vBuf->node->start;
                vBuf->map = (void *)(p->agpMap + vBuf->node->start);
                memcpy(vBuf->map, WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem),
                       vBuf->size);
                free(vBuf->sysmem);
                goto have_mem;
            }
        } while (evict_lru(&p->agpLRU) == 0);
    }

    if (vBuf->proposedPlacement & WSBM_PL_FLAG_VRAM) {
        do {
            vBuf->node =
                wsbmMMSearchFree(&p->vramMM, vBuf->size, vBuf->alignment, 1);
            if (vBuf->node)
                vBuf->node =
                    wsbmMMGetBlock(vBuf->node, vBuf->size, vBuf->alignment);

            if (vBuf->node) {
                err = 0;
                vBuf->kBuf.placement = WSBM_PL_FLAG_VRAM;
                vBuf->kBuf.gpuOffset = p->vramOffset + vBuf->node->start;
                vBuf->map = (void *)(p->vramMap + vBuf->node->start);
                memcpy(vBuf->map, WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem),
                       vBuf->size);
                free(vBuf->sysmem);
                goto have_mem;
            }
        } while (evict_lru(&p->vramLRU) == 0);
    }

    if (vBuf->proposedPlacement & WSBM_PL_FLAG_SYSTEM)
        goto have_mem;

    err = -ENOMEM;

  have_mem:
    vBuf->newFenceType = p->fenceTypes(set_flags);
    WSBM_MUTEX_UNLOCK(&p->mutex);
    WSBM_MUTEX_UNLOCK(&buf->mutex);
    return err;
}

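/*
 * Change a buffer's placement outside of a command submission: revalidate
 * into the requested region and then clear the unFenced flag, since no
 * fence will follow.
 */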
static int
pool_setStatus(struct _WsbmBufStorage *buf,
               uint32_t set_placement, uint32_t clr_placement)
{
    struct _WsbmUserBuffer *vBuf = userBuf(buf);
    int ret;

    ret = pool_validate(buf, set_placement, clr_placement);
    vBuf->unFenced = 0;
    return ret;
}

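/*
 * Walk the pool's delayed-destroy list and free every buffer whose fence
 * has signaled. The scan stops at the first buffer that is still busy.
 */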
void
release_delayed_buffers(struct _WsbmUserPool *p)
{
    struct _WsbmUserBuffer *vBuf;
    struct _WsbmListHead *list, *next;

    WSBM_MUTEX_LOCK(&p->mutex);

    /*
     * We don't need to take the buffer mutexes in this loop, since
     * the only other user is the evict_lru function, which has the
     * pool mutex held when accessing the buffer fence member.
     */

    WSBMLISTFOREACHSAFE(list, next, &p->delayed) {
        vBuf = WSBMLISTENTRY(list, struct _WsbmUserBuffer, delayed);

        if (!vBuf->fence
            || wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask)) {
            if (vBuf->fence)
                wsbmFenceUnreference(&vBuf->fence);

            WSBMLISTDEL(&vBuf->delayed);
            WSBMLISTDEL(&vBuf->lru);

            if ((vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM) == 0)
                wsbmMMPutBlock(vBuf->node);
            else
                free(vBuf->sysmem);

            free(vBuf);
        } else
            break;
    }
    WSBM_MUTEX_UNLOCK(&p->mutex);
}

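/*
 * Release a buffer. If it is still busy on the GPU it is queued on the
 * pool's delayed-destroy list and freed later by release_delayed_buffers();
 * otherwise its backing memory is released immediately.
 */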
static void
pool_destroy(struct _WsbmBufStorage **buf)
{
    struct _WsbmUserBuffer *vBuf = userBuf(*buf);
    struct _WsbmUserPool *p = userPool(vBuf);

    *buf = NULL;

    WSBM_MUTEX_LOCK(&vBuf->buf.mutex);
    if ((vBuf->fence
         && !wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask))) {
        WSBM_MUTEX_LOCK(&p->mutex);
        WSBMLISTADDTAIL(&vBuf->delayed, &p->delayed);
        WSBM_MUTEX_UNLOCK(&p->mutex);
        WSBM_MUTEX_UNLOCK(&vBuf->buf.mutex);
        return;
    }

    if (vBuf->fence)
        wsbmFenceUnreference(&vBuf->fence);

    WSBM_MUTEX_LOCK(&p->mutex);
    WSBMLISTDEL(&vBuf->lru);
    WSBM_MUTEX_UNLOCK(&p->mutex);

    if (!(vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM))
        wsbmMMPutBlock(vBuf->node);
    else
        free(vBuf->sysmem);

    free(vBuf);
    return;
}

static int
pool_map(struct _WsbmBufStorage *buf, unsigned mode __attribute__ ((unused)),
         void **virtual)
{
    struct _WsbmUserBuffer *vBuf = userBuf(buf);

    *virtual = vBuf->map;
    return 0;
}

static void
pool_unmap(struct _WsbmBufStorage *buf __attribute__ ((unused)))
{
    ;
}

static void
pool_releaseFromCpu(struct _WsbmBufStorage *buf,
                    unsigned mode __attribute__ ((unused)))
{
    struct _WsbmUserBuffer *vBuf = userBuf(buf);

    if (wsbmAtomicDecZero(&vBuf->writers))
        WSBM_COND_BROADCAST(&vBuf->event);
}

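/*
 * Prepare a buffer for CPU access. With WSBM_SYNCCPU_DONT_BLOCK the call
 * fails with -EBUSY if the buffer is unfenced or its fence has not yet
 * signaled; otherwise it blocks until the buffer is idle. Each successful
 * call takes a writer reference that pool_releaseFromCpu() drops.
 */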
static int
pool_syncForCpu(struct _WsbmBufStorage *buf, unsigned mode)
{
    struct _WsbmUserBuffer *vBuf = userBuf(buf);
    int ret = 0;

    WSBM_MUTEX_LOCK(&buf->mutex);
    if ((mode & WSBM_SYNCCPU_DONT_BLOCK)) {

        if (vBuf->unFenced) {
            ret = -EBUSY;
            goto out_unlock;
        }

        ret = 0;
        if ((vBuf->fence == NULL) ||
            wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask)) {
            wsbmFenceUnreference(&vBuf->fence);
            wsbmAtomicInc(&vBuf->writers);
        } else
            ret = -EBUSY;

        goto out_unlock;
    }
    waitIdleLocked(buf, 0);
    wsbmAtomicInc(&vBuf->writers);
  out_unlock:
    WSBM_MUTEX_UNLOCK(&buf->mutex);
    return ret;
}

static unsigned long
pool_offset(struct _WsbmBufStorage *buf)
{
    return userBuf(buf)->kBuf.gpuOffset;
}

static unsigned long
pool_poolOffset(struct _WsbmBufStorage *buf __attribute__ ((unused)))
{
    return 0UL;
}

static unsigned long
pool_size(struct _WsbmBufStorage *buf)
{
    return userBuf(buf)->size;
}

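/*
 * Attach a new fence to a buffer after command submission, mark it as
 * fenced, and put it back on the LRU list of the memory region it
 * currently occupies.
 */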
static void
pool_fence(struct _WsbmBufStorage *buf, struct _WsbmFenceObject *fence)
{
    struct _WsbmUserBuffer *vBuf = userBuf(buf);
    struct _WsbmUserPool *p = userPool(vBuf);

    WSBM_MUTEX_LOCK(&buf->mutex);

    if (vBuf->fence)
        wsbmFenceUnreference(&vBuf->fence);

    vBuf->fence = wsbmFenceReference(fence);
    vBuf->unFenced = 0;
    vBuf->kBuf.fence_type_mask = vBuf->newFenceType;

    WSBM_COND_BROADCAST(&vBuf->event);
    WSBM_MUTEX_LOCK(&p->mutex);
    if (vBuf->kBuf.placement & WSBM_PL_FLAG_VRAM)
        WSBMLISTADDTAIL(&vBuf->lru, &p->vramLRU);
    else if (vBuf->kBuf.placement & WSBM_PL_FLAG_TT)
        WSBMLISTADDTAIL(&vBuf->lru, &p->agpLRU);
    WSBM_MUTEX_UNLOCK(&p->mutex);
    WSBM_MUTEX_UNLOCK(&buf->mutex);
}

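/*
 * Undo a validation that will not be followed by a fence (for example when
 * command submission fails): clear the unFenced flag, wake up waiters and
 * put the buffer back on the appropriate LRU list.
 */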
static void
pool_unvalidate(struct _WsbmBufStorage *buf)
{
    struct _WsbmUserBuffer *vBuf = userBuf(buf);
    struct _WsbmUserPool *p = userPool(vBuf);

    WSBM_MUTEX_LOCK(&buf->mutex);

    if (!vBuf->unFenced)
        goto out_unlock;

    vBuf->unFenced = 0;
    WSBM_COND_BROADCAST(&vBuf->event);
    WSBM_MUTEX_LOCK(&p->mutex);
    if (vBuf->kBuf.placement & WSBM_PL_FLAG_VRAM)
        WSBMLISTADDTAIL(&vBuf->lru, &p->vramLRU);
    else if (vBuf->kBuf.placement & WSBM_PL_FLAG_TT)
        WSBMLISTADDTAIL(&vBuf->lru, &p->agpLRU);
    WSBM_MUTEX_UNLOCK(&p->mutex);

  out_unlock:

    WSBM_MUTEX_UNLOCK(&buf->mutex);
}

static struct _WsbmKernelBuf *
pool_kernel(struct _WsbmBufStorage *buf)
{
    struct _WsbmUserBuffer *vBuf = userBuf(buf);

    return &vBuf->kBuf;
}

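/*
 * Destroy the pool. Waits for all delayed-destroy buffers to become idle,
 * evicts everything still resident in VRAM and AGP to system memory, and
 * finally tears down the range managers.
 */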
static void
pool_takedown(struct _WsbmBufferPool *pool)
{
    struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);
    int empty;

    do {
        release_delayed_buffers(p);
        WSBM_MUTEX_LOCK(&p->mutex);
        empty = (p->delayed.next == &p->delayed);
        WSBM_MUTEX_UNLOCK(&p->mutex);

        if (!empty)
            usleep(1000);

    } while (!empty);
    WSBM_MUTEX_LOCK(&p->mutex);

    while (evict_lru(&p->vramLRU) == 0) ;
    while (evict_lru(&p->agpLRU) == 0) ;

    WSBM_MUTEX_UNLOCK(&p->mutex);

    wsbmMMtakedown(&p->agpMM);
    wsbmMMtakedown(&p->vramMM);

    free(p);
}

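/*
 * Evict every buffer currently resident in VRAM and/or the AGP aperture
 * back to system memory.
 */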
void
wsbmUserPoolClean(struct _WsbmBufferPool *pool, int cleanVram, int cleanAgp)
{
    struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);

    WSBM_MUTEX_LOCK(&p->mutex);
    if (cleanVram)
        while (evict_lru(&p->vramLRU) == 0) ;
    if (cleanAgp)
        while (evict_lru(&p->agpLRU) == 0) ;
    WSBM_MUTEX_UNLOCK(&p->mutex);
}

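/*
 * Create a user-space memory-managed buffer pool on top of two fixed,
 * premapped apertures: one VRAM range and one AGP/TT range. The fenceTypes
 * callback translates validation flags into the fence type mask used for
 * idle checks.
 *
 * Illustrative call (the addresses, sizes and driverFenceTypes callback
 * below are hypothetical, not part of this file):
 *
 *     struct _WsbmBufferPool *pool =
 *         wsbmUserPoolInit(vramVirtual, 0, 64 * 1024 * 1024,
 *                          agpVirtual, 0, 128 * 1024 * 1024,
 *                          driverFenceTypes);
 */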
struct _WsbmBufferPool *
wsbmUserPoolInit(void *vramAddr,
                 unsigned long vramStart, unsigned long vramSize,
                 void *agpAddr, unsigned long agpStart,
                 unsigned long agpSize,
                 uint32_t (*fenceTypes) (uint64_t set_flags))
{
    struct _WsbmBufferPool *pool;
    struct _WsbmUserPool *uPool;
    int ret;

    uPool = calloc(1, sizeof(*uPool));
    if (!uPool)
        goto out_err0;

    ret = WSBM_MUTEX_INIT(&uPool->mutex);
    if (ret)
        goto out_err0;

    ret = wsbmMMinit(&uPool->vramMM, 0, vramSize);
    if (ret)
        goto out_err1;

    ret = wsbmMMinit(&uPool->agpMM, 0, agpSize);
    if (ret)
        goto out_err2;

    WSBMINITLISTHEAD(&uPool->delayed);
    WSBMINITLISTHEAD(&uPool->vramLRU);
    WSBMINITLISTHEAD(&uPool->agpLRU);

    uPool->agpOffset = agpStart;
    uPool->agpMap = (unsigned long)agpAddr;
    uPool->vramOffset = vramStart;
    uPool->vramMap = (unsigned long)vramAddr;
    uPool->fenceTypes = fenceTypes;

    pool = &uPool->pool;
    pool->map = &pool_map;
    pool->unmap = &pool_unmap;
    pool->destroy = &pool_destroy;
    pool->offset = &pool_offset;
    pool->poolOffset = &pool_poolOffset;
    pool->size = &pool_size;
    pool->create = &pool_create;
    pool->fence = &pool_fence;
    pool->unvalidate = &pool_unvalidate;
    pool->kernel = &pool_kernel;
    pool->validate = &pool_validate;
    pool->waitIdle = &pool_waitIdle;
    pool->takeDown = &pool_takedown;
    pool->setStatus = &pool_setStatus;
    pool->syncforcpu = &pool_syncForCpu;
    pool->releasefromcpu = &pool_releaseFromCpu;

    return pool;

  out_err2:
    wsbmMMtakedown(&uPool->vramMM);
  out_err1:
    WSBM_MUTEX_FREE(&uPool->mutex);
  out_err0:
    free(uPool);

    return NULL;
}
    690