/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * \file
 *
 * Helper library for carving out smaller allocations (called "(slab) entries")
 * from larger buffers (called "slabs").
 *
 * The library supports maintaining separate heaps (e.g. VRAM vs. GTT). The
 * meaning of each heap is treated as opaque by this library.
 *
 * The library allows delaying the re-use of an entry, i.e. an entry may be
 * freed by calling \ref pb_slab_free even while the corresponding buffer
 * region is still in use by the GPU. A callback function is called to
 * determine when it is safe to allocate the entry again; the user of this
 * library is expected to maintain the required fences or similar
 * synchronization objects.
 */

#ifndef PB_SLAB_H
#define PB_SLAB_H

#include "pb_buffer.h"
#include "util/list.h"
#include "os/os_thread.h"

struct pb_slab;
struct pb_slabs;
struct pb_slab_group;

/* Descriptor of a slab entry.
 *
 * The user of this utility library is expected to embed this in a larger
 * structure that describes a buffer object.
 */
struct pb_slab_entry
{
   struct list_head head;
   struct pb_slab *slab; /* the slab that contains this buffer */
   unsigned group_index; /* index into pb_slabs::groups */
};
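
/* For illustration, a minimal sketch of how a driver might embed
 * pb_slab_entry. The my_entry struct and its fence field are hypothetical,
 * not part of this library:
 *
 *    struct my_entry {
 *       struct pb_slab_entry base; // first member, so casts work both ways
 *       struct my_fence *fence;    // signaled when the GPU is done with it
 *       unsigned offset;           // byte offset of the entry in its slab
 *    };
 */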

/* Descriptor of a slab from which many entries are carved out.
 *
 * The user of this utility library is expected to embed this in a larger
 * structure that describes a buffer object.
 */
struct pb_slab
{
   struct list_head head;

   struct list_head free; /* list of free pb_slab_entry structures */
   unsigned num_free; /* number of entries in free list */
   unsigned num_entries; /* total number of entries */
};
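
/* For illustration, a hypothetical slab wrapper matching the my_entry sketch
 * above; a single underlying buffer backs all entries of the slab:
 *
 *    struct my_slab {
 *       struct pb_slab base;       // embedded, as with entries
 *       struct my_buffer *buffer;  // hypothetical backing buffer object
 *       struct my_entry *entries;  // one descriptor per carved-out entry
 *    };
 */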

/* Callback function that is called when a new slab must be allocated to
 * fulfill allocation requests of the given size from the given heap.
 *
 * The callback must allocate a pb_slab structure and the desired number
 * of entries. All entries that belong to the slab must be added to the free
 * list. Each entry's pb_slab_entry structure must be initialized with the
 * given group_index.
 *
 * The callback may call pb_slab functions.
 */
typedef struct pb_slab *(slab_alloc_fn)(void *priv,
                                        unsigned heap,
                                        unsigned entry_size,
                                        unsigned group_index);
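
/* A minimal sketch of a slab_alloc_fn, using the hypothetical my_slab and
 * my_entry types above. MY_SLAB_SIZE and my_buffer_create are assumptions,
 * and a real implementation would also handle allocation failures:
 *
 *    static struct pb_slab *
 *    my_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
 *                  unsigned group_index)
 *    {
 *       struct my_slab *slab = CALLOC_STRUCT(my_slab);
 *       unsigned num_entries = MY_SLAB_SIZE / entry_size;
 *       unsigned i;
 *
 *       slab->buffer = my_buffer_create(priv, heap, MY_SLAB_SIZE);
 *       slab->entries = CALLOC(num_entries, sizeof(*slab->entries));
 *
 *       list_inithead(&slab->base.free);
 *       slab->base.num_entries = num_entries;
 *       slab->base.num_free = num_entries;
 *
 *       for (i = 0; i < num_entries; ++i) {
 *          struct my_entry *entry = &slab->entries[i];
 *          entry->base.slab = &slab->base;
 *          entry->base.group_index = group_index;
 *          entry->offset = i * entry_size;
 *          list_addtail(&entry->base.head, &slab->base.free);
 *       }
 *       return &slab->base;
 *    }
 */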

/* Callback function that is called when all entries of a slab have been freed.
 *
 * The callback must free the slab and all its entries. It must not call any of
 * the pb_slab functions, or a deadlock (recursive mutex lock) may occur.
 */
typedef void (slab_free_fn)(void *priv, struct pb_slab *);
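
/* A matching sketch for slab_free_fn, again using the hypothetical types
 * above; note that it releases resources directly instead of calling back
 * into pb_slab functions:
 *
 *    static void
 *    my_slab_free(void *priv, struct pb_slab *pslab)
 *    {
 *       struct my_slab *slab = (struct my_slab *)pslab;
 *
 *       my_buffer_destroy(slab->buffer); // hypothetical
 *       FREE(slab->entries);
 *       FREE(slab);
 *    }
 */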

/* Callback function to determine whether a given entry can already be reused.
 */
typedef bool (slab_can_reclaim_fn)(void *priv, struct pb_slab_entry *);
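
/* A minimal can_reclaim sketch, assuming the hypothetical fence stored in
 * my_entry above is signaled once the GPU has stopped using the buffer
 * region:
 *
 *    static bool
 *    my_can_reclaim(void *priv, struct pb_slab_entry *pentry)
 *    {
 *       struct my_entry *entry = (struct my_entry *)pentry;
 *       return my_fence_is_signaled(entry->fence); // hypothetical check
 *    }
 */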

/* Manager of slab allocations. The user of this utility library should embed
 * this in a structure somewhere and call pb_slabs_init/deinit at init/shutdown
 * time.
 */
struct pb_slabs
{
   pipe_mutex mutex;

   unsigned min_order;
   unsigned num_orders;
   unsigned num_heaps;

   /* One group per (heap, order) pair. */
   struct pb_slab_group *groups;

   /* List of entries waiting to be reclaimed, i.e. they have been passed to
    * pb_slab_free, but may not be safe for re-use yet. The tail points at
    * the most-recently freed entry.
    */
   struct list_head reclaim;

   void *priv;
   slab_can_reclaim_fn *can_reclaim;
   slab_alloc_fn *slab_alloc;
   slab_free_fn *slab_free;
};

struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);

void
pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry);

void
pb_slabs_reclaim(struct pb_slabs *slabs);

bool
pb_slabs_init(struct pb_slabs *slabs,
              unsigned min_order, unsigned max_order,
              unsigned num_heaps,
              void *priv,
              slab_can_reclaim_fn *can_reclaim,
              slab_alloc_fn *slab_alloc,
              slab_free_fn *slab_free);
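
/* A sketch of typical usage, tying together the hypothetical callbacks
 * above. Orders are log2 of the entry size, so min_order 6 and max_order 16
 * would cover entries from 64 bytes to 64 KB; the exact values, and the
 * winsys pointer passed as priv, are just examples:
 *
 *    struct pb_slabs slabs;
 *
 *    if (!pb_slabs_init(&slabs, 6, 16, 1, winsys,
 *                       my_can_reclaim, my_slab_alloc, my_slab_free))
 *       return false;
 *
 *    struct pb_slab_entry *entry = pb_slab_alloc(&slabs, 4096, 0);
 *    ...
 *    pb_slab_free(&slabs, entry); // reused once my_can_reclaim returns true
 *
 *    pb_slabs_deinit(&slabs);
 */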

void
pb_slabs_deinit(struct pb_slabs *slabs);

#endif