/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "pb_slab.h"

#include "util/u_math.h"
#include "util/u_memory.h"

/* All slab allocations from the same heap and with the same size belong
 * to the same group.
 */
struct pb_slab_group
{
   /* Slabs with allocation candidates. Typically, slabs in this list should
    * have some free entries.
    *
    * However, when the head becomes full we purposefully keep it around
    * until the next allocation attempt, at which time we try a reclaim.
    * The intention is to keep serving allocations from the same slab as long
    * as possible for better locality.
    *
    * Due to a race in new slab allocation, additional slabs in this list
    * can be fully allocated as well.
    */
   struct list_head slabs;
};
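/* Worked example (illustrative; the concrete numbers are assumed, not taken
 * from any particular driver): a request is rounded up to a power of two and
 * the resulting order is combined with the heap index, exactly as
 * pb_slab_alloc does below:
 *
 *    group_index = heap * num_orders + (order - min_order)
 *
 * With min_order = 8 and max_order = 12 (so num_orders = 5), a 1000-byte
 * request rounds up to order 10 (2^10 = 1024 >= 1000); from heap 1 it maps
 * to group_index = 1 * 5 + (10 - 8) = 7.
 */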


static void
pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   struct pb_slab *slab = entry->slab;

   LIST_DEL(&entry->head); /* remove from reclaim list */
   LIST_ADD(&entry->head, &slab->free);
   slab->num_free++;

   /* Add slab to the group's list if it isn't already linked. */
   if (!slab->head.next) {
      struct pb_slab_group *group = &slabs->groups[entry->group_index];
      LIST_ADDTAIL(&slab->head, &group->slabs);
   }

   if (slab->num_free >= slab->num_entries) {
      LIST_DEL(&slab->head);
      slabs->slab_free(slabs->priv, slab);
   }
}

static void
pb_slabs_reclaim_locked(struct pb_slabs *slabs)
{
   while (!LIST_IS_EMPTY(&slabs->reclaim)) {
      struct pb_slab_entry *entry =
         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);

      if (!slabs->can_reclaim(slabs->priv, entry))
         break;

      pb_slab_reclaim(slabs, entry);
   }
}

/* Allocate a slab entry of the given size from the given heap.
 *
 * This will try to re-use entries that have previously been freed. However,
 * if no entries are free (or all free entries are still "in flight" as
 * determined by the can_reclaim callback function), a new slab will be
 * requested via the slab_alloc callback.
 *
 * Note that slab_free can also be called by this function.
 */
struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
{
   unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
   unsigned group_index;
   struct pb_slab_group *group;
   struct pb_slab *slab;
   struct pb_slab_entry *entry;

   assert(order < slabs->min_order + slabs->num_orders);
   assert(heap < slabs->num_heaps);

   group_index = heap * slabs->num_orders + (order - slabs->min_order);
   group = &slabs->groups[group_index];

   pipe_mutex_lock(slabs->mutex);

   /* If there is no candidate slab at all, or the first slab has no free
    * entries, try reclaiming entries.
    */
   if (LIST_IS_EMPTY(&group->slabs) ||
       LIST_IS_EMPTY(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
      pb_slabs_reclaim_locked(slabs);

   /* Remove slabs without free entries. */
   while (!LIST_IS_EMPTY(&group->slabs)) {
      slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
      if (!LIST_IS_EMPTY(&slab->free))
         break;

      LIST_DEL(&slab->head);
   }

   if (LIST_IS_EMPTY(&group->slabs)) {
      /* Drop the mutex temporarily to prevent a deadlock where the allocation
       * calls back into slab functions (most likely to happen for
       * pb_slab_reclaim if memory is low).
       *
       * There's a chance that racing threads will end up allocating multiple
       * slabs for the same group, but that doesn't hurt correctness.
       */
      pipe_mutex_unlock(slabs->mutex);
      slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
      if (!slab)
         return NULL;
      pipe_mutex_lock(slabs->mutex);

      LIST_ADD(&slab->head, &group->slabs);
   }

   entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
   LIST_DEL(&entry->head);
   slab->num_free--;

   pipe_mutex_unlock(slabs->mutex);

   return entry;
}
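/* Usage sketch for pb_slab_alloc (illustrative; "ws" and "bo_slabs" are
 * hypothetical names, and order 12 is assumed to lie within this manager's
 * configured [min_order, max_order] range). A 4000-byte request is rounded
 * up to the next power of two (util_logbase2_ceil(4000) == 12), so it is
 * served from a slab whose entries are 4096 bytes:
 *
 *    struct pb_slab_entry *entry = pb_slab_alloc(&ws->bo_slabs, 4000, 0);
 *    if (!entry)
 *       return NULL;
 *
 * The returned entry belongs to the caller until it is handed back through
 * pb_slab_free below.
 */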

/* Free the given slab entry.
 *
 * The entry may still be in use, e.g. by in-flight command submissions. The
 * can_reclaim callback function will be called to determine whether the entry
 * can be handed out again by pb_slab_alloc.
 */
void
pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   pipe_mutex_lock(slabs->mutex);
   LIST_ADDTAIL(&entry->head, &slabs->reclaim);
   pipe_mutex_unlock(slabs->mutex);
}
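/* Sketch of the free/reclaim handshake (illustrative; struct my_buffer, its
 * embedded entry, the fence field and the helpers shown are hypothetical,
 * and the exact callback typedef is the one declared in pb_slab.h):
 *
 *    static bool my_can_reclaim(void *priv, struct pb_slab_entry *entry)
 *    {
 *       struct my_buffer *buf = my_buffer_from_entry(entry);
 *       return my_fence_is_signalled(buf->fence);
 *    }
 *
 *    ...
 *    pb_slab_free(&ws->bo_slabs, &buf->entry);
 *
 * pb_slab_free only queues the entry on the reclaim list; it is handed out
 * again by pb_slab_alloc once my_can_reclaim returns true for it.
 */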

/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
 *
 * This may end up freeing some slabs and is therefore useful for reclaiming
 * memory that is no longer used. However, calling this function is not
 * strictly required, since pb_slab_alloc will eventually do the same thing.
 */
void
pb_slabs_reclaim(struct pb_slabs *slabs)
{
   pipe_mutex_lock(slabs->mutex);
   pb_slabs_reclaim_locked(slabs);
   pipe_mutex_unlock(slabs->mutex);
}
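/* Illustrative call site (an assumption about typical use, not something this
 * file requires): because pb_slab_alloc reclaims lazily on its own, an
 * explicit call is purely a memory-pressure optimization, e.g. once per frame
 * or per flush:
 *
 *    pb_slabs_reclaim(&ws->bo_slabs);
 */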

/* Initialize the slabs manager.
 *
 * The minimum and maximum size of slab entries are 2^min_order and
 * 2^max_order, respectively.
 *
 * priv will be passed to the given callback functions.
 */
bool
pb_slabs_init(struct pb_slabs *slabs,
              unsigned min_order, unsigned max_order,
              unsigned num_heaps,
              void *priv,
              slab_can_reclaim_fn *can_reclaim,
              slab_alloc_fn *slab_alloc,
              slab_free_fn *slab_free)
{
   unsigned num_groups;
   unsigned i;

   assert(min_order <= max_order);
   assert(max_order < sizeof(unsigned) * 8 - 1);

   slabs->min_order = min_order;
   slabs->num_orders = max_order - min_order + 1;
   slabs->num_heaps = num_heaps;

   slabs->priv = priv;
   slabs->can_reclaim = can_reclaim;
   slabs->slab_alloc = slab_alloc;
   slabs->slab_free = slab_free;

   LIST_INITHEAD(&slabs->reclaim);

   num_groups = slabs->num_orders * slabs->num_heaps;
   slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
   if (!slabs->groups)
      return false;

   for (i = 0; i < num_groups; ++i) {
      struct pb_slab_group *group = &slabs->groups[i];
      LIST_INITHEAD(&group->slabs);
   }

   pipe_mutex_init(slabs->mutex);

   return true;
}
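/* Initialization sketch (illustrative; "ws", "bo_slabs" and the my_* callback
 * names are hypothetical, and the callbacks must match the typedefs declared
 * in pb_slab.h). This configures entry sizes from 2^8 = 256 bytes up to
 * 2^16 = 64 KiB (min_order = 8, max_order = 16) across two heaps, with "ws"
 * passed back to the callbacks as priv:
 *
 *    if (!pb_slabs_init(&ws->bo_slabs, 8, 16, 2, ws,
 *                       my_can_reclaim, my_slab_alloc, my_slab_free))
 *       return false;
 */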

/* Shut down the slab manager.
 *
 * This will free all allocated slabs and internal structures, even if some
 * of the slab entries are still in flight (i.e. if can_reclaim would return
 * false).
 */
void
pb_slabs_deinit(struct pb_slabs *slabs)
{
   /* Reclaim all slab entries (even those that are still in flight). This
    * implicitly calls slab_free for everything.
    */
   while (!LIST_IS_EMPTY(&slabs->reclaim)) {
      struct pb_slab_entry *entry =
         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
      pb_slab_reclaim(slabs, entry);
   }

   FREE(slabs->groups);
   pipe_mutex_destroy(slabs->mutex);
}
    253