/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#ifndef AMDGPU_BO_H
#define AMDGPU_BO_H

#include "amdgpu_winsys.h"

#include "pipebuffer/pb_slab.h"

struct amdgpu_sparse_backing_chunk;

/*
 * Sub-allocation information for a real buffer used as backing memory of a
 * sparse buffer.
 */
struct amdgpu_sparse_backing {
   struct list_head list;

   struct amdgpu_winsys_bo *bo;

   /* Sorted list of free chunks. */
   struct amdgpu_sparse_backing_chunk *chunks;
   uint32_t max_chunks;
   uint32_t num_chunks;
};

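/*
 * Commitment state for one page of a sparse buffer's virtual address range:
 * the backing buffer that provides the memory and the page index within it
 * (backing is presumably NULL while the page is uncommitted).
 */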
struct amdgpu_sparse_commitment {
   struct amdgpu_sparse_backing *backing;
   uint32_t page;
};

struct amdgpu_winsys_bo {
   struct pb_buffer base;
   union {
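      /* State of a standalone ("real") buffer that owns its own kernel BO. */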
      struct {
         struct pb_cache_entry cache_entry;

         amdgpu_va_handle va_handle;
         int map_count;
         bool use_reusable_pool;

         struct list_head global_list_item;
      } real;
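      /* State of an entry sub-allocated from a larger "real" buffer via the
       * pb_slab allocator. */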
      struct {
         struct pb_slab_entry entry;
         struct amdgpu_winsys_bo *real;
      } slab;
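      /* State of a sparse buffer: a large virtual address range whose pages
       * are committed to backing buffers on demand. */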
      struct {
         simple_mtx_t commit_lock;
         amdgpu_va_handle va_handle;
         enum radeon_bo_flag flags;

         uint32_t num_va_pages;
         uint32_t num_backing_pages;

         struct list_head backing;

         /* Commitment information for each page of the virtual memory area. */
         struct amdgpu_sparse_commitment *commitments;
      } sparse;
   } u;

   struct amdgpu_winsys *ws;
   void *user_ptr; /* from buffer_from_ptr */

   amdgpu_bo_handle bo; /* NULL for slab entries and sparse buffers */
   bool sparse;
   uint32_t unique_id;
   uint64_t va;
   enum radeon_bo_domain initial_domain;

   /* In how many command streams is this BO referenced? */
   int num_cs_references;

   /* In how many command streams that are being emitted in a separate
    * thread is this BO referenced? */
   volatile int num_active_ioctls;

   /* Whether buffer_get_handle or buffer_from_handle has been called;
    * it can only transition from false to true.
    */
   volatile int is_shared; /* bool (int for atomicity) */

   /* Fences for buffer synchronization. */
   unsigned num_fences;
   unsigned max_fences;
   struct pipe_fence_handle **fences;

   bool is_local;
};

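/*
 * A slab that owns one real buffer and sub-allocates it into smaller entries;
 * "entries" is the array of amdgpu_winsys_bo structs handed out through the
 * pb_slab machinery.
 */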
struct amdgpu_slab {
   struct pb_slab base;
   struct amdgpu_winsys_bo *buffer;
   struct amdgpu_winsys_bo *entries;
};

bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf);
void amdgpu_bo_destroy(struct pb_buffer *_buf);
void amdgpu_bo_init_functions(struct amdgpu_winsys *ws);

bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size,
                                     unsigned group_index);
void amdgpu_bo_slab_free(void *priv, struct pb_slab *slab);

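/* Cast helpers. The downcasts are safe because amdgpu_winsys_bo embeds
 * pb_buffer, and amdgpu_slab embeds pb_slab, as their first member. */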
static inline
struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
{
   return (struct amdgpu_winsys_bo *)bo;
}

static inline
struct amdgpu_slab *amdgpu_slab(struct pb_slab *slab)
{
   return (struct amdgpu_slab *)slab;
}

static inline
void amdgpu_winsys_bo_reference(struct amdgpu_winsys_bo **dst,
                                struct amdgpu_winsys_bo *src)
{
   pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
}

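/*
 * Example (hypothetical caller code): amdgpu_winsys_bo_reference follows
 * pb_reference() semantics, releasing any reference previously held in *dst
 * and then referencing src:
 *
 *    struct amdgpu_winsys_bo *ref = NULL;
 *    amdgpu_winsys_bo_reference(&ref, bo);   // take a reference to bo
 *    ...
 *    amdgpu_winsys_bo_reference(&ref, NULL); // drop the reference
 */
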
#endif