/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#ifndef AMDGPU_CS_H
#define AMDGPU_CS_H

#include "amdgpu_bo.h"
#include "util/u_memory.h"

struct amdgpu_ctx {
   struct amdgpu_winsys *ws;
   amdgpu_context_handle ctx;
   amdgpu_bo_handle user_fence_bo;           /* BO holding the user fence values */
   uint64_t *user_fence_cpu_address_base;    /* CPU mapping of user_fence_bo */
   int refcount;
};

struct amdgpu_cs_buffer {
   struct amdgpu_winsys_bo *bo;
   union {
      struct {
         uint64_t priority_usage;
      } real;
      struct {
         uint32_t real_idx; /* index of underlying real BO */
      } slab;
   } u;
   enum radeon_bo_usage usage;
};

enum ib_type {
   IB_CONST_PREAMBLE = 0,
   IB_CONST = 1, /* the const IB must precede the main IB */
   IB_MAIN = 2,
   IB_NUM
};

struct amdgpu_ib {
   struct radeon_winsys_cs base;

   /* A buffer out of which new IBs are allocated. */
   struct pb_buffer        *big_ib_buffer;
   uint8_t                 *ib_mapped;   /* CPU mapping of big_ib_buffer */
   unsigned                used_ib_space;
   unsigned                max_ib_size;
   uint32_t                *ptr_ib_size; /* where the IB size (in dwords) is patched at flush */
   enum ib_type            ib_type;
};

struct amdgpu_cs_context {
   struct amdgpu_cs_request    request;
   struct amdgpu_cs_ib_info    ib[IB_NUM];

   /* Buffers. */
   unsigned                    max_real_buffers;
   unsigned                    num_real_buffers;
   amdgpu_bo_handle            *handles;
   uint8_t                     *flags;
   struct amdgpu_cs_buffer     *real_buffers;

   unsigned                    num_slab_buffers;
   unsigned                    max_slab_buffers;
   struct amdgpu_cs_buffer     *slab_buffers;

   /* Maps a hash of a BO to its index in the buffer lists; -1 = not present.
    * This accelerates amdgpu_lookup_buffer. */
   int                         buffer_indices_hashlist[4096];

   unsigned                    max_dependencies;

   struct pipe_fence_handle    *fence;

   /* The error returned by cs_flush for non-async submissions. */
   int                         error_code;
};

struct amdgpu_cs {
   struct amdgpu_ib main; /* must be first because this is inherited */
   struct amdgpu_ib const_ib; /* optional constant engine IB */
   struct amdgpu_ib const_preamble_ib;
   struct amdgpu_ctx *ctx;
   enum ring_type ring_type;

   /* We flip between these two CS contexts. While one is being consumed
    * by the kernel in another thread, the other one is being filled
    * by the pipe driver. */
   struct amdgpu_cs_context csc1;
   struct amdgpu_cs_context csc2;
   /* The CS currently being filled by the driver. */
   struct amdgpu_cs_context *csc;
   /* The CS currently owned by the other (submission) thread. */
   struct amdgpu_cs_context *cst;

   /* The flush callback provided by the driver; flush_data is its argument. */
   void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
   void *flush_data;

   struct util_queue_fence flush_completed;
   struct pipe_fence_handle *next_fence;
};

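/* Illustrative sketch, not part of this header: at flush time the two
 * contexts trade places, so the driver can keep recording into one while
 * the submission thread consumes the other. The helper name below is
 * hypothetical; the real swap happens in the flush path, guarded by
 * flush_completed.
 *
 *    static void example_flip_cs_contexts(struct amdgpu_cs *cs)
 *    {
 *       // wait for the other thread to finish with "cst" first
 *       struct amdgpu_cs_context *tmp = cs->csc;
 *       cs->csc = cs->cst;
 *       cs->cst = tmp;
 *       // "cst" can now be handed to the submission thread
 *    }
 */
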
struct amdgpu_fence {
   struct pipe_reference reference;

   struct amdgpu_ctx *ctx;  /* submission context */
   struct amdgpu_cs_fence fence;
   uint64_t *user_fence_cpu_address;

   /* Set while the fence value is still unknown because its IB is being
    * submitted in the other thread. */
   volatile int submission_in_progress; /* bool (int for atomicity) */
   volatile int signalled;              /* bool (int for atomicity) */
};

static inline void amdgpu_ctx_unref(struct amdgpu_ctx *ctx)
{
   if (p_atomic_dec_zero(&ctx->refcount)) {
      amdgpu_cs_ctx_free(ctx->ctx);
      amdgpu_bo_free(ctx->user_fence_bo);
      FREE(ctx);
   }
}

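/* Usage sketch (an assumption, not spelled out in this file): holders of a
 * context pointer pair an atomic increment with amdgpu_ctx_unref():
 *
 *    p_atomic_inc(&ctx->refcount);   // take a reference
 *    ...
 *    amdgpu_ctx_unref(ctx);          // drop it; frees on the last release
 */
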
static inline void amdgpu_fence_reference(struct pipe_fence_handle **dst,
                                          struct pipe_fence_handle *src)
{
   struct amdgpu_fence **rdst = (struct amdgpu_fence **)dst;
   struct amdgpu_fence *rsrc = (struct amdgpu_fence *)src;

   /* pipe_reference returns true when the old *rdst must be destroyed. */
   if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
      amdgpu_ctx_unref((*rdst)->ctx);
      FREE(*rdst);
   }
   *rdst = rsrc;
}

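/* Usage sketch (an assumption, following the usual pipe_reference pattern):
 * assigning over *dst releases the old fence, and passing NULL as src
 * releases without retaining anything new:
 *
 *    amdgpu_fence_reference(&queued_fence, new_fence); // retain new, drop old
 *    amdgpu_fence_reference(&queued_fence, NULL);      // just drop
 */
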
/* Return the index of the buffer in the real or slab buffer list of "cs",
 * or -1 if the CS doesn't reference it. */
int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo);

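/* Minimal sketch of the lookup, assuming it uses buffer_indices_hashlist as
 * a first-probe cache (the hash function and field accesses here are
 * illustrative, not the actual implementation):
 *
 *    int i = cs->buffer_indices_hashlist[hash(bo) & 4095];
 *    if (i >= 0 && cs->real_buffers[i].bo == bo)
 *       return i;                       // cache hit
 *    // otherwise: linear search, then refresh the hashlist entry
 */
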
static inline struct amdgpu_ib *
amdgpu_ib(struct radeon_winsys_cs *base)
{
   return (struct amdgpu_ib *)base;
}

static inline struct amdgpu_cs *
amdgpu_cs(struct radeon_winsys_cs *base)
{
   assert(amdgpu_ib(base)->ib_type == IB_MAIN);
   return (struct amdgpu_cs*)base;
}

#define get_container(member_ptr, container_type, container_member) \
   (container_type *)((char *)(member_ptr) - offsetof(container_type, container_member))

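/* get_container is the usual container_of idiom: given a pointer to a
 * member, recover the enclosing struct by subtracting the member's offset.
 * For example:
 *
 *    struct amdgpu_ib *ib = &cs->const_ib;
 *    assert(get_container(ib, struct amdgpu_cs, const_ib) == cs);
 */
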
static inline struct amdgpu_cs *
amdgpu_cs_from_ib(struct amdgpu_ib *ib)
{
   switch (ib->ib_type) {
   case IB_MAIN:
      return get_container(ib, struct amdgpu_cs, main);
   case IB_CONST:
      return get_container(ib, struct amdgpu_cs, const_ib);
   case IB_CONST_PREAMBLE:
      return get_container(ib, struct amdgpu_cs, const_preamble_ib);
   default:
      unreachable("bad ib_type");
   }
}

static inline bool
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
                              struct amdgpu_winsys_bo *bo)
{
   /* Fast path: if the BO is referenced by all existing CSs, it must be
    * referenced by this one; otherwise do the (cached) lookup. */
   int num_refs = bo->num_cs_references;
   return num_refs == bo->ws->num_cs ||
         (num_refs && amdgpu_lookup_buffer(cs->csc, bo) != -1);
}

static inline bool
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
                                         struct amdgpu_winsys_bo *bo,
                                         enum radeon_bo_usage usage)
{
   int index;
   struct amdgpu_cs_buffer *buffer;

   if (!bo->num_cs_references)
      return false;

   index = amdgpu_lookup_buffer(cs->csc, bo);
   if (index == -1)
      return false;

   buffer = bo->bo ? &cs->csc->real_buffers[index]
                   : &cs->csc->slab_buffers[index];

   return (buffer->usage & usage) != 0;
}

static inline bool
amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
{
   return bo->num_cs_references != 0;
}

bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute);
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs);
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws);
void amdgpu_cs_submit_ib(void *job, int thread_index);

#endif