/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**
 * This file contains common screen and context structures and functions
 * for r600g and radeonsi.
 */

#ifndef R600_PIPE_COMMON_H
#define R600_PIPE_COMMON_H

#include <stdio.h>

#include "amd/common/ac_binary.h"

#include "radeon/radeon_winsys.h"

#include "util/disk_cache.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/u_range.h"
#include "util/slab.h"
#include "util/u_suballoc.h"
#include "util/u_transfer.h"
#include "util/u_threaded_context.h"

struct u_log_context;
struct si_screen;
struct si_context;

#define R600_RESOURCE_FLAG_TRANSFER	(PIPE_RESOURCE_FLAG_DRV_PRIV << 0)
#define R600_RESOURCE_FLAG_FLUSHED_DEPTH	(PIPE_RESOURCE_FLAG_DRV_PRIV << 1)
#define R600_RESOURCE_FLAG_FORCE_TILING	(PIPE_RESOURCE_FLAG_DRV_PRIV << 2)
#define R600_RESOURCE_FLAG_DISABLE_DCC	(PIPE_RESOURCE_FLAG_DRV_PRIV << 3)
#define R600_RESOURCE_FLAG_UNMAPPABLE	(PIPE_RESOURCE_FLAG_DRV_PRIV << 4)
#define R600_RESOURCE_FLAG_READ_ONLY	(PIPE_RESOURCE_FLAG_DRV_PRIV << 5)
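/* These driver-private flags travel in pipe_resource::flags. A minimal
 * illustrative check (the surrounding variable names are hypothetical,
 * not the exact driver code):
 *
 *    if (res->b.b.flags & R600_RESOURCE_FLAG_UNMAPPABLE)
 *        res->flags |= RADEON_FLAG_NO_CPU_ACCESS;
 */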
/* Debug flags. */
enum {
	/* Shader logging options: */
	DBG_VS = PIPE_SHADER_VERTEX,
	DBG_PS = PIPE_SHADER_FRAGMENT,
	DBG_GS = PIPE_SHADER_GEOMETRY,
	DBG_TCS = PIPE_SHADER_TESS_CTRL,
	DBG_TES = PIPE_SHADER_TESS_EVAL,
	DBG_CS = PIPE_SHADER_COMPUTE,
	DBG_NO_IR,
	DBG_NO_TGSI,
	DBG_NO_ASM,
	DBG_PREOPT_IR,

	/* Shader compiler options the shader cache should be aware of: */
	DBG_FS_CORRECT_DERIVS_AFTER_KILL,
	DBG_UNSAFE_MATH,
	DBG_SI_SCHED,

	/* Shader compiler options (with no effect on the shader cache): */
	DBG_CHECK_IR,
	DBG_PRECOMPILE,
	DBG_NIR,
	DBG_MONOLITHIC_SHADERS,
	DBG_NO_OPT_VARIANT,

	/* Information logging options: */
	DBG_INFO,
	DBG_TEX,
	DBG_COMPUTE,
	DBG_VM,

	/* Driver options: */
	DBG_FORCE_DMA,
	DBG_NO_ASYNC_DMA,
	DBG_NO_WC,
	DBG_CHECK_VM,
	DBG_RESERVE_VMID,

	/* 3D engine options: */
	DBG_SWITCH_ON_EOP,
	DBG_NO_OUT_OF_ORDER,
	DBG_NO_DPBB,
	DBG_NO_DFSM,
	DBG_DPBB,
	DBG_DFSM,
	DBG_NO_HYPERZ,
	DBG_NO_RB_PLUS,
	DBG_NO_2D_TILING,
	DBG_NO_TILING,
	DBG_NO_DCC,
	DBG_NO_DCC_CLEAR,
	DBG_NO_DCC_FB,
	DBG_NO_DCC_MSAA,
	DBG_DCC_MSAA,

	/* Tests: */
	DBG_TEST_DMA,
	DBG_TEST_VMFAULT_CP,
	DBG_TEST_VMFAULT_SDMA,
	DBG_TEST_VMFAULT_SHADER,
};

#define DBG_ALL_SHADERS	(((1 << (DBG_CS + 1)) - 1))
#define DBG(name)	(1ull << DBG_##name)
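/* The enum values are bit indices into a 64-bit debug mask, hence the 1ull
 * shift in DBG(). A typical check against the screen's debug mask looks
 * like this (illustrative; the field name on the screen is an assumption):
 *
 *    if (sscreen->debug_flags & DBG(NO_HYPERZ))
 *        ...skip allocating HTILE...
 *
 * DBG_ALL_SHADERS covers DBG_VS..DBG_CS, which alias the PIPE_SHADER_*
 * values so that a shader type can be used directly as a logging bit.
 */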
#define R600_MAP_BUFFER_ALIGNMENT 64

#define SI_MAX_VARIABLE_THREADS_PER_BLOCK 1024

struct r600_common_context;
struct r600_perfcounters;
struct tgsi_shader_info;
struct r600_qbo_state;

/* Only 32-bit buffer allocations are supported; gallium doesn't support
 * more at the moment.
 */
struct r600_resource {
	struct threaded_resource b;

	/* Winsys objects. */
	struct pb_buffer *buf;
	uint64_t gpu_address;
	/* Memory usage if the buffer placement is optimal. */
	uint64_t vram_usage;
	uint64_t gart_usage;

	/* Resource properties. */
	uint64_t bo_size;
	unsigned bo_alignment;
	enum radeon_bo_domain domains;
	enum radeon_bo_flag flags;
	unsigned bind_history;
	int max_forced_staging_uploads;

	/* The buffer range which is initialized (with a write transfer,
	 * streamout, DMA, or as a random access target). The rest of
	 * the buffer is considered invalid and can be mapped unsynchronized.
	 *
	 * This allows unsynchronized mapping of a buffer range which hasn't
	 * been used yet. It's for applications which forget to use
	 * the unsynchronized map flag and expect the driver to figure it out.
	 */
	struct util_range valid_buffer_range;

	/* For buffers only. This indicates that a write operation has been
	 * performed by TC L2, but the cache hasn't been flushed.
	 * Any hw block which doesn't use or bypasses TC L2 should check this
	 * flag and flush the cache before using the buffer.
	 *
	 * For example, TC L2 must be flushed if a buffer which has been
	 * modified by a shader store instruction is about to be used as
	 * an index buffer. The reason is that VGT DMA index fetching doesn't
	 * use TC L2.
	 */
	bool TC_L2_dirty;

	/* Whether the resource has been exported via resource_get_handle. */
	unsigned external_usage; /* PIPE_HANDLE_USAGE_* */

	/* Whether this resource is referenced by bindless handles. */
	bool texture_handle_allocated;
	bool image_handle_allocated;
};
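/* A sketch of how TC_L2_dirty is consumed, modeled on the index-buffer
 * path described above (illustrative; the exact flush flag name belongs
 * to the driver):
 *
 *    if (rbuffer->TC_L2_dirty) {
 *        sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
 *        rbuffer->TC_L2_dirty = false;
 *    }
 *
 * i.e. before VGT DMA fetches indices from the buffer, a TC L2 writeback
 * is queued and the flag is cleared.
 */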
struct r600_transfer {
	struct threaded_transfer b;
	struct r600_resource *staging;
	unsigned offset;
};

struct r600_fmask_info {
	uint64_t offset;
	uint64_t size;
	unsigned alignment;
	unsigned pitch_in_pixels;
	unsigned bank_height;
	unsigned slice_tile_max;
	unsigned tile_mode_index;
	unsigned tile_swizzle;
};

struct r600_cmask_info {
	uint64_t offset;
	uint64_t size;
	unsigned alignment;
	unsigned slice_tile_max;
	uint64_t base_address_reg;
};

struct r600_texture {
	struct r600_resource resource;

	struct radeon_surf surface;
	uint64_t size;
	struct r600_texture *flushed_depth_texture;

	/* Colorbuffer compression and fast clear. */
	struct r600_fmask_info fmask;
	struct r600_cmask_info cmask;
	struct r600_resource *cmask_buffer;
	uint64_t dcc_offset; /* 0 = disabled */
	unsigned cb_color_info; /* fast clear enable bit */
	unsigned color_clear_value[2];
	unsigned last_msaa_resolve_target_micro_mode;
	unsigned num_level0_transfers;

	/* Depth buffer compression and fast clear. */
	uint64_t htile_offset;
	float depth_clear_value;
	uint16_t dirty_level_mask; /* each bit says if that mipmap is compressed */
	uint16_t stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
	enum pipe_format db_render_format:16;
	uint8_t stencil_clear_value;
	bool tc_compatible_htile:1;
	bool depth_cleared:1; /* if it was cleared at least once */
	bool stencil_cleared:1; /* if it was cleared at least once */
	bool upgraded_depth:1; /* upgraded from unorm to Z32_FLOAT */
	bool is_depth:1;
	bool db_compatible:1;
	bool can_sample_z:1;
	bool can_sample_s:1;

	/* We need to track DCC dirtiness, because st/dri usually calls
	 * flush_resource twice per frame (not a bug) and we don't want to
	 * decompress DCC twice. Also, the dirty tracking must be done even
	 * if DCC isn't used, because it's required by the DCC usage analysis
	 * for a possible future enablement.
	 */
	bool separate_dcc_dirty:1;
	/* Statistics gathering for the DCC enablement heuristic. */
	bool dcc_gather_statistics:1;
	/* Counter that should be non-zero if the texture is bound to a
	 * framebuffer.
	 */
	unsigned framebuffers_bound;
	/* Whether the texture is a displayable back buffer and needs DCC
	 * decompression, which is expensive. Therefore, it's enabled only
	 * if statistics suggest that it will pay off and it's allocated
	 * separately. It can't be bound as a sampler by apps. Limited to
	 * target == 2D and last_level == 0. If enabled, dcc_offset contains
	 * the absolute GPUVM address, not the relative one.
	 */
	struct r600_resource *dcc_separate_buffer;
	/* When DCC is temporarily disabled, the separate buffer is here. */
	struct r600_resource *last_dcc_separate_buffer;
	/* Estimate of how much this color buffer is written to in units of
	 * full-screen draws: ps_invocations / (width * height)
	 * Shader kills, late Z, and blending with trivial discards make it
	 * inaccurate (we need to count CB updates, not PS invocations).
	 */
	unsigned ps_draw_ratio;
	/* The number of clears since the last DCC usage analysis. */
	unsigned num_slow_clears;
};
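/* Illustrative use of the dirty masks above: a level needs depth
 * decompression before it can be sampled iff its bit is set, e.g.
 *
 *    if (rtex->dirty_level_mask & (1 << level))
 *        ...decompress that level via blit_decompress_depth...
 *
 * DCC state is checked differently; see vi_dcc_enabled() at the end of
 * this file (dcc_offset != 0 and level < surface.num_dcc_levels).
 */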
struct r600_surface {
	struct pipe_surface base;

	/* These can vary with block-compressed textures. */
	uint16_t width0;
	uint16_t height0;

	bool color_initialized:1;
	bool depth_initialized:1;

	/* Misc. color flags. */
	bool color_is_int8:1;
	bool color_is_int10:1;
	bool dcc_incompatible:1;

	/* Color registers. */
	unsigned cb_color_info;
	unsigned cb_color_view;
	unsigned cb_color_attrib;
	unsigned cb_color_attrib2;	/* GFX9 and later */
	unsigned cb_dcc_control;	/* VI and later */
	unsigned spi_shader_col_format:8;	/* no blending, no alpha-to-coverage. */
	unsigned spi_shader_col_format_alpha:8;	/* alpha-to-coverage */
	unsigned spi_shader_col_format_blend:8;	/* blending without alpha. */
	unsigned spi_shader_col_format_blend_alpha:8; /* blending with alpha. */

	/* DB registers. */
	uint64_t db_depth_base;		/* DB_Z_READ/WRITE_BASE */
	uint64_t db_stencil_base;
	uint64_t db_htile_data_base;
	unsigned db_depth_info;
	unsigned db_z_info;
	unsigned db_z_info2;		/* GFX9+ */
	unsigned db_depth_view;
	unsigned db_depth_size;
	unsigned db_depth_slice;
	unsigned db_stencil_info;
	unsigned db_stencil_info2;	/* GFX9+ */
	unsigned db_htile_surface;
};

struct r600_mmio_counter {
	unsigned busy;
	unsigned idle;
};

union r600_mmio_counters {
	struct {
		/* For global GPU load including SDMA. */
		struct r600_mmio_counter gpu;

		/* GRBM_STATUS */
		struct r600_mmio_counter spi;
		struct r600_mmio_counter gui;
		struct r600_mmio_counter ta;
		struct r600_mmio_counter gds;
		struct r600_mmio_counter vgt;
		struct r600_mmio_counter ia;
		struct r600_mmio_counter sx;
		struct r600_mmio_counter wd;
		struct r600_mmio_counter bci;
		struct r600_mmio_counter sc;
		struct r600_mmio_counter pa;
		struct r600_mmio_counter db;
		struct r600_mmio_counter cp;
		struct r600_mmio_counter cb;

		/* SRBM_STATUS2 */
		struct r600_mmio_counter sdma;

		/* CP_STAT */
		struct r600_mmio_counter pfp;
		struct r600_mmio_counter meq;
		struct r600_mmio_counter me;
		struct r600_mmio_counter surf_sync;
		struct r600_mmio_counter cp_dma;
		struct r600_mmio_counter scratch_ram;
	} named;
	unsigned array[0];
};
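/* The zero-length 'array' member aliases the named counters, so generic
 * code can walk all busy/idle pairs without naming them. A sketch:
 *
 *    union r600_mmio_counters c;
 *    for (unsigned i = 0; i < sizeof(c.named) / sizeof(unsigned); i += 2)
 *        ...c.array[i] is a busy count, c.array[i + 1] the matching idle count...
 */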
struct r600_memory_object {
	struct pipe_memory_object b;
	struct pb_buffer *buf;
	uint32_t stride;
	uint32_t offset;
};

/* This encapsulates a state or an operation which can be emitted into the
 * GPU command stream. */
struct r600_atom {
	void (*emit)(struct r600_common_context *ctx, struct r600_atom *state);
	unsigned short id;
};

struct r600_ring {
	struct radeon_winsys_cs *cs;
	void (*flush)(void *ctx, unsigned flags,
		      struct pipe_fence_handle **fence);
};

/* Saved CS data for debugging features. */
struct radeon_saved_cs {
	uint32_t *ib;
	unsigned num_dw;

	struct radeon_bo_list_item *bo_list;
	unsigned bo_count;
};

struct r600_common_context {
	struct pipe_context b; /* base class */

	struct si_screen *screen;
	struct radeon_winsys *ws;
	struct radeon_winsys_ctx *ctx;
	enum radeon_family family;
	enum chip_class chip_class;
	struct r600_ring gfx;
	struct r600_ring dma;
	struct pipe_fence_handle *last_gfx_fence;
	struct pipe_fence_handle *last_sdma_fence;
	struct r600_resource *eop_bug_scratch;
	struct u_upload_mgr *cached_gtt_allocator;
	unsigned num_gfx_cs_flushes;
	unsigned initial_gfx_cs_size;
	unsigned gpu_reset_counter;
	unsigned last_dirty_tex_counter;
	unsigned last_compressed_colortex_counter;
	unsigned last_num_draw_calls;

	struct threaded_context *tc;
	struct u_suballocator *allocator_zeroed_memory;
	struct slab_child_pool pool_transfers;
	struct slab_child_pool pool_transfers_unsync; /* for threaded_context */

	/* Current unaccounted memory usage. */
	uint64_t vram;
	uint64_t gtt;

	/* Additional context states. */
	unsigned flags; /* flush flags */

	/* Queries. */
	/* Maintain the list of active queries for pausing between IBs. */
	int num_occlusion_queries;
	int num_perfect_occlusion_queries;
	struct list_head active_queries;
	unsigned num_cs_dw_queries_suspend;
	/* Misc stats. */
	unsigned num_draw_calls;
	unsigned num_decompress_calls;
	unsigned num_mrt_draw_calls;
	unsigned num_prim_restart_calls;
	unsigned num_spill_draw_calls;
	unsigned num_compute_calls;
	unsigned num_spill_compute_calls;
	unsigned num_dma_calls;
	unsigned num_cp_dma_calls;
	unsigned num_vs_flushes;
	unsigned num_ps_flushes;
	unsigned num_cs_flushes;
	unsigned num_cb_cache_flushes;
	unsigned num_db_cache_flushes;
	unsigned num_L2_invalidates;
	unsigned num_L2_writebacks;
	unsigned num_resident_handles;
	uint64_t num_alloc_tex_transfer_bytes;
	unsigned last_tex_ps_draw_ratio; /* for query */

	/* Render condition. */
	struct r600_atom render_cond_atom;
	struct pipe_query *render_cond;
	unsigned render_cond_mode;
	bool render_cond_invert;
	bool render_cond_force_off; /* for u_blitter */

	/* Statistics gathering for the DCC enablement heuristic. It can't be
	 * in r600_texture because r600_texture can be shared by multiple
	 * contexts. This is for back buffers only. We shouldn't get too many
	 * of those.
	 *
	 * X11 DRI3 rotates among a finite set of back buffers. They should
	 * all fit in this array. If they don't, separate DCC might never be
	 * enabled by DCC stat gathering.
	 */
	struct {
		struct r600_texture *tex;
		/* Query queue: 0 = usually active, 1 = waiting, 2 = readback. */
		struct pipe_query *ps_stats[3];
		/* If all slots are used and another slot is needed,
		 * the least recently used slot is evicted based on this. */
		int64_t last_use_timestamp;
		bool query_active;
	} dcc_stats[5];

	struct pipe_device_reset_callback device_reset_callback;
	struct u_log_context *log;

	void *query_result_shader;

	/* Copy one resource to another using async DMA. */
	void (*dma_copy)(struct pipe_context *ctx,
			 struct pipe_resource *dst,
			 unsigned dst_level,
			 unsigned dst_x, unsigned dst_y, unsigned dst_z,
			 struct pipe_resource *src,
			 unsigned src_level,
			 const struct pipe_box *src_box);

	void (*dma_clear_buffer)(struct pipe_context *ctx, struct pipe_resource *dst,
				 uint64_t offset, uint64_t size, unsigned value);

	void (*blit_decompress_depth)(struct pipe_context *ctx,
				      struct r600_texture *texture,
				      struct r600_texture *staging,
				      unsigned first_level, unsigned last_level,
				      unsigned first_layer, unsigned last_layer,
				      unsigned first_sample, unsigned last_sample);

	void (*decompress_dcc)(struct pipe_context *ctx,
			       struct r600_texture *rtex);

	/* Reallocate the buffer and update all resource bindings where
	 * the buffer is bound, including all resource descriptors. */
	void (*invalidate_buffer)(struct pipe_context *ctx, struct pipe_resource *buf);

	/* Update all resource bindings where the buffer is bound, including
	 * all resource descriptors. This is invalidate_buffer without
	 * the invalidation. */
	void (*rebind_buffer)(struct pipe_context *ctx, struct pipe_resource *buf,
			      uint64_t old_gpu_address);

	/* Enable or disable occlusion queries. */
	void (*set_occlusion_query_state)(struct pipe_context *ctx,
					  bool old_enable,
					  bool old_perfect_enable);

	void (*save_qbo_state)(struct pipe_context *ctx, struct r600_qbo_state *st);

	/* This ensures there is enough space in the command stream. */
	void (*need_gfx_cs_space)(struct pipe_context *ctx, unsigned num_dw,
				  bool include_draw_vbo);

	void (*set_atom_dirty)(struct r600_common_context *ctx,
			       struct r600_atom *atom, bool dirty);

	void (*check_vm_faults)(struct r600_common_context *ctx,
				struct radeon_saved_cs *saved,
				enum ring_type ring);
};
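/* Illustrative pattern for the hooks above (a sketch, not the exact driver
 * code): a state change marks an atom dirty through the context, and the
 * draw path later invokes its emit callback:
 *
 *    rctx->set_atom_dirty(rctx, &rctx->render_cond_atom, true);
 *    ...
 *    atom->emit(rctx, atom);
 */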
/* r600_buffer_common.c */
bool si_rings_is_buffer_referenced(struct r600_common_context *ctx,
				   struct pb_buffer *buf,
				   enum radeon_bo_usage usage);
void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				    struct r600_resource *resource,
				    unsigned usage);
void si_init_resource_fields(struct si_screen *sscreen,
			     struct r600_resource *res,
			     uint64_t size, unsigned alignment);
bool si_alloc_resource(struct si_screen *sscreen,
		       struct r600_resource *res);
struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen,
					       unsigned flags,
					       unsigned usage,
					       unsigned size,
					       unsigned alignment);
void si_replace_buffer_storage(struct pipe_context *ctx,
			       struct pipe_resource *dst,
			       struct pipe_resource *src);
void si_init_screen_buffer_functions(struct si_screen *sscreen);
void si_init_buffer_functions(struct si_context *sctx);

/* r600_common_pipe.c */
void si_gfx_write_event_eop(struct r600_common_context *ctx,
			    unsigned event, unsigned event_flags,
			    unsigned data_sel,
			    struct r600_resource *buf, uint64_t va,
			    uint32_t new_fence, unsigned query_type);
unsigned si_gfx_write_fence_dwords(struct si_screen *screen);
void si_gfx_wait_fence(struct r600_common_context *ctx,
		       uint64_t va, uint32_t ref, uint32_t mask);
bool si_common_context_init(struct r600_common_context *rctx,
			    struct si_screen *sscreen,
			    unsigned context_flags);
void si_common_context_cleanup(struct r600_common_context *rctx);
void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
			    uint64_t offset, uint64_t size, unsigned value);
void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
		       struct r600_resource *dst, struct r600_resource *src);
void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
		struct radeon_saved_cs *saved, bool get_buffer_list);
void si_clear_saved_cs(struct radeon_saved_cs *saved);
bool si_check_device_reset(struct r600_common_context *rctx);

/* r600_gpu_load.c */
void si_gpu_load_kill_thread(struct si_screen *sscreen);
uint64_t si_begin_counter(struct si_screen *sscreen, unsigned type);
unsigned si_end_counter(struct si_screen *sscreen, unsigned type,
			uint64_t begin);

/* r600_perfcounters.c */
void si_perfcounters_destroy(struct si_screen *sscreen);

/* r600_query.c */
void si_init_screen_query_functions(struct si_screen *sscreen);
void si_init_query_functions(struct r600_common_context *rctx);
void si_suspend_queries(struct r600_common_context *ctx);
void si_resume_queries(struct r600_common_context *ctx);

/* r600_texture.c */
bool si_prepare_for_dma_blit(struct r600_common_context *rctx,
			     struct r600_texture *rdst,
			     unsigned dst_level, unsigned dstx,
			     unsigned dsty, unsigned dstz,
			     struct r600_texture *rsrc,
			     unsigned src_level,
			     const struct pipe_box *src_box);
void si_texture_get_fmask_info(struct si_screen *sscreen,
			       struct r600_texture *rtex,
			       unsigned nr_samples,
			       struct r600_fmask_info *out);
void si_texture_get_cmask_info(struct si_screen *sscreen,
			       struct r600_texture *rtex,
			       struct r600_cmask_info *out);
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
				   struct pipe_resource *texture,
				   struct r600_texture **staging);
void si_print_texture_info(struct si_screen *sscreen,
			   struct r600_texture *rtex, struct u_log_context *log);
struct pipe_resource *si_texture_create(struct pipe_screen *screen,
					const struct pipe_resource *templ);
bool vi_dcc_formats_compatible(enum pipe_format format1,
			       enum pipe_format format2);
bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
				     unsigned level,
				     enum pipe_format view_format);
void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx,
					   struct pipe_resource *tex,
					   unsigned level,
					   enum pipe_format view_format);
struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
					      struct pipe_resource *texture,
					      const struct pipe_surface *templ,
					      unsigned width0, unsigned height0,
					      unsigned width, unsigned height);
unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap);
void vi_separate_dcc_try_enable(struct r600_common_context *rctx,
				struct r600_texture *tex);
void vi_separate_dcc_start_query(struct pipe_context *ctx,
				 struct r600_texture *tex);
void vi_separate_dcc_stop_query(struct pipe_context *ctx,
				struct r600_texture *tex);
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
					     struct r600_texture *tex);
bool si_texture_disable_dcc(struct r600_common_context *rctx,
			    struct r600_texture *rtex);
void si_init_screen_texture_functions(struct si_screen *sscreen);
void si_init_context_texture_functions(struct r600_common_context *rctx);
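/* Illustrative call pattern for the separate-DCC heuristic above (a sketch;
 * the real call sites live in the driver's flush and draw paths):
 *
 *    vi_separate_dcc_try_enable(rctx, tex);
 *    if (tex->dcc_gather_statistics)
 *        vi_separate_dcc_start_query(ctx, tex);
 *    ...render to tex...
 *    vi_separate_dcc_stop_query(ctx, tex);
 *    vi_separate_dcc_process_and_reset_stats(ctx, tex);
 */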
/* Inline helpers. */

static inline struct r600_resource *r600_resource(struct pipe_resource *r)
{
	return (struct r600_resource *)r;
}

static inline void
r600_resource_reference(struct r600_resource **ptr, struct r600_resource *res)
{
	pipe_resource_reference((struct pipe_resource **)ptr,
				(struct pipe_resource *)res);
}

static inline void
r600_texture_reference(struct r600_texture **ptr, struct r600_texture *res)
{
	pipe_resource_reference((struct pipe_resource **)ptr, &res->resource.b.b);
}

static inline bool
vi_dcc_enabled(struct r600_texture *tex, unsigned level)
{
	return tex->dcc_offset && level < tex->surface.num_dcc_levels;
}

#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - " fmt, __FILE__, __LINE__, __func__, ##args)

#endif