
Lines Matching refs:sctx

40 si_init_external_atom(struct si_context *sctx, struct r600_atom *atom,
43 atom->id = list_elem - sctx->atoms.array + 1;
48 void si_init_atom(struct si_context *sctx, struct r600_atom *atom,
53 atom->id = list_elem - sctx->atoms.array + 1; /* index+1 in the atom array */
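
The two helpers above register a state atom and derive its id from the atom's slot in the context's atom array (index + 1, so that id 0 can mean "no atom"). A minimal standalone sketch of that pattern, using hypothetical stand-in types rather than the real r600_atom/si_context definitions:

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical, simplified stand-ins for r600_atom / si_context. */
    struct my_atom {
        void (*emit)(void *ctx, struct my_atom *atom);
        unsigned short id;    /* index + 1 into the atom array, 0 = unused */
    };

    #define MY_NUM_ATOMS 4

    struct my_context {
        union {
            struct {
                struct my_atom *framebuffer;
                struct my_atom *blend_color;
                struct my_atom *clip_state;
                struct my_atom *stencil_ref;
            } s;
            struct my_atom *array[MY_NUM_ATOMS];
        } atoms;
    };

    /* Store the atom in its slot and derive the id from the slot position,
     * mirroring "atom->id = list_elem - sctx->atoms.array + 1". */
    static void my_init_atom(struct my_context *ctx, struct my_atom *atom,
                             struct my_atom **list_elem,
                             void (*emit)(void *, struct my_atom *))
    {
        atom->emit = emit;
        atom->id = (unsigned short)(list_elem - ctx->atoms.array + 1);
        *list_elem = atom;
    }

    static void dummy_emit(void *ctx, struct my_atom *atom) { (void)ctx; (void)atom; }

    int main(void)
    {
        struct my_context ctx = {0};
        struct my_atom blend_color;

        my_init_atom(&ctx, &blend_color, &ctx.atoms.s.blend_color, dummy_emit);
        assert(ctx.atoms.array[blend_color.id - 1] == &blend_color);
        printf("blend_color atom id = %u\n", blend_color.id);
        return 0;
    }
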
93 static void si_emit_cb_render_state(struct si_context *sctx, struct r600_atom *atom)
95 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
96 struct si_state_blend *blend = sctx->queued.named.blend;
99 uint32_t cb_target_mask = sctx->framebuffer.colorbuf_enabled_4bit;
112 sctx->ps_shader.cso &&
113 (sctx->ps_shader.cso->info.colors_written & 0x3) != 0x3)
119 if (sctx->b.family == CHIP_STONEY) {
121 sctx->ps_shader.cso ?
122 sctx->ps_shader.current->key.part.ps.epilog.spi_shader_col_format : 0;
127 for (i = 0; i < sctx->framebuffer.state.nr_cbufs; i++) {
129 (struct r600_surface*)sctx->framebuffer.state.cbufs[i];
243 if (sctx->screen->b.debug_flags & DBG_NO_RB_PLUS) {
407 struct si_context *sctx = (struct si_context*)ctx;
559 if (sctx->b.family == CHIP_STONEY) {
593 struct si_context *sctx = (struct si_context *)ctx;
594 si_pm4_bind_state(sctx, blend, (struct si_state_blend *)state);
595 si_mark_atom_dirty(sctx, &sctx->cb_render_state);
596 sctx->do_update_shaders = true;
601 struct si_context *sctx = (struct si_context *)ctx;
602 si_pm4_delete_state(sctx, blend, (struct si_state_blend *)state);
608 struct si_context *sctx = (struct si_context *)ctx;
610 if (memcmp(&sctx->blend_color.state, state, sizeof(*state)) == 0)
613 sctx->blend_color.state = *state;
614 si_mark_atom_dirty(sctx, &sctx->blend_color.atom);
617 static void si_emit_blend_color(struct si_context *sctx, struct r600_atom *atom)
619 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
622 radeon_emit_array(cs, (uint32_t*)sctx->blend_color.state.color, 4);
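
si_set_blend_color and si_emit_blend_color above show the usual set/emit split for an atom: the set callback early-outs on memcmp, copies the new state, and marks the atom dirty, while the emit callback later writes the four blend-constant dwords in a single register burst. A compilable sketch of that pattern with hypothetical types; the printf stands in for the real radeon_set_context_reg_seq/radeon_emit_array calls:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct blend_color_sketch { float color[4]; };

    struct ctx_sketch {
        struct blend_color_sketch blend_color;
        bool blend_color_atom_dirty;   /* stands in for si_mark_atom_dirty() */
    };

    /* "Set": skip redundant changes, otherwise copy and mark dirty so the
     * state is re-emitted with the next draw. */
    static void set_blend_color(struct ctx_sketch *ctx,
                                const struct blend_color_sketch *state)
    {
        if (memcmp(&ctx->blend_color, state, sizeof(*state)) == 0)
            return;

        ctx->blend_color = *state;
        ctx->blend_color_atom_dirty = true;
    }

    /* "Emit": in the driver this writes four consecutive dwords starting at
     * CB_BLEND_RED; here it just prints them. */
    static void emit_blend_color(struct ctx_sketch *ctx)
    {
        printf("emit blend constants: %f %f %f %f\n",
               ctx->blend_color.color[0], ctx->blend_color.color[1],
               ctx->blend_color.color[2], ctx->blend_color.color[3]);
        ctx->blend_color_atom_dirty = false;
    }

    int main(void)
    {
        struct ctx_sketch ctx = {0};
        struct blend_color_sketch c = { .color = { 0.5f, 0.5f, 0.0f, 1.0f } };

        set_blend_color(&ctx, &c);
        if (ctx.blend_color_atom_dirty)
            emit_blend_color(&ctx);

        set_blend_color(&ctx, &c);   /* redundant call: nothing to re-emit */
        printf("dirty after redundant set: %d\n", ctx.blend_color_atom_dirty);
        return 0;
    }
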
632 struct si_context *sctx = (struct si_context *)ctx;
635 if (memcmp(&sctx->clip_state.state, state, sizeof(*state)) == 0)
638 sctx->clip_state.state = *state;
639 si_mark_atom_dirty(sctx, &sctx->clip_state.atom);
645 si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES, &cb);
649 static void si_emit_clip_state(struct si_context *sctx, struct r600_atom *atom)
651 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
654 radeon_emit_array(cs, (uint32_t*)sctx->clip_state.state.ucp, 6*4);
659 static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
661 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
662 struct si_shader *vs = si_get_vs_state(sctx);
663 struct tgsi_shader_info *info = si_get_vs_info(sctx);
664 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
716 static void si_update_poly_offset_state(struct si_context *sctx)
718 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
720 if (!rs || !rs->uses_poly_offset || !sctx->framebuffer.state.zsbuf) {
721 si_pm4_bind_state(sctx, poly_offset, NULL);
728 switch (sctx->framebuffer.state.zsbuf->texture->format) {
730 si_pm4_bind_state(sctx, poly_offset, &rs->pm4_poly_offset[0]);
733 si_pm4_bind_state(sctx, poly_offset, &rs->pm4_poly_offset[1]);
737 si_pm4_bind_state(sctx, poly_offset, &rs->pm4_poly_offset[2]);
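
si_update_poly_offset_state binds one of three precomputed PM4 states according to the bound Z-buffer format, because the polygon-offset units depend on the depth buffer's precision. A hedged sketch of that dispatch; the enum below is a simplified stand-in for PIPE_FORMAT_*, and the 16-bit/24-bit/32-bit-float grouping is inferred from the three pm4_poly_offset entries:

    #include <stdio.h>

    /* Simplified stand-in for the Gallium depth/stencil formats. */
    enum zs_format { Z16_UNORM, Z24_UNORM_S8_UINT, Z32_FLOAT, Z32_FLOAT_S8X24 };

    /* Index into a hypothetical rs->pm4_poly_offset[3] array:
     * 0 = 16-bit unorm Z, 1 = 24-bit unorm Z, 2 = 32-bit float Z. */
    static int poly_offset_index(enum zs_format fmt)
    {
        switch (fmt) {
        case Z16_UNORM:
            return 0;
        case Z32_FLOAT:
        case Z32_FLOAT_S8X24:
            return 2;
        default:               /* 24-bit depth formats */
            return 1;
        }
    }

    int main(void)
    {
        printf("Z16 -> %d, Z24S8 -> %d, Z32F -> %d\n",
               poly_offset_index(Z16_UNORM),
               poly_offset_index(Z24_UNORM_S8_UINT),
               poly_offset_index(Z32_FLOAT));
        return 0;
    }
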
898 struct si_context *sctx = (struct si_context *)ctx;
900 (struct si_state_rasterizer*)sctx->queued.named.rasterizer;
907 si_mark_atom_dirty(sctx, &sctx->db_render_state);
910 if (sctx->b.family >= CHIP_POLARIS10 &&
911 sctx->framebuffer.nr_samples > 1)
912 si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
915 r600_viewport_set_rast_deps(&sctx->b, rs->scissor_enable, rs->clip_halfz);
917 si_pm4_bind_state(sctx, rasterizer, rs);
918 si_update_poly_offset_state(sctx);
920 si_mark_atom_dirty(sctx, &sctx->clip_regs);
921 sctx->do_update_shaders = true;
926 struct si_context *sctx = (struct si_context *)ctx;
928 if (sctx->queued.named.rasterizer == state)
929 si_pm4_bind_state(sctx, poly_offset, NULL);
930 si_pm4_delete_state(sctx, rasterizer, (struct si_state_rasterizer *)state);
936 static void si_emit_stencil_ref(struct si_context *sctx, struct r600_atom *atom)
938 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
939 struct pipe_stencil_ref *ref = &sctx->stencil_ref.state;
940 struct si_dsa_stencil_ref_part *dsa = &sctx->stencil_ref.dsa_part;
956 struct si_context *sctx = (struct si_context *)ctx;
958 if (memcmp(&sctx->stencil_ref.state, state, sizeof(*state)) == 0)
961 sctx->stencil_ref.state = *state;
962 si_mark_atom_dirty(sctx, &sctx->stencil_ref.atom);
1058 struct si_context *sctx = (struct si_context *)ctx;
1064 si_pm4_bind_state(sctx, dsa, dsa);
1066 if (memcmp(&dsa->stencil_ref, &sctx->stencil_ref.dsa_part,
1068 sctx->stencil_ref.dsa_part = dsa->stencil_ref;
1069 si_mark_atom_dirty(sctx, &sctx->stencil_ref.atom);
1071 sctx->do_update_shaders = true;
1076 struct si_context *sctx = (struct si_context *)ctx;
1077 si_pm4_delete_state(sctx, dsa, (struct si_state_dsa *)state);
1080 static void *si_create_db_flush_dsa(struct si_context *sctx)
1084 return sctx->b.b.create_depth_stencil_alpha_state(&sctx->b.b, &dsa);
1091 struct si_context *sctx = (struct si_context*)ctx;
1095 sctx->b.flags &= ~R600_CONTEXT_STOP_PIPELINE_STATS;
1096 sctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;
1098 sctx->b.flags &= ~R600_CONTEXT_START_PIPELINE_STATS;
1099 sctx->b.flags |= R600_CONTEXT_STOP_PIPELINE_STATS;
1103 if (sctx->occlusion_queries_disabled != !enable) {
1104 sctx->occlusion_queries_disabled = !enable;
1105 si_mark_atom_dirty(sctx, &sctx->db_render_state);
1111 struct si_context *sctx = (struct si_context*)ctx;
1113 si_mark_atom_dirty(sctx, &sctx->db_render_state);
1118 struct si_context *sctx = (struct si_context*)ctx;
1120 st->saved_compute = sctx->cs_shader_state.program;
1122 si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
1123 si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
1126 static void si_emit_db_render_state(struct si_context *sctx, struct r600_atom *state)
1128 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1129 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
1135 if (sctx->dbcb_depth_copy_enabled ||
1136 sctx->dbcb_stencil_copy_enabled) {
1138 S_028000_DEPTH_COPY(sctx->dbcb_depth_copy_enabled) |
1139 S_028000_STENCIL_COPY(sctx->dbcb_stencil_copy_enabled) |
1141 S_028000_COPY_SAMPLE(sctx->dbcb_copy_sample));
1142 } else if (sctx->db_flush_depth_inplace || sctx->db_flush_stencil_inplace) {
1144 S_028000_DEPTH_COMPRESS_DISABLE(sctx->db_flush_depth_inplace) |
1145 S_028000_STENCIL_COMPRESS_DISABLE(sctx->db_flush_stencil_inplace));
1148 S_028000_DEPTH_CLEAR_ENABLE(sctx->db_depth_clear) |
1149 S_028000_STENCIL_CLEAR_ENABLE(sctx->db_stencil_clear));
1153 if (sctx->b.num_occlusion_queries > 0 &&
1154 !sctx->occlusion_queries_disabled) {
1155 bool perfect = sctx->b.num_perfect_occlusion_queries > 0;
1157 if (sctx->b.chip_class >= CIK) {
1160 S_028004_SAMPLE_RATE(sctx->framebuffer.log_samples) |
1167 S_028004_SAMPLE_RATE(sctx->framebuffer.log_samples));
1171 if (sctx->b.chip_class >= CIK) {
1180 S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(sctx->db_depth_disable_expclear) |
1181 S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(sctx->db_stencil_disable_expclear) |
1182 S_028010_DECOMPRESS_Z_ON_FLUSH(sctx->framebuffer.nr_samples >= 4));
1184 db_shader_control = sctx->ps_db_shader_control;
1187 if (sctx->b.chip_class == SI && sctx->smoothing_enabled) {
1196 if (sctx->b.family == CHIP_STONEY &&
1197 sctx->screen->b.debug_flags & DBG_NO_RB_PLUS)
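
si_emit_db_render_state packs DB_RENDER_CONTROL differently depending on whether a depth/stencil copy, an in-place decompress, or a clear is in progress, with the copy path taking priority. A sketch of that ordering using illustrative field-packing macros in the style of the generated S_028000_* helpers; the bit positions are placeholders rather than the real register layout, and the in-place flush branch is left out for brevity:

    #include <stdio.h>

    /* Placeholder field-packing macros (not the real DB_RENDER_CONTROL bits). */
    #define FIELD_DEPTH_CLEAR_ENABLE(x)   (((unsigned)(x) & 0x1) << 0)
    #define FIELD_STENCIL_CLEAR_ENABLE(x) (((unsigned)(x) & 0x1) << 1)
    #define FIELD_DEPTH_COPY(x)           (((unsigned)(x) & 0x1) << 2)
    #define FIELD_STENCIL_COPY(x)         (((unsigned)(x) & 0x1) << 3)
    #define FIELD_COPY_SAMPLE(x)          (((unsigned)(x) & 0xf) << 4)

    struct db_state_sketch {
        int dbcb_depth_copy_enabled;
        int dbcb_stencil_copy_enabled;
        unsigned dbcb_copy_sample;
        int db_depth_clear;
        int db_stencil_clear;
    };

    /* Copy/decompress modes win over clears, as in si_emit_db_render_state. */
    static unsigned pack_db_render_control(const struct db_state_sketch *s)
    {
        if (s->dbcb_depth_copy_enabled || s->dbcb_stencil_copy_enabled)
            return FIELD_DEPTH_COPY(s->dbcb_depth_copy_enabled) |
                   FIELD_STENCIL_COPY(s->dbcb_stencil_copy_enabled) |
                   FIELD_COPY_SAMPLE(s->dbcb_copy_sample);

        return FIELD_DEPTH_CLEAR_ENABLE(s->db_depth_clear) |
               FIELD_STENCIL_CLEAR_ENABLE(s->db_stencil_clear);
    }

    int main(void)
    {
        struct db_state_sketch s = { .db_depth_clear = 1 };
        printf("DB_RENDER_CONTROL = 0x%08x\n", pack_db_render_control(&s));
        return 0;
    }
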
2059 static void si_initialize_color_surface(struct si_context *sctx,
2161 if (sctx->b.chip_class == SI) {
2172 if (sctx->b.chip_class >= VI) {
2187 if (!rtex->fmask.size && sctx->b.chip_class == SI) {
2198 static void si_init_depth_surface(struct si_context *sctx,
2232 if (sctx->b.chip_class >= CIK) {
2233 struct radeon_info *info = &sctx->screen->b.info;
2350 struct si_context *sctx = (struct si_context *)ctx;
2354 bool old_any_dst_linear = sctx->framebuffer.any_dst_linear;
2355 unsigned old_nr_samples = sctx->framebuffer.nr_samples;
2358 for (i = 0; i < sctx->framebuffer.state.nr_cbufs; i++) {
2359 if (!sctx->framebuffer.state.cbufs[i])
2362 rtex = (struct r600_texture*)sctx->framebuffer.state.cbufs[i]->texture;
2374 sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1 |
2382 sctx->framebuffer.dirty_cbufs |=
2383 (1 << MAX2(sctx->framebuffer.state.nr_cbufs, state->nr_cbufs)) - 1;
2384 sctx->framebuffer.dirty_zsbuf |= sctx->framebuffer.state.zsbuf != state->zsbuf;
2386 si_dec_framebuffer_counters(&sctx->framebuffer.state);
2387 util_copy_framebuffer_state(&sctx->framebuffer.state, state);
2389 sctx->framebuffer.colorbuf_enabled_4bit = 0;
2390 sctx->framebuffer.spi_shader_col_format = 0;
2391 sctx->framebuffer.spi_shader_col_format_alpha = 0;
2392 sctx->framebuffer.spi_shader_col_format_blend = 0;
2393 sctx->framebuffer.spi_shader_col_format_blend_alpha = 0;
2394 sctx->framebuffer.color_is_int8 = 0;
2395 sctx->framebuffer.color_is_int10 = 0;
2397 sctx->framebuffer.compressed_cb_mask = 0;
2398 sctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);
2399 sctx->framebuffer.log_samples = util_logbase2(sctx->framebuffer.nr_samples);
2400 sctx->framebuffer.any_dst_linear = false;
2410 si_initialize_color_surface(sctx, surf);
2413 sctx->framebuffer.colorbuf_enabled_4bit |= 0xf << (i * 4);
2414 sctx->framebuffer.spi_shader_col_format |=
2416 sctx->framebuffer.spi_shader_col_format_alpha |=
2418 sctx->framebuffer.spi_shader_col_format_blend |=
2420 sctx->framebuffer.spi_shader_col_format_blend_alpha |=
2424 sctx->framebuffer.color_is_int8 |= 1 << i;
2426 sctx->framebuffer.color_is_int10 |= 1 << i;
2429 sctx->framebuffer.compressed_cb_mask |= 1 << i;
2433 sctx->framebuffer.any_dst_linear = true;
2441 sctx->framebuffer.compressed_cb_mask |= 1 << i;
2451 si_init_depth_surface(sctx, surf);
2456 si_update_poly_offset_state(sctx);
2457 si_mark_atom_dirty(sctx, &sctx->cb_render_state);
2458 si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
2460 if (sctx->framebuffer.any_dst_linear != old_any_dst_linear)
2461 si_mark_atom_dirty(sctx, &sctx->msaa_config);
2463 if (sctx->framebuffer.nr_samples != old_nr_samples) {
2464 si_mark_atom_dirty(sctx, &sctx->msaa_config);
2465 si_mark_atom_dirty(sctx, &sctx->db_render_state);
2468 switch (sctx->framebuffer.nr_samples) {
2470 constbuf.user_buffer = sctx->b.sample_locations_1x;
2473 constbuf.user_buffer = sctx->b.sample_locations_2x;
2476 constbuf.user_buffer = sctx->b.sample_locations_4x;
2479 constbuf.user_buffer = sctx->b.sample_locations_8x;
2482 constbuf.user_buffer = sctx->b.sample_locations_16x;
2486 sctx->framebuffer.nr_samples);
2489 constbuf.buffer_size = sctx->framebuffer.nr_samples * 2 * 4;
2490 si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS, &constbuf);
2492 si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
2495 sctx->need_check_render_feedback = true;
2496 sctx->do_update_shaders = true;
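
si_set_framebuffer_state rebuilds the per-MRT derived masks from scratch on every framebuffer change: each bound color buffer contributes a 4-bit group to colorbuf_enabled_4bit and its export format to spi_shader_col_format, and the MSAA sample-position constant buffer is sized at two 32-bit floats per sample. A simplified sketch with hypothetical types:

    #include <stdio.h>

    #define MAX_CBUFS 8

    /* Hypothetical per-framebuffer state; cbuf_export_format[i] == 0 means
     * no buffer is bound in that slot. */
    struct fb_sketch {
        unsigned nr_cbufs;
        unsigned cbuf_export_format[MAX_CBUFS];
        unsigned colorbuf_enabled_4bit;
        unsigned spi_shader_col_format;
        unsigned nr_samples;
    };

    /* Recompute the packed per-MRT masks, 4 bits per color buffer. */
    static void update_fb_derived_state(struct fb_sketch *fb)
    {
        fb->colorbuf_enabled_4bit = 0;
        fb->spi_shader_col_format = 0;

        for (unsigned i = 0; i < fb->nr_cbufs; i++) {
            if (!fb->cbuf_export_format[i])
                continue;
            fb->colorbuf_enabled_4bit |= 0xfu << (i * 4);
            fb->spi_shader_col_format |= fb->cbuf_export_format[i] << (i * 4);
        }
    }

    int main(void)
    {
        struct fb_sketch fb = { .nr_cbufs = 3,
                                .cbuf_export_format = { 0x4, 0, 0x8 },
                                .nr_samples = 4 };
        update_fb_derived_state(&fb);

        /* Sample-position constant buffer: two floats (x, y) per sample,
         * hence nr_samples * 2 * 4 bytes. */
        unsigned sample_pos_size = fb.nr_samples * 2 * 4;

        printf("enabled_4bit=0x%x col_format=0x%x sample_pos_size=%u\n",
               fb.colorbuf_enabled_4bit, fb.spi_shader_col_format,
               sample_pos_size);
        return 0;
    }
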
2499 static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom *atom)
2501 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
2502 struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
2515 if (!(sctx->framebuffer.dirty_cbufs & (1 << i)))
2527 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
2534 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
2540 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
2558 if (sctx->b.chip_class >= CIK)
2565 if (sctx->b.chip_class >= CIK)
2585 sctx->b.chip_class >= VI ? 14 : 13);
2600 if (sctx->b.chip_class >= VI) /* R_028C94_CB_COLOR0_DCC_BASE */
2606 if (sctx->framebuffer.dirty_cbufs & (1 << i))
2610 if (state->zsbuf && sctx->framebuffer.dirty_zsbuf) {
2614 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
2621 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
2646 } else if (sctx->framebuffer.dirty_zsbuf) {
2657 sctx->framebuffer.dirty_cbufs = 0;
2658 sctx->framebuffer.dirty_zsbuf = false;
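
si_emit_framebuffer_state re-emits only the color buffers whose bit is set in dirty_cbufs (plus the depth/stencil buffer when dirty_zsbuf is set) and clears both masks afterwards. A sketch of that dirty-bit bookkeeping, with prints in place of the real register writes and radeon_add_to_buffer_list calls:

    #include <stdbool.h>
    #include <stdio.h>

    struct emit_sketch {
        unsigned nr_cbufs;
        unsigned dirty_cbufs;   /* one bit per color buffer */
        bool     dirty_zsbuf;
    };

    /* Walk the color buffers and only touch the dirty ones, then reset the
     * dirty tracking so the next emit is a no-op until state changes again. */
    static void emit_framebuffer(struct emit_sketch *s)
    {
        for (unsigned i = 0; i < s->nr_cbufs; i++) {
            if (!(s->dirty_cbufs & (1u << i)))
                continue;
            printf("re-emit color buffer %u registers\n", i);
        }

        if (s->dirty_zsbuf)
            printf("re-emit depth/stencil registers\n");

        s->dirty_cbufs = 0;
        s->dirty_zsbuf = false;
    }

    int main(void)
    {
        struct emit_sketch s = { .nr_cbufs = 4, .dirty_cbufs = 0xa,
                                 .dirty_zsbuf = true };
        emit_framebuffer(&s);
        return 0;
    }
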
2661 static void si_emit_msaa_sample_locs(struct si_context *sctx,
2664 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
2665 unsigned nr_samples = sctx->framebuffer.nr_samples;
2670 if (nr_samples <= 1 && sctx->smoothing_enabled)
2676 if (sctx->b.family >= CHIP_POLARIS10)
2680 (nr_samples != sctx->msaa_sample_locs.nr_samples)) {
2681 sctx->msaa_sample_locs.nr_samples = nr_samples;
2685 if (sctx->b.family >= CHIP_POLARIS10) {
2686 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
2695 if (sctx->framebuffer.nr_samples > 1 && rs && !rs->multisample_enable)
2703 static void si_emit_msaa_config(struct si_context *sctx, struct r600_atom *atom)
2705 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
2706 unsigned num_tile_pipes = sctx->screen->b.info.num_tile_pipes;
2708 bool dst_is_linear = sctx->framebuffer.any_dst_linear;
2721 cayman_emit_msaa_config(cs, sctx->framebuffer.nr_samples,
2722 sctx->ps_iter_samples,
2723 sctx->smoothing_enabled ? SI_NUM_SMOOTH_AA_SAMPLES : 0,
2729 struct si_context *sctx = (struct si_context *)ctx;
2731 if (sctx->ps_iter_samples == min_samples)
2734 sctx->ps_iter_samples = min_samples;
2735 sctx->do_update_shaders = true;
2737 if (sctx->framebuffer.nr_samples > 1)
2738 si_mark_atom_dirty(sctx, &sctx->msaa_config);
3032 struct si_context *sctx = (struct si_context*)ctx;
3062 si_make_buffer_descriptor(sctx->screen,
3151 vi_dcc_disable_if_incompatible_format(&sctx->b, texture,
3155 si_make_texture_descriptor(sctx->screen, tmp, true,
3211 struct si_context *sctx = (struct si_context *)ctx;
3212 struct r600_common_screen *rscreen = sctx->b.screen;
3246 for (i = 0; i < sctx->border_color_count; i++)
3247 if (memcmp(&sctx->border_color_table[i], &state->border_color,
3258 if (i == sctx->border_color_count) {
3260 memcpy(&sctx->border_color_table[i], &state->border_color,
3262 util_memcpy_cpu_to_le32(&sctx->border_color_map[i],
3265 sctx->border_color_count++;
3284 S_008F30_COMPAT_MODE(sctx->b.chip_class >= VI));
3295 S_008F38_ANISO_OVERRIDE(sctx->b.chip_class >= VI));
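
The border-color handling in si_create_sampler_state (lines 3246-3265 above) deduplicates border colors with a linear scan of the context's table, appending a new entry and copying it into the GPU-visible mapping only when no existing entry matches. A sketch of the lookup-or-append logic; the table size is illustrative and the GPU-side copy is omitted:

    #include <stdio.h>
    #include <string.h>

    #define MAX_BORDER_COLORS 4096   /* illustrative table size */

    /* Simplified stand-in for pipe_color_union. */
    union border_color { float f[4]; unsigned ui[4]; };

    struct border_table {
        union border_color entries[MAX_BORDER_COLORS];
        unsigned count;
    };

    /* Return the slot of a border color, appending it if it is not in the
     * table yet; -1 if the table is full. */
    static int border_color_slot(struct border_table *t,
                                 const union border_color *color)
    {
        for (unsigned i = 0; i < t->count; i++)
            if (memcmp(&t->entries[i], color, sizeof(*color)) == 0)
                return (int)i;

        if (t->count >= MAX_BORDER_COLORS)
            return -1;

        t->entries[t->count] = *color;
        return (int)t->count++;
    }

    int main(void)
    {
        static struct border_table t;
        union border_color red = { .f = { 1, 0, 0, 1 } };

        printf("first:  slot %d\n", border_color_slot(&t, &red));
        printf("reused: slot %d\n", border_color_slot(&t, &red));
        return 0;
    }
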
3303 struct si_context *sctx = (struct si_context *)ctx;
3305 if (sctx->sample_mask.sample_mask == (uint16_t)sample_mask)
3308 sctx->sample_mask.sample_mask = sample_mask;
3309 si_mark_atom_dirty(sctx, &sctx->sample_mask.atom);
3312 static void si_emit_sample_mask(struct si_context *sctx, struct r600_atom *atom)
3314 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
3315 unsigned mask = sctx->sample_mask.sample_mask;
3321 assert(mask == 0xffff || sctx->framebuffer.nr_samples > 1 ||
3322 (mask & 1 && sctx->blitter->running));
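
si_emit_sample_mask takes the 16-bit sample mask and, in the real emit path, replicates it into both halves of each 32-bit PA_SC_AA_MASK register so every pixel of the 2x2 quad uses the same mask. A sketch of that packing (register offsets omitted; the replication detail is taken from the surrounding driver code, not from the lines shown above):

    #include <stdint.h>
    #include <stdio.h>

    /* Duplicate the 16-bit mask into both halves of the two 32-bit
     * PA_SC_AA_MASK_* values covering the 2x2 pixel quad. */
    static void pack_sample_mask(uint16_t mask, uint32_t out_regs[2])
    {
        uint32_t packed = (uint32_t)mask | ((uint32_t)mask << 16);

        out_regs[0] = packed;   /* PA_SC_AA_MASK_X0Y0_X1Y0 */
        out_regs[1] = packed;   /* PA_SC_AA_MASK_X0Y1_X1Y1 */
    }

    int main(void)
    {
        uint32_t regs[2];
        pack_sample_mask(0xffff, regs);
        printf("0x%08x 0x%08x\n", regs[0], regs[1]);
        return 0;
    }
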
3443 struct si_context *sctx = (struct si_context *)ctx;
3446 sctx->vertex_elements = v;
3447 sctx->vertex_buffers_dirty = true;
3448 sctx->do_update_shaders = true;
3453 struct si_context *sctx = (struct si_context *)ctx;
3455 if (sctx->vertex_elements == state)
3456 sctx->vertex_elements = NULL;
3464 struct si_context *sctx = (struct si_context *)ctx;
3465 struct pipe_vertex_buffer *dst = sctx->vertex_buffer + start_slot;
3468 assert(start_slot + count <= ARRAY_SIZE(sctx->vertex_buffer));
3488 sctx->vertex_buffers_dirty = true;
3494 struct si_context *sctx = (struct si_context *)ctx;
3499 pipe_resource_reference(&sctx->index_buffer.buffer, buf);
3500 memcpy(&sctx->index_buffer, ib, sizeof(*ib));
3505 pipe_resource_reference(&sctx->index_buffer.buffer, NULL);
3517 struct si_context *sctx = (struct si_context *)ctx;
3528 sctx, (struct r600_resource**)&cb.buffer,
3532 si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS, &cb);
3538 struct si_context *sctx = (struct si_context *)ctx;
3540 sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1 |
3548 struct si_context *sctx = (struct si_context *)ctx;
3552 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
3556 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
3568 sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1;
3575 if (sctx->screen->b.chip_class <= CIK)
3576 sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
3580 sctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER;
3584 sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
3587 static void *si_create_blend_custom(struct si_context *sctx, unsigned mode)
3594 return si_create_blend_state_mode(&sctx->b.b, &blend, mode);
3603 static void si_init_config(struct si_context *sctx);
3605 void si_init_state_functions(struct si_context *sctx)
3607 si_init_external_atom(sctx, &sctx->b.render_cond_atom, &sctx->atoms.s.render_cond);
3608 si_init_external_atom(sctx, &sctx->b.streamout.begin_atom, &sctx->atoms.s.streamout_begin);
3609 si_init_external_atom(sctx, &sctx->b.streamout.enable_atom, &sctx->atoms.s.streamout_enable);
3610 si_init_external_atom(sctx, &sctx->b.scissors.atom, &sctx->atoms.s.scissors);
3611 si_init_external_atom(sctx, &sctx->b.viewports.atom, &sctx->atoms.s.viewports);
3613 si_init_atom(sctx, &sctx->framebuffer.atom, &sctx->atoms.s.framebuffer, si_emit_framebuffer_state);
3614 si_init_atom(sctx, &sctx->msaa_sample_locs.atom, &sctx->atoms.s.msaa_sample_locs, si_emit_msaa_sample_locs);
3615 si_init_atom(sctx, &sctx->db_render_state, &sctx->atoms.s.db_render_state, si_emit_db_render_state);
3616 si_init_atom(sctx, &sctx->msaa_config, &sctx->atoms.s.msaa_config, si_emit_msaa_config);
3617 si_init_atom(sctx, &sctx->sample_mask.atom, &sctx->atoms.s.sample_mask, si_emit_sample_mask);
3618 si_init_atom(sctx, &sctx->cb_render_state, &sctx->atoms.s.cb_render_state, si_emit_cb_render_state);
3619 si_init_atom(sctx, &sctx->blend_color.atom, &sctx->atoms.s.blend_color, si_emit_blend_color);
3620 si_init_atom(sctx, &sctx->clip_regs, &sctx->atoms.s.clip_regs, si_emit_clip_regs);
3621 si_init_atom(sctx, &sctx->clip_state.atom, &sctx->atoms.s.clip_state, si_emit_clip_state);
3622 si_init_atom(sctx, &sctx->stencil_ref.atom, &sctx->atoms.s.stencil_ref, si_emit_stencil_ref);
3624 sctx->b.b.create_blend_state = si_create_blend_state;
3625 sctx->b.b.bind_blend_state = si_bind_blend_state;
3626 sctx->b.b.delete_blend_state = si_delete_blend_state;
3627 sctx->b.b.set_blend_color = si_set_blend_color;
3629 sctx->b.b.create_rasterizer_state = si_create_rs_state;
3630 sctx->b.b.bind_rasterizer_state = si_bind_rs_state;
3631 sctx->b.b.delete_rasterizer_state = si_delete_rs_state;
3633 sctx->b.b.create_depth_stencil_alpha_state = si_create_dsa_state;
3634 sctx->b.b.bind_depth_stencil_alpha_state = si_bind_dsa_state;
3635 sctx->b.b.delete_depth_stencil_alpha_state = si_delete_dsa_state;
3637 sctx->custom_dsa_flush = si_create_db_flush_dsa(sctx);
3638 sctx->custom_blend_resolve = si_create_blend_custom(sctx, V_028808_CB_RESOLVE);
3639 sctx->custom_blend_decompress = si_create_blend_custom(sctx, V_028808_CB_FMASK_DECOMPRESS);
3640 sctx->custom_blend_fastclear = si_create_blend_custom(sctx, V_028808_CB_ELIMINATE_FAST_CLEAR);
3641 sctx->custom_blend_dcc_decompress = si_create_blend_custom(sctx, V_028808_CB_DCC_DECOMPRESS);
3643 sctx->b.b.set_clip_state = si_set_clip_state;
3644 sctx->b.b.set_stencil_ref = si_set_stencil_ref;
3646 sctx->b.b.set_framebuffer_state = si_set_framebuffer_state;
3647 sctx->b.b.get_sample_position = cayman_get_sample_position;
3649 sctx->b.b.create_sampler_state = si_create_sampler_state;
3650 sctx->b.b.delete_sampler_state = si_delete_sampler_state;
3652 sctx->b.b.create_sampler_view = si_create_sampler_view;
3653 sctx->b.b.sampler_view_destroy = si_sampler_view_destroy;
3655 sctx->b.b.set_sample_mask = si_set_sample_mask;
3657 sctx->b.b.create_vertex_elements_state = si_create_vertex_elements;
3658 sctx->b.b.bind_vertex_elements_state = si_bind_vertex_elements;
3659 sctx->b.b.delete_vertex_elements_state = si_delete_vertex_element;
3660 sctx->b.b.set_vertex_buffers = si_set_vertex_buffers;
3661 sctx->b.b.set_index_buffer = si_set_index_buffer;
3663 sctx->b.b.texture_barrier = si_texture_barrier;
3664 sctx->b.b.memory_barrier = si_memory_barrier;
3665 sctx->b.b.set_min_samples = si_set_min_samples;
3666 sctx->b.b.set_tess_state = si_set_tess_state;
3668 sctx->b.b.set_active_query_state = si_set_active_query_state;
3669 sctx->b.set_occlusion_query_state = si_set_occlusion_query_state;
3670 sctx->b.save_qbo_state = si_save_qbo_state;
3671 sctx->b.need_gfx_cs_space = si_need_gfx_cs_space;
3673 sctx->b.b.draw_vbo = si_draw_vbo;
3675 si_init_config(sctx);
3779 si_write_harvested_raster_configs(struct si_context *sctx,
3784 unsigned sh_per_se = MAX2(sctx->screen->b.info.max_sh_per_se, 1);
3785 unsigned num_se = MAX2(sctx->screen->b.info.max_se, 1);
3786 unsigned rb_mask = sctx->screen->b.info.enabled_rb_mask;
3787 unsigned num_rb = MIN2(sctx->screen->b.info.num_render_backends, 16);
3881 if (sctx->b.chip_class < CIK)
3893 if (sctx->b.chip_class < CIK)
3919 static void si_init_config(struct si_context *sctx)
3921 struct si_screen *sscreen = sctx->screen;
3922 unsigned num_rb = MIN2(sctx->screen->b.info.num_render_backends, 16);
3923 unsigned rb_mask = sctx->screen->b.info.enabled_rb_mask;
3925 uint64_t border_color_va = sctx->border_color_buffer->gpu_address;
3949 if (sctx->b.chip_class < CIK)
3958 switch (sctx->screen->b.family) {
4043 if (sctx->b.chip_class >= CIK)
4047 si_write_harvested_raster_configs(sctx, pm4, raster_config, raster_config_1);
4080 if (sctx->b.chip_class >= CIK) {
4117 if (sctx->b.chip_class >= VI) {
4123 if (sctx->b.family < CHIP_POLARIS10)
4136 if (sctx->b.family == CHIP_FIJI ||
4137 sctx->b.family >= CHIP_POLARIS10)
4146 if (sctx->b.family == CHIP_STONEY)
4150 if (sctx->b.chip_class >= CIK)
4152 si_pm4_add_bo(pm4, sctx->border_color_buffer, RADEON_USAGE_READ,
4155 si_pm4_upload_indirect_buffer(sctx, pm4);
4156 sctx->init_config = pm4;