/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Marek Olšák <maraeo@gmail.com>
 *
 */

#include "r600_pipe_common.h"
#include "r600_cs.h"

#include "util/u_memory.h"

static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable);

static struct pipe_stream_output_target *
r600_create_so_target(struct pipe_context *ctx,
                      struct pipe_resource *buffer,
                      unsigned buffer_offset,
                      unsigned buffer_size)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_so_target *t;
        struct r600_resource *rbuffer = (struct r600_resource*)buffer;

        t = CALLOC_STRUCT(r600_so_target);
        if (!t) {
                return NULL;
        }

        u_suballocator_alloc(rctx->allocator_zeroed_memory, 4, 4,
                             &t->buf_filled_size_offset,
                             (struct pipe_resource**)&t->buf_filled_size);
        if (!t->buf_filled_size) {
                FREE(t);
                return NULL;
        }

        t->b.reference.count = 1;
        t->b.context = ctx;
        pipe_resource_reference(&t->b.buffer, buffer);
        t->b.buffer_offset = buffer_offset;
        t->b.buffer_size = buffer_size;

        util_range_add(&rbuffer->valid_buffer_range, buffer_offset,
                       buffer_offset + buffer_size);
        return &t->b;
}

static void r600_so_target_destroy(struct pipe_context *ctx,
                                   struct pipe_stream_output_target *target)
{
        struct r600_so_target *t = (struct r600_so_target*)target;
        pipe_resource_reference(&t->b.buffer, NULL);
        r600_resource_reference(&t->buf_filled_size, NULL);
        FREE(t);
}

void r600_streamout_buffers_dirty(struct r600_common_context *rctx)
{
        struct r600_atom *begin = &rctx->streamout.begin_atom;
        unsigned num_bufs = util_bitcount(rctx->streamout.enabled_mask);
        unsigned num_bufs_appended = util_bitcount(rctx->streamout.enabled_mask &
                                                   rctx->streamout.append_bitmask);

        if (!num_bufs)
                return;

        rctx->streamout.num_dw_for_end =
                12 + /* flush_vgt_streamout */
                num_bufs * 11; /* STRMOUT_BUFFER_UPDATE, BUFFER_SIZE */

        begin->num_dw = 12; /* flush_vgt_streamout */

        if (rctx->chip_class >= SI) {
                begin->num_dw += num_bufs * 4; /* SET_CONTEXT_REG */
        } else {
                begin->num_dw += num_bufs * 7; /* SET_CONTEXT_REG */

                if (rctx->family >= CHIP_RS780 && rctx->family <= CHIP_RV740)
                        begin->num_dw += num_bufs * 5; /* STRMOUT_BASE_UPDATE */
        }

        begin->num_dw +=
                num_bufs_appended * 8 + /* STRMOUT_BUFFER_UPDATE */
                (num_bufs - num_bufs_appended) * 6 + /* STRMOUT_BUFFER_UPDATE */
                /* Must match the SURFACE_BASE_UPDATE emit condition in
                 * r600_emit_streamout_begin, which also covers RS780/RS880. */
                (rctx->family > CHIP_R600 && rctx->family < CHIP_RV770 ? 2 : 0); /* SURFACE_BASE_UPDATE */

        rctx->set_atom_dirty(rctx, begin, true);

        r600_set_streamout_enable(rctx, true);
}
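
/* Worked example of the dword accounting above (purely illustrative, not
 * driver logic): an Evergreen-class chip (e.g. CHIP_CEDAR, so
 * chip_class < SI) with two targets bound, one of them appending:
 *
 *   num_bufs = 2, num_bufs_appended = 1
 *
 *   begin->num_dw = 12            flush_vgt_streamout
 *                 + 2 * 7         SET_CONTEXT_REG (pre-SI path)
 *                 + 1 * 8         STRMOUT_BUFFER_UPDATE (append from memory)
 *                 + 1 * 6         STRMOUT_BUFFER_UPDATE (offset from packet)
 *                 = 40 dwords
 *
 *   num_dw_for_end = 12 + 2 * 11 = 34 dwords
 *
 * CHIP_CEDAR lies outside both the RS780..RV740 range (STRMOUT_BASE_UPDATE)
 * and the R600..RV770 range (SURFACE_BASE_UPDATE), so those terms add 0.
 */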

void r600_set_streamout_targets(struct pipe_context *ctx,
                                unsigned num_targets,
                                struct pipe_stream_output_target **targets,
                                const unsigned *offsets)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        unsigned i;
        unsigned enabled_mask = 0, append_bitmask = 0;

        /* Stop streamout. */
        if (rctx->streamout.num_targets && rctx->streamout.begin_emitted) {
                r600_emit_streamout_end(rctx);
        }

        /* Set the new targets. */
        for (i = 0; i < num_targets; i++) {
                pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], targets[i]);
                if (!targets[i])
                        continue;

                r600_context_add_resource_size(ctx, targets[i]->buffer);
                enabled_mask |= 1 << i;
                if (offsets[i] == ((unsigned)-1))
                        append_bitmask |= 1 << i;
        }
        for (; i < rctx->streamout.num_targets; i++) {
                pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], NULL);
        }

        rctx->streamout.enabled_mask = enabled_mask;

        rctx->streamout.num_targets = num_targets;
        rctx->streamout.append_bitmask = append_bitmask;

        if (num_targets) {
                r600_streamout_buffers_dirty(rctx);
        } else {
                rctx->set_atom_dirty(rctx, &rctx->streamout.begin_atom, false);
                r600_set_streamout_enable(rctx, false);
        }
}
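
/* Usage sketch from the state-tracker side (purely illustrative; "pctx" is
 * a hypothetical pipe_context, not something defined in this file). An
 * offset of 0 starts writing at the beginning of the buffer, while
 * (unsigned)-1 requests appending after the previously filled size, which
 * sets the corresponding bit in append_bitmask above:
 *
 *   struct pipe_stream_output_target *target =
 *           pctx->create_stream_output_target(pctx, buffer, 0, size);
 *   unsigned offset = 0;                    // start from the beginning
 *   pctx->set_stream_output_targets(pctx, 1, &target, &offset);
 *   // ... draw ...
 *   offset = (unsigned)-1;                  // append behind the last write
 *   pctx->set_stream_output_targets(pctx, 1, &target, &offset);
 */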

static void r600_flush_vgt_streamout(struct r600_common_context *rctx)
{
        struct radeon_winsys_cs *cs = rctx->gfx.cs;
        unsigned reg_strmout_cntl;

        /* The register is at different places on different ASICs. */
        if (rctx->chip_class >= CIK) {
                reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
        } else if (rctx->chip_class >= EVERGREEN) {
                reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
        } else {
                reg_strmout_cntl = R_008490_CP_STRMOUT_CNTL;
        }

        if (rctx->chip_class >= CIK) {
                radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
        } else {
                radeon_set_config_reg(cs, reg_strmout_cntl, 0);
        }

        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));

        radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
        radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
        radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
        radeon_emit(cs, 0);
        radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* reference value */
        radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* mask */
        radeon_emit(cs, 4); /* poll interval */
}

static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = rctx->gfx.cs;
        struct r600_so_target **t = rctx->streamout.targets;
        unsigned *stride_in_dw = rctx->streamout.stride_in_dw;
        unsigned i, update_flags = 0;

        r600_flush_vgt_streamout(rctx);

        for (i = 0; i < rctx->streamout.num_targets; i++) {
                if (!t[i])
                        continue;

                t[i]->stride_in_dw = stride_in_dw[i];

                if (rctx->chip_class >= SI) {
                        /* SI binds streamout buffers as shader resources.
                         * VGT only counts primitives and tells the shader
                         * through SGPRs what to do. */
                        radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
                        radeon_emit(cs, (t[i]->b.buffer_offset +
                                         t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
                        radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */
                } else {
                        uint64_t va = r600_resource(t[i]->b.buffer)->gpu_address;

                        update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);

                        radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 3);
                        radeon_emit(cs, (t[i]->b.buffer_offset +
                                         t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
                        radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */
                        radeon_emit(cs, va >> 8); /* BUFFER_BASE */

                        r600_emit_reloc(rctx, &rctx->gfx, r600_resource(t[i]->b.buffer),
                                        RADEON_USAGE_WRITE, RADEON_PRIO_SHADER_RW_BUFFER);

                        /* R7xx requires this packet after updating BUFFER_BASE.
                         * Without this, R7xx locks up. */
                        if (rctx->family >= CHIP_RS780 && rctx->family <= CHIP_RV740) {
                                radeon_emit(cs, PKT3(PKT3_STRMOUT_BASE_UPDATE, 1, 0));
                                radeon_emit(cs, i);
                                radeon_emit(cs, va >> 8);

                                r600_emit_reloc(rctx, &rctx->gfx, r600_resource(t[i]->b.buffer),
                                                RADEON_USAGE_WRITE, RADEON_PRIO_SHADER_RW_BUFFER);
                        }
                }

                if (rctx->streamout.append_bitmask & (1 << i) && t[i]->buf_filled_size_valid) {
                        uint64_t va = t[i]->buf_filled_size->gpu_address +
                                      t[i]->buf_filled_size_offset;

                        /* Append. */
                        radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
                        radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
                                        STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
                        radeon_emit(cs, 0); /* unused */
                        radeon_emit(cs, 0); /* unused */
                        radeon_emit(cs, va); /* src address lo */
                        radeon_emit(cs, va >> 32); /* src address hi */

                        r600_emit_reloc(rctx, &rctx->gfx, t[i]->buf_filled_size,
                                        RADEON_USAGE_READ, RADEON_PRIO_SO_FILLED_SIZE);
                } else {
                        /* Start from the beginning. */
                        radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
                        radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
                                        STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
                        radeon_emit(cs, 0); /* unused */
                        radeon_emit(cs, 0); /* unused */
                        radeon_emit(cs, t[i]->b.buffer_offset >> 2); /* buffer offset in DW */
                        radeon_emit(cs, 0); /* unused */
                }
        }

        if (rctx->family > CHIP_R600 && rctx->family < CHIP_RV770) {
                radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
                radeon_emit(cs, update_flags);
        }
        rctx->streamout.begin_emitted = true;
}

void r600_emit_streamout_end(struct r600_common_context *rctx)
{
        struct radeon_winsys_cs *cs = rctx->gfx.cs;
        struct r600_so_target **t = rctx->streamout.targets;
        unsigned i;
        uint64_t va;

        r600_flush_vgt_streamout(rctx);

        for (i = 0; i < rctx->streamout.num_targets; i++) {
                if (!t[i])
                        continue;

                va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;
                radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
                radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
                                STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
                                STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
                radeon_emit(cs, va); /* dst address lo */
                radeon_emit(cs, va >> 32); /* dst address hi */
                radeon_emit(cs, 0); /* unused */
                radeon_emit(cs, 0); /* unused */

                r600_emit_reloc(rctx, &rctx->gfx, t[i]->buf_filled_size,
                                RADEON_USAGE_WRITE, RADEON_PRIO_SO_FILLED_SIZE);

                /* Zero the buffer size. The counters (primitives generated,
                 * primitives emitted) may be enabled even if no buffer is
                 * bound. This ensures that the primitives-emitted query
                 * won't increment. */
                radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);

                t[i]->buf_filled_size_valid = true;
        }

        rctx->streamout.begin_emitted = false;
        rctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
}
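
/* Begin/end lifecycle sketch (an illustrative summary of the code above,
 * not additional driver logic): each begin/end pair round-trips the filled
 * size through the 4-byte suballocation in buf_filled_size:
 *
 *   begin:  BUFFER_OFFSET <- buf_filled_size   (append, OFFSET_FROM_MEM)
 *           BUFFER_OFFSET <- packet value      (otherwise, OFFSET_FROM_PACKET)
 *   draw:   VGT advances the offset as primitives are written
 *   end:    buf_filled_size <- BUFFER_FILLED_SIZE (STORE_BUFFER_FILLED_SIZE)
 *
 * A later set_streamout_targets() call with offset == (unsigned)-1 can then
 * resume exactly where the previous pass stopped.
 */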

/* STREAMOUT CONFIG DERIVED STATE
 *
 * Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
 * The buffer mask is an independent state, so no writes occur if there
 * are no buffers bound.
 */

static void r600_emit_streamout_enable(struct r600_common_context *rctx,
                                       struct r600_atom *atom)
{
        unsigned strmout_config_reg = R_028AB0_VGT_STRMOUT_EN;
        unsigned strmout_config_val = S_028B94_STREAMOUT_0_EN(r600_get_strmout_en(rctx));
        unsigned strmout_buffer_reg = R_028B20_VGT_STRMOUT_BUFFER_EN;
        unsigned strmout_buffer_val = rctx->streamout.hw_enabled_mask &
                                      rctx->streamout.enabled_stream_buffers_mask;

        if (rctx->chip_class >= EVERGREEN) {
                strmout_buffer_reg = R_028B98_VGT_STRMOUT_BUFFER_CONFIG;

                strmout_config_reg = R_028B94_VGT_STRMOUT_CONFIG;
                strmout_config_val |=
                        S_028B94_RAST_STREAM(0) |
                        S_028B94_STREAMOUT_1_EN(r600_get_strmout_en(rctx)) |
                        S_028B94_STREAMOUT_2_EN(r600_get_strmout_en(rctx)) |
                        S_028B94_STREAMOUT_3_EN(r600_get_strmout_en(rctx));
        }
        radeon_set_context_reg(rctx->gfx.cs, strmout_buffer_reg, strmout_buffer_val);
        radeon_set_context_reg(rctx->gfx.cs, strmout_config_reg, strmout_config_val);
}

static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable)
{
        bool old_strmout_en = r600_get_strmout_en(rctx);
        unsigned old_hw_enabled_mask = rctx->streamout.hw_enabled_mask;

        rctx->streamout.streamout_enabled = enable;

        /* Replicate the buffer-enable mask into one 4-bit group per vertex
         * stream, e.g. enabled_mask 0x3 -> hw_enabled_mask 0x3333. */
        rctx->streamout.hw_enabled_mask = rctx->streamout.enabled_mask |
                                          (rctx->streamout.enabled_mask << 4) |
                                          (rctx->streamout.enabled_mask << 8) |
                                          (rctx->streamout.enabled_mask << 12);

        if ((old_strmout_en != r600_get_strmout_en(rctx)) ||
            (old_hw_enabled_mask != rctx->streamout.hw_enabled_mask)) {
                rctx->set_atom_dirty(rctx, &rctx->streamout.enable_atom, true);
        }
}

void r600_update_prims_generated_query_state(struct r600_common_context *rctx,
                                             unsigned type, int diff)
{
        if (type == PIPE_QUERY_PRIMITIVES_GENERATED) {
                bool old_strmout_en = r600_get_strmout_en(rctx);

                rctx->streamout.num_prims_gen_queries += diff;
                assert(rctx->streamout.num_prims_gen_queries >= 0);

                rctx->streamout.prims_gen_query_enabled =
                        rctx->streamout.num_prims_gen_queries != 0;

                if (old_strmout_en != r600_get_strmout_en(rctx)) {
                        rctx->set_atom_dirty(rctx, &rctx->streamout.enable_atom, true);
                }
        }
}

void r600_streamout_init(struct r600_common_context *rctx)
{
        rctx->b.create_stream_output_target = r600_create_so_target;
        rctx->b.stream_output_target_destroy = r600_so_target_destroy;
        rctx->streamout.begin_atom.emit = r600_emit_streamout_begin;
        rctx->streamout.enable_atom.emit = r600_emit_streamout_enable;
        rctx->streamout.enable_atom.num_dw = 6;
}
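
/* Hook-up note (illustrative; the call site lives outside this file): a
 * driver context calls r600_streamout_init() once at creation time to
 * install the callbacks and atom emitters above. begin_atom.num_dw is
 * recomputed on every bind in r600_streamout_buffers_dirty(), while
 * enable_atom.num_dw is the fixed cost of r600_emit_streamout_enable():
 * two SET_CONTEXT_REG packets of 3 dwords each (header, register offset,
 * value) = 6 dwords.
 */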