
Lines Matching defs:query

61 	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
63 sscreen->b.fence_reference(&sscreen->b, &query->fence, NULL);
64 FREE(query);
90 default: unreachable("query type does not correspond to winsys id");
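Several of the memory and winsys statistics below are not tracked by the driver itself; winsys_id_from_type translates the driver-level query type into the counter id that rctx->ws->query_value() expects, and the default: unreachable(...) arm above catches any type that has no such mapping. A minimal sketch of that pattern, with hypothetical enum values standing in for the real R600_QUERY_*/RADEON_* pairs:

	#include <stdlib.h>

	/* Hypothetical stand-ins for the driver-side and winsys-side enums. */
	enum query_type   { QUERY_NUM_BYTES_MOVED, QUERY_NUM_EVICTIONS, QUERY_NUM_DRAW_CALLS };
	enum winsys_value { WS_NUM_BYTES_MOVED, WS_NUM_EVICTIONS };

	static enum winsys_value winsys_id_from_type(enum query_type type)
	{
		switch (type) {
		case QUERY_NUM_BYTES_MOVED: return WS_NUM_BYTES_MOVED;
		case QUERY_NUM_EVICTIONS:   return WS_NUM_EVICTIONS;
		default:
			/* query type does not correspond to a winsys id */
			abort();
		}
	}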
97 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
100 switch(query->b.type) {
105 query->begin_result = rctx->num_draw_calls;
108 query->begin_result = rctx->num_decompress_calls;
111 query->begin_result = rctx->num_mrt_draw_calls;
114 query->begin_result = rctx->num_prim_restart_calls;
117 query->begin_result = rctx->num_spill_draw_calls;
120 query->begin_result = rctx->num_compute_calls;
123 query->begin_result = rctx->num_spill_compute_calls;
126 query->begin_result = rctx->num_dma_calls;
129 query->begin_result = rctx->num_cp_dma_calls;
132 query->begin_result = rctx->num_vs_flushes;
135 query->begin_result = rctx->num_ps_flushes;
138 query->begin_result = rctx->num_cs_flushes;
141 query->begin_result = rctx->num_cb_cache_flushes;
144 query->begin_result = rctx->num_db_cache_flushes;
147 query->begin_result = rctx->num_L2_invalidates;
150 query->begin_result = rctx->num_L2_writebacks;
153 query->begin_result = rctx->num_resident_handles;
156 query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
159 query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
162 query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
176 query->begin_result = 0;
185 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
186 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
190 ws_id = winsys_id_from_type(query->b.type);
191 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
192 query->begin_time = rctx->ws->query_value(rctx->ws,
196 ws_id = winsys_id_from_type(query->b.type);
197 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
198 query->begin_time = os_time_get_nano();
201 query->begin_result =
203 query->begin_time = os_time_get_nano();
226 query->begin_result = si_begin_counter(rctx->screen,
227 query->b.type);
230 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
233 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
236 query->begin_result =
246 unreachable("r600_query_sw_begin: bad query type");
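Every arm of the switch above follows the same snapshot pattern: r600_query_sw_begin copies the current value of some driver-maintained counter into begin_result, and rate-style queries additionally record a begin timestamp. r600_query_sw_end, listed next, takes the matching end snapshots. A simplified sketch of the two begin variants, with hypothetical counters; now_ns() stands in for os_time_get_nano():

	#include <stdint.h>
	#include <time.h>

	struct sw_query {
		uint64_t begin_result, end_result;
		uint64_t begin_time, end_time;
	};

	/* Hypothetical driver-maintained counters. */
	static uint64_t num_draw_calls;
	static uint64_t thread_busy_ns;

	static uint64_t now_ns(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
	}

	/* Plain counter query: snapshot the counter only. */
	static void begin_draw_calls(struct sw_query *q)
	{
		q->begin_result = num_draw_calls;
	}

	/* Rate/percentage query: snapshot the counter and the clock. */
	static void begin_thread_busy(struct sw_query *q)
	{
		q->begin_result = thread_busy_ns;
		q->begin_time = now_ns();
	}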
255 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
258 switch(query->b.type) {
262 rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
265 query->end_result = rctx->num_draw_calls;
268 query->end_result = rctx->num_decompress_calls;
271 query->end_result = rctx->num_mrt_draw_calls;
274 query->end_result = rctx->num_prim_restart_calls;
277 query->end_result = rctx->num_spill_draw_calls;
280 query->end_result = rctx->num_compute_calls;
283 query->end_result = rctx->num_spill_compute_calls;
286 query->end_result = rctx->num_dma_calls;
289 query->end_result = rctx->num_cp_dma_calls;
292 query->end_result = rctx->num_vs_flushes;
295 query->end_result = rctx->num_ps_flushes;
298 query->end_result = rctx->num_cs_flushes;
301 query->end_result = rctx->num_cb_cache_flushes;
304 query->end_result = rctx->num_db_cache_flushes;
307 query->end_result = rctx->num_L2_invalidates;
310 query->end_result = rctx->num_L2_writebacks;
313 query->end_result = rctx->num_resident_handles;
316 query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
319 query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
322 query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
342 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
343 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
347 ws_id = winsys_id_from_type(query->b.type);
348 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
349 query->end_time = rctx->ws->query_value(rctx->ws,
353 ws_id = winsys_id_from_type(query->b.type);
354 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
355 query->end_time = os_time_get_nano();
358 query->end_result =
360 query->end_time = os_time_get_nano();
383 query->end_result = si_end_counter(rctx->screen,
384 query->b.type,
385 query->begin_result);
386 query->begin_result = 0;
389 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
392 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
395 query->end_result = rctx->last_tex_ps_draw_ratio;
398 query->end_result =
408 unreachable("r600_query_sw_end: bad query type");
419 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
421 switch (query->b.type) {
432 result->b = screen->fence_finish(screen, ctx, query->fence,
438 result->u64 = (query->end_result - query->begin_result) /
439 (query->end_time - query->begin_time);
443 result->u64 = (query->end_result - query->begin_result) * 100 /
444 (query->end_time - query->begin_time);
463 result->u64 = query->end_result - query->begin_result;
465 switch (query->b.type) {
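At readback the two snapshots are subtracted; rate queries divide the difference by the elapsed nanoseconds, and percentage-style queries (the * 100 / path above) scale first so the result is an integer percent. A worked example of the arithmetic, with made-up numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical snapshots: 1.5 s of busy time over a 2 s interval. */
		uint64_t begin_result = 0, end_result = 1500000000ull; /* busy ns */
		uint64_t begin_time = 0, end_time = 2000000000ull;     /* wall ns */

		/* Plain counter: just the difference. */
		uint64_t delta = end_result - begin_result;

		/* Percentage query: scale by 100 before dividing -> 75 here. */
		uint64_t busy_pct = (end_result - begin_result) * 100 /
				    (end_time - begin_time);

		printf("busy %llu ns, %llu%%\n",
		       (unsigned long long)delta, (unsigned long long)busy_pct);
		return 0;
	}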
490 struct r600_query_sw *query;
492 query = CALLOC_STRUCT(r600_query_sw);
493 if (!query)
496 query->b.type = query_type;
497 query->b.ops = &sw_query_ops;
499 return (struct pipe_query *)query;
505 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
506 struct r600_query_buffer *prev = query->buffer.previous;
508 /* Release all query buffers. */
516 r600_resource_reference(&query->buffer.buf, NULL);
517 r600_resource_reference(&query->workaround_buf, NULL);
522 struct r600_query_hw *query)
524 unsigned buf_size = MAX2(query->result_size,
537 if (!query->ops->prepare_buffer(sscreen, query, buf)) {
546 struct r600_query_hw *query,
558 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
559 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
560 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
567 num_results = buffer->b.b.width0 / query->result_size;
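prepare_buffer zeroes a freshly allocated result buffer and, for the occlusion query types checked above, pre-marks the slots belonging to disabled render backends. Each enabled backend eventually sets a "result available" top bit in its begin/end words; stamping that bit into the unused slots up front keeps readback from waiting forever on backends that will never write. A sketch under those assumptions (4 dwords per backend per result, 0x80000000 as the ready bit):

	#include <stdint.h>
	#include <string.h>

	/* Sketch: pre-mark occlusion slots of disabled render backends as
	 * "already written" so the readback loop never stalls on them. */
	static void prepare_occlusion_buffer(uint32_t *results, unsigned buf_size,
					     unsigned result_size, unsigned max_rbs,
					     unsigned enabled_rb_mask)
	{
		unsigned num_results = buf_size / result_size;

		memset(results, 0, buf_size);

		for (unsigned j = 0; j < num_results; j++) {
			for (unsigned i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1u << i))) {
					results[i * 4 + 1] = 0x80000000; /* begin hi dword */
					results[i * 4 + 3] = 0x80000000; /* end hi dword */
				}
			}
			/* result_size includes the trailing fence words, so
			 * step by the full stride, counted in dwords. */
			results += result_size / 4;
		}
	}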
599 struct r600_query_hw *query,
603 struct r600_query_hw *query,
621 struct r600_query_hw *query)
623 query->buffer.buf = r600_new_query_buffer(sscreen, query);
624 if (!query->buffer.buf)
634 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
635 if (!query)
638 query->b.type = query_type;
639 query->b.ops = &query_hw_ops;
640 query->ops = &query_hw_default_hw_ops;
646 query->result_size = 16 * sscreen->info.num_render_backends;
647 query->result_size += 16; /* for the fence + alignment */
648 query->num_cs_dw_begin = 6;
649 query->num_cs_dw_end = 6 + si_gfx_write_fence_dwords(sscreen);
652 query->result_size = 24;
653 query->num_cs_dw_begin = 8;
654 query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(sscreen);
657 query->result_size = 16;
658 query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(sscreen);
659 query->flags = R600_QUERY_HW_FLAG_NO_START;
666 query->result_size = 32;
667 query->num_cs_dw_begin = 6;
668 query->num_cs_dw_end = 6;
669 query->stream = index;
673 query->result_size = 32 * R600_MAX_STREAMS;
674 query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
675 query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
679 query->result_size = 11 * 16;
680 query->result_size += 8; /* for the fence + alignment */
681 query->num_cs_dw_begin = 6;
682 query->num_cs_dw_end = 6 + si_gfx_write_fence_dwords(sscreen);
686 FREE(query);
690 if (!si_query_hw_init(sscreen, query)) {
691 FREE(query);
695 return (struct pipe_query *)query;
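The sizing switch above fixes, per query type, how many bytes one result occupies (result_size) and how many command-stream dwords the begin/end packets need (num_cs_dw_*). For occlusion queries the result is a begin/end pair of 8-byte counters per render backend plus room for the fence, so on a hypothetical 4-backend GPU:

	#include <stdio.h>

	int main(void)
	{
		unsigned num_render_backends = 4; /* hypothetical chip */

		/* 16 bytes (two 64-bit counters: begin, end) per backend,
		 * plus 16 bytes for the fence and alignment, as sized above. */
		unsigned result_size = 16 * num_render_backends + 16;

		printf("occlusion result_size = %u bytes\n", result_size); /* 80 */
		return 0;
	}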
748 struct r600_query_hw *query,
754 switch (query->b.type) {
767 emit_sample_streamout(cs, va, query->stream);
795 radeon_add_to_buffer_list(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
800 struct r600_query_hw *query)
804 if (!query->buffer.buf)
807 r600_update_occlusion_query_state(ctx, query->b.type, 1);
808 si_update_prims_generated_query_state((void*)ctx, query->b.type, 1);
810 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
813 /* Get a new query buffer if needed. */
814 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
816 *qbuf = query->buffer;
817 query->buffer.results_end = 0;
818 query->buffer.previous = qbuf;
819 query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
820 if (!query->buffer.buf)
824 /* emit begin query */
825 va = query->buffer.buf->gpu_address + query->buffer.results_end;
827 query->ops->emit_start(ctx, query, query->buffer.buf, va);
829 ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
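r600_query_hw_emit_start rolls over to a fresh buffer once the next result would no longer fit: the full buffer is archived on a singly linked previous chain and writing restarts at offset 0. A heap-based sketch of the same rollover, with calloc() standing in for r600_new_query_buffer():

	#include <stdlib.h>

	struct query_buffer {
		struct query_buffer *previous; /* chain of full buffers */
		unsigned results_end;          /* bytes of results written so far */
		unsigned size;
		char *buf;
	};

	/* Make room for one more result, archiving the buffer if it is full. */
	static int query_buffer_reserve(struct query_buffer *q, unsigned result_size)
	{
		if (q->results_end + result_size > q->size) {
			struct query_buffer *prev = malloc(sizeof(*prev));
			if (!prev)
				return 0;
			*prev = *q;                  /* archive the full buffer */
			q->previous = prev;
			q->results_end = 0;
			q->buf = calloc(1, q->size); /* hypothetical new allocation */
			if (!q->buf)
				return 0;
		}
		return 1;
	}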
833 struct r600_query_hw *query,
840 switch (query->b.type) {
857 emit_sample_streamout(cs, va, query->stream);
870 0, query->b.type);
874 unsigned sample_size = (query->result_size - 8) / 2;
888 radeon_add_to_buffer_list(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
894 query->buffer.buf, fence_va, 0x80000000,
895 query->b.type);
899 struct r600_query_hw *query)
903 if (!query->buffer.buf)
907 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
908 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
911 /* emit end query */
912 va = query->buffer.buf->gpu_address + query->buffer.results_end;
914 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
916 query->buffer.results_end += query->result_size;
918 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
919 ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
921 r600_update_occlusion_query_state(ctx, query->b.type, -1);
922 si_update_prims_generated_query_state((void*)ctx, query->b.type, -1);
948 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
953 if (!query)
960 if (query->workaround_buf) {
963 switch (query->b.type) {
993 if (query->workaround_buf) {
994 uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
995 emit_set_predicate(ctx, query->workaround_buf, va, op);
1002 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1009 if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
1021 results_base += query->result_size;
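For the overflow predicates, the render-condition path re-reads every stored result (stepping by result_size as above) instead of pointing the predication hardware at a single pair. One 32-byte streamout-stats slot holds begin/end pairs of (primitives written, primitive storage needed); overflow happened iff more storage was needed than primitives were actually written during the interval. A sketch of that test, with the slot layout as an assumption:

	#include <stdint.h>

	/* Assumed layout of one 32-byte streamout-stats result slot. */
	struct so_stats {
		uint64_t prims_written_begin, storage_needed_begin;
		uint64_t prims_written_end, storage_needed_end;
	};

	/* Overflow iff storage demand outgrew the primitives actually written. */
	static int so_overflow(const struct so_stats *s)
	{
		return (s->storage_needed_end - s->storage_needed_begin) !=
		       (s->prims_written_end - s->prims_written_begin);
	}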
1039 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
1042 struct r600_query *rquery = (struct r600_query *)query;
1048 struct pipe_query *query)
1051 struct r600_query *rquery = (struct r600_query *)query;
1057 struct r600_query_hw *query)
1059 struct r600_query_buffer *prev = query->buffer.previous;
1061 /* Discard the old query buffers. */
1069 query->buffer.results_end = 0;
1070 query->buffer.previous = NULL;
1073 if (si_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
1074 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
1075 r600_resource_reference(&query->buffer.buf, NULL);
1076 query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
1078 if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
1079 r600_resource_reference(&query->buffer.buf, NULL);
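si_query_hw_reset_buffers discards the chained buffers and then decides whether the newest buffer can simply be rewound: it is reused only if the GPU no longer references it, otherwise it is dropped and reallocated so a restarted query never stalls on a busy buffer. A sketch of that decision, reusing the query_buffer struct from the rollover sketch above; buffer_is_busy(), alloc_buffer(), and free_buffer() are hypothetical helpers:

	int buffer_is_busy(const char *buf);  /* hypothetical winsys check */
	char *alloc_buffer(unsigned size);    /* hypothetical reallocation */
	void free_buffer(char *buf);

	struct query_buffer {                 /* as in the rollover sketch */
		struct query_buffer *previous;
		unsigned results_end, size;
		char *buf;
	};

	/* Rewind the query: drop the chain, reuse the buffer only if idle. */
	static void reset_query_buffer(struct query_buffer *q)
	{
		q->results_end = 0;
		q->previous = NULL; /* the caller releases the archived chain */

		if (buffer_is_busy(q->buf)) {
			free_buffer(q->buf);
			q->buf = alloc_buffer(q->size);
		}
	}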
1086 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1088 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
1093 if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
1094 si_query_hw_reset_buffers(rctx, query);
1096 r600_resource_reference(&query->workaround_buf, NULL);
1098 r600_query_hw_emit_start(rctx, query);
1099 if (!query->buffer.buf)
1102 LIST_ADDTAIL(&query->list, &rctx->active_queries);
1106 static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
1109 struct r600_query *rquery = (struct r600_query *)query;
1117 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1119 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
1120 si_query_hw_reset_buffers(rctx, query);
1122 r600_query_hw_emit_stop(rctx, query);
1124 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
1125 LIST_DELINIT(&query->list);
1127 if (!query->buffer.buf)
1223 struct r600_query_hw *query,
1229 switch (query->b.type) {
1331 struct pipe_query *query, boolean wait,
1335 struct r600_query *rquery = (struct r600_query *)query;
1341 struct pipe_query *query,
1349 struct r600_query *rquery = (struct r600_query *)query;
1355 static void r600_query_hw_clear_result(struct r600_query_hw *query,
1358 util_query_clear_result(result, query->b.type);
1366 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1369 query->ops->clear_result(query, result);
1371 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1386 query->ops->add_result(sscreen, query, map + results_base,
1388 results_base += query->result_size;
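The CPU readback path walks the whole buffer chain, maps each buffer, and hands every result_size-sized slot to add_result. A sketch of that loop, again using the query_buffer chain from the earlier sketches and reducing add_result to a simple end-begin accumulation:

	#include <stdint.h>

	struct query_buffer {  /* as in the rollover sketch */
		struct query_buffer *previous;
		unsigned results_end, size;
		char *buf;
	};

	/* Walk the chain and accumulate every stored begin/end counter pair. */
	static uint64_t query_accumulate(const struct query_buffer *qbuf,
					 unsigned result_size)
	{
		uint64_t sum = 0;

		for (; qbuf; qbuf = qbuf->previous) {
			const char *map = qbuf->buf; /* stands in for buffer_map() */
			for (unsigned base = 0; base < qbuf->results_end;
			     base += result_size) {
				const uint64_t *pair = (const uint64_t *)(map + base);
				sum += pair[1] - pair[0]; /* end - begin */
			}
		}
		return sum;
	}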
1402 * One compute grid with a single thread is launched for every query result
1404 * accumulates data from the query result buffer, and writes the result either
1428 * BUFFER[0] = query result buffer
1637 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1663 if (query->buffer.previous) {
1672 r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1675 consts.result_stride = query->result_size;
1700 if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1701 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
1703 else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
1704 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
1706 else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
1707 query->b.type == PIPE_QUERY_TIME_ELAPSED)
1724 for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
1725 if (query->b.type != PIPE_QUERY_TIMESTAMP) {
1727 consts.result_count = qbuf->results_end / query->result_size;
1729 if (qbuf != &query->buffer)
1738 params.start_offset += qbuf->results_end - query->result_size;
1757 if (wait && qbuf == &query->buffer) {
1764 va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
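The compute resolve path launches one single-thread grid per chained buffer. Each grid receives the buffer's result count and stride as constants; buffers other than the newest run in "chained" mode so they start from the previous grid's partial sum, and only the newest buffer's grid optionally waits on the fence at results_end - result_size (the last two lines above). A sketch of how those per-buffer constants fall out, with a stand-in consts struct:

	struct resolve_consts {       /* stand-in for the driver's constant buffer */
		unsigned result_stride;
		unsigned result_count;
		unsigned chained;     /* accumulate onto the previous grid's sum */
	};

	static struct resolve_consts
	resolve_consts_for(unsigned results_end, unsigned result_size, int is_newest)
	{
		struct resolve_consts c;

		c.result_stride = result_size;
		c.result_count = results_end / result_size;
		c.chained = !is_newest;
		return c;
	}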
1779 struct pipe_query *query,
1784 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1787 if (query) {
1819 ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
1831 rctx->render_cond = query;
1835 rctx->set_atom_dirty(rctx, atom, query != NULL);
1840 struct r600_query_hw *query;
1842 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1843 r600_query_hw_emit_stop(ctx, query);
1851 struct r600_query_hw *query;
1854 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1856 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1860 * resumed query, which raises the bar in need_cs_space for
1863 num_dw += query->num_cs_dw_end;
1873 struct r600_query_hw *query;
1881 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1882 r600_query_hw_emit_start(ctx, query);
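Suspend/resume brackets a command-stream flush: si_suspend_queries emits a stop for every active query, and after the flush each one is restarted with emit_start. The num_dw sum computed above reserves enough command-stream space to restart all of them and still stop each resumed query later. A sketch of that accounting over a hypothetical singly linked active-query list:

	struct hw_query {
		struct hw_query *next;                 /* active-query list link */
		unsigned num_cs_dw_begin, num_cs_dw_end;
	};

	/* Dwords needed to resume every suspended query: begin + end packets,
	 * plus the end packets charged again once the queries are active,
	 * which raises the bar in need_cs_space as noted above. */
	static unsigned queries_num_dw_for_resume(const struct hw_query *list)
	{
		unsigned num_dw = 0;

		for (const struct hw_query *q = list; q; q = q->next) {
			num_dw += q->num_cs_dw_begin + q->num_cs_dw_end;
			num_dw += q->num_cs_dw_end;
		}
		return num_dw;
	}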