Lines Matching defs:query
60 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
62 rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
63 FREE(query);
88 default: unreachable("query type does not correspond to winsys id");
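
The matches at 60-88 above come from the software query's destroy path and from the winsys-id lookup whose default arm is unreachable. A minimal sketch of the destroy idiom, assuming a stand-in, non-refcounted fence type in place of the driver's pipe fence:

    #include <stdlib.h>

    /* Stand-in fence; the driver's fence is refcounted, this one is not. */
    struct fence { int dummy; };

    static void fence_release(struct fence **f)
    {
        free(*f);
        *f = NULL;                  /* mirrors fence_reference(..., NULL) */
    }

    struct sw_query { struct fence *fence; };

    static void sw_query_destroy(struct sw_query *q)
    {
        fence_release(&q->fence);   /* drop the owned reference first */
        free(q);                    /* then free the query itself */
    }
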
95 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
98 switch (query->b.type) {
103 query->begin_result = rctx->num_draw_calls;
106 query->begin_result = rctx->num_decompress_calls;
109 query->begin_result = rctx->num_mrt_draw_calls;
112 query->begin_result = rctx->num_prim_restart_calls;
115 query->begin_result = rctx->num_spill_draw_calls;
118 query->begin_result = rctx->num_compute_calls;
121 query->begin_result = rctx->num_spill_compute_calls;
124 query->begin_result = rctx->num_dma_calls;
127 query->begin_result = rctx->num_cp_dma_calls;
130 query->begin_result = rctx->num_vs_flushes;
133 query->begin_result = rctx->num_ps_flushes;
136 query->begin_result = rctx->num_cs_flushes;
139 query->begin_result = rctx->num_cb_cache_flushes;
142 query->begin_result = rctx->num_db_cache_flushes;
145 query->begin_result = rctx->num_resident_handles;
148 query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
151 query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
154 query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
167 query->begin_result = 0;
175 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
176 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
180 ws_id = winsys_id_from_type(query->b.type);
181 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
182 query->begin_time = rctx->ws->query_value(rctx->ws,
186 ws_id = winsys_id_from_type(query->b.type);
187 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
188 query->begin_time = os_time_get_nano();
191 query->begin_result =
193 query->begin_time = os_time_get_nano();
216 query->begin_result = r600_begin_counter(rctx->screen,
217 query->b.type);
220 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
223 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
226 query->begin_result =
236 unreachable("r600_query_sw_begin: bad query type");
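
The begin path at 95-236 is one large switch that snapshots whichever driver counter the query type names. A minimal sketch of the pattern, with a toy context and query enum standing in for r600_common_context and the R600_QUERY_* types:

    #include <stdbool.h>
    #include <stdint.h>

    struct ctx { uint64_t num_draw_calls, num_dma_calls; };   /* stand-ins */
    enum sw_type { SW_DRAW_CALLS, SW_DMA_CALLS };

    struct sw_query {
        enum sw_type type;
        uint64_t begin_result, end_result;
    };

    /* Begin snapshots the running counter; end (the twin switch at 245-388)
     * takes a second snapshot, and the result is the difference. */
    static bool sw_query_begin(const struct ctx *c, struct sw_query *q)
    {
        switch (q->type) {
        case SW_DRAW_CALLS:
            q->begin_result = c->num_draw_calls;
            break;
        case SW_DMA_CALLS:
            q->begin_result = c->num_dma_calls;
            break;
        default:
            return false;   /* the driver uses unreachable() here */
        }
        return true;
    }
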
245 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
248 switch (query->b.type) {
252 rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
255 query->end_result = rctx->num_draw_calls;
258 query->end_result = rctx->num_decompress_calls;
261 query->end_result = rctx->num_mrt_draw_calls;
264 query->end_result = rctx->num_prim_restart_calls;
267 query->end_result = rctx->num_spill_draw_calls;
270 query->end_result = rctx->num_compute_calls;
273 query->end_result = rctx->num_spill_compute_calls;
276 query->end_result = rctx->num_dma_calls;
279 query->end_result = rctx->num_cp_dma_calls;
282 query->end_result = rctx->num_vs_flushes;
285 query->end_result = rctx->num_ps_flushes;
288 query->end_result = rctx->num_cs_flushes;
291 query->end_result = rctx->num_cb_cache_flushes;
294 query->end_result = rctx->num_db_cache_flushes;
297 query->end_result = rctx->num_resident_handles;
300 query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
303 query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
306 query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
325 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
326 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
330 ws_id = winsys_id_from_type(query->b.type);
331 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
332 query->end_time = rctx->ws->query_value(rctx->ws,
336 ws_id = winsys_id_from_type(query->b.type);
337 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
338 query->end_time = os_time_get_nano();
341 query->end_result =
343 query->end_time = os_time_get_nano();
366 query->end_result = r600_end_counter(rctx->screen,
367 query->b.type,
368 query->begin_result);
369 query->begin_result = 0;
372 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
375 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
378 query->end_result =
388 unreachable("r600_query_sw_end: bad query type");
399 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
401 switch (query->b.type) {
412 result->b = screen->fence_finish(screen, ctx, query->fence,
418 result->u64 = (query->end_result - query->begin_result) /
419 (query->end_time - query->begin_time);
423 result->u64 = (query->end_result - query->begin_result) * 100 /
424 (query->end_time - query->begin_time);
443 result->u64 = query->end_result - query->begin_result;
445 switch (query->b.type) {
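
Result computation at 412-443 turns the paired snapshots into a plain difference, a rate (matches at 418-419), or a percentage (423-424). A small worked sketch, assuming nanosecond timestamps as delivered by os_time_get_nano():

    #include <stdint.h>

    /* e.g. bytes moved per nanosecond between two sample points */
    static uint64_t rate(uint64_t v0, uint64_t v1, uint64_t t0, uint64_t t1)
    {
        if (t1 == t0)
            return 0;   /* guard; the snapshots are taken at distinct times */
        return (v1 - v0) / (t1 - t0);
    }

    /* e.g. a GPU-load style percentage, as in the *100 form at 423 */
    static uint64_t load_percent(uint64_t busy0, uint64_t busy1,
                                 uint64_t t0, uint64_t t1)
    {
        if (t1 == t0)
            return 0;
        return (busy1 - busy0) * 100 / (t1 - t0);
    }
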
470 struct r600_query_sw *query;
472 query = CALLOC_STRUCT(r600_query_sw);
473 if (!query)
476 query->b.type = query_type;
477 query->b.ops = &sw_query_ops;
479 return (struct pipe_query *)query;
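
Creation at 470-479 zero-allocates the query and installs an ops table, which is how the shared pipe_query entry points later dispatch to software or hardware implementations (see ops->prepare_buffer, ops->emit_start, ops->emit_stop, ops->clear_result, and ops->add_result among the matches below). A sketch of the vtable pattern, with illustrative names:

    #include <stdbool.h>
    #include <stdlib.h>

    struct query;

    struct query_ops {
        void (*destroy)(struct query *q);
        bool (*begin)(struct query *q);
        bool (*end)(struct query *q);
    };

    struct query {
        const struct query_ops *ops;   /* per-kind function table */
        unsigned type;
    };

    static struct query *query_create(const struct query_ops *ops,
                                      unsigned type)
    {
        struct query *q = calloc(1, sizeof(*q));   /* CALLOC_STRUCT analogue */
        if (!q)
            return NULL;
        q->type = type;
        q->ops = ops;
        return q;
    }
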
485 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
486 struct r600_query_buffer *prev = query->buffer.previous;
488 /* Release all query buffers. */
496 r600_resource_reference(&query->buffer.buf, NULL);
501 struct r600_query_hw *query)
503 unsigned buf_size = MAX2(query->result_size,
516 query->ops->prepare_buffer(rscreen, query, buf)) {
525 struct r600_query_hw *query,
537 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
538 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
545 num_results = buffer->b.b.width0 / query->result_size;
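
Buffer sizing at 503 clamps to at least one full result (the second MAX2 operand is cut off by the match filter), and 545 derives how many result slots a buffer holds. A tiny sketch; the 4096 floor is purely illustrative, not the driver's value:

    #define MAX2(a, b) ((a) > (b) ? (a) : (b))

    static unsigned query_buffer_size(unsigned result_size)
    {
        return MAX2(result_size, 4096);      /* assumed floor */
    }

    static unsigned query_slots(unsigned buffer_bytes, unsigned result_size)
    {
        return buffer_bytes / result_size;   /* trailing bytes stay unused */
    }
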
577 struct r600_query_hw *query,
581 struct r600_query_hw *query,
599 struct r600_query_hw *query)
601 query->buffer.buf = r600_new_query_buffer(rscreen, query);
602 if (!query->buffer.buf)
612 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
613 if (!query)
616 query->b.type = query_type;
617 query->b.ops = &query_hw_ops;
618 query->ops = &query_hw_default_hw_ops;
623 query->result_size = 16 * rscreen->info.num_render_backends;
624 query->result_size += 16; /* for the fence + alignment */
625 query->num_cs_dw_begin = 6;
626 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
629 query->result_size = 24;
630 query->num_cs_dw_begin = 8;
631 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
634 query->result_size = 16;
635 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
636 query->flags = R600_QUERY_HW_FLAG_NO_START;
643 query->result_size = 32;
644 query->num_cs_dw_begin = 6;
645 query->num_cs_dw_end = 6;
646 query->stream = index;
650 query->result_size = 32 * R600_MAX_STREAMS;
651 query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
652 query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
656 query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
657 query->result_size += 8; /* for the fence + alignment */
658 query->num_cs_dw_begin = 6;
659 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
663 FREE(query);
667 if (!r600_query_hw_init(rscreen, query)) {
668 FREE(query);
672 return (struct pipe_query *)query;
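
Hardware query creation at 612-672 sizes each type up front: the bytes one begin/end snapshot pair occupies in the result buffer, plus the command-stream dwords the begin/end packets will need. A sketch of the sizing switch, with numbers copied from the listing and a stand-in enum (the NO_START case at 634-636 is an end-only type such as a timestamp); a zero-initialized struct is assumed:

    #include <stdbool.h>

    enum hw_type { HW_OCCLUSION, HW_TIMESTAMP, HW_SO_STATS_ONE_STREAM };

    struct hw_query {
        unsigned result_size;      /* bytes per begin/end snapshot pair */
        unsigned num_cs_dw_begin;  /* dwords the begin packet emits */
        unsigned num_cs_dw_end;    /* dwords the end packet emits */
        bool no_start;             /* query samples only at end time */
    };

    static void size_query(struct hw_query *q, enum hw_type t,
                           unsigned num_render_backends,
                           unsigned fence_dwords)
    {
        switch (t) {
        case HW_OCCLUSION:
            /* a 16-byte counter pair per render backend, plus 16 bytes
             * for the fence and alignment (matches at 623-624) */
            q->result_size = 16 * num_render_backends + 16;
            q->num_cs_dw_begin = 6;
            q->num_cs_dw_end = 6 + fence_dwords;
            break;
        case HW_TIMESTAMP:
            q->result_size = 16;
            q->num_cs_dw_end = 8 + fence_dwords;
            q->no_start = true;    /* nothing is emitted at begin */
            break;
        case HW_SO_STATS_ONE_STREAM:
            q->result_size = 32;   /* matches at 643-645 */
            q->num_cs_dw_begin = 6;
            q->num_cs_dw_end = 6;
            break;
        }
    }
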
724 struct r600_query_hw *query,
730 switch (query->b.type) {
742 emit_sample_streamout(cs, va, query->stream);
754 NULL, va, 0, query->b.type);
765 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
770 struct r600_query_hw *query)
774 if (!query->buffer.buf)
777 r600_update_occlusion_query_state(ctx, query->b.type, 1);
778 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
780 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
783 /* Get a new query buffer if needed. */
784 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
786 *qbuf = query->buffer;
787 query->buffer.results_end = 0;
788 query->buffer.previous = qbuf;
789 query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
790 if (!query->buffer.buf)
794 /* emit begin query */
795 va = query->buffer.buf->gpu_address + query->buffer.results_end;
797 query->ops->emit_start(ctx, query, query->buffer.buf, va);
799 ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
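
The begin path at 770-799 reserves one result slot per begin; when the current buffer is full (784), it is pushed onto a singly linked chain of previous buffers and a fresh buffer takes its place, so readback later walks the whole chain. A self-contained sketch of that chaining, with plain malloc standing in for the GPU resource allocation:

    #include <stdbool.h>
    #include <stdlib.h>

    struct query_buffer {
        void *buf;                      /* a GPU resource in the driver */
        unsigned size;
        unsigned results_end;           /* bytes already reserved */
        struct query_buffer *previous;  /* older, filled buffers */
    };

    static bool reserve_result(struct query_buffer *cur,
                               unsigned result_size, unsigned new_size)
    {
        if (cur->results_end + result_size > cur->size) {
            /* current buffer is full: move it onto the chain */
            struct query_buffer *old = malloc(sizeof(*old));
            if (!old)
                return false;
            *old = *cur;                /* keeps old->previous intact */
            cur->previous = old;
            cur->results_end = 0;
            cur->size = new_size;
            cur->buf = malloc(new_size);
            if (!cur->buf)
                return false;
        }
        /* the begin packet is emitted at buf + results_end (match at 795) */
        return true;
    }
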
803 struct r600_query_hw *query,
810 switch (query->b.type) {
826 emit_sample_streamout(cs, va, query->stream);
839 0, query->b.type);
843 unsigned sample_size = (query->result_size - 8) / 2;
857 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
863 query->buffer.buf, fence_va, 0x80000000,
864 query->b.type);
868 struct r600_query_hw *query)
872 if (!query->buffer.buf)
876 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
877 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
880 /* emit end query */
881 va = query->buffer.buf->gpu_address + query->buffer.results_end;
883 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
885 query->buffer.results_end += query->result_size;
887 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
888 ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
890 r600_update_occlusion_query_state(ctx, query->b.type, -1);
891 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
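
The end path at 868-891 advances results_end past the slot just written and undoes the suspend-space accounting, except for end-only queries, which never added to it at begin time. A small sketch of that bookkeeping with stand-in parameters:

    #include <stdbool.h>

    static void account_query_stop(unsigned *results_end,
                                   unsigned result_size, bool no_start,
                                   unsigned *cs_dw_suspend,
                                   unsigned num_cs_dw_end)
    {
        /* the end packet lands at buffer + *results_end (match at 881) */
        *results_end += result_size;    /* the slot is now consumed */
        if (!no_start)                  /* matches at 887-888 */
            *cs_dw_suspend -= num_cs_dw_end;
    }
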
910 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
915 if (!query)
922 switch (query->b.type) {
946 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
953 if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
965 results_base += query->result_size;
983 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
986 struct r600_query *rquery = (struct r600_query *)query;
992 struct pipe_query *query)
995 struct r600_query *rquery = (struct r600_query *)query;
1001 struct r600_query_hw *query)
1003 struct r600_query_buffer *prev = query->buffer.previous;
1005 /* Discard the old query buffers. */
1013 query->buffer.results_end = 0;
1014 query->buffer.previous = NULL;
1017 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
1018 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
1019 r600_resource_reference(&query->buffer.buf, NULL);
1020 query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
1022 if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
1023 r600_resource_reference(&query->buffer.buf, NULL);
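
The reset at 1001-1023 discards the chained buffers and then decides whether the current buffer can simply be rewound: if the GPU still references it, or a zero-timeout wait reports it busy, the reference is dropped and a fresh buffer is allocated rather than stalling the CPU. A sketch of that reuse-or-replace choice, with a boolean standing in for the r600_rings_is_buffer_referenced()/buffer_wait(..., 0, ...) pair:

    #include <stdbool.h>
    #include <stdlib.h>

    static void *reset_query_buffer(void *buf, size_t size, bool gpu_busy)
    {
        if (!gpu_busy)
            return buf;      /* idle: safe to rewind and overwrite */
        /* the driver drops its reference here, letting the old buffer
         * die once the GPU finishes; free() is only a stand-in */
        free(buf);
        return malloc(size);
    }
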
1030 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1032 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
1037 if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
1038 r600_query_hw_reset_buffers(rctx, query);
1040 r600_query_hw_emit_start(rctx, query);
1041 if (!query->buffer.buf)
1044 LIST_ADDTAIL(&query->list, &rctx->active_queries);
1048 static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
1051 struct r600_query *rquery = (struct r600_query *)query;
1059 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1061 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
1062 r600_query_hw_reset_buffers(rctx, query);
1064 r600_query_hw_emit_stop(rctx, query);
1066 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
1067 LIST_DELINIT(&query->list);
1069 if (!query->buffer.buf)
1164 struct r600_query_hw *query,
1170 switch (query->b.type) {
1290 struct pipe_query *query, boolean wait,
1294 struct r600_query *rquery = (struct r600_query *)query;
1300 struct pipe_query *query,
1308 struct r600_query *rquery = (struct r600_query *)query;
1314 static void r600_query_hw_clear_result(struct r600_query_hw *query,
1317 util_query_clear_result(result, query->b.type);
1325 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1328 query->ops->clear_result(query, result);
1330 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1345 query->ops->add_result(rscreen, query, map + results_base,
1347 results_base += query->result_size;
1361 * One compute grid with a single thread is launched for every query result
1363 * accumulates data from the query result buffer, and writes the result either
1387 * BUFFER[0] = query result buffer
1596 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1622 if (query->buffer.previous) {
1631 r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1634 consts.result_stride = query->result_size;
1659 if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE)
1661 else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
1662 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
1664 else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
1665 query->b.type == PIPE_QUERY_TIME_ELAPSED)
1682 for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
1683 if (query->b.type != PIPE_QUERY_TIMESTAMP) {
1685 consts.result_count = qbuf->results_end / query->result_size;
1687 if (qbuf != &query->buffer)
1696 params.start_offset += qbuf->results_end - query->result_size;
1714 if (wait && qbuf == &query->buffer) {
1721 va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
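
The shader-based resolve at 1596-1721 walks the buffer chain from newest to oldest, telling the compute pass how many packed results each buffer holds (1685) and waiting only on the newest buffer (1714). A sketch of the chain walk, with a stand-in node type mirroring the chaining sketch earlier:

    struct qbuf {
        unsigned results_end;    /* bytes of valid results */
        struct qbuf *previous;   /* older buffers, or NULL */
    };

    static unsigned total_results(const struct qbuf *newest,
                                  unsigned result_size)
    {
        unsigned total = 0;
        for (const struct qbuf *b = newest; b; b = b->previous)
            total += b->results_end / result_size;   /* as at 1685 */
        return total;
    }
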
1736 struct pipe_query *query,
1741 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1747 if (query) {
1755 rctx->render_cond = query;
1759 rctx->set_atom_dirty(rctx, atom, query != NULL);
1764 struct r600_query_hw *query;
1766 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1767 r600_query_hw_emit_stop(ctx, query);
1775 struct r600_query_hw *query;
1778 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1780 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1784 * resumed query, which raises the bar in need_cs_space for
1787 num_dw += query->num_cs_dw_end;
1789 /* primitives generated query */
1799 struct r600_query_hw *query;
1807 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1808 r600_query_hw_emit_start(ctx, query);
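
Suspend and resume at 1764-1808 bracket a command-stream flush: every active query is stopped so its partial result reaches its buffer, then each is re-begun in the new stream, and the dword estimate at 1778-1787 ensures the resume packets fit. A sketch of the idea over a stand-in intrusive list:

    struct active_query {
        struct active_query *next;
        void (*emit_stop)(struct active_query *q);
        void (*emit_start)(struct active_query *q);
    };

    static void suspend_queries(struct active_query *head)
    {
        for (struct active_query *q = head; q; q = q->next)
            q->emit_stop(q);    /* partial results land in the buffers */
    }

    static void resume_queries(struct active_query *head)
    {
        for (struct active_query *q = head; q; q = q->next)
            q->emit_start(q);   /* results are summed at readback */
    }
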
1827 * there's 8, trying to read results from query buffers never
1838 * if backend_map query is supported by the kernel.