/external/mesa3d/src/gallium/drivers/freedreno/a5xx/
fd5_gmem.c
  211  patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
  214  for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
  215  struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
  218  util_dynarray_resize(&batch->draw_patches, 0);
  223  fd5_emit_tile_init(struct fd_batch *batch)
  225  struct fd_ringbuffer *ring = batch->gmem;
  227  fd5_emit_restore(batch, ring);
  242  fd_wfi(batch, ring);
  250  fd5_set_render_mode(batch->ctx, ring, GMEM);
  255  fd5_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile
  [all...]
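The patch_draws lines above show freedreno's deferred-patching trick: draw packets are emitted before the binning pass has decided the visibility-cull mode, each packet's location is recorded in batch->draw_patches, and patch_draws later rewrites those dwords in place. A minimal self-contained sketch of that pattern (C++, hypothetical names, not the freedreno code):

    #include <cstdint>
    #include <vector>

    // Record offsets into the command stream whose dwords still need the
    // final vis-cull-mode bits OR'd in (the role of batch->draw_patches).
    static void emit_draw(std::vector<uint32_t> &ring,
                          std::vector<size_t> &patches, uint32_t packet) {
        patches.push_back(ring.size());
        ring.push_back(packet);  // mode bits left zero for now
    }

    // Once binning decides the real mode, rewrite every recorded dword
    // and reset the patch list (util_dynarray_resize(..., 0) above).
    static void patch_draws(std::vector<uint32_t> &ring,
                            std::vector<size_t> &patches, uint32_t vismode) {
        for (size_t off : patches)
            ring[off] |= vismode;
        patches.clear();
    }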
fd5_draw.h
  40  fd5_draw(struct fd_batch *batch, struct fd_ringbuffer *ring,
  62  &batch->draw_patches);
  76  fd_reset_wfi(batch);
  80  fd5_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
  91  struct pipe_index_buffer *idx = &batch->ctx->indexbuf;
  108  fd5_draw(batch, ring, primtype, vismode, src_sel,
/external/mesa3d/src/intel/vulkan/ |
genX_state.c
  42  struct anv_batch batch;
  45  batch.start = batch.next = cmds;
  46  batch.end = (void *) cmds + sizeof(cmds);
  48  anv_batch_emit(&batch, GENX(PIPELINE_SELECT), ps) {
  55  anv_batch_emit(&batch, GENX(3DSTATE_VF_STATISTICS), vfs)
  58  anv_batch_emit(&batch, GENX(3DSTATE_AA_LINE_PARAMETERS), aa);
  60  anv_batch_emit(&batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
  70  anv_batch_emit(&batch, GENX(3DSTATE_WM_CHROMAKEY), ck);
  75  anv_batch_emit(&batch, GENX(3DSTATE_SAMPLE_PATTERN), sp)
  [all...]
genX_gpu_memcpy.c
  92  dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_VERTEX_BUFFERS));
  93  GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, dw + 1,
  108  dw = anv_batch_emitn(&cmd_buffer->batch, 3, GENX(3DSTATE_VERTEX_ELEMENTS));
  109  GENX(VERTEX_ELEMENT_STATE_pack)(&cmd_buffer->batch, dw + 1,
  122  anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_SGVS), sgvs);
  126  anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VS), vs);
  127  anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HS), hs);
  128  anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_TE), te);
  129  anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DS), DS);
  130  anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_GS), gs)
  [all...]
anv_batch_chain.c
  40  * the actual batch buffers as well as link them together and handle
  196  anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
  198  if (batch->next + num_dwords * 4 > batch->end)
  199  batch->extend_cb(batch, batch->user_data);
  201  void *p = batch->next;
  203  batch->next += num_dwords * 4;
  204  assert(batch->next <= batch->end);
  472  struct anv_batch *batch = &cmd_buffer->batch;
  1230  struct anv_batch *batch = &cmd_buffer->batch;
  [all...]
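Lines 196-204 above are essentially the whole allocation fast path: reserve space for N dwords, and call an extend callback to grow or chain a fresh buffer only when the reservation would overflow. A minimal sketch of that pattern (hypothetical struct, not the anv API):

    #include <cassert>
    #include <cstdint>

    struct Batch {
        uint8_t *start, *next, *end;
        void (*extend_cb)(Batch *b, void *user_data);  // grows/chains the buffer
        void *user_data;
    };

    // Reserve room for num_dwords 32-bit words and return where to write them.
    static void *batch_emit_dwords(Batch *b, int num_dwords) {
        if (b->next + num_dwords * 4 > b->end)
            b->extend_cb(b, b->user_data);
        void *p = b->next;
        b->next += num_dwords * 4;
        assert(b->next <= b->end);
        return p;
    }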
/external/autotest/client/site_tests/security_ptraceRestrictions/src/ |
ptrace-restrictions.sh
  39  OUT=$(gdb -ex run -ex quit --batch ./sleeper </dev/null 2>&1)
  50  OUT=$(gdb -ex "attach $pid" -ex "quit" --batch </dev/null 2>&1)
  67  OUT=$(gdb -ex "attach 1" -ex "quit" --batch </dev/null 2>&1)
  89  OUT=$(gdb -ex "attach $pid" -ex "quit" --batch </dev/null 2>&1)
  101  OUT=$(gdb -ex "attach $pid" -ex "quit" --batch </dev/null 2>&1)
  113  OUT=$(gdb -ex "attach $pid" -ex "quit" --batch </dev/null 2>&1)
  125  OUT=$(gdb -ex "attach $pid" -ex "quit" --batch </dev/null 2>&1)
/external/tensorflow/tensorflow/core/kernels/batching_util/ |
basic_batch_scheduler_test.cc
  60  auto callback = [&callback_called](std::unique_ptr<Batch<FakeTask>> batch) {
  62  ASSERT_TRUE(batch->IsClosed());
  63  ASSERT_EQ(2, batch->num_tasks());
  64  EXPECT_EQ(3, batch->task(0).size());
  65  EXPECT_EQ(5, batch->task(1).size());
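The callback asserts that the scheduler hands back a closed batch whose per-task sizes match what was enqueued (3 and 5). A stripped-down sketch of the Batch bookkeeping being exercised (illustrative C++, not the TensorFlow class):

    #include <cassert>
    #include <vector>

    struct FakeTask { int size; };

    class Batch {
     public:
      void AddTask(FakeTask t) { assert(!closed_); tasks_.push_back(t); }
      void Close() { closed_ = true; }  // no more tasks after this
      bool IsClosed() const { return closed_; }
      int num_tasks() const { return static_cast<int>(tasks_.size()); }
      const FakeTask &task(int i) const { return tasks_[i]; }
     private:
      std::vector<FakeTask> tasks_;
      bool closed_ = false;
    };

    int main() {
      Batch b;
      b.AddTask({3});
      b.AddTask({5});
      b.Close();
      assert(b.IsClosed() && b.num_tasks() == 2);
      assert(b.task(0).size == 3 && b.task(1).size == 5);
    }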
adaptive_shared_batch_scheduler_test.cc
  72  [&queue_0_tasks](std::unique_ptr<Batch<FakeTask>> batch) {
  73  ASSERT_TRUE(batch->IsClosed());
  74  EXPECT_GT(batch->num_tasks(), 0);
  75  for (int i = 0; i < batch->num_tasks(); i++) {
  76  queue_0_tasks += batch->task(i).size();
  81  [&queue_1_tasks](std::unique_ptr<Batch<FakeTask>> batch) {
  82  ASSERT_TRUE(batch->IsClosed());
  83  EXPECT_GT(batch->num_tasks(), 0);
  [all...]
/external/tensorflow/tensorflow/core/kernels/ |
eigen_softmax_test.cc
  30  const int batch = 32;
  33  Tensor<float, 2> input(depth, batch);
  36  Tensor<float, 2> reference(depth, batch);
  46  dims2d[1] = batch;
  59  for (int j = 0; j < batch; ++j) {
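The test fills a (depth, batch) tensor and compares Eigen's result against a per-column reference. That reference computation, written out in plain C++ (column-major layout assumed from the test; not Eigen code):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // input is depth x batch; element (d, j) lives at j * depth + d.
    std::vector<float> softmax_per_column(const std::vector<float> &in,
                                          int depth, int batch) {
        std::vector<float> out(in.size());
        for (int j = 0; j < batch; ++j) {
            const float *col = &in[j * depth];
            float mx = *std::max_element(col, col + depth);  // for stability
            float sum = 0.0f;
            for (int d = 0; d < depth; ++d) sum += std::exp(col[d] - mx);
            for (int d = 0; d < depth; ++d)
                out[j * depth + d] = std::exp(col[d] - mx) / sum;
        }
        return out;
    }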
matrix_inverse_op.cc
  167  // For small matrices or very large batch sizes, we use the batched
  176  for (int batch = 0; batch < batch_size; ++batch) {
  177  input_copy_ptr_array_base[batch] = &input_copy_reshaped(batch, 0, 0);
  178  output_ptr_array_base[batch] = &output_reshaped(batch, 0, 0);
  193  // For larger matrices and large batch size, we use the batched
  213  // For large matrices, we compute the inverse of each matrix in the batch
  [all...]
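The loops above fill per-matrix pointer tables, because the batched cuBLAS entry points take an array of device pointers, one per matrix, into a single flat allocation. The pointer-table construction on its own (plain C++; buffer layout assumed, not the TensorFlow code):

    #include <cstddef>
    #include <vector>

    // batch_size square matrices of n*n elements stored back to back.
    std::vector<float *> make_batch_ptrs(float *base, int batch_size, int n) {
        std::vector<float *> ptrs(batch_size);
        for (int batch = 0; batch < batch_size; ++batch)
            ptrs[batch] = base + static_cast<std::ptrdiff_t>(batch) * n * n;
        return ptrs;  // analogous to &input_copy_reshaped(batch, 0, 0)
    }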
matrix_solve_op.cc
  218  // For small matrices or large batch sizes, we use the batched
  222  for (int batch = 0; batch < batch_size; ++batch) {
  223  input_copy_ptrs_base[batch] = &input_copy_reshaped(batch, 0, 0);
  233  // For small batch sizes we use the non-batched interface from cuSolver,
  236  for (int batch = 0; batch < batch_size; ++batch) {
  [all...]
eigen_attention_test.cc
  30  const ptrdiff_t batch = 10;
  36  Tensor<float, 4> input(depth, rows, cols, batch);
  40  offsets.resize(batch);
  41  for (int i = 0; i < batch; ++i) {
  46  Tensor<float, 4> result(depth, glimpse_rows, glimpse_cols, batch);
  49  for (int b = 0; b < batch; ++b) {
  66  const ptrdiff_t batch = 10;
  72  Tensor<float, 4> input(depth, rows, cols, batch);
  76  offsets.resize(batch);
  77  for (int i = 0; i < batch; ++i)
  [all...]
qr_op_impl.h
  205  for (int batch = 0; batch < batch_size; ++batch) {
  208  solver->Geqrf(m, n, &input_transposed_reshaped(batch, 0, 0), m,
  209  &tau_matrix(batch, 0),
  210  dev_info.back().mutable_data() + batch),
  224  for (int batch = 0; batch < batch_size; ++batch) {
  229  &input_transposed_reshaped(batch, 0, 0), m, &beta
  [all...]
bias_op_gpu.h
  32  T* output, int32 batch, int32 height, int32 width,
  39  T* bias_backprop, int32 batch, int32 height, int32 width,
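These are the launch declarations for bias add and its gradient; the gradient reduces over every batch/height/width position per channel. A CPU reference of both, assuming NHWC layout (illustrative, not the CUDA kernels):

    #include <algorithm>
    #include <cstdint>

    // output[b,h,w,c] = input[b,h,w,c] + bias[c]
    void bias_add_nhwc(const float *input, const float *bias, float *output,
                       int32_t batch, int32_t height, int32_t width, int32_t channel) {
        int64_t n = int64_t{batch} * height * width * channel;
        for (int64_t i = 0; i < n; ++i)
            output[i] = input[i] + bias[i % channel];
    }

    // bias_backprop[c] = sum over b,h,w of output_backprop[b,h,w,c]
    void bias_grad_nhwc(const float *output_backprop, float *bias_backprop,
                        int32_t batch, int32_t height, int32_t width, int32_t channel) {
        std::fill(bias_backprop, bias_backprop + channel, 0.0f);
        int64_t n = int64_t{batch} * height * width * channel;
        for (int64_t i = 0; i < n; ++i)
            bias_backprop[i % channel] += output_backprop[i];
    }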
/external/mesa3d/src/gallium/drivers/freedreno/ |
freedreno_batch_cache.h
  59  #define foreach_batch(batch, cache, mask) \
  60  for (uint32_t _m = (mask); _m && ((batch) = (cache)->batches[u_bit_scan(&_m)]); _m &= (mask))
  68  void fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy);
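foreach_batch iterates only the occupied cache slots: u_bit_scan pops the lowest set bit of the mask each trip through the loop. The same scan in self-contained C++ (__builtin_ctz is the GCC/Clang intrinsic; names hypothetical):

    #include <cstdint>
    #include <cstdio>

    // Visit each set bit of mask, lowest first, clearing as we go --
    // the scan foreach_batch performs over cache->batches[].
    static void foreach_set_bit(uint32_t mask, void (*visit)(int slot)) {
        while (mask) {
            int slot = __builtin_ctz(mask);  // index of lowest set bit
            mask &= mask - 1;                // clear that bit
            visit(slot);
        }
    }

    int main() {
        foreach_set_bit(0x29, [](int s) { std::printf("slot %d\n", s); });
    }

(0x29 is binary 101001, so this visits slots 0, 3, and 5.)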
freedreno_gmem.h
  64  void fd_gmem_render_tiles(struct fd_batch *batch);
  65  void fd_gmem_render_noop(struct fd_batch *batch);
  67  bool fd_gmem_needs_restore(struct fd_batch *batch, struct fd_tile *tile,
freedreno_draw.h
  45  fd_draw(struct fd_batch *batch, struct fd_ringbuffer *ring,
  62  if (is_a3xx_p0(batch->ctx->screen)) {
  84  &batch->draw_patches);
  96  fd_reset_wfi(batch);
  115  fd_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
  126  struct pipe_index_buffer *idx = &batch->ctx->indexbuf;
  143  fd_draw(batch, ring, primtype, vismode, src_sel,
/external/mesa3d/src/intel/blorp/ |
blorp.h
  64  void (*exec)(struct blorp_batch *batch, const struct blorp_params *params);
  87  void blorp_batch_init(struct blorp_context *blorp, struct blorp_batch *batch,
  89  void blorp_batch_finish(struct blorp_batch *batch);
  111  blorp_blit(struct blorp_batch *batch,
  125  blorp_copy(struct blorp_batch *batch,
  135  blorp_fast_clear(struct blorp_batch *batch,
  141  blorp_clear(struct blorp_batch *batch,
  150  blorp_clear_depth_stencil(struct blorp_batch *batch,
  165  blorp_gen8_hiz_clear_attachments(struct blorp_batch *batch,
  172  blorp_clear_attachments(struct blorp_batch *batch,
  [all...]
/external/tensorflow/tensorflow/contrib/boosted_trees/python/ops/ |
batch_ops_utils.py
  39  """Returns the function that executes the operation on the batch."""
  52  # one batch. So we use (device, operation) as the key.
  79  def _scheduled_stamp_resource_op_runner(batch, stamp):
  80  """Runs a batch operation on a stamped resource."""
  81  if not batch:
  83  arg_keys = set(batch[0].args.keys())
  87  for op in batch:
  98  return batch[0].op(resource_handles, stamp, **grouped_args)
  102  """Given a dictionary of ops for each handler, runs them in batch."""
  111  for batch in batched_ops.values()
  [all...]
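The comment at line 52 is the key idea: ops that can run together are bucketed by a (device, operation) key, and each bucket then executes as one batched call. The grouping step, sketched in C++ to match the other examples here (hypothetical types, not the TensorFlow code):

    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct ScheduledOp {
        std::string device;   // where the resource lives
        std::string op_name;  // which stamped-resource op to run
        int resource_handle;
    };

    using OpKey = std::pair<std::string, std::string>;

    // Bucket ops so each (device, op) pair becomes a single batched call.
    std::map<OpKey, std::vector<ScheduledOp>>
    group_by_device_and_op(const std::vector<ScheduledOp> &ops) {
        std::map<OpKey, std::vector<ScheduledOp>> groups;
        for (const ScheduledOp &op : ops)
            groups[{op.device, op.op_name}].push_back(op);
        return groups;
    }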
/system/libhidl/transport/allocator/1.0/default/ |
AshmemAllocator.cpp
  68  hidl_vec<hidl_memory> batch;
  69  batch.resize(count);
  73  batch[allocated] = allocateOne(size);
  75  if (batch[allocated].handle() == nullptr) {
  81  // batch[i].handle() != nullptr for i in [0, allocated - 1].
  82  // batch[i].handle() == nullptr for i in [allocated, count - 1].
  87  _hidl_cb(true /* success */, batch);
  91  cleanup(std::move(batch[i]));
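The invariant in the comments at lines 81-82 drives the cleanup: on the first failed allocation the loop stops, everything before `allocated` is valid and gets freed, and the batch reports failure as a whole. The same all-or-nothing shape in standard C++ (malloc stands in for ashmem; not the HIDL code):

    #include <cstdlib>
    #include <vector>

    // Returns count buffers of `size` bytes, or an empty vector if any
    // allocation fails -- partial results are rolled back, never leaked.
    std::vector<void *> batch_allocate(std::size_t size, std::size_t count) {
        std::vector<void *> batch(count, nullptr);
        std::size_t allocated = 0;
        for (; allocated < count; ++allocated) {
            batch[allocated] = std::malloc(size);
            if (batch[allocated] == nullptr)
                break;  // batch[i] != nullptr only for i < allocated
        }
        if (allocated < count) {
            for (std::size_t i = 0; i < allocated; ++i)
                std::free(batch[i]);
            return {};
        }
        return batch;
    }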
/external/mesa3d/src/gallium/drivers/freedreno/a4xx/ |
fd4_gmem.c
  135  use_hw_binning(struct fd_batch *batch)
  137  struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
  138  struct pipe_framebuffer_state *pfb = &batch->framebuffer;
  150  emit_gmem2mem_surf(struct fd_batch *batch, bool stencil,
  153  struct fd_ringbuffer *ring = batch->gmem;
  183  fd4_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
  188  fd4_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
  190  struct fd_context *ctx = batch->ctx;
  192  struct fd_ringbuffer *ring = batch->gmem;
  193  struct pipe_framebuffer_state *pfb = &batch->framebuffer;
  [all...]
/external/mesa3d/src/gallium/drivers/freedreno/a2xx/ |
fd2_gmem.c
  59  emit_gmem2mem_surf(struct fd_batch *batch, uint32_t base,
  62  struct fd_ringbuffer *ring = batch->gmem;
  93  fd_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
  98  fd2_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
  100  struct fd_context *ctx = batch->ctx;
  102  struct fd_ringbuffer *ring = batch->gmem;
  103  struct pipe_framebuffer_state *pfb = &batch->framebuffer;
  163  if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL))
  164  emit_gmem2mem_surf(batch, tile->bin_w * tile->bin_h, pfb->zsbuf);
  166  if (batch->resolve & FD_BUFFER_COLOR)
  [all...]
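All of these gmem files implement the same resolve step: after a tile is rendered into on-chip GMEM, each buffer flagged in batch->resolve is blitted back to system memory with a RECTLIST draw. Schematically (hypothetical structures, not the driver code):

    #include <cstdint>
    #include <cstdio>

    enum : uint32_t {
        FD_BUFFER_COLOR   = 1,
        FD_BUFFER_DEPTH   = 2,
        FD_BUFFER_STENCIL = 4,
    };

    struct Tile { int x, y, bin_w, bin_h; };

    // Stand-in for the RECTLIST draw that copies one surface out of GMEM.
    static void emit_gmem2mem_surf(const Tile &t, const char *surf) {
        std::printf("resolve %s, tile (%d,%d) %dx%d\n",
                    surf, t.x, t.y, t.bin_w, t.bin_h);
    }

    static void tile_gmem2mem(const Tile &t, uint32_t resolve_mask) {
        if (resolve_mask & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL))
            emit_gmem2mem_surf(t, "zsbuf");
        if (resolve_mask & FD_BUFFER_COLOR)
            emit_gmem2mem_surf(t, "cbuf");
    }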
/system/libhidl/transport/allocator/1.0/ |
IAllocator.hal
  39  * @return batch Unmapped memory objects.
  41  batchAllocate(uint64_t size, uint64_t count) generates (bool success, vec<memory> batch);
/external/mesa3d/src/gallium/drivers/freedreno/a3xx/ |
fd3_gmem.c
  131  use_hw_binning(struct fd_batch *batch)
  133  struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
  156  static void update_vsc_pipe(struct fd_batch *batch);
  158  emit_binning_workaround(struct fd_batch *batch)
  160  struct fd_context *ctx = batch->ctx;
  162  struct fd_ringbuffer *ring = batch->gmem;
  259  fd_wfi(batch, ring);
  286  fd_reset_wfi(batch);
  294  fd_wfi(batch, ring);
  311  emit_gmem2mem_surf(struct fd_batch *batch,
  [all...]
/packages/apps/QuickSearchBox/src/com/android/quicksearchbox/util/ |
BatchingNamedTaskExecutor.java
  61  * Instructs the executor to submit the next batch of results.
  65  NamedTask[] batch = new NamedTask[0];
  69  batch = nextTasks.toArray(batch);
  71  if (DBG) Log.d(TAG, "Dispatching batch of " + count);
  74  for (NamedTask task : batch) {
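The executor drains the pending tasks into a local array while holding its lock, then dispatches outside the critical section so slow tasks don't block producers. The same drain-then-dispatch shape in C++, to stay consistent with the other sketches here (hypothetical task type):

    #include <cstddef>
    #include <functional>
    #include <iterator>
    #include <mutex>
    #include <vector>

    struct NamedTask { std::function<void()> run; };

    class BatchingExecutor {
     public:
      void add(NamedTask t) {
        std::lock_guard<std::mutex> lock(mu_);
        pending_.push_back(std::move(t));
      }

      // Move up to `count` tasks out under the lock; run them after releasing it.
      void executeNextBatch(std::size_t count) {
        std::vector<NamedTask> batch;
        {
          std::lock_guard<std::mutex> lock(mu_);
          auto n = static_cast<std::ptrdiff_t>(
              count < pending_.size() ? count : pending_.size());
          batch.assign(std::make_move_iterator(pending_.begin()),
                       std::make_move_iterator(pending_.begin() + n));
          pending_.erase(pending_.begin(), pending_.begin() + n);
        }
        for (NamedTask &task : batch)
          task.run();
      }

     private:
      std::mutex mu_;
      std::vector<NamedTask> pending_;
    };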