    Searched defs:batch (Results 26 - 50 of 256)


  /external/mesa3d/src/gallium/drivers/freedreno/
freedreno_draw.c 43 resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
47 fd_batch_resource_used(batch, fd_resource(prsc), false);
51 resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
55 fd_batch_resource_used(batch, fd_resource(prsc), true);
62 struct fd_batch *batch = ctx->batch; local
63 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
88 fd_batch_reset(batch);
92 batch->blit = ctx->in_blit;
93 batch->back_blit = ctx->in_shadow;
284 struct fd_batch *batch = ctx->batch; local
    [all...]
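
The freedreno_draw.c hits above show the draw path tagging every resource a draw touches as either read or written on the current batch. A minimal sketch of that pattern follows; the helper declaration and struct names here are illustrative stand-ins echoing the hits, not Mesa's actual code.

    // Hedged sketch of the read/write tagging seen in freedreno_draw.c above.
    // The last argument records whether the batch will write the resource, so
    // dependencies between batches can be tracked by the batch cache.
    struct fd_batch;
    struct fd_resource;

    // Declared here only for illustration; the real helper lives in the
    // freedreno batch code.
    void fd_batch_resource_used(fd_batch *batch, fd_resource *rsc, bool write);

    static void resource_read(fd_batch *batch, fd_resource *rsc)
    {
        if (rsc)
            fd_batch_resource_used(batch, rsc, /*write=*/false);
    }

    static void resource_written(fd_batch *batch, fd_resource *rsc)
    {
        if (rsc)
            fd_batch_resource_used(batch, rsc, /*write=*/true);
    }
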
freedreno_batch.c 38 batch_init(struct fd_batch *batch)
40 struct fd_context *ctx = batch->ctx;
44 util_queue_fence_init(&batch->flush_fence);
56 batch->draw = fd_ringbuffer_new(ctx->screen->pipe, size);
57 batch->binning = fd_ringbuffer_new(ctx->screen->pipe, size);
58 batch->gmem = fd_ringbuffer_new(ctx->screen->pipe, size);
60 fd_ringbuffer_set_parent(batch->gmem, NULL);
61 fd_ringbuffer_set_parent(batch->draw, batch->gmem);
62 fd_ringbuffer_set_parent(batch->binning, batch->gmem);
92 struct fd_batch *batch = CALLOC_STRUCT(fd_batch); local
239 struct fd_batch *batch = job; local
248 struct fd_batch *batch = job; local
    [all...]
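
batch_init() above creates three command ringbuffers per batch and parents the draw and binning streams to the gmem stream. A hedged sketch of that setup, assuming the libdrm_freedreno ringbuffer API that the hits call and a struct trimmed to the fields they mention:

    // Hedged sketch of the per-batch ringbuffer setup in batch_init() above.
    // Requires libdrm_freedreno; fd_batch_rings is a trimmed stand-in struct.
    #include <cstdint>
    #include <freedreno_ringbuffer.h>

    struct fd_batch_rings {
        struct fd_ringbuffer *gmem;     // top-level stream for the render pass
        struct fd_ringbuffer *draw;     // per-draw commands (replayed per tile)
        struct fd_ringbuffer *binning;  // binning-pass commands
    };

    static void batch_rings_init(fd_batch_rings *b, struct fd_pipe *pipe,
                                 uint32_t size)
    {
        b->draw    = fd_ringbuffer_new(pipe, size);
        b->binning = fd_ringbuffer_new(pipe, size);
        b->gmem    = fd_ringbuffer_new(pipe, size);

        // gmem is the root; draw and binning are emitted as its children so
        // their relocations are resolved when gmem is flushed.
        fd_ringbuffer_set_parent(b->gmem, nullptr);
        fd_ringbuffer_set_parent(b->draw, b->gmem);
        fd_ringbuffer_set_parent(b->binning, b->gmem);
    }
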
freedreno_batch_cache.c 39 * The batch cache provides lookup for mapping pipe_framebuffer_state
40 * to a batch.
45 * Batch Cache hashtable key:
52 * Batch:
54 * Each batch needs to hold a reference to each resource it depends on (ie.
59 * When a resource is destroyed, we need to remove entries in the batch
64 * When a batch has weak reference to no more resources (ie. all the
65 * surfaces it rendered to are destroyed) the batch can be destroyed.
68 * surfaces are destroyed before the batch is submitted.
70 * If (for example), batch writes to zsbuf but that surface is destroyed
136 struct fd_batch *batch = NULL; local
159 struct fd_batch *batch; local
208 struct fd_batch *batch; local
233 struct fd_batch *batch; local
315 struct fd_batch *batch = NULL; local
    [all...]
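
The comment block above is the most descriptive hit: the batch cache hashes the pipe_framebuffer_state to find (or create) the batch currently rendering to that set of surfaces; each batch references the resources it touches, and resources hold weak back-references so cache entries can be pruned when a surface is destroyed before flush. A stripped-down sketch of just the lookup-or-create step, with a hypothetical key and hash in place of Mesa's (ownership and teardown omitted):

    // Hedged sketch of the batch-cache lookup described above; the key layout,
    // hash, and storage are illustrative stand-ins, not Mesa's implementation.
    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    struct FramebufferKey {
        uint32_t width, height, layers;
        // ...the real key also identifies the attached surfaces...
        bool operator==(const FramebufferKey &o) const {
            return width == o.width && height == o.height && layers == o.layers;
        }
    };

    struct FramebufferKeyHash {
        size_t operator()(const FramebufferKey &k) const {
            return ((size_t)k.width * 31u + k.height) * 31u + k.layers;
        }
    };

    struct Batch { /* rendering state for one framebuffer */ };

    struct BatchCache {
        std::unordered_map<FramebufferKey, Batch *, FramebufferKeyHash> ht;

        Batch *lookup_or_create(const FramebufferKey &key) {
            auto it = ht.find(key);
            if (it != ht.end())
                return it->second;      // reuse the batch already targeting this fb
            Batch *b = new Batch();     // otherwise start a new one
            ht.emplace(key, b);
            return b;
        }
    };
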
  /external/mesa3d/src/mesa/drivers/dri/i915/
intel_batchbuffer.h 15 * Number of bytes to reserve for commands necessary to complete a batch.
19 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
76 return (intel->batch.bo->size - intel->batch.reserved_space)
77 - intel->batch.used*4;
87 intel->batch.map[intel->batch.used++] = dword;
112 intel->batch.emit = intel->batch.used;
114 intel->batch.total = n;
122 struct intel_batchbuffer *batch = &intel->batch; local
    [all...]
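
The i915 header above reduces batch bookkeeping to two operations: remaining space is (bo size - reserved tail) minus 4 bytes per dword already used, and emitting a command writes one dword at map[used++]. A self-contained sketch of that arithmetic, with a trimmed stand-in struct:

    // Hedged sketch of the i915 batch-space accounting shown above.
    // Field names mirror the hits; the struct itself is a trimmed stand-in.
    #include <cassert>
    #include <cstdint>

    struct Batch {
        uint32_t *map;            // CPU mapping of the batch buffer object
        unsigned  used;           // dwords already written
        unsigned  size;           // total size of the bo, in bytes
        unsigned  reserved_space; // bytes kept free for the closing commands
    };

    static unsigned batch_space(const Batch &b)
    {
        // bytes left = (bo size - reserved tail) - dwords used * 4
        return (b.size - b.reserved_space) - b.used * 4;
    }

    static void emit_dword(Batch &b, uint32_t dword)
    {
        assert(batch_space(b) >= 4);
        b.map[b.used++] = dword;  // append one 32-bit command/operand
    }
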
  /external/mesa3d/src/mesa/drivers/dri/i965/
intel_batchbuffer.h 14 * Number of bytes to reserve for commands necessary to complete a batch.
18 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
42 void intel_batchbuffer_init(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
44 void intel_batchbuffer_free(struct intel_batchbuffer *batch);
66 uint32_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
72 uint64_t intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
79 #define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
98 intel_batchbuffer_space(struct intel_batchbuffer *batch)
136 struct intel_batchbuffer *batch = &brw->batch; local
    [all...]
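
The i965 variant drops the explicit used counter: USED_BATCH() on line 79 derives the dword count from the distance between map_next and map. A small sketch of the same pointer-based accounting, with the struct trimmed to those two fields:

    // Hedged sketch of the i965-style accounting: the write position is a
    // pointer, and usage is derived from it rather than kept in a counter.
    #include <cstdint>

    struct Batch {
        uint32_t *map;       // start of the CPU mapping
        uint32_t *map_next;  // next free dword
    };

    // Dwords written so far; plays the role of the USED_BATCH() macro above.
    static inline uintptr_t used_batch(const Batch &b)
    {
        return (uintptr_t)(b.map_next - b.map);
    }

    static inline void emit_dword(Batch &b, uint32_t dword)
    {
        *b.map_next++ = dword;   // write one dword and advance the pointer
    }
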
intel_batchbuffer.c 39 intel_batchbuffer_reset(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
43 intel_batchbuffer_init(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
46 intel_batchbuffer_reset(batch, bufmgr, has_llc);
49 batch->cpu_map = malloc(BATCH_SZ);
50 batch->map = batch->cpu_map;
51 batch->map_next = batch->cpu_map;
56 intel_batchbuffer_reset(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
59 if (batch->last_bo != NULL)
146 struct intel_batchbuffer *batch = &brw->batch; local
324 struct intel_batchbuffer *batch = &brw->batch; local
    [all...]
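
intel_batchbuffer_init() above mallocs a cpu_map and points map/map_next at it, which on platforms without LLC lets the driver build the batch in cached memory and upload it to the buffer object at flush time instead of streaming writes through an uncached GPU mapping. A hedged sketch of that staging idea; the BATCH_SZ value and the memcpy-style upload are illustrative assumptions:

    // Hedged sketch of the non-LLC staging path suggested by lines 49-51 above.
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    constexpr size_t BATCH_SZ = 32768;   // assumed batch size for illustration

    struct Batch {
        uint32_t *cpu_map;   // CPU-cached staging buffer (non-LLC platforms)
        uint32_t *map;       // where commands are written
        uint32_t *map_next;
    };

    static void batch_init_no_llc(Batch &b)
    {
        b.cpu_map  = (uint32_t *)std::malloc(BATCH_SZ);
        b.map      = b.cpu_map;          // build commands in cached memory
        b.map_next = b.cpu_map;
    }

    static void batch_upload_at_flush(const Batch &b, void *bo_map)
    {
        // At flush time the staged dwords are copied into the mapped bo.
        std::memcpy(bo_map, b.map, (b.map_next - b.map) * sizeof(uint32_t));
    }
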
  /external/tensorflow/tensorflow/compiler/tf2xla/kernels/
lrn_ops.cc 92 const int64 batch = in_grads_shape.dim_size(0); variable
97 ctx, in_image_shape.dim_size(0) == batch &&
101 out_image_shape.dim_size(0) == batch &&
  /external/tensorflow/tensorflow/contrib/seq2seq/kernels/
beam_search_ops_gpu.cu.cc 35 const int32 batch = i / beam_width; local
39 Eigen::numext::mini(max_time, ldg(max_sequence_lengths + batch));
45 (batch_size * beam_width * (time_ix) + beam_width * batch + (beam_ix))
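
The CUDA kernel above splits a linear index into a (batch, beam) pair with i / beam_width, and line 45 shows the flattened offset into a [max_time, batch_size, beam_width] layout. The same arithmetic as a small host-side sketch:

    // Hedged sketch of the flattened indexing in the beam-search kernel above:
    // element (time, batch, beam) of a [max_time, batch_size, beam_width]
    // tensor lives at time*batch_size*beam_width + batch*beam_width + beam.
    #include <cstdint>

    struct BeamIndex {
        int32_t batch;
        int32_t beam;
    };

    static BeamIndex split_index(int32_t i, int32_t beam_width)
    {
        return { i / beam_width, i % beam_width };
    }

    static int64_t flat_offset(int32_t time, int32_t batch, int32_t beam,
                               int32_t batch_size, int32_t beam_width)
    {
        return static_cast<int64_t>(batch_size) * beam_width * time
             + static_cast<int64_t>(beam_width) * batch
             + beam;
    }
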
  /external/tensorflow/tensorflow/core/kernels/
adjust_contrast_op.h 33 const int batch = input.dimension(0); local
39 scalar_broadcast[0] = batch;
53 reshape_dims[0] = batch;
66 reshape_dims.set(0, batch);
95 const int batch = input.dimension(0); local
101 scalar_broadcast[0] = batch;
115 reshape_dims[0] = batch;
133 reshape_dims.set(0, batch);
deep_conv2d.h 69 int batch; member in struct:tensorflow::Conv2DArgs
84 : batch(0),
  /hardware/libhardware/modules/sensors/dynamic_sensor/
DummyDynamicAccelDaemon.cpp 130 int DummyDynamicAccelDaemon::DummySensor::batch(int64_t /*samplePeriod*/, int64_t /*batchPeriod*/) { function in class:android::SensorHalExt::DummyDynamicAccelDaemon::DummySensor
sensors.cpp 41 device.batch = BatchWrapper;
68 int SensorContext::batch( function in class:SensorContext
72 return mDynamicSensorManager->batch(handle, sampling_period_ns, max_report_latency_ns);
110 return reinterpret_cast<SensorContext *>(dev)->batch(
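
The sensors HAL hits show the usual C-to-C++ bridge: device.batch points at a wrapper, and the wrapper reinterpret_casts the device pointer back to its owning SensorContext and forwards the call. A hedged sketch of that pattern; the Device struct below is a simplified stand-in for Android's sensors_poll_device_1, and the batch body is stubbed out:

    // Hedged sketch of the HAL batch() plumbing shown above: a C function-pointer
    // slot forwards into a C++ method on the embedding context.
    #include <cstdint>

    struct Device {                      // stand-in for sensors_poll_device_1
        int (*batch)(Device *dev, int handle,
                     int64_t sampling_period_ns, int64_t max_report_latency_ns);
    };

    class SensorContext {
    public:
        Device device;                   // first member, so a Device* can be
                                         // cast back to the owning context

        SensorContext() { device.batch = &SensorContext::BatchWrapper; }

        int batch(int handle, int64_t sampling_period_ns,
                  int64_t max_report_latency_ns) {
            // The real implementation forwards to the dynamic sensor manager.
            (void)handle; (void)sampling_period_ns; (void)max_report_latency_ns;
            return 0;
        }

        static int BatchWrapper(Device *dev, int handle,
                                int64_t period_ns, int64_t latency_ns) {
            return reinterpret_cast<SensorContext *>(dev)->batch(handle, period_ns,
                                                                 latency_ns);
        }
    };
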
  /packages/apps/QuickSearchBox/src/com/android/quicksearchbox/util/
BatchingNamedTaskExecutor.java 61 * Instructs the executor to submit the next batch of results.
65 NamedTask[] batch = new NamedTask[0]; local
69 batch = nextTasks.toArray(batch);
71 if (DBG) Log.d(TAG, "Dispatching batch of " + count);
74 for (NamedTask task : batch) {
  /external/javasqlite/src/main/java/SQLite/JDBC2z/
JDBCStatement.java 12 private ArrayList<String> batch; field in class:JDBCStatement
18 this.batch = null;
177 if (batch == null) {
178 batch = new ArrayList<String>(1);
180 batch.add(sql);
184 if (batch == null) {
187 int[] ret = new int[batch.size()];
194 execute((String) batch.get(i));
201 throw new BatchUpdateException("batch failed", ret);
207 if (batch != null)
    [all...]
  /cts/tests/tests/location/src/android/location/cts/asn1/supl2/supl_triggered_response/
RepMode.java 40 batch(3), enum constant in enum:RepMode.Value
  /external/compiler-rt/lib/sanitizer_common/
sanitizer_quarantine.h 33 void *batch[kSize]; member in struct:__sanitizer::QuarantineBatch
104 CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
106 PREFETCH(b->batch[i]);
109 PREFETCH(b->batch[i + kPrefetch]);
110 cb.Recycle((Node*)b->batch[i]);
136 size += sizeof(QuarantineBatch); // Count the batch in Quarantine size.
140 b->batch[b->count++] = ptr;
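
QuarantineBatch above keeps up to kSize quarantined pointers in a flat array, and the recycle loop prefetches a few entries ahead of the one being freed. A hedged sketch of that prefetch-ahead loop; the constants and the recycle callback are illustrative, and __builtin_prefetch assumes a GCC/Clang-style compiler:

    // Hedged sketch of the prefetch-ahead recycle loop suggested by the
    // sanitizer_quarantine.h hits above.
    #include <cstddef>

    constexpr size_t kSize = 1021;       // illustrative capacity
    constexpr size_t kPrefetch = 16;     // illustrative prefetch distance

    struct QuarantineBatch {
        size_t count = 0;
        void  *batch[kSize];
    };

    template <typename Callback>
    void recycle_batch(QuarantineBatch *b, Callback &&recycle)
    {
        // Warm the cache for the first few entries...
        for (size_t i = 0; i < kPrefetch && i < b->count; i++)
            __builtin_prefetch(b->batch[i]);
        for (size_t i = 0; i < b->count; i++) {
            // ...then keep prefetching kPrefetch entries ahead of the one
            // currently being handed to the recycler.
            if (i + kPrefetch < b->count)
                __builtin_prefetch(b->batch[i + kPrefetch]);
            recycle(b->batch[i]);
        }
        b->count = 0;
    }
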
  /external/iproute2/ip/
ip.c 51 " ip [ -force ] -batch filename\n"
123 static int batch(const char *name) function
258 } else if (matches(opt, "-batch") == 0) {
308 return batch(batch_file);
  /external/iproute2/tc/
tc.c 220 " tc [-force] -batch filename\n"
252 static int batch(const char *name) function
331 } else if (matches(argv[1], "-batch") == 0) {
362 return batch(batch_file);
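
Both ip and tc above expose a "-batch filename" mode whose batch() routine runs one command per line, with -force continuing past failures. A hedged sketch of that flow; do_command() and the comment-skipping rule are stand-ins for illustration, not iproute2's code:

    // Hedged sketch of a "-batch file" driver in the spirit of the ip.c/tc.c
    // hits above: read the file line by line, split into words, and dispatch
    // each non-empty, non-comment line as one command.
    #include <fstream>
    #include <iostream>
    #include <iterator>
    #include <sstream>
    #include <string>
    #include <vector>

    static int do_command(const std::vector<std::string> &argv)
    {
        // A real tool would dispatch on argv[0] ("link", "route", "rule", ...).
        (void)argv;
        return 0;
    }

    static int run_batch(const std::string &filename, bool force)
    {
        std::ifstream in(filename);
        if (!in) {
            std::cerr << "cannot open " << filename << "\n";
            return 1;
        }
        std::string line;
        int lineno = 0, ret = 0;
        while (std::getline(in, line)) {
            ++lineno;
            std::istringstream words(line);
            std::vector<std::string> argv{std::istream_iterator<std::string>(words),
                                          std::istream_iterator<std::string>()};
            if (argv.empty() || argv[0][0] == '#')
                continue;                       // skip blank lines and comments
            if (do_command(argv) != 0) {
                std::cerr << "command failed at line " << lineno << "\n";
                ret = 2;
                if (!force)                     // without -force, stop on error
                    break;
            }
        }
        return ret;
    }
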
  /external/iptables/iptables/
nft.h 35 struct mnl_nlmsg_batch *batch; member in struct:nft_handle
  /external/mesa3d/src/gallium/winsys/i915/drm/
i915_drm_batchbuffer.c 30 i915_drm_batchbuffer(struct i915_winsys_batchbuffer *batch)
32 return (struct i915_drm_batchbuffer *)batch;
36 i915_drm_batchbuffer_reset(struct i915_drm_batchbuffer *batch)
38 struct i915_drm_winsys *idws = i915_drm_winsys(batch->base.iws);
40 if (batch->bo)
41 drm_intel_bo_unreference(batch->bo);
42 batch->bo = drm_intel_bo_alloc(idws->gem_manager,
44 batch->actual_size,
47 memset(batch->base.map, 0, batch->actual_size);
57 struct i915_drm_batchbuffer *batch = CALLOC_STRUCT(i915_drm_batchbuffer); local
100 struct i915_drm_batchbuffer *batch = i915_drm_batchbuffer(ibatch); local
165 struct i915_drm_batchbuffer *batch = i915_drm_batchbuffer(ibatch); local
221 struct i915_drm_batchbuffer *batch = i915_drm_batchbuffer(ibatch); local
    [all...]
  /external/tensorflow/tensorflow/contrib/data/python/ops/
dataset_ops.py 418 def batch(self, batch_size): member in class:Dataset
423 consecutive elements of this dataset to combine in a single batch.
441 consecutive elements of this dataset to combine in a single batch.
448 dimension in each batch.
666 .batch(BATCH_SIZE))
  /external/tensorflow/tensorflow/contrib/labeled_tensor/python/ops/
ops.py 430 if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
433 'called "batch" as their first dimension, '
435 culled_axes = axes.remove('batch')
436 return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
438 return core.Axes([('batch', batch_size)] + list(axes.values()))
452 def batch(labeled_tensors, function
461 See tf.batch.
465 batch_size: The output batch size.
466 num_threads: See tf.batch
    [all...]
  /external/tensorflow/tensorflow/core/grappler/costs/
analytical_cost_estimator_test.cc 51 const int batch = 1; local
61 s.WithOpName("image"), {batch, width, height, num_channels}, DT_FLOAT);
62 auto labels = ops::RandomUniform(s.WithOpName("label"), {batch, num_labels},
73 {batch, width * height * conv_filters});
op_level_cost_estimator.h 64 int64 batch; // Batch size. member in struct:tensorflow::grappler::OpLevelCostEstimator::ConvolutionDimensions
utils_test.cc 59 int batch = 32; local
74 CreateConstOp("input", {batch, rows, cols, in_depth}, input);
81 CreateConstOp("output_backprop", {batch, out_rows, out_cols, out_depth},
86 std::vector<int32>({batch, rows, cols, in_depth}),
