    Searched refs: batch (Results 201 - 225 of 709)


  /frameworks/base/cmds/incidentd/src/
Reporter.h 89 ReportRequestSet batch; member in class:android::os::incidentd::Reporter
95 // Run the report as described in the batch and args parameters.
  /hardware/interfaces/sensors/1.0/default/
Sensors.h 47 Return<Result> batch(
  /hardware/invensense/6515/libsensors_iio/
SensorBase.h 98 virtual int batch(int handle, int flags, int64_t period_ns, int64_t timeout);
  /hardware/invensense/65xx/libsensors_iio/
SensorBase.h 97 virtual int batch(int handle, int flags, int64_t period_ns, int64_t timeout);
  /hardware/libhardware/modules/sensors/dynamic_sensor/
sensors.h 52 int batch(int handle, int64_t sampling_period_ns,
  /packages/apps/Gallery2/src/com/android/gallery3d/ui/
SelectionManager.java 158 int batch = 50; local
162 int count = index + batch < total
163 ? batch
173 index += batch;
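
The Gallery2 snippet above walks a selection in fixed chunks of 50 so that no single pass has to hold the whole selection at once. A minimal standalone sketch of that chunked-iteration pattern (the processRange method and the sizes are illustrative, not Gallery2's own):

    public final class BatchIteration {
        public static void main(String[] args) {
            int total = 1234;   // e.g. number of selected items
            int batch = 50;     // chunk size, as in the snippet above
            int index = 0;
            while (index < total) {
                // The last chunk may be smaller than the batch size.
                int count = (index + batch < total) ? batch : (total - index);
                processRange(index, count);
                index += batch;
            }
        }

        private static void processRange(int start, int count) {
            System.out.println("processing items [" + start + ", " + (start + count) + ")");
        }
    }
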
  /packages/apps/UnifiedEmail/src/com/android/mail/bitmap/
ContactResolver.java 85 // Start to process a new batch.
91 LogUtils.d(TAG, "ContactResolver << batch skip");
96 LogUtils.d(TAG, "ContactResolver >> batch start");
98 // Make a copy of the batch.
99 LinkedHashSet<ContactRequestHolder> batch = new LinkedHashSet<ContactRequestHolder>(mBatch); local
105 mTask = getContactResolverTask(batch);
111 LinkedHashSet<ContactRequestHolder> batch) {
112 return new ContactResolverTask(batch, mResolver, mCache, this);
130 * means that every ContactDrawable on the screen will add its ContactRequest to the batch in
135 * the event queue. Every time something is added to the batch as part of the same layout pass
    [all...]
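
The ContactResolver comments describe requests accumulating during a layout pass and then being copied so a background task works on a stable batch while new requests keep arriving. A hedged sketch of that snapshot-the-batch idea, with invented type and method names:

    import java.util.LinkedHashSet;
    import java.util.Set;

    public final class BatchSnapshot {
        // Insertion-ordered set of pending requests, mirroring mBatch in the snippet.
        private final LinkedHashSet<String> pending = new LinkedHashSet<>();

        public void add(String request) {
            pending.add(request);
        }

        public void processBatch() {
            // Make a copy of the batch, then clear the live set for the next round,
            // so callers can keep adding requests while this batch is resolved.
            Set<String> batch = new LinkedHashSet<>(pending);
            pending.clear();
            for (String request : batch) {
                System.out.println("resolving " + request);
            }
        }
    }
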
  /prebuilts/go/darwin-x86/src/internal/trace/
order.go 19 batch int
45 // The high level idea is as follows. Events within an individual batch are in
48 // from each batch (frontier). Then choose subset that is "ready" to be merged,
94 if !batches[f.batch].selected {
95 panic("frontier batch is not selected")
97 batches[f.batch].selected = false
219 for _, batch := range m {
220 events = append(events, batch...)
  /prebuilts/go/linux-x86/src/internal/trace/
order.go 19 batch int
45 // The high level idea is as follows. Events within an individual batch are in
48 // from each batch (frontier). Then choose subset that is "ready" to be merged,
94 if !batches[f.batch].selected {
95 panic("frontier batch is not selected")
97 batches[f.batch].selected = false
219 for _, batch := range m {
220 events = append(events, batch...)
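
The order.go comments (identical in the darwin-x86 and linux-x86 copies) describe the merge: events within a batch are already ordered, so a global order comes from repeatedly taking the smallest head event across the frontier of all batches. A simplified sketch of that idea, written in Java here rather than Go and using plain timestamps in place of the runtime's real readiness rules:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    public final class BatchMerge {
        // cursor = {batch index, position within that batch}
        public static List<Long> merge(List<List<Long>> batches) {
            PriorityQueue<int[]> frontier = new PriorityQueue<>(
                    Comparator.comparingLong((int[] c) -> batches.get(c[0]).get(c[1])));
            for (int b = 0; b < batches.size(); b++) {
                if (!batches.get(b).isEmpty()) {
                    frontier.add(new int[] {b, 0});
                }
            }
            List<Long> events = new ArrayList<>();
            while (!frontier.isEmpty()) {
                int[] cursor = frontier.poll();
                List<Long> batch = batches.get(cursor[0]);
                events.add(batch.get(cursor[1]));
                if (cursor[1] + 1 < batch.size()) {
                    frontier.add(new int[] {cursor[0], cursor[1] + 1});
                }
            }
            return events;
        }

        public static void main(String[] args) {
            // Prints [1, 2, 3, 4, 8, 9]
            System.out.println(merge(List.of(List.of(1L, 4L, 9L), List.of(2L, 3L, 8L))));
        }
    }
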
  /device/google/contexthub/sensorhal/
sensors.cpp 58 device.batch = BatchWrapper;
111 int SensorContext::batch( function in class:SensorContext
115 ALOGV("batch");
119 return h->batch(handle, sampling_period_ns, max_report_latency_ns);
185 return reinterpret_cast<SensorContext *>(dev)->batch(
286 int SensorContext::HubConnectionOperation::batch( function in class:SensorContext::HubConnectionOperation
351 int SensorContext::DynamicSensorManagerOperation::batch(int handle, int64_t sampling_period_ns, function in class:SensorContext::DynamicSensorManagerOperation
353 return mDynamicSensorManager->batch(handle, sampling_period_ns, max_report_latency_ns);
  /external/tensorflow/tensorflow/core/kernels/
depthwise_conv_op.cc 227 const int64 total_shards = args.batch * args.out_rows;
229 // Empirically tested to give reasonable performance boosts at batch size 1
230 // without reducing throughput at batch size 32.
284 "strides in the batch and depth dimensions."));
294 // [ batch, in_rows, in_cols, in_depth ]
338 // The first dimension for input is batch.
339 const int32 batch = input.dim_size(0); variable
349 ShapeFromFormat(data_format_, batch, out_rows, out_cols, out_depth);
361 << " Input: [" << batch << ", " << input_rows << ", " << input_cols
365 << ", pad_cols = " << pad_cols << ", output: [" << batch << ", "
    [all...]
pooling_ops_3d_sycl.h 33 SYCL3DPoolParams(const int depth, const int batch, const int in_planes,
40 batch_(batch),
57 SYCL3DPoolParams(const int depth, const int batch, const int in_planes,
63 : SYCL3DPoolParams(depth, batch, in_planes, in_rows, in_cols,
124 MaxPool3DSYCL(const int depth, const int batch, const int in_planes,
132 : p_(depth, batch, in_planes, in_rows, in_cols, out_planes, out_rows,
189 const int batch = GetTensorDim(tensor_in, data_format, 'N'); local
207 MaxPool3DSYCL<T> max_pool(depth, batch, in_planes, in_rows, in_cols,
235 MaxPool3DGradSYCL(const int depth, const int batch, const int in_planes,
245 : p_(depth, batch, in_planes, in_rows, in_cols, output_shape, window
355 const int batch = GetTensorDim(tensor_in, data_format, 'N'); local
595 const int batch = GetTensorDim(tensor_in, data_format, 'N'); local
727 const int batch = GetTensorDim(tensor_in_shape, data_format, 'N'); local
    [all...]
maxpooling_op_gpu.cu.cc 60 // const int output_size = batch * channels * pooled_height * pooled_width;
380 const int32* bottom_data, const int batch, const int height,
386 const int output_size = batch * channels * pooled_height * pooled_width;
398 const T* bottom_data, const int batch, const int height, const int width,
404 const int output_size = batch * channels * pooled_height * pooled_width;
425 const T* bottom_data, const int batch, const int height, const int width,
432 const int bottom_size = batch * channels * height * width;
436 const int top_size = batch * channels * pooled_height * pooled_width;
463 const int batch, const int pooled_height, const int pooled_width,
468 const int num_kernels = batch * channels * pooled_height * pooled_width
    [all...]
depthwise_conv_op_gpu.cu.cc 96 const int batch = thread_id / out_depth / out_width / out_height;
110 const int input_offset_temp = in_height * batch;
178 const int num_batches = args.batch;
247 const int batch = b / batch_blocks;
248 const int block = b - batch * batch_blocks;
252 const int inout_offset = batch * in_size + filter_offset;
338 const int batch = thread_id / out_width / out_height / out_depth;
354 // for each sample in the batch.
364 // pixels for a given batch and input depth. The following
371 (batch * in_depth + in_channel) * (in_height * in_width)
    [all...]
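
The CUDA kernels above recover the batch index from a flat thread id by successive integer division (thread_id / out_depth / out_width / out_height). A small sketch of that flat-index arithmetic, with made-up dimensions and the same ordering as the first kernel:

    public final class FlatIndex {
        public static void main(String[] args) {
            int outDepth = 8, outWidth = 16, outHeight = 16, numBatches = 4;
            int totalThreads = numBatches * outHeight * outWidth * outDepth;
            for (int threadId = 0; threadId < totalThreads; threadId += 1000) {
                // Depth (channel) varies fastest, then width, then height, then batch.
                int outChannel = threadId % outDepth;
                int outCol = (threadId / outDepth) % outWidth;
                int outRow = (threadId / outDepth / outWidth) % outHeight;
                int batch = threadId / outDepth / outWidth / outHeight;
                System.out.printf("thread %d -> batch=%d row=%d col=%d channel=%d%n",
                        threadId, batch, outRow, outCol, outChannel);
            }
        }
    }
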
  /external/iptables/iptables/
nft.c 88 struct mnl_nlmsg_batch *batch; member in struct:batch_page
91 /* selected batch page is 256 Kbytes long to load ruleset of
101 /* libmnl needs higher buffer to handle batch overflows */
110 mnl_nftnl_batch_page_add(struct mnl_nlmsg_batch *batch)
118 batch_page->batch = batch;
150 free(batch_page->batch);
174 iov[i].iov_base = mnl_nlmsg_batch_head(batch_page->batch);
175 iov[i].iov_len = mnl_nlmsg_batch_size(batch_page->batch);
179 mnl_nlmsg_batch_head(batch_page->batch),
    [all...]
  /developers/build/prebuilts/gradle/BasicSyncAdapter/Application/src/main/java/com/example/android/basicsyncadapter/
SyncAdapter.java 189 * <p>As an additional optimization, we use a batch operation to perform all database writes at
212 ArrayList<ContentProviderOperation> batch = new ArrayList<ContentProviderOperation>(); local
252 batch.add(ContentProviderOperation.newUpdate(existingUri)
266 batch.add(ContentProviderOperation.newDelete(deleteUri).build());
275 batch.add(ContentProviderOperation.newInsert(FeedContract.Entry.CONTENT_URI)
283 Log.i(TAG, "Merge solution ready. Applying batch update");
284 mContentResolver.applyBatch(FeedContract.CONTENT_AUTHORITY, batch);
  /developers/samples/android/connectivity/sync/BasicSyncAdapter/Application/src/main/java/com/example/android/basicsyncadapter/
SyncAdapter.java 189 * <p>As an additional optimization, we use a batch operation to perform all database writes at
212 ArrayList<ContentProviderOperation> batch = new ArrayList<ContentProviderOperation>(); local
252 batch.add(ContentProviderOperation.newUpdate(existingUri)
266 batch.add(ContentProviderOperation.newDelete(deleteUri).build());
275 batch.add(ContentProviderOperation.newInsert(FeedContract.Entry.CONTENT_URI)
283 Log.i(TAG, "Merge solution ready. Applying batch update");
284 mContentResolver.applyBatch(FeedContract.CONTENT_AUTHORITY, batch);
  /development/samples/browseable/BasicSyncAdapter/src/com.example.android.basicsyncadapter/
SyncAdapter.java 189 * <p>As an additional optimization, we use a batch operation to perform all database writes at
212 ArrayList<ContentProviderOperation> batch = new ArrayList<ContentProviderOperation>(); local
252 batch.add(ContentProviderOperation.newUpdate(existingUri)
266 batch.add(ContentProviderOperation.newDelete(deleteUri).build());
275 batch.add(ContentProviderOperation.newInsert(FeedContract.Entry.CONTENT_URI)
283 Log.i(TAG, "Merge solution ready. Applying batch update");
284 mContentResolver.applyBatch(FeedContract.CONTENT_AUTHORITY, batch);
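
All three copies of SyncAdapter.java follow the pattern their javadoc describes: queue every insert, update, and delete as a ContentProviderOperation and commit them with a single applyBatch() call. A condensed sketch of that pattern (the URIs, authority, and column name below are placeholders rather than the sample's real FeedContract values):

    import android.content.ContentProviderOperation;
    import android.content.ContentResolver;
    import android.content.OperationApplicationException;
    import android.net.Uri;
    import android.os.RemoteException;
    import java.util.ArrayList;

    public final class BatchWrites {
        static void applyFeedChanges(ContentResolver resolver, Uri contentUri, String authority,
                                     Uri existingUri, Uri deleteUri)
                throws RemoteException, OperationApplicationException {
            ArrayList<ContentProviderOperation> batch = new ArrayList<>();
            batch.add(ContentProviderOperation.newUpdate(existingUri)
                    .withValue("title", "Updated title")   // placeholder column name
                    .build());
            batch.add(ContentProviderOperation.newDelete(deleteUri).build());
            batch.add(ContentProviderOperation.newInsert(contentUri)
                    .withValue("title", "New entry")       // placeholder column name
                    .build());
            // One round trip to the provider for all accumulated writes.
            resolver.applyBatch(authority, batch);
        }
    }
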
  /external/mesa3d/src/mesa/drivers/dri/i965/
brw_misc_state.c 90 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
93 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
97 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
99 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
101 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
103 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
638 * In the 3DSTATE_DEPTH_BUFFER batch emitted above, the 'separate
    [all...]
  /external/mesa3d/src/gallium/drivers/freedreno/a2xx/
fd2_emit.c 187 struct fd_ringbuffer *ring = ctx->batch->draw;
253 ctx->batch->max_scissor.minx = MIN2(ctx->batch->max_scissor.minx, scissor->minx);
254 ctx->batch->max_scissor.miny = MIN2(ctx->batch->max_scissor.miny, scissor->miny);
255 ctx->batch->max_scissor.maxx = MAX2(ctx->batch->max_scissor.maxx, scissor->maxx);
256 ctx->batch->max_scissor.maxy = MAX2(ctx->batch->max_scissor.maxy, scissor->maxy);
  /external/tensorflow/tensorflow/contrib/timeseries/python/timeseries/
math_utils.py 51 covariance_matrix: A [..., N, N] batch of covariance matrices.
75 matrices with the same batch dimension).
167 matrices: [batch size x N x N]
168 powers: Which integer power to raise each matrix to [batch size]
194 the (unrolled) recursive function? [batch size x N x N]
201 computation. Does not change parts of the batch which have a residual
215 # Stop updating if we've reached our base case; some batch elements may
242 def batch_times_matrix(batch, matrix, adj_x=False, adj_y=False):
243 """Multiply a batch of matrices by a single matrix.
246 tf.matmul(batch, array_ops.tile(gen_math_ops.expand_dims(matrix, 0)
    [all...]
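
The math_utils.py docstring describes batch_times_matrix as multiplying a batch of matrices by one shared matrix; the TensorFlow code does this by tiling the single matrix across the batch dimension before a batched matmul. A plain-Java sketch of the same semantics with explicit loops (shapes are illustrative):

    public final class BatchTimesMatrix {
        // batch: [B][N][M], matrix: [M][K], result: [B][N][K]
        static double[][][] multiply(double[][][] batch, double[][] matrix) {
            int b = batch.length, n = batch[0].length, m = matrix.length, k = matrix[0].length;
            double[][][] out = new double[b][n][k];
            for (int i = 0; i < b; i++) {
                for (int r = 0; r < n; r++) {
                    for (int c = 0; c < k; c++) {
                        double sum = 0;
                        for (int j = 0; j < m; j++) {
                            sum += batch[i][r][j] * matrix[j][c];
                        }
                        out[i][r][c] = sum;
                    }
                }
            }
            return out;
        }
    }
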
  /external/mesa3d/src/intel/vulkan/
anv_private.h 669 /* Bytes actually consumed in this batch BO */
685 * that the batch runs out of space.
691 void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
692 void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
693 uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
696 struct anv_batch *batch);
704 _anv_combine_address(struct anv_batch *batch, void *location,
710 assert(batch->start <= location && location < batch->end);
712 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta)
1207 struct anv_batch batch; member in struct:anv_cmd_buffer
1433 struct anv_batch batch; member in struct:anv_pipeline
    [all...]
anv_device.c 811 struct anv_batch *batch)
821 /* Kernel driver requires 8 byte aligned batch length */
822 size = align_u32(batch->next - batch->start, 8);
827 memcpy(bo.map, batch->start, size);
1228 struct anv_batch batch; local
1557 struct anv_batch batch; local
    [all...]
  /external/swiftshader/src/Renderer/
Renderer.hpp 222 int (Renderer::*setupPrimitives)(int batch, int count);
417 int setupSolidTriangles(int batch, int count);
418 int setupWireframeTriangle(int batch, int count);
419 int setupVertexTriangle(int batch, int count);
420 int setupLines(int batch, int count);
421 int setupPoints(int batch, int count);
  /external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/
state_saving_rnn_estimator.py 74 # Tensor of batch-major order.
96 containing the length of each sequence in the batch. If `None`, sequences
246 """Reads a batch from a state saving sequence queue.
276 batch: A `NextQueuedSequenceBatch` containing batch_size `SequenceExample`
470 batch = _read_batch(
482 sequence_features = batch.sequences
483 context_features = batch.context
493 state_saver=batch,
500 loss = _multi_value_loss(rnn_activations, labels, batch.length,
507 batch.length, prediction_dict, labels
    [all...]

