    Searched refs:divup (Results 1 - 16 of 16)

  /external/tensorflow/tensorflow/core/kernels/
eigen_pooling.h 96 post_reduce_dims[idxRows] = Eigen::divup(
99 post_reduce_dims[idxCols] = Eigen::divup(
103 post_reduce_dims[idxRows] = Eigen::divup(
105 post_reduce_dims[idxCols] = Eigen::divup(
209 post_reduce_dims[idxPlanes] = Eigen::divup(
212 post_reduce_dims[idxRows] = Eigen::divup(
215 post_reduce_dims[idxCols] = Eigen::divup(
219 post_reduce_dims[idxPlanes] = Eigen::divup(
221 post_reduce_dims[idxRows] = Eigen::divup(
223 post_reduce_dims[idxCols] = Eigen::divup(
    [all...]
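
The eigen_pooling.h hits above compute the pooled output extent per dimension with a ceiling division. A minimal worked sketch of the idiom, assuming the usual VALID/SAME padding semantics (names here are illustrative, not TensorFlow's):

    #include <cstdio>

    // Ceiling division, same contract as Eigen::divup (positive operands assumed).
    static int divup(int x, int y) { return (x + y - 1) / y; }

    int main() {
      const int in_rows = 10, window = 3, stride = 2;
      // VALID: only positions where the window fits entirely -> ceil(8/2) = 4
      const int out_valid = divup(in_rows - window + 1, stride);
      // SAME: input padded so every strided position emits an output -> ceil(10/2) = 5
      const int out_same = divup(in_rows, stride);
      std::printf("valid=%d same=%d\n", out_valid, out_same);
      return 0;
    }
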
eigen_cuboid_convolution.h 126 out_depth = Eigen::divup(inputPlanes - kernelDepth + 1,
128 out_height = Eigen::divup(inputRows - kernelRows + 1,
130 out_width = Eigen::divup(inputCols - kernelCols + 1,
135 Eigen::divup(inputPlanes, static_cast<TensorIndex>(stridePlanes));
137 Eigen::divup(inputRows, static_cast<TensorIndex>(strideRows));
138 out_width = Eigen::divup(inputCols, static_cast<TensorIndex>(strideCols));
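
The cuboid-convolution hits apply the same two formulas along all three spatial axes, branching on padding mode. A sketch of that shape arithmetic (function and parameter names are illustrative):

    static int divup(int x, int y) { return (x + y - 1) / y; }

    // Output extents for a 3-D (cuboid) convolution; kd/kr/kc are kernel sizes.
    void CuboidOutputDims(int planes, int rows, int cols, int kd, int kr, int kc,
                          int stride, bool valid_padding,
                          int* out_d, int* out_h, int* out_w) {
      if (valid_padding) {
        // VALID: the kernel must fit entirely inside the input.
        *out_d = divup(planes - kd + 1, stride);
        *out_h = divup(rows - kr + 1, stride);
        *out_w = divup(cols - kc + 1, stride);
      } else {
        // SAME: padding keeps one output per strided input position.
        *out_d = divup(planes, stride);
        *out_h = divup(rows, stride);
        *out_w = divup(cols, stride);
      }
    }
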
eigen_backward_cuboid_convolutions.h 130 Eigen::divup(inputPlanes, static_cast<TensorIndex>(stridePlanes));
132 Eigen::divup(inputRows, static_cast<TensorIndex>(strideRows));
134 Eigen::divup(inputCols, static_cast<TensorIndex>(strideCols));
416 Eigen::divup(inputPlanes, static_cast<TensorIndex>(stridePlanes));
418 Eigen::divup(inputRows, static_cast<TensorIndex>(strideRows));
420 Eigen::divup(inputCols, static_cast<TensorIndex>(strideCols));
eigen_volume_patch.h 98 m_outputPlanes = Eigen::divup(
103 m_outputRows = Eigen::divup(
108 m_outputCols = Eigen::divup(
120 m_outputPlanes = Eigen::divup(
122 m_outputRows = Eigen::divup(m_input_rows_eff - m_patch_rows_eff + 1,
124 m_outputCols = Eigen::divup(m_input_cols_eff - m_patch_cols_eff + 1,
131 m_outputPlanes = Eigen::divup(m_input_planes_eff, m_plane_strides);
132 m_outputRows = Eigen::divup(m_input_rows_eff, m_row_strides);
133 m_outputCols = Eigen::divup(m_input_cols_eff, m_col_strides);
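
eigen_volume_patch.h feeds "effective" sizes into the same formulas; the _eff suffix suggests dilation is folded in before the division. A sketch under that assumption (the effective-size formula is the standard dilation expansion, assumed here rather than quoted from the file):

    static int divup(int x, int y) { return (x + y - 1) / y; }

    // Assumed definition of the *_eff quantities: a size dilated by rate r
    // spans size + (size - 1) * (r - 1) elements.
    static int effective(int size, int dilation) {
      return size + (size - 1) * (dilation - 1);
    }

    int OutputRows(int input_rows, int patch_rows, int row_stride,
                   int input_row_dilation, int patch_row_dilation) {
      const int input_rows_eff = effective(input_rows, input_row_dilation);
      const int patch_rows_eff = effective(patch_rows, patch_row_dilation);
      return divup(input_rows_eff - patch_rows_eff + 1, row_stride);  // VALID case
    }
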
segment_reduction_ops_gpu.cu.cc 156 Eigen::divup(input_outer_dim_size, Index(OuterDimTileSize));
softmax_op_gpu.cu.cc 120 const int numBlocks = Eigen::divup(rows * cols, numThreads);
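
The softmax_op_gpu.cu.cc hit is the canonical CUDA launch-sizing use of divup: round the element count up to whole blocks, then guard the overhang inside the kernel. A minimal sketch (the kernel name and body are placeholders, not the real softmax kernel):

    #include <cuda_runtime.h>

    __global__ void ElementwiseKernel(const float* in, float* out, int n) {
      const int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) out[i] = in[i];  // guard discards padding threads in the last block
    }

    void Launch(const float* in, float* out, int rows, int cols) {
      const int numThreads = 128;
      // Eigen::divup(rows * cols, numThreads): enough blocks to cover every element.
      const int numBlocks = (rows * cols + numThreads - 1) / numThreads;
      ElementwiseKernel<<<numBlocks, numThreads>>>(in, out, rows * cols);
    }
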
parameterized_truncated_normal_op.cc 308 int32 adjusted_batches = Eigen::divup(size, adjusted_samples);
reduction_gpu_kernels.cu.h 461 const int num_blocks = std::min(32, Eigen::divup(in_size, num_threads));
558 dim3 block_dim(32, std::min(Eigen::divup(extent_x, rows_per_warp), 32), 1);
560 Eigen::divup(static_cast<unsigned int>(extent_x),
642 int num_blocks = Eigen::divup(extent_y, threads_per_block);
    [all...]
eigen_backward_spatial_convolutions_test.cc 751 const int output_rows = divup(input_rows - patch_rows + 1, stride);
752 const int output_cols = divup(input_cols - patch_cols + 1, stride);
827 const int output_rows = divup(input_rows - patch_rows + 1, stride);
828 const int output_cols = divup(input_cols - patch_cols + 1, stride);
    [all...]
  /external/eigen/unsupported/Eigen/CXX11/src/Tensor/
TensorDeviceThreadPool.h 207 Index block_count = divup(n, block_size);
212 (divup<int>(block_count, numThreads()) * numThreads());
218 Index coarser_block_size = divup(n, prev_block_count - 1);
228 const Index coarser_block_count = divup(n, coarser_block_size);
233 (divup<int>(coarser_block_count, numThreads()) * numThreads());
257 Index mid = first + divup((last - first) / 2, block_size) * block_size;
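
The TensorDeviceThreadPool.h hits belong to parallelFor's block-size search: a candidate block size is scored by how evenly its blocks fill whole rounds of threads, and the block size is coarsened while efficiency holds. A simplified sketch of that score (an assumed reading of the snippets, not Eigen's exact code):

    typedef long Index;

    static Index divup(Index x, Index y) { return (x + y - 1) / y; }

    // Fraction of thread slots doing useful work when n items are split into
    // blocks of block_size and dealt out to num_threads threads in rounds;
    // divup(block_count, num_threads) * num_threads is the block count rounded
    // up to a whole number of rounds.
    double BlockEfficiency(Index n, Index block_size, Index num_threads) {
      const Index block_count = divup(n, block_size);
      return static_cast<double>(block_count) /
             static_cast<double>(divup(block_count, num_threads) * num_threads);
    }
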
TensorContractionThreadPool.h 244 Index nm0 = divup(m, bm);
245 Index nn0 = divup(n, bn);
246 Index nk = divup(k, bk);
264 Index nm = divup(nm0, gm);
265 Index nn = divup(nn0, gn);
358 divup<size_t>(bm_ * bk_ * sizeof(LhsScalar), align) * align;
360 divup<size_t>(bn_ * bk_ * sizeof(RhsScalar), align) * align;
659 Index nm0 = divup(m, bm);
665 while (gm1 <= nm0 && nm1 == divup(nm0, gm1)) gm1++;
671 nm1 = divup(nm0, gm1)
    [all...]
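
TensorContractionThreadPool.h uses divup for tiling arithmetic: an (m x k) by (k x n) contraction cut into bm x bk and bk x bn panels yields divup-many panels per axis, which are then grouped gm or gn at a time into tasks. A sketch of just the counting:

    typedef long Index;

    static Index divup(Index x, Index y) { return (x + y - 1) / y; }

    struct TileCounts { Index nm, nn, nk; };

    TileCounts CountTiles(Index m, Index n, Index k,
                          Index bm, Index bn, Index bk, Index gm, Index gn) {
      const Index nm0 = divup(m, bm);   // row panels
      const Index nn0 = divup(n, bn);   // column panels
      const Index nk  = divup(k, bk);   // reduction slices
      // Grouping gm row panels (gn column panels) per task:
      return TileCounts{divup(nm0, gm), divup(nn0, gn), nk};
    }
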
TensorReductionCuda.h 293 const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
320 const int num_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
386 const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread);
457 const Index input_col_blocks = divup<Index>(num_coeffs_to_reduce, blockDim.x * NumPerThread * 2);
458 const Index num_input_blocks = divup<Index>(input_col_blocks * num_preserved_coeffs, 2);
561 const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
569 const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
607 const int dyn_blocks = divup<int>(num_coeffs, block_size * num_per_thread);
615 const int dyn_blocks = divup<int>(num_preserved_vals, 1024);
680 const Index max_iter = num_preserved_coeffs * divup<Index>(num_coeffs_to_reduce, NumPerThread)
    [all...]
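
TensorReductionCuda.h adds a per-thread batching factor: each thread reduces num_per_thread coefficients, so the block count divides by block_size * num_per_thread rather than by block_size alone. A sketch:

    static int divup(int x, int y) { return (x + y - 1) / y; }

    int ReductionBlocks(int num_coeffs, int block_size, int num_per_thread) {
      // Each block consumes block_size * num_per_thread coefficients per pass.
      return divup(num_coeffs, block_size * num_per_thread);
    }
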
TensorMeta.h 30 T divup(const X x, const Y y) { (function, in namespace Eigen)
36 T divup(const T x, const T y) { (function, in namespace Eigen)
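
The TensorMeta.h hits are the definition itself: a heterogeneous overload at line 30 and a same-type overload at line 36. The listing truncates the bodies; the reconstruction below assumes the standard (x + y - 1) / y form (the real declarations also carry Eigen's device/inline qualifiers):

    namespace Eigen {

    // Ceiling division for non-negative integers. Note that x + y - 1 can
    // overflow near the top of the type's range.
    template <typename T, typename X, typename Y>
    T divup(const X x, const Y y) {
      return static_cast<T>((x + y - 1) / y);
    }

    template <typename T>
    T divup(const T x, const T y) {
      return static_cast<T>((x + y - 1) / y);
    }

    }  // namespace Eigen
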
TensorExecutor.h 258 const int num_blocks = numext::maxi<int>(numext::mini<int>(max_blocks, divup<int>(size, block_size)), 1);
  /external/tensorflow/tensorflow/contrib/rnn/kernels/
lstm_ops_gpu.cu.cc 198 Eigen::divup(batch_size * (cell_size + input_size), block_dim);
213 dim3 grid_dim_2d(Eigen::divup(batch_size, static_cast<int>(block_dim_2d.x)),
214 Eigen::divup(cell_size, static_cast<int>(block_dim_2d.y)));
327 dim3 grid_dim_2d(Eigen::divup(batch_size, static_cast<int>(block_dim_2d.x)),
328 Eigen::divup(cell_size, static_cast<int>(block_dim_2d.y)));
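
The LSTM kernels size a 2-D grid the same way, one axis per tensor dimension, so the batch and cell axes each get their own divup. A sketch (the block shape is illustrative, not the file's actual choice):

    #include <cuda_runtime.h>

    static int divup(int x, int y) { return (x + y - 1) / y; }

    dim3 MakeGrid2D(int batch_size, int cell_size) {
      const dim3 block_dim_2d(32, 8);  // assumed block shape
      return dim3(divup(batch_size, static_cast<int>(block_dim_2d.x)),
                  divup(cell_size, static_cast<int>(block_dim_2d.y)));
    }
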
  /external/tensorflow/tensorflow/contrib/cudnn_rnn/kernels/
cudnn_rnn_ops.cc 281 Eigen::divup(byte_size, static_cast<int64>(sizeof(T)));
    [all...]
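
The cudnn_rnn_ops.cc hit converts a parameter-buffer byte count into a count of T-sized elements, rounding up so a trailing partial element still reserves a slot. A sketch:

    #include <cstdint>

    template <typename T>
    int64_t ElementCount(int64_t byte_size) {
      const int64_t elem_size = static_cast<int64_t>(sizeof(T));
      // Eigen::divup(byte_size, static_cast<int64>(sizeof(T))) in the original.
      return (byte_size + elem_size - 1) / elem_size;
    }
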

Completed in 336 milliseconds