HomeSort by relevance Sort by last modified time
    Searched defs:Compute (Results 26 - 50 of 78) sorted by null

1 2 3 4

  /external/tensorflow/tensorflow/core/kernels/
bincount_op.cc 38 static Status Compute(OpKernelContext* context,
94 void Compute(OpKernelContext* ctx) override {
110 OP_REQUIRES_OK(ctx, functor::BincountFunctor<Device, T>::Compute(
bucketize_op_gpu.cu.cc 82 static Status Compute(OpKernelContext* context,
cudnn_pooling_gpu.cc 34 void DnnPooling3dOp<T>::Compute(
117 void DnnPooling3dGradOp<T>::Compute(
histogram_op.cc 36 static Status Compute(OpKernelContext* context,
79 void Compute(OpKernelContext* ctx) override {
112 ctx, functor::HistogramFixedWidthFunctor<Device, T, Tout>::Compute(
histogram_op_gpu.cu.cc 40 static Status Compute(OpKernelContext* context,
matrix_diag_op.cc 49 void Compute(OpKernelContext* context) override {
75 functor::MatrixDiagPart<Device, T>::Compute(
88 void Compute(OpKernelContext* context) override {
111 functor::MatrixDiag<Device, T>::Compute(context->eigen_device<Device>(),
146 static void Compute(const CPUDevice& d,
160 static void Compute(const CPUDevice& d,
179 void MatrixDiag<GPUDevice, T>::Compute( \
184 void MatrixDiagPart<GPUDevice, T>::Compute( \
matrix_set_diag_op.cc 47 void Compute(OpKernelContext* context) override {
87 functor::MatrixSetDiag<Device, T>::Compute(
117 static void Compute(OpKernelContext* context, const CPUDevice& device,
147 void MatrixSetDiag<GPUDevice, T>::Compute( \
sendrecv_ops.cc 77 void SendOp::Compute(OpKernelContext* ctx) {
sparse_tensor_dense_matmul_op_gpu.cu.cc 68 Compute(const GPUDevice& d, typename TTypes<T>::Matrix out,
transpose_op.cc 47 void Compute(OpKernelContext* context) override {
144 void TransposeOp::Compute(OpKernelContext* ctx) {
bias_op.cc 97 void Compute(OpKernelContext* context) override {
147 Compute<2>(context, input, bias, output);
150 Compute<3>(context, input, bias, output);
153 Compute<4>(context, input, bias, output);
156 Compute<5>(context, input, bias, output);
167 void Compute(OpKernelContext* ctx, const Tensor& input, const Tensor& bias,
217 void Compute(OpKernelContext* context) override {
321 void Compute(OpKernelContext* context) override {
343 BiasGPU<T>::compute(context->template eigen_device<Device>(),
380 void Compute(OpKernelContext* context) override
    [all...]
compare_and_bitpack_op.cc 42 void Compute(OpKernelContext* c) override {
89 static EIGEN_STRONG_INLINE void Compute(typename TTypes<T>::ConstMatrix input,
109 static EIGEN_STRONG_INLINE void Compute(
149 ComputeShard<T>::Compute(input, output, thresh, start, limit);
constant_op.cc 83 void ConstantOp::Compute(OpKernelContext* ctx) {
147 void HostConstantOp::Compute(OpKernelContext* ctx) {
181 void Compute(OpKernelContext* context) override {
278 void Compute(OpKernelContext* ctx) override {
348 void Compute(OpKernelContext* ctx) override {
399 void PlaceholderOp::Compute(OpKernelContext* ctx) {
control_flow_ops.cc 26 void SwitchOp::Compute(OpKernelContext* context) {
173 void Compute(OpKernelContext* context) override {
215 void MergeOp::Compute(OpKernelContext* context) {
336 void EnterOp::Compute(OpKernelContext* context) {
431 void ExitOp::Compute(OpKernelContext* context) {
510 void NextIterationOp::Compute(OpKernelContext* context) {
598 void Compute(OpKernelContext* context) override {
646 void Compute(OpKernelContext* context) override {
linalg_ops_common.cc 89 void LinearAlgebraOp<Scalar>::Compute(OpKernelContext* context) {
129 // dimensions to compute the results.
pooling_ops_common.cc 141 void DnnPoolingOp<T>::Compute(
238 void DnnPoolingGradOp<T>::Compute(
sparse_xent_op.h 152 static void Compute(const Device& d, typename TTypes<T>::ConstMatrix logits,
topk_op.cc 46 } else { // k is an input (TopKV2), so we won't know it until Compute.
51 void Compute(OpKernelContext* context) override {
90 Status s = functor::TopKFunctor<Device, T>::Compute(
105 Compute(OpKernelContext* context, bool sorted, int k,
250 Status TopKFunctor<GPUDevice, T>::Compute( \
variable_ops.cc 55 void VariableOp::Compute(OpKernelContext* ctx) {
95 void Compute(OpKernelContext* context) override {
142 void Compute(OpKernelContext* context) override {
168 void Compute(OpKernelContext* context) override {
where_op.cc 72 static Status Compute(OpKernelContext* ctx, const CPUDevice& d,
92 EIGEN_ALWAYS_INLINE static Status Compute(
128 void Compute(OpKernelContext* context) override {
144 Status s = functor::NumTrue<CPUDevice, T, int64>::Compute(
160 Status s = functor::Where<CPUDevice, NDIM, T, int64>::Compute( \
208 Status NumTrue<GPUDevice, T, Tindex>::Compute( \
225 Status Where<GPUDevice, Dims, T, Tindex>::Compute( \
285 Status s = functor::NumTrue<GPUDevice, T, Tindex>::Compute(
311 // validation checking. Currently Where<GPUDevice>::Compute()
324 Status s = functor::Where<GPUDevice, NDIM, T, Tindex>::Compute( \
    [all...]
  /external/googletest/googlemock/src/
gmock-matchers.cc 207 ElementMatcherPairs Compute() {
309 return MaxBipartiteMatchState(g).Compute();
  /external/tensorflow/tensorflow/compiler/jit/
xla_device.cc 238 void XlaDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) {
239 VLOG(1) << "XlaDevice::Compute " << op_kernel->name() << ":"
246 op_kernel->Compute(context);
  /external/v8/testing/gmock/src/
gmock-matchers.cc 207 ElementMatcherPairs Compute() {
309 return MaxBipartiteMatchState(g).Compute();
  /prebuilts/tools/common/google-api-services-compute/
google-api-services-compute-v1-rev181-1.23.0.jar 
  /build/make/tools/releasetools/
blockimgdiff.py 461 def Compute(self, prefix):
720 # Compute the maximum blocks available for stash based on /cache size and
731 # Now go through all the commands. Compute the required stash size on the
    [all...]

Completed in 694 milliseconds

1 2 3 4