/external/tensorflow/tensorflow/compiler/xla/service/cpu/
  conv_canonicalization.cc
     47  // A canonical convolution's dimension numbers need to satisfy the
    128  // The window of the old convolution is reused, because reshapes only
    135  // Reshape the output back to the shape of the original convolution.
  conv_canonicalization_test.cc
    105  // NHWC for the convolution to hit the Eigen fast path.
    108  // HWIO for the convolution to hit the Eigen fast path.
    110  // The output of the canonical convolution is in NHWC order (the same as

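These hits describe XLA's CPU canonicalization: convolution operands are transposed so that activations are laid out as NHWC and filters as HWIO, the layouts Eigen's fast path expects. A rough numpy sketch of that layout change (the helper below is illustrative and assumes NCHW activations and OIHW filters; it is not the pass itself):

    import numpy as np

    def to_canonical_layouts(activations_nchw, filters_oihw):
        # Illustrative only: assumes NCHW activations and OIHW filters.
        nhwc = np.transpose(activations_nchw, (0, 2, 3, 1))  # NCHW -> NHWC
        hwio = np.transpose(filters_oihw, (2, 3, 1, 0))      # OIHW -> HWIO
        return nhwc, hwio
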
/external/tensorflow/tensorflow/examples/learn/
  resnet.py
     56  # First convolution expands to 64 channels
     83  # 1x1 convolution responsible for reducing dimension
    102  # 1x1 convolution responsible for restoring dimension

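The resnet.py comments outline the standard bottleneck pattern: a 1x1 convolution reduces the channel dimension, a 3x3 convolution does the spatial work, and a second 1x1 convolution restores it before the residual addition. A hedged sketch of that structure (a hypothetical helper using the TF 1.x tf.layers API, not the example's own code; it assumes the input already has out_channels channels):

    import tensorflow as tf

    def bottleneck_block(net, bottleneck_channels, out_channels):
        shortcut = net
        # 1x1 convolution responsible for reducing dimension.
        net = tf.layers.conv2d(net, bottleneck_channels, 1, activation=tf.nn.relu)
        net = tf.layers.conv2d(net, bottleneck_channels, 3, padding='same',
                               activation=tf.nn.relu)
        # 1x1 convolution responsible for restoring dimension.
        net = tf.layers.conv2d(net, out_channels, 1)
        return tf.nn.relu(net + shortcut)
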
/external/tensorflow/tensorflow/tools/graph_transforms/
  fuse_convolutions.cc
     61  // Set up the new fused version of the convolution op.
    116  // Set up the new fused version of the convolution op.
    173  // Set up the new fused version of the convolution op.

/external/tensorflow/tensorflow/compiler/xla/service/gpu/
  cudnn_convolution_runner.cc
     81  VLOG(3) << "Convolution Algorithm: " << algorithm.algorithm().algo_id();
     84  VLOG(3) << "Convolution kind: " << CudnnConvKindToString(kind);
    116  // cuDNN's convolution APIs support the BDYX layout for activations/output and
    164  // Add a singleton dimension in the 1D convolution case.
    196  "Unable to launch convolution with type %s and algorithm (%lld, %lld)",
  cudnn_convolution_rewriter_test.cc
     49  // default in HWIO order. For backward filter convolution, we need to swap
     88  // A convolution window with stride 1 and zero padding. The size fields are
    273  // Verify the convolution's shape is consistent with ShapeInference.
    292  // Low padding of the backward input convolution
    336  // convolution.
    403  // Verify the convolution's shape is consistent with ShapeInference.
    450  // Verify the convolution's shape is consistent with ShapeInference.
    503  // Verify the convolution's shape is consistent with ShapeInference.
    533  // padding on the gradients of backward convolution (b/32744257).
    555  // Verify the convolution's shape is consistent with ShapeInference
    [all...]

/external/tensorflow/tensorflow/core/kernels/
  depthwise_conv_op.cc
     47  // In depthwise convolution, one input is convolved into depth_multipler
     49  // convolution does.
     51  // regular convolution. Please refer to the regular convolution kernels for
    162  "Depthwise convolution on CPU is only supported for NHWC format"));
    301  // For 2D convolution, there should be 4 dimensions.
    373  // If in_depth==1, this operation is just a standard convolution, so
  mkl_conv_ops.cc
    207  // Create MKL convolution primitives
    255  // Create Convolution Primitive
    324  // Execute convolution
    567  // Describe how the inputs and outputs of Convolution look like. Also
    592  // Create memory descriptors for convolution data w/ no specified format.
    606  // Create convolution primitive with Bias.
    627  // Create convolution primitive without Bias.
    725  // add it to the net before convolution. No need to check for output
    736  // Create convolution primitive and add it to net.
    [all...]
  depthwise_conv_op_gpu.cu.cc
     39  // Returns whether depthwise convolution forward or backward input pass can be
     52  // Returns whether depthwise convolution backward filter pass can be performed
     65  // convolution depending on a template argument of this enum.
     68  // A Cuda kernel to compute the depthwise convolution forward pass
    159  // CUDA kernel to compute the depthwise convolution forward pass in NHWC format,
    164  // performing the convolution. Each thread handles two elements per iteration,
    304  // A Cuda kernel to compute the depthwise convolution forward pass
    374  // convolution, writing into the output at the end.
    439  // CUDA kernel to compute the depthwise convolution forward pass in NCHW format,
    444  // performing the convolution. Each thread handles two elements per iteration
    [all...]

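The depthwise_conv_op.cc comments capture the key difference from a regular convolution: each input channel is convolved separately with its own depth_multiplier filters, so the output has in_depth * depth_multiplier channels and there is no cross-channel mixing. A loop-based numpy sketch of the NHWC forward pass, purely to pin down the indexing (the real kernels are vectorized and handle strides and padding):

    import numpy as np

    def depthwise_conv2d_nhwc(x, w):
        # x: [N, H, W, C], w: [KH, KW, C, depth_multiplier]; VALID padding, stride 1.
        n, h, wid, c = x.shape
        kh, kw, _, dm = w.shape
        out = np.zeros((n, h - kh + 1, wid - kw + 1, c * dm))
        for ci in range(c):              # each input channel is filtered on its own
            for m in range(dm):          # ... by depth_multiplier separate filters
                for i in range(out.shape[1]):
                    for j in range(out.shape[2]):
                        patch = x[:, i:i + kh, j:j + kw, ci]
                        out[:, i, j, ci * dm + m] = np.sum(patch * w[:, :, ci, m],
                                                           axis=(1, 2))
        return out

With in_depth == 1 the loop over channels collapses and the operation becomes an ordinary convolution with depth_multiplier output channels, which is the special case the line-373 comment refers to.
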
/external/mesa3d/docs/
  conform.html
    200  Convolution test passed.
    201  Convolution Border test passed.
    313  Convolution test passed.
    314  Convolution Border test passed.
    426  Convolution test passed.
    427  Convolution Border test passed.
    539  Convolution test passed.
    540  Convolution Border test passed.
    652  Convolution test passed.
    653  Convolution Border test passed
    [all...]

/external/tensorflow/tensorflow/stream_executor/
  dnn.h
    361  // Describes a filter for the convolution. This is the "window" from
    427  // Returns the number of weights required as parameters for a convolution
    431  // Returns the number of biases required as parameters for a convolution
    470  // Describes a convolution.
    485  // - vertical_filter_stride: the convolution slides a 2-dimensional window of
    [all...]

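The dnn.h queries about the number of weights and biases for a convolution layer reduce to products over the filter descriptor's dimensions. The formulas below are the standard ones, not quoted from dnn.h:

    # For a 2-D convolution with a KH x KW window, C input feature maps and
    # K output feature maps:
    #   weights = KH * KW * C * K
    #   biases  = K  (one bias per output feature map)
    kh, kw, c, k = 3, 3, 64, 128
    weight_count = kh * kw * c * k   # 73728
    bias_count = k                   # 128
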
/external/tensorflow/tensorflow/contrib/fused_conv/ops/
  fused_conv2d_bias_activation_op.cc
    102  Computes a fused kernel which implements: 2-D convolution, adds side input,
    103  with separate scaling on convolution and side inputs, then adds bias and
    120  (conceptually.. in reality it is applied after convolution).

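Put together, the fused op computes something like activation(conv_scale * conv(input, filter) + side_scale * side_input + bias), with the activation applied after the convolution as the line-120 comment notes. A numpy sketch of the post-convolution part (names and defaults here are illustrative, not the op's exact attributes):

    import numpy as np

    def fused_bias_activation(conv_out, side_input, bias,
                              conv_scale=1.0, side_scale=0.0):
        # conv_out and side_input: NHWC tensors; bias: one value per channel.
        acc = conv_scale * conv_out + side_scale * side_input + bias
        return np.maximum(acc, 0.0)   # ReLU applied after the convolution
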
/external/tensorflow/tensorflow/contrib/lite/kernels/internal/optimized/
  multithreaded_conv.h
     53  // We have a single global threadpool for all convolution operations. This means
    127  // For 1x1 kernel, the 2D convolution is reduced to matrix
    140  // the 2D convolution is reduced to matrix multiplication.

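The claim on lines 127 and 140, that a 2D convolution with a 1x1 kernel is just a matrix multiplication, is easy to check in numpy (illustrative, not the TFLite code): flatten every pixel into a row and multiply by the [in_channels, out_channels] filter matrix.

    import numpy as np

    x = np.random.rand(2, 5, 7, 16)     # NHWC input
    w = np.random.rand(1, 1, 16, 32)    # 1x1 filter, HWIO

    out = (x.reshape(-1, 16) @ w.reshape(16, 32)).reshape(2, 5, 7, 32)
    # `out` matches a stride-1, 1x1 conv2d of x with w.
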
/external/skia/src/gpu/effects/
  GrGaussianConvolutionFragmentProcessor.h
     16  * A 1D Gaussian convolution effect. The kernel is computed as an array of 2 * half-width weights.

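The Skia comment stores the 1D kernel as an array of 2 * half-width weights. The sketch below shows the usual way such weights are produced from a sigma, sampling the Gaussian at integer offsets and normalizing; the exact window-width convention (2 * half-width vs. 2 * half-width + 1 taps) is Skia's, not this sketch's.

    import numpy as np

    def gaussian_weights(sigma, half_width):
        offsets = np.arange(-half_width, half_width + 1)
        w = np.exp(-(offsets ** 2) / (2.0 * sigma ** 2))
        return w / w.sum()    # normalize so the weights sum to 1
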
/external/skqp/src/gpu/effects/
  GrGaussianConvolutionFragmentProcessor.h
     16  * A 1D Gaussian convolution effect. The kernel is computed as an array of 2 * half-width weights.

/external/tensorflow/tensorflow/compiler/xla/service/
  algebraic_simplifier.h
     63  // Enable convolution simplication on platforms where it is profitable.
  transpose_folding.h
     47  // the instruction argument is implemented as a convolution that supports

/external/tensorflow/tensorflow/compiler/xla/tools/parser/
  hlo_parser_test.cc
    344  // convolution
    346  "Convolution",
    353  ROOT %convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f
    358  // convolution rank 2
    366  ROOT %convolution = f32[1,2]{0,1} convolution(f32[1,2]{1,0} %input, f32[1,1]{1,0} %filter), dim_labels=bf_io->bf
    371  // convolution backward
    379  ROOT %convolution-base-dilated = f32[128,14,14,512]{0,3,2,1} convolution(f32[128,7,7,512]{0,3,2,1} %input, f32[3,3,512,512]{3,2,1,0} %filter), window={size=3x3 pad=1_2x1_2 (…)
    [all...]

/external/tensorflow/tensorflow/contrib/tpu/profiler/
  op_profile.proto
     36  // e.g. for a convolution, this analyzes the image and ignores the kernel.

/external/tensorflow/tensorflow/core/api_def/base_api/
  api_def_Conv2D.pbtxt
     58  summary: "Computes a 2-D convolution given 4-D `input` and `filter` tensors."

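The op described by this api_def is exposed in Python as tf.nn.conv2d. A minimal usage example with arbitrarily chosen shapes:

    import tensorflow as tf

    images = tf.random.normal([8, 28, 28, 3])    # 4-D input, NHWC
    filters = tf.random.normal([5, 5, 3, 16])    # 4-D filter, [H, W, in, out]
    out = tf.nn.conv2d(images, filters, strides=[1, 1, 1, 1], padding='SAME')
    # `out` has shape [8, 28, 28, 16].
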
/external/tensorflow/tensorflow/python/ops/
  batch_norm_benchmark.py
    178  print("Forward convolution (lower layers).")
    192  print("Forward/backward convolution (lower layers).")
    204  print("Forward convolution (higher layers).")
    218  print("Forward/backward convolution (higher layers).")

/external/tensorflow/tensorflow/compiler/xla/
  reference_util.h
     55  // Returns the result of a convolution `lhs <conv> rhs`, with the default
     56  // convolution dimension numbers returned from
     62  // Returns the result of a convolution `lhs <conv> rhs`, with the given
     63  // convolution dimension numbers.
     69  // Returns the result of a convolution `lhs <conv> rhs`, with the given
     77  // Returns the result of a convolution `lhs <conv> rhs`, with the default
     78  // convolution dimension numbers returned from
     85  // Returns the result of a convolution `lhs <conv> rhs`.
     91  // Returns the result of a separable convolution with the given parameters.
     92  // kernel_stride and padding applies to the depthwise convolution durin
    [all...]

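The last two reference_util.h hits concern a separable convolution, i.e. a depthwise convolution (to which the kernel stride and padding apply) followed by a 1x1 pointwise convolution that mixes channels. The same composition expressed with TensorFlow's public ops, as a rough illustration rather than the XLA reference implementation:

    import tensorflow as tf

    x = tf.random.normal([1, 32, 32, 8])
    depthwise = tf.random.normal([3, 3, 8, 1])    # one filter per input channel
    pointwise = tf.random.normal([1, 1, 8, 16])   # 1x1 conv mixes the channels

    dw = tf.nn.depthwise_conv2d(x, depthwise, strides=[1, 1, 1, 1], padding='SAME')
    out = tf.nn.conv2d(dw, pointwise, strides=[1, 1, 1, 1], padding='SAME')
    # tf.nn.separable_conv2d(x, depthwise, pointwise, ...) fuses the two steps.
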
/external/python/cpython3/Modules/_decimal/libmpdec/
  sixstep.c
    119  /* An unordered transform is sufficient for convolution. */
    156  /* An unordered transform is sufficient for convolution. */

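The sixstep.c comments refer to the number-theoretic transforms libmpdec uses for fast multiplication: a transform that leaves its output in a permuted ("unordered") order is still sufficient, because convolution only needs pointwise products of the spectra, and a permutation applied consistently to both operands cancels when the inverse transform consumes the same order. The same fact illustrated with an ordinary FFT in numpy, not libmpdec's transform:

    import numpy as np

    a = np.array([1.0, 2.0, 3.0, 0.0])
    b = np.array([4.0, 5.0, 0.0, 0.0])

    # Circular convolution via the convolution theorem, in natural order.
    direct = np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)).real

    # Permuting both spectra the same way and undoing the permutation before
    # the inverse transform gives the identical result.
    perm = np.array([2, 0, 3, 1])
    shuffled = np.fft.fft(a)[perm] * np.fft.fft(b)[perm]
    same = np.fft.ifft(shuffled[np.argsort(perm)]).real
    assert np.allclose(direct, same)
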
/external/skia/gm/
  imagefiltersgraph.cpp
    112  // Dilate -> matrix convolution.
    114  // convolution) correctly handles a non-zero source offset

/external/skqp/gm/
  imagefiltersgraph.cpp
    112  // Dilate -> matrix convolution.
    114  // convolution) correctly handles a non-zero source offset