/external/eigen/bench/tensors/ |
tensor_benchmarks_cpu.cc | 146 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 4); 147 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 8); 148 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 12); 150 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 4); 151 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 8); 152 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 12); 154 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 4); 155 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 8); 156 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 12); 158 BM_FuncWithKernelDimsCPU(convolution, 4, 7, 4) [all...] |
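The kernel shapes swept above (7x1 paired with 1x7, 7x4 with 4x7) are exactly the row and column passes that arise when a 2-D filter is separable, each run over what appears to be a thread count (the trailing 4/8/12). A minimal NumPy sketch of why those skinny kernels are worth benchmarking on their own; all names here are illustrative, not from the benchmark:

```python
import numpy as np

def conv2d_valid(img, k):
    """Plain 'valid' 2-D correlation, O(H * W * kh * kw)."""
    kh, kw = k.shape
    out = np.empty((img.shape[0] - kh + 1, img.shape[1] - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = np.sum(img[i:i+kh, j:j+kw] * k)
    return out

rng = np.random.default_rng(0)
img = rng.standard_normal((32, 32))
row = rng.standard_normal((1, 7))   # 1x7 pass
col = rng.standard_normal((7, 1))   # 7x1 pass

# A rank-1 7x7 kernel equals its two 1-D passes chained together,
# replacing one O(49) inner loop with two O(7) loops per pixel.
assert np.allclose(conv2d_valid(img, col @ row),
                   conv2d_valid(conv2d_valid(img, row), col))
```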
tensor_benchmarks_gpu.cu | 70 BM_FuncWithKernelDimsGPU(convolution, 7, 1); 71 BM_FuncWithKernelDimsGPU(convolution, 1, 7); 72 BM_FuncWithKernelDimsGPU(convolution, 7, 4); 73 BM_FuncWithKernelDimsGPU(convolution, 4, 7); 74 BM_FuncWithKernelDimsGPU(convolution, 7, 64); 75 BM_FuncWithKernelDimsGPU(convolution, 64, 7);
|
tensor_benchmarks_fp16_gpu.cu | 71 BM_FuncWithKernelDimsGPU(convolution, 7, 1); 72 BM_FuncWithKernelDimsGPU(convolution, 1, 7); 73 BM_FuncWithKernelDimsGPU(convolution, 7, 4); 74 BM_FuncWithKernelDimsGPU(convolution, 4, 7); 75 BM_FuncWithKernelDimsGPU(convolution, 7, 64); 76 BM_FuncWithKernelDimsGPU(convolution, 64, 7);
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
isolated_convolution.hlo | 1 HloModule convolution.167: 3 ENTRY %convolution.167 (parameter.0: f32[16,28,28,128], parameter.1: f32[3,3,128,128]) -> f32[16,28,28,128] { 6 ROOT %convolution.167 = f32[16,28,28,128]{3,0,2,1} convolution(f32[16,28,28,128]{3,0,2,1} %parameter.0, f32[3,3,128,128]{3,2,1,0} %parameter.1), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01oi->b01f
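The window `size=3x3 pad=1_1x1_1` in this module preserves the 28x28 spatial extent: with one unit of low and high padding per dimension, each spatial output size is (28 + 1 + 1 - 3) + 1 = 28. The same arithmetic as a sketch, with the helper name invented here:

```python
def conv_output_size(in_size, kernel, pad_lo, pad_hi, stride=1):
    # XLA window arithmetic for one spatial dimension (no dilation).
    return (in_size + pad_lo + pad_hi - kernel) // stride + 1

# size=3x3 pad=1_1x1_1 over a 28x28 input -> 28x28, as in the HLO above.
assert conv_output_size(28, 3, pad_lo=1, pad_hi=1) == 28
```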
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
cudnn_convolution_rewriter.cc | 66 // Backward filter convolution is implemented in XLA as the forward 67 // convolution of padded activations and dilated gradients. Padding on 69 // of the forward convolution. 74 // Convolution 78 // Step 2: match paddings and dimension numbers of the forward convolution. 93 VLOG(1) << "Forward convolution's window " 99 VLOG(1) << "Forward convolution's window " 117 << " is a regular forward convolution. No need " 118 "to fold it to a backward filter convolution."; 122 // Step 3: fuse the matched HLOs into a backward convolution instruction [all...] |
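The comment states the identity this rewriter pattern-matches: the filter gradient is itself a forward correlation of the (padded) activations with the output gradient; the gradient dilation only enters once strides are involved. A 1-D, stride-1 NumPy check of that identity, with all names illustrative:

```python
import numpy as np

def forward(x, w):
    """y[i] = sum_j x[i+j] * w[j] (stride 1, no padding)."""
    return np.array([np.dot(x[i:i+len(w)], w)
                     for i in range(len(x) - len(w) + 1)])

rng = np.random.default_rng(0)
x, w = rng.standard_normal(10), rng.standard_normal(3)
dy = rng.standard_normal(len(x) - len(w) + 1)  # grad w.r.t. the conv output

# dL/dw[j] = sum_i x[i+j] * dy[i]: a forward correlation of x with dy
# playing the role of the filter.
dw = forward(x, dy)

# Cross-check by differentiating L = dot(forward(x, w), dy) numerically.
eps = 1e-6
num = np.array([(np.dot(forward(x, w + eps * np.eye(3)[j]), dy)
                 - np.dot(forward(x, w), dy)) / eps for j in range(3)])
assert np.allclose(dw, num, atol=1e-4)
```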
pad_insertion.h | 24 // An HLO pass that canonicalizes convolution instructions for GPU codegen. It 25 // inserts Pad instructions before Convolution instructions with uncanonicalized 26 // padding, so that they can be lowered to cuDNN convolution.
|
cudnn_convolution_runner.h | 32 // A way to think about these is that a convolution is defined by three arrays 34 // we can compute the third. For example, a backward-input convolution takes as 36 // to do a forward convolution of "input" using filter, the result would be 40 // example, a backward-input convolution is not actually the mathematical 41 // inverse of a forward convolution. But it's right as far as the shapes and 53 // Calls into cudnn to run the specified convolution. 59 // convolution with half data type is implemented with cudnn PSEUDO_HALF 66 // just ask cudnn how much scratch space it needs for a particular convolution. 69 // call a convolution, you should call the version that takes a scratch
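The header's framing, that a convolution relates three arrays and any two determine the third, is visible in the shape equation alone. A hedged sketch for stride-1, unpadded 1-D shapes (the function is invented for illustration):

```python
# Forward: out = in - filter + 1. Each convolution "kind" solves the
# same equation for a different unknown.
def conv_shape(kind, a, b):
    if kind == "forward":          # (input, filter)  -> output size
        return a - b + 1
    if kind == "backward_input":   # (output, filter) -> input size
        return a + b - 1
    if kind == "backward_filter":  # (input, output)  -> filter size
        return a - b + 1
    raise ValueError(kind)

assert conv_shape("forward", 28, 3) == 26
assert conv_shape("backward_input", 26, 3) == 28
assert conv_shape("backward_filter", 28, 26) == 3
```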
|
pad_insertion.cc | 36 // If the (positive and negative) padding on the input operand of a convolution 37 // can't be folded into a cuDNN convolution libcall (e.g. uneven padding and 110 // If the padding on the kernel operand of a convolution can't be folded into a 111 // cuDNN convolution libcall (e.g. dilation), returns a kPad instruction that 152 // Insert slices and/or pads between the convolution and its input and/or 161 // Remove the padding from convolution's window field. These paddings are 208 // A backward filter convolution with uneven padding can be canonicalized to 228 // negative padding in a backward convolution, and would therefore cause 229 // cuDNN convolution (which doesn't support negative padding) to fail. 240 // Since we move some padding from the backward convolution to the kPad, w [all...] |
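The canonicalization described here can be checked numerically: a convolution with uneven padding equals an explicit pad of the excess followed by a convolution with the symmetric padding the cuDNN call accepts. A 1-D NumPy sketch, assuming stride 1 and names of my own choosing:

```python
import numpy as np

def correlate1d(x, w, pad_lo, pad_hi):
    x = np.pad(x, (pad_lo, pad_hi), mode='constant')
    return np.array([np.dot(x[i:i+len(w)], w)
                     for i in range(len(x) - len(w) + 1)])

rng = np.random.default_rng(0)
x, w = rng.standard_normal(16), rng.standard_normal(4)

# pad (2, 1) is uneven; move the extra low pad into an explicit kPad,
# leaving the symmetric (1, 1) the library call can express.
direct = correlate1d(x, w, pad_lo=2, pad_hi=1)
canon  = correlate1d(np.pad(x, (1, 0), mode='constant'), w, pad_lo=1, pad_hi=1)
assert np.allclose(direct, canon)
```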
/external/tensorflow/tensorflow/compiler/xla/service/ |
transpose_folding.cc | 54 const HloInstruction& convolution, 57 if (HloOpcode::kConvolution != convolution.opcode()) { 62 for (int64 i = 0; i < convolution.operand_count(); ++i) { 63 auto& operand = *convolution.operand(i); 69 return transposable_conv_operands(convolution, operand_set); 96 // Folds the operands of `convolution` that are foldable transposes. 97 // `computation` is the parent HLO computation of `convolution`. 101 auto& convolution = *pair.first; 109 convolution.convolution_dimension_numbers(); 116 HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx) [all...] |
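The fold is sound because a transposed operand read under relabeled dimension numbers yields the same values, so the transpose never needs to be materialized. A NumPy sketch of that equivalence; the `conv2d` helper imitates, but is not, XLA's ConvolutionDimensionNumbers machinery:

```python
import numpy as np

def conv2d(x, w, batch, feature, spatial):
    """'Valid' conv where x's dimensions are named by index; filter is HWIO."""
    x = np.moveaxis(x, (batch, *spatial, feature), (0, 1, 2, 3))  # to NHWC
    kh, kw, _, co = w.shape
    n, h, wd, _ = x.shape
    out = np.empty((n, h - kh + 1, wd - kw + 1, co))
    for i in range(h - kh + 1):
        for j in range(wd - kw + 1):
            out[:, i, j, :] = np.tensordot(
                x[:, i:i+kh, j:j+kw, :], w, axes=([1, 2, 3], [0, 1, 2]))
    return out

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 8, 8, 3))   # NHWC
w = rng.standard_normal((3, 3, 3, 4))   # HWIO

xt = x.transpose(0, 3, 1, 2)            # the operand of a kTranspose
plain  = conv2d(x,  w, batch=0, feature=3, spatial=(1, 2))
folded = conv2d(xt, w, batch=0, feature=1, spatial=(2, 3))  # numbers folded
assert np.allclose(plain, folded)
```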
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
ir_emission_utils.cc | 28 const HloInstruction& convolution) { 30 // implementing `convolution` with Eigen convolution: 36 const Shape& input_shape = convolution.operand(0)->shape(); 37 const Shape& kernel_shape = convolution.operand(1)->shape(); 47 if (window_util::HasWindowReversal(convolution.window())) { 52 convolution.convolution_dimension_numbers(); 54 // TODO(b/32897908): add an optimized implementation for 3D convolution. 72 const Shape& output_shape = convolution.shape();
|
conv_canonicalization.h | 28 // In order to hit the fast path of using Eigen's convolution implementation, a 29 // convolution's dimension numbers need to satisfy certain constraints (so 37 return "convolution-canonicalization";
|
cpu_layout_assignment.cc | 104 const HloInstruction* convolution = instruction; 105 const HloInstruction* lhs_instruction = convolution->operand(0); 106 const HloInstruction* rhs_instruction = convolution->operand(1); 108 // In order to implement `convolution` with Eigen convolution, the layouts 113 Shape output_shape(RowMajorShape(convolution->shape())); 119 constraints->SetOperandLayout(input_shape, convolution, 0)); 121 constraints->SetOperandLayout(filter_shape, convolution, 1)); 123 constraints->SetInstructionLayout(output_shape, convolution));
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
api_def_Conv2DBackpropFilter.pbtxt | 21 Gradients w.r.t. the output of the convolution. 29 the `filter` input of the convolution. 36 of the convolution. Must be in the same order as the dimension specified with 66 summary: "Computes the gradients of convolution with respect to the filter."
|
api_def_Conv2DBackpropInput.pbtxt | 21 Gradients w.r.t. the output of the convolution. 28 w.r.t. the input of the convolution. 35 of the convolution. Must be in the same order as the dimension specified with 65 summary: "Computes the gradients of convolution with respect to the input."
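Both files document gradients of the same forward op. For `Conv2DBackpropInput` the underlying identity at stride 1 is a full correlation of the output gradient with the spatially flipped filter; a 1-D NumPy check with illustrative names:

```python
import numpy as np

def correlate_valid(x, w):
    return np.array([np.dot(x[i:i+len(w)], w)
                     for i in range(len(x) - len(w) + 1)])

rng = np.random.default_rng(0)
x, w = rng.standard_normal(10), rng.standard_normal(3)
dy = rng.standard_normal(len(x) - len(w) + 1)

# y[i] = sum_j x[i+j] w[j]  =>  dL/dx[t] = sum_j dy[t-j] w[j],
# i.e. a "full" correlation of dy with the flipped filter.
pad = len(w) - 1
dx = correlate_valid(np.pad(dy, (pad, pad), mode='constant'), w[::-1])

# Cross-check by differentiating L = dot(correlate_valid(x, w), dy).
eps = 1e-6
num = np.array([(np.dot(correlate_valid(x + eps * np.eye(len(x))[t], w), dy)
                 - np.dot(correlate_valid(x, w), dy)) / eps
                for t in range(len(x))])
assert np.allclose(dx, num, atol=1e-4)
```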
|
api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt | 25 Gradients w.r.t. the output of the convolution. 33 the `filter` input of the convolution. 40 of the convolution. 69 summary: "Computes the gradients of depthwise convolution with respect to the filter."
|
api_def_DepthwiseConv2dNativeBackpropInput.pbtxt | 24 Gradients w.r.t. the output of the convolution. 33 convolution. 40 of the convolution. 69 summary: "Computes the gradients of depthwise convolution with respect to the input."
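The depthwise variants differ from the ops above in that every input channel is convolved with its own filter slice instead of being mixed across channels, and the documented gradients inherit that per-channel structure. A minimal forward sketch, assuming 1-D spatial data and a depth multiplier of 1:

```python
import numpy as np

def depthwise_conv1d(x, w):
    """x: [length, channels], w: [kernel, channels]; channels stay separate."""
    L, C = x.shape
    K, _ = w.shape
    out = np.empty((L - K + 1, C))
    for c in range(C):  # one independent 1-D convolution per channel
        out[:, c] = [np.dot(x[i:i+K, c], w[:, c]) for i in range(L - K + 1)]
    return out

x = np.random.default_rng(0).standard_normal((10, 3))
w = np.random.default_rng(1).standard_normal((4, 3))
assert depthwise_conv1d(x, w).shape == (7, 3)
```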
|
api_def_Conv3DBackpropFilter.pbtxt | 36 summary: "Computes the gradients of 3-D convolution with respect to the filter."
|
api_def_Conv3DBackpropInput.pbtxt | 36 summary: "Computes the gradients of 3-D convolution with respect to the input."
|
/external/tensorflow/tensorflow/python/layers/ |
convolutional.py | 36 """Abstract nD convolution layer (private, used as implementation base). 38 This layer creates a convolution kernel that is convolved 45 rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution. 47 of filters in the convolution). 49 length of the convolution window. 51 specifying the stride length of the convolution. 61 the dilation rate to use for dilated convolution. 67 kernel_initializer: An initializer for the convolution kernel. 70 kernel_regularizer: Optional regularizer for the convolution kernel [all...] |
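A short usage sketch of the public wrapper built on this private base class, written against the TF 1.x layers API this file belongs to; shapes and hyperparameters are chosen purely for illustration:

```python
import tensorflow as tf  # TF 1.x style, matching this file's era

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
# 32 filters, 5x5 window, stride 1; 'same' padding keeps the 28x28 extent.
conv = tf.layers.conv2d(images, filters=32, kernel_size=5,
                        strides=1, padding='same', activation=tf.nn.relu)
# conv has shape [None, 28, 28, 32].
```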
/external/skia/include/effects/ |
SkMatrixConvolutionImageFilter.h | 19 Matrix convolution image filter. This filter applies an NxM image 39 /** Construct a matrix convolution image filter. 44 convolution. This can be used to normalize the 46 @param bias A bias factor added to each pixel after convolution. 48 convolution. This can be used to center the kernel
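Per the parameters documented here, each output pixel is roughly `clamp(sum(kernel * neighborhood) * gain + bias)`. A grayscale NumPy sketch of that arithmetic, ignoring Skia's tile modes, kernel offset, and per-channel handling, and assuming an 8-bit clamp:

```python
import numpy as np

def matrix_convolve(img, kernel, gain=1.0, bias=0.0):
    """Grayscale sketch of the per-pixel math, interior pixels only."""
    kh, kw = kernel.shape
    out = np.zeros((img.shape[0] - kh + 1, img.shape[1] - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            s = np.sum(img[i:i+kh, j:j+kw] * kernel)
            out[i, j] = np.clip(s * gain + bias, 0, 255)
    return out

img = np.random.default_rng(0).integers(0, 256, (16, 16)).astype(float)
sharpen = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], dtype=float)
out = matrix_convolve(img, sharpen)  # kernel sums to 1, so gain=1 suffices
```

Setting gain to 1/sum(kernel) is the normalization the `gain` doc comment alludes to.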
|
/external/skqp/include/effects/ |
SkMatrixConvolutionImageFilter.h | 19 Matrix convolution image filter. This filter applies an NxM image 39 /** Construct a matrix convolution image filter. 44 convolution. This can be used to normalize the 46 @param bias A bias factor added to each pixel after convolution. 48 convolution. This can be used to center the kernel
|
/external/tensorflow/tensorflow/docs_src/api_guides/python/ |
nn.md | 31 ## Convolution 33 The convolution ops sweep a 2-D filter over a batch of images, applying the 41 Note that although these ops are called "convolution", they are strictly 54 convolution ops depend on the padding scheme chosen: `'SAME'` or `'VALID'`. 115 * @{tf.nn.convolution} 145 to the `Convolution` section for details about the padding calculation. 162 is the max-sum counterpart of standard sum-product convolution: 177 is the min-sum counterpart of standard sum-product convolution: 191 convolution. Please refer to the `Convolution` section for details [all...] |
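The guide's two padding schemes yield the usual output sizes: `SAME` rounds `in/stride` up, while `VALID` only places the filter entirely inside the input. A quick check of both formulas:

```python
import math

def out_size(in_size, filter_size, stride, padding):
    if padding == 'SAME':
        return math.ceil(in_size / stride)
    if padding == 'VALID':
        return math.ceil((in_size - filter_size + 1) / stride)
    raise ValueError(padding)

assert out_size(28, 3, stride=1, padding='SAME') == 28
assert out_size(28, 3, stride=1, padding='VALID') == 26
assert out_size(28, 3, stride=2, padding='SAME') == 14
assert out_size(28, 3, stride=2, padding='VALID') == 13
```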
/cts/suite/audio_quality/test_description/processing/ |
calc_delay.py | 26 def convolution(data0, data1reversed, n): 27 """calculate convolution part of data0 with data1 from pos n""" 33 return convolution(*args)
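`convolution(data0, data1reversed, n)` computes one lag of a cross-correlation: because the second signal arrives pre-reversed, the convolution step is a correlation, and the delay is the lag that maximizes it. A NumPy sketch of the same estimator; signal lengths and the delay are invented:

```python
import numpy as np

rng = np.random.default_rng(0)
ref = rng.standard_normal(256)
delay = 37
recorded = np.concatenate([np.zeros(delay), ref])[:len(ref)]

# In 'full' mode, index len(ref) - 1 of the correlation is lag zero.
corr = np.correlate(recorded, ref, mode='full')
estimated = int(np.argmax(corr)) - (len(ref) - 1)
assert estimated == delay
```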
|
/external/tensorflow/tensorflow/contrib/bayesflow/python/ops/ |
layers_conv_variational.py | 39 """Abstract nD convolution layer (private, used as implementation base). 41 This layer creates a convolution kernel that is convolved 59 rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution. 61 of filters in the convolution). 63 length of the convolution window. 65 specifying the stride length of the convolution. 75 the dilation rate to use for dilated convolution. 118 rank: Python integer, dimensionality of convolution. 120 kernel_size: Size of the convolution window [all...] |
/external/python/cpython3/Modules/_decimal/libmpdec/literature/ |
bignum.txt | 6 Bignum arithmetic in libmpdec uses the scheme for fast convolution 13 The transform in a finite field can be used for convolution in the same 18 Convolution in pseudo-code: 75 convolute.c -> do the actual fast convolution, using one of
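The finite-field transform obeys the same convolution theorem as the complex FFT: transform both inputs, multiply pointwise, transform back. A small Python sketch using a naive O(n^2) number-theoretic transform; libmpdec itself uses fast transforms over its own three primes, so the prime and generator below are merely convenient examples:

```python
P = 998244353  # prime with 2^23 dividing P - 1, so power-of-two roots exist
G = 3          # a primitive root modulo P

def ntt(a, invert=False):
    """Naive O(n^2) number-theoretic transform over GF(P); len(a) | P - 1."""
    n = len(a)
    w = pow(G, (P - 1) // n, P)
    if invert:
        w = pow(w, P - 2, P)  # inverse root via Fermat's little theorem
    out = [sum(a[j] * pow(w, i * j, P) for j in range(n)) % P
           for i in range(n)]
    if invert:
        n_inv = pow(n, P - 2, P)
        out = [x * n_inv % P for x in out]
    return out

def convolve(a, b):
    """Polynomial product via pointwise multiplication of transforms."""
    n = 1
    while n < len(a) + len(b) - 1:
        n *= 2  # pad to a power of two so cyclic == linear convolution
    fa = ntt(a + [0] * (n - len(a)))
    fb = ntt(b + [0] * (n - len(b)))
    c = ntt([x * y % P for x, y in zip(fa, fb)], invert=True)
    return c[:len(a) + len(b) - 1]

# (1 + 2x + 3x^2)(4 + 5x) = 4 + 13x + 22x^2 + 15x^3
assert convolve([1, 2, 3], [4, 5]) == [4, 13, 22, 15]
```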
|