/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
ir_emission_utils.cc | 28 const HloInstruction& convolution) { 30 // implementing `convolution` with Eigen convolution: 36 const Shape& input_shape = convolution.operand(0)->shape(); 37 const Shape& kernel_shape = convolution.operand(1)->shape(); 47 if (window_util::HasWindowReversal(convolution.window())) { 52 convolution.convolution_dimension_numbers(); 54 // TODO(b/32897908): add an optimized implementation for 3D convolution. 72 const Shape& output_shape = convolution.shape();
|
ir_emission_utils.h | 26 const HloInstruction& convolution);
|
cpu_layout_assignment.cc | 104 const HloInstruction* convolution = instruction; local 105 const HloInstruction* lhs_instruction = convolution->operand(0); 106 const HloInstruction* rhs_instruction = convolution->operand(1); 108 // In order to implement `convolution` with Eigen convolution, the layouts 113 Shape output_shape(RowMajorShape(convolution->shape())); 119 constraints->SetOperandLayout(input_shape, convolution, 0)); 121 constraints->SetOperandLayout(filter_shape, convolution, 1)); 123 constraints->SetInstructionLayout(output_shape, convolution));
|
ir_emitter.cc | [all...] |
ir_emitter.h | 126 Status HandleConvolution(HloInstruction* convolution) override;
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
transpose_folding.cc | 54 const HloInstruction& convolution, 57 if (HloOpcode::kConvolution != convolution.opcode()) { 62 for (int64 i = 0; i < convolution.operand_count(); ++i) { 63 auto& operand = *convolution.operand(i); 69 return transposable_conv_operands(convolution, operand_set); 96 // Folds the operands of `convolution` that are foldable transposes. 97 // `computation` is the parent HLO computation of `convolution`. 101 auto& convolution = *pair.first; local 109 convolution.convolution_dimension_numbers(); 116 HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx) [all...] |
dfs_hlo_visitor_with_default.h | 85 Status HandleConvolution(HloInstructionPtr convolution) override { 86 return DefaultAction(convolution);
|
hlo_cost_analysis.cc | 381 Status HloCostAnalysis::HandleConvolution(const HloInstruction* convolution) { 382 auto rhs_instruction = convolution->operand(1); 383 const auto& dnums = convolution->convolution_dimension_numbers(); 385 convolution->shape().dimensions(dnums.output_feature_dimension()); 393 const int64 output_elements = ShapeUtil::ElementsIn(convolution->shape());
|
hlo_verifier.cc | 75 Status ShapeVerifier::HandleConvolution(HloInstruction* convolution) { 79 convolution->operand(0)->shape(), convolution->operand(1)->shape(), 80 convolution->window(), convolution->convolution_dimension_numbers())); 81 return CheckShape(convolution, expected); [all...] |
algebraic_simplifier.cc | 141 Status HandleConvolution(HloInstruction* convolution) override; 303 // Disable convolution simplification on platforms where it causes a slowdown. [all...] |
hlo_cost_analysis.h | 69 Status HandleConvolution(const HloInstruction* convolution) override;
|
hlo_verifier.h | 43 Status HandleConvolution(HloInstruction* convolution) override;
|
/external/eigen/bench/tensors/ |
tensor_benchmarks_cpu.cc | 146 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 4); 147 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 8); 148 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 12); 150 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 4); 151 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 8); 152 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 12); 154 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 4); 155 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 8); 156 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 12); 158 BM_FuncWithKernelDimsCPU(convolution, 4, 7, 4) [all...] |
tensor_benchmarks.h | 417 void convolution(int num_iters, int kernel_x, int kernel_y) { function in class:BenchmarkSuite
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
atrous_convolution_test.py | 15 """Tests for atrous convolution functionality in tensorflow.ops.nn.""" 104 y1 = nn_ops.convolution( 106 y2 = nn_ops.convolution(input=x, filter=filters_upsampled, **kwargs) 116 y = nn_ops.convolution( 123 y = nn_ops.convolution( 221 result = nn_ops.convolution( 223 result = nn_ops.convolution( 231 y1 = nn_ops.convolution( 236 y1 = nn_ops.convolution( 257 output = nn_ops.convolution( [all...] |
conv_ops_test.py | 48 """Iterator for smaller versions of convolution shapes in 2015 Inception. 56 Tuple (input_size, filter_size, out_size, stride, padding), the convolution 170 """Verifies the output values of the convolution function. 227 conv_strides: [row_stride, col_stride] for the convolution; 282 expected = nn_ops.convolution( [all...] |
/cts/suite/audio_quality/test_description/processing/ |
calc_delay.py | 26 def convolution(data0, data1reversed, n): function 27 """calculate convolution part of data0 with data1 from pos n""" 33 return convolution(*args)
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
ir_emitter.h | 77 Status HandleConvolution(HloInstruction* convolution) override;
|
ir_emitter_unnested.h | 65 Status HandleConvolution(HloInstruction* convolution) override;
|
ir_emitter.cc | 600 Status IrEmitter::HandleConvolution(HloInstruction* convolution) { 601 if (ShapeUtil::HasZeroElements(convolution->shape())) { 605 // TODO(b/31409998): Support convolution with dilation. 607 "Hit a case for convolution that is not implemented on GPU."); [all...] |
/external/tensorflow/tensorflow/compiler/xla/tools/parser/ |
hlo_parser_test.cc | 344 // convolution 346 "Convolution", 353 ROOT %convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f 358 // convolution rank 2 366 ROOT %convolution = f32[1,2]{0,1} convolution(f32[1,2]{1,0} %input, f32[1,1]{1,0} %filter), dim_labels=bf_io->bf 371 // convolution backward 379 ROOT %convolution-base-dilated = f32[128,14,14,512]{0,3,2,1} convolution(f32[128,7,7,512]{0,3,2,1} %input, f32[3,3,512,512]{3,2,1,0} %filter), window={size=3x3 pad=1_2x1_2 (…) [all...] |
/external/tensorflow/tensorflow/contrib/quantize/python/ |
graph_matcher_test.py | 44 return layers.convolution(
|
/external/tensorflow/tensorflow/contrib/model_pruning/python/layers/ |
core_layers.py | 42 """Abstract nD convolution layer (private, used as implementation base). 44 This layer creates a convolution kernel that is convolved 52 rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution. 54 of filters in the convolution). 56 length of the convolution window. 58 specifying the stride length of the convolution. 68 the dilation rate to use for dilated convolution. 74 kernel_initializer: An initializer for the convolution kernel. 77 kernel_regularizer: Optional regularizer for the convolution kernel [all...] |
/external/tensorflow/tensorflow/contrib/layers/python/layers/ |
layers.py | 60 'conv2d_in_plane', 'conv2d_transpose', 'conv3d_transpose', 'convolution', 918 def convolution(inputs, function [all...] |
/external/ImageMagick/www/api/ |
effect.php | 164 <p>ConvolveImage() applies a custom convolution kernel to the image.</p> 221 <p>EdgeImage() finds edges in an image. Radius defines the radius of the convolution filter. Use a radius of 0 and EdgeImage() selects a suitable radius for you.</p>
|