/external/libxcam/modules/ocl/ |
cv_image_deblurring.cpp |
  136  int filter_size = (int)(std::max(3, ((thresholded.rows + thresholded.cols) / 2) / 10));  [local]
  137  if (!(filter_size & 1))
  139      filter_size++;
  141  cv::Mat filter = cv::Mat::ones (filter_size, filter_size, CV_32FC1) / (float)(filter_size * filter_size - 1);
  142  filter.at<float> (filter_size / 2, filter_size / 2) = 0;
|
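The libxcam snippet builds a square averaging kernel whose side is roughly a tenth of the mean image dimension, forced odd so it has a well-defined centre; the centre tap is zeroed and the remaining taps are normalized by (n*n - 1). A minimal NumPy sketch of the same construction (the helper name and image size below are mine, not from the source):

    import numpy as np

    def donut_blur_kernel(rows, cols):
        # Side length ~ one tenth of the mean image dimension, at least 3, forced odd.
        n = max(3, ((rows + cols) // 2) // 10)
        if n % 2 == 0:
            n += 1
        # Uniform weights over the n*n - 1 off-centre taps; the centre tap is zero.
        kernel = np.ones((n, n), np.float32) / (n * n - 1)
        kernel[n // 2, n // 2] = 0.0
        return kernel

    k = donut_blur_kernel(480, 640)
    print(k.shape, k.sum())  # (57, 57), sum ~= 1.0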
/external/tensorflow/tensorflow/contrib/lite/kernels/ |
padding.h |
  20   inline int ComputePadding(int stride, int in_size, int filter_size,
  22     int padding = ((out_size - 1) * stride + filter_size - in_size) / 2;
|
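The TFLite helper derives the per-side padding from the output size, stride, filter size, and input size. A small Python rendering of that arithmetic (the clamp to zero is my addition for safety; only the halving appears in the snippet above):

    def compute_padding(stride, in_size, filter_size, out_size):
        # Total padding needed so `out_size` windows of width `filter_size` fit at the
        # given stride, split evenly between the two sides (integer division).
        padding = ((out_size - 1) * stride + filter_size - in_size) // 2
        return max(0, padding)

    # SAME-style padding for a 3x3 filter, stride 1: in_size == out_size, one pixel per side.
    print(compute_padding(1, 224, 3, 224))  # 1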
conv_test.cc |
  176  const int filter_size = 3;  [local]
  185  {TensorType_FLOAT32, {depth, filter_size, filter_size, filter_count}},
  230  const int filter_size = 3;  [local]
  239  {TensorType_FLOAT32, {depth, filter_size, filter_size, filter_count}},
  284  const int filter_size = 3;  [local]
  293  {TensorType_FLOAT32, {depth, filter_size, filter_size, filter_count}},
  340  const int filter_size = 3;  [local]
  [all...] |
/external/tensorflow/tensorflow/python/kernel_tests/ |
depthwise_conv_op_test.py |
  37   Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
  67   Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
  170  for index, (input_size, filter_size, _, stride,
  173    filter_size, "stride:", stride, "padding:", padding)
  176    input_size, filter_size, stride, padding, data_type, use_gpu=True)
  182  for index, (input_size, filter_size, _, stride,
  185    "*", filter_size, "stride:", stride, "padding:", padding)
  189    filter_size,
  317  filter_size = 1
  319    filter_size *=
  [all...] |
neon_depthwise_conv_op_test.py |
  35   Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
  62   Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
  157  for index, (input_size, filter_size, _, stride,
  162    input_size, filter_size, stride, padding, use_gpu=True)
  164    input_size, filter_size, stride, padding, use_gpu=False)
  170  for index, (input_size, filter_size, _, stride,
  175    filter_size,
|
conv_ops_test.py |
  56   Tuple (input_size, filter_size, out_size, stride, padding), the convolution
  [all...] |
conv_ops_3d_test.py |
  306  filter_size = 1
  308    filter_size *= x
  310  filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
|
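The 3-D conv test fills its filter with a deterministic ramp: the element count is the product of the filter dimensions, and each value is its index scaled by that count. A compact sketch of the same idea (the filter shape below is an arbitrary example):

    import numpy as np

    filter_shape = (2, 3, 3, 1, 2)            # depth, height, width, in_ch, out_ch (example values)
    filter_size = int(np.prod(filter_shape))  # 36 elements
    filter_data = [x * 1.0 / filter_size for x in range(filter_size)]
    filter_tensor = np.asarray(filter_data, np.float32).reshape(filter_shape)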
/external/tensorflow/tensorflow/core/kernels/ |
conv_grad_ops_3d.cc |
  68   const std::array<int64, 3> filter_size = {{filter_shape.dim_size(0), \
  87   OP_REQUIRES_OK(context, Get3dOutputSize(input_size, filter_size, strides, \
  108  const auto padded_out_planes = input_size[0] + filter_size[0] - 1; \
  109  const auto padded_out_rows = input_size[1] + filter_size[1] - 1; \
  110  const auto padded_out_cols = input_size[2] + filter_size[2] - 1; \
  111  const auto top_pad_planes = filter_size[0] - 1 - padding[0]; \
  112  const auto top_pad_rows = filter_size[1] - 1 - padding[1]; \
  113  const auto left_pad_cols = filter_size[2] - 1 - padding[2]; \
  207  {filter_size[0], filter_size[1], filter_size[2], out_depth, in_depth})
  [all...] |
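The padding arithmetic above follows the usual "full convolution" bookkeeping for computing the input gradient: each padded extent of the output gradient is input_size + filter_size - 1, and the pad applied on the leading side is filter_size - 1 - forward_padding. A one-dimensional sketch of that relation (names are mine, not the kernel's):

    def backprop_input_extents(input_size, filter_size, forward_pad):
        # Extent of the "full" correlation, and the leading pad to apply to the
        # output gradient so the result lines up with the forward input.
        padded_out = input_size + filter_size - 1
        top_pad = filter_size - 1 - forward_pad
        return padded_out, top_pad

    print(backprop_input_extents(8, 3, 1))  # (10, 1)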
mkl_conv_grad_input_ops.cc |
  145  dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size,
  151  dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size,
  184  mkl_context.filter_size[0] = dims.spatial_dims[1].filter_size;
  185  mkl_context.filter_size[1] = dims.spatial_dims[0].filter_size;
  186  mkl_context.filter_size[2] = dims.in_depth;
  187  mkl_context.filter_size[3] = dims.out_depth;
  190  mkl_context.filter_size[2] * mkl_context.filter_size[3]
  244  size_t filter_size[4];  [member in struct: tensorflow::MklConv2DCustomBackpropInputOp::__anon39840]
  [all...] |
quantized_conv_ops_test.cc |
  80   const int filter_size = 3;  [local]
  85   {filter_size, filter_size, depth, filter_count});
  156  const int filter_size = 3;  [local]
  159  TensorShape({filter_size, filter_size, depth, filter_count}),
  199  const int filter_size = 3;  [local]
  202  TensorShape({filter_size, filter_size, depth, filter_count}),
  242  const int filter_size = 3;  [local]
  292  const int filter_size = 3;  [local]
  [all...] |
conv_ops_test.cc |
  111  const int filter_size = 3;  [local]
  113  Tensor filter(DT_FLOAT, {filter_size, filter_size, depth, filter_count});
  164  int filter_size, int filter_count,
  177  Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size,
  219  int x_padding, int filter_size,
  231  Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size,
  381  const int filter_size = 3;  [local]
  441  const int filter_size = 2;  [local]
  [all...] |
conv_grad_input_ops.cc |
  255  dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size,
  261  dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size,
  372  dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size,
  378  dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size,
  402  dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size,
  408  dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size,
  413  const int filter_total_size = dims.spatial_dims[0].filter_size *
  414      dims.spatial_dims[1].filter_size *
  510  dims.spatial_dims[1].input_size, dims.spatial_dims[0].filter_size,
  511  dims.spatial_dims[1].filter_size, pad_top, pad_left, pad_bottom
  [all...] |
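The filter_total_size computed at lines 413 to 414 is the im2col patch length, filter_rows * filter_cols * in_depth, which becomes one side of the GEMM these gradient kernels reduce to. A tiny sketch of that size and the resulting matrix shapes (the concrete sizes are illustrative, not from the source):

    filter_rows, filter_cols, in_depth, out_depth = 3, 3, 64, 128
    out_rows, out_cols = 56, 56

    filter_total_size = filter_rows * filter_cols * in_depth    # 576: one im2col row per output pixel
    patches_shape = (out_rows * out_cols, filter_total_size)    # (3136, 576)
    filter_matrix_shape = (filter_total_size, out_depth)        # (576, 128) -> GEMM yields (3136, 128)
    print(patches_shape, filter_matrix_shape)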
conv_grad_ops.cc |
  54   dim->filter_size = filter_shape.dim_size(filter_spatial_dim);
  59   TF_RETURN_IF_ERROR(GetWindowedOutputSizeV2(dim->input_size, dim->filter_size,
  67     " filter: ", dim->filter_size, " output: ", dim->output_size,
  71   int64 effective_filter_size = (dim->filter_size - 1) * dim->dilation + 1;
|
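conv_grad_ops.cc reads each spatial filter dimension from the filter shape and validates the geometry against the dilated ("effective") filter size, (filter_size - 1) * dilation + 1. The same relation in Python:

    def effective_filter_size(filter_size, dilation=1):
        # A dilated filter keeps the same number of taps but spreads them
        # `dilation` apart, so its spatial footprint grows.
        return (filter_size - 1) * dilation + 1

    print(effective_filter_size(3, 1))  # 3
    print(effective_filter_size(3, 2))  # 5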
conv_grad_filter_ops.cc |
  252  dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size,
  258  dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size,
  365  dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size,
  371  dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size,
  390  const int filter_total_size = dims.spatial_dims[0].filter_size *
  391      dims.spatial_dims[1].filter_size *
  473  dims.spatial_dims[1].input_size, dims.spatial_dims[0].filter_size,
  474  dims.spatial_dims[1].filter_size, pad_top, pad_left, pad_bottom,
  662  (dims.spatial_dims[0].filter_size - 1) *
  670  (dims.spatial_dims[1].filter_size - 1)
  [all...] |
mkl_conv_grad_filter_ops.cc |
  141  backprop_dims.spatial_dims[0].filter_size,
  147  backprop_dims.spatial_dims[1].filter_size,
  186  mkl_context.filter_sizes[0] = backprop_dims.spatial_dims[1].filter_size;
  187  mkl_context.filter_sizes[1] = backprop_dims.spatial_dims[0].filter_size;
  199  backprop_dims.spatial_dims[1].filter_size;
|
/external/tensorflow/tensorflow/python/layers/ |
utils.py |
  110  def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
  115    filter_size: integer.
  126    dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
  136  def conv_input_length(output_length, filter_size, padding, stride):
  141    filter_size: integer.
  152      pad = filter_size // 2
  156      pad = filter_size - 1
  157    return (output_length - 1) * stride - 2 * pad + filter_size
  160  def deconv_output_length(input_length, filter_size, padding, stride)
  [all...] |
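utils.py relates output length, input length, filter size, stride, and dilation in both directions: the dilated filter spans filter_size + (filter_size - 1) * (dilation - 1) positions, and conv_input_length inverts the forward formula using the per-side pad implied by the padding mode. A condensed sketch of the two directions (the real helpers also cover modes such as 'full' and 'causal'; this only handles 'same' and 'valid'):

    def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
        dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
        if padding == "same":
            output_length = input_length
        elif padding == "valid":
            output_length = input_length - dilated_filter_size + 1
        else:
            raise ValueError(padding)
        return (output_length + stride - 1) // stride      # ceiling division

    def conv_input_length(output_length, filter_size, padding, stride):
        if padding == "same":
            pad = filter_size // 2
        elif padding == "full":
            pad = filter_size - 1
        else:                                              # "valid"
            pad = 0
        return (output_length - 1) * stride - 2 * pad + filter_size

    print(conv_output_length(224, 3, "same", 2))   # 112
    print(conv_input_length(112, 3, "same", 2))    # 223, one of the lengths that maps to 112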
/external/tensorflow/tensorflow/compiler/tests/ |
depthwise_conv_op_test.py |
  67   Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
  97   Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
  197  for index, (input_size, filter_size, _, stride,
  200    filter_size, "stride:", stride, "padding:", padding)
  205    input_size, filter_size, stride, padding, data_type)
  208  for index, (input_size, filter_size, _, stride,
  211    "*", filter_size, "stride:", stride, "padding:", padding)
  217    filter_size,
  345  for index, (input_size, filter_size, output_size, stride,
  348    input_size, "*", filter_size, "stride:", stride, "padding:"
  [all...] |
/external/tensorflow/tensorflow/core/framework/ |
common_shape_fns.h |
  74   Status GetWindowedOutputSize(int64 input_size, int64 filter_size, int64 stride,
  99   Status GetWindowedOutputSizeV2(int64 input_size, int64 filter_size,
  108  Status GetWindowedOutputSizeVerbose(int64 input_size, int64 filter_size,
  115  Status GetWindowedOutputSizeVerboseV2(int64 input_size, int64 filter_size,
  146  DimensionOrConstant filter_size,
  154  DimensionOrConstant filter_size,
|
common_shape_fns.cc |
  20   Status GetWindowedOutputSizeVerboseV2(int64 input_size, int64 filter_size,
  34   int64 effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  61   Status GetWindowedOutputSizeVerbose(int64 input_size, int64 filter_size,
  65   return GetWindowedOutputSizeVerboseV2(input_size, filter_size,
  71   Status GetWindowedOutputSize(int64 input_size, int64 filter_size, int64 stride,
  75   return GetWindowedOutputSizeVerbose(input_size, filter_size, stride,
  80   Status GetWindowedOutputSizeV2(int64 input_size, int64 filter_size,
  85   return GetWindowedOutputSizeVerboseV2(input_size, filter_size, dilation_rate,
  125  shape_inference::DimensionOrConstant filter_size, int64 dilation_rate,
  143  c->Subtract(c->MakeDim(filter_size), 1, &window_size))
  [all...] |
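common_shape_fns.cc holds the canonical output-size arithmetic that the other callers delegate to: the dilated filter span is (filter_size - 1) * dilation_rate + 1, VALID output is the number of whole windows that fit, and SAME output is ceil(input / stride) with any remaining padding split across the two sides. A condensed Python rendering of that logic (error handling omitted; this is a sketch, not the TensorFlow API):

    def get_windowed_output_size_verbose(input_size, filter_size, dilation_rate, stride, padding):
        effective_filter_size = (filter_size - 1) * dilation_rate + 1
        if padding == "VALID":
            output_size = (input_size - effective_filter_size + stride) // stride
            padding_before = padding_after = 0
        elif padding == "SAME":
            output_size = (input_size + stride - 1) // stride
            padding_needed = max(0, (output_size - 1) * stride + effective_filter_size - input_size)
            padding_before = padding_needed // 2             # the extra pixel, if any, goes after
            padding_after = padding_needed - padding_before
        else:
            raise ValueError(padding)
        return output_size, padding_before, padding_after

    print(get_windowed_output_size_verbose(10, 3, 1, 2, "SAME"))   # (5, 0, 1)
    print(get_windowed_output_size_verbose(10, 3, 1, 2, "VALID"))  # (4, 0, 0)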
/external/vulkan-validation-layers/windowsRuntimeInstaller/ |
configure_runtime.c |
  257  int filter_size = snprintf(NULL, 0, pattern, install_path) + 1;  [local]
  258  if(filter_size < 0) {
  261  char* filter = malloc(filter_size);
  262  snprintf(filter, filter_size, pattern, install_path);
  634  int filter_size = snprintf(NULL, 0, pattern, path, name, abi_major, extension) + 1;  [local]
  635  if(filter_size < 0) {
  638  char* filter = malloc(filter_size);
  639  snprintf(filter, filter_size, pattern, path, name, abi_major, extension);
|
/external/tensorflow/tensorflow/python/framework/ |
common_shapes.py |
  106  def get_conv_output_size(input_size, filter_size, strides, padding_type):
  109    filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])
  112    if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
  116      zip(filter_size, input_size)):
  118      "Filter: %r Input: %r" % (filter_size, input_size))
  130    for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
|
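common_shapes.py applies the output-size formula dimension by dimension: it zips the per-axis input sizes, kernel sizes, and strides, rejects kernels larger than the input, and evaluates the SAME or VALID expression for each axis. A small sketch of that per-axis loop (a simplification of the real helper, which also special-cases 1x1 inputs and filters):

    import math

    def get_conv_output_size(input_size, filter_size, strides, padding_type):
        if any(k > i for k, i in zip(filter_size, input_size)):
            raise ValueError("Filter must not be larger than the input: "
                             "Filter: %r Input: %r" % (filter_size, input_size))
        if padding_type == "VALID":
            return tuple(int(math.ceil((i - k + 1) / s))
                         for i, k, s in zip(input_size, filter_size, strides))
        if padding_type == "SAME":
            return tuple(int(math.ceil(i / s))
                         for i, k, s in zip(input_size, filter_size, strides))
        raise ValueError("Invalid padding: %r" % padding_type)

    print(get_conv_output_size((10, 10), (3, 3), (2, 2), "VALID"))  # (4, 4)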
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
cudnn_convolution_rewriter.cc |
  130  int64 filter_size = conv->shape().dimensions(output_spatial_dims[i]);  [local]
  131  dim->set_size(filter_size);
  146  // = (output_size - 1) * stride + filter_size
  151  int64 padded_input_size = filter_size + (output_size - 1) * dim->stride();
|
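The XLA rewriter uses the inverse relation when it tries to recognize a backward-input convolution: the padded input extent the forward op must have had is filter_size + (output_size - 1) * stride, exactly the comment on line 146. As arithmetic:

    def padded_input_size(filter_size, output_size, stride):
        # Inverse of output_size = (padded_input - filter_size) // stride + 1 with no remainder.
        return filter_size + (output_size - 1) * stride

    # A length-9 padded input yields 4 stride-2 windows of width 3.
    print(padded_input_size(3, 4, 2))  # 9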
/frameworks/ml/nn/common/include/ |
OperationsUtils.h |
  104  int32_t filter_size, int32_t padding_implicit,
  111  int32_t tmp = (out_size - 1) * stride + filter_size;
|
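The NN runtime uses the same relation to expand an implicit padding code into explicit head and tail pads: tmp = (out_size - 1) * stride + filter_size is the extent the padded input must cover, and whatever exceeds in_size becomes padding. A hedged sketch of that expansion for the SAME case (the function name and the even head/tail split are mine, not the NNAPI signature):

    def explicit_padding_for_same(in_size, stride, filter_size):
        out_size = (in_size + stride - 1) // stride      # SAME keeps ceil(in/stride) outputs
        tmp = (out_size - 1) * stride + filter_size      # extent the padded input must cover
        total = max(0, tmp - in_size)
        head = total // 2
        tail = total - head
        return head, tail

    print(explicit_padding_for_same(224, 2, 3))  # (0, 1)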
/external/tensorflow/tensorflow/contrib/fused_conv/python/ops/ |
fused_conv2d_bias_activation_op_test.py |
  45   Tuple (input_size, filter_size, out_size, stride, padding), the convolution
  187  filter_size = np.prod(filter_in_sizes)
  192  x2 = [f * 1.0 for f in range(1, filter_size + 1)]
  554  def GetInceptionFwdTest(input_size, filter_size, stride, padding,
  559    tf_logging.info("Skipping InceptionFwd %s", (input_size, filter_size,
  562    tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride,
  564    self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)
  [all...] |
/external/tensorflow/tensorflow/core/grappler/optimizers/ |
layout_optimizer_test.cc |
  41   Output SimpleConv2D(tensorflow::Scope* s, int input_size, int filter_size,
  43     return SimpleConv2D(s, input_size, filter_size, padding, "");
  46   Output SimpleConv2D(tensorflow::Scope* s, int input_size, int filter_size,
  62   {filter_size, filter_size, input_depth, filter_count});
  74     int filter_size, const string& padding) {
  75     return SimpleConv2DBackpropInput(s, input_size, filter_size, padding, true);
  79     int filter_size, const string& padding,
  95   {filter_size, filter_size, input_depth, filter_count})
  [all...] |