    Searched refs: conv2d (Results 1 - 25 of 67)


  /external/tensorflow/tensorflow/contrib/slim/python/slim/nets/
inception_v2.py 84 layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d,
121 net = layers.conv2d(
131 net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
146 branch_0 = layers.conv2d(
149 branch_1 = layers.conv2d(
154 branch_1 = layers.conv2d(
157 branch_2 = layers.conv2d(
162 branch_2 = layers.conv2d(
164 branch_2 = layers.conv2d(
168 branch_3 = layers.conv2d(
    [all...]
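
The inception_v2.py hits above (and the inception_v3.py / inception_v1.py ones below) all follow the same pattern: an arg_scope sets shared defaults for layers.conv2d and the pooling ops, and each mixed block builds parallel branches that are concatenated on the channel axis. A minimal sketch of that pattern, assuming a TensorFlow 1.x environment with tf.contrib.slim; the branch widths here are illustrative, not the real Inception numbers.

    import tensorflow as tf

    slim = tf.contrib.slim

    def mixed_block(net, scope='Mixed'):
      """Toy Inception-style block: parallel conv2d branches, concatenated."""
      with tf.variable_scope(scope):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d], stride=1, padding='SAME'):
          with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
          with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 64, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_2'):
            branch_2 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
            branch_2 = slim.conv2d(branch_2, 32, [1, 1], scope='Conv2d_0b_1x1')
      return tf.concat([branch_0, branch_1, branch_2], axis=3)

    inputs = tf.placeholder(tf.float32, [1, 56, 56, 192])
    net = mixed_block(inputs, scope='Mixed_3b')
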
inception_v3.py 107 [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
112 net = layers.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
118 net = layers.conv2d(net, depth(32), [3, 3], scope=end_point)
124 net = layers.conv2d(
137 net = layers.conv2d(net, depth(80), [1, 1], scope=end_point)
143 net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
157 [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
164 branch_0 = layers.conv2d(
167 branch_1 = layers.conv2d(
169 branch_1 = layers.conv2d(
    [all...]
inception_v1.py 62 [layers.conv2d, layers_lib.fully_connected],
65 [layers.conv2d, layers_lib.max_pool2d], stride=1, padding='SAME'):
67 net = layers.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
77 net = layers.conv2d(net, 64, [1, 1], scope=end_point)
82 net = layers.conv2d(net, 192, [3, 3], scope=end_point)
95 branch_0 = layers.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
97 branch_1 = layers.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
98 branch_1 = layers.conv2d(
101 branch_2 = layers.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
102 branch_2 = layers.conv2d(
    [all...]
vgg.py 66 [layers.conv2d, layers_lib.fully_connected],
70 with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
82 Note: All the fully_connected layers have been transformed to conv2d layers.
100 # Collect outputs for conv2d, fully_connected and max_pool2d.
102 [layers.conv2d, layers_lib.max_pool2d],
105 inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')
107 net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')
109 net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')
111 net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')
113 net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv5'
    [all...]
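
Two things stand out in the vgg.py hits: layers_lib.repeat applies the same conv2d several times under one scope, and the classifier head is written as convolutions rather than fully_connected layers, which is what the docstring "Note" line above refers to. A rough sketch of both, assuming TF 1.x with tf.contrib.slim; the layer counts and widths are trimmed for brevity (the real VGG uses 4096-wide fc6/fc7).

    import tensorflow as tf

    slim = tf.contrib.slim

    inputs = tf.placeholder(tf.float32, [1, 64, 64, 3])
    with slim.arg_scope([slim.conv2d], padding='SAME'):
      net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')  # conv1/conv1_1, conv1/conv1_2
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      # "Fully connected" layer expressed as a VALID 7x7 convolution:
      net = slim.conv2d(net, 256, [7, 7], padding='VALID', scope='fc6')
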
alexnet.py 54 [layers.conv2d, layers_lib.fully_connected],
58 with arg_scope([layers.conv2d], padding='SAME'):
76 Note: All the fully_connected layers have been transformed to conv2d layers.
97 # Collect outputs for conv2d, fully_connected and max_pool2d.
99 [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
101 net = layers.conv2d(
104 net = layers.conv2d(net, 192, [5, 5], scope='conv2')
106 net = layers.conv2d(net, 384, [3, 3], scope='conv3')
107 net = layers.conv2d(net, 384, [3, 3], scope='conv4')
108 net = layers.conv2d(net, 256, [3, 3], scope='conv5'
    [all...]
overfeat.py 50 [layers.conv2d, layers_lib.fully_connected],
54 with arg_scope([layers.conv2d], padding='SAME'):
74 Note: All the fully_connected layers have been transformed to conv2d layers.
94 # Collect outputs for conv2d, fully_connected and max_pool2d
96 [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
98 net = layers.conv2d(
101 net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
103 net = layers.conv2d(net, 512, [3, 3], scope='conv3')
104 net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
105 net = layers.conv2d(net, 1024, [3, 3], scope='conv5'
    [all...]
resnet_v2.py 105 shortcut = layers_lib.conv2d(
113 residual = layers_lib.conv2d(
117 residual = layers_lib.conv2d(
200 [layers_lib.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
213 [layers_lib.conv2d], activation_fn=None, normalizer_fn=None):
226 net = layers_lib.conv2d(
resnet_v1.py 109 shortcut = layers.conv2d(
116 residual = layers.conv2d(
120 residual = layers.conv2d(
196 [layers.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
212 net = layers.conv2d(
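
The resnet_v1.py and resnet_v2.py hits come from the bottleneck unit: a 1x1 conv2d shortcut (used when the depth or stride changes) plus a 1x1 / 3x3 / 1x1 residual stack that is added to the shortcut. A stripped-down sketch of that structure, assuming TF 1.x with tf.contrib.slim and ignoring the batch-norm and preactivation details that distinguish v1 from v2 (the real code also reuses the input directly as the shortcut when no projection is needed).

    import tensorflow as tf

    slim = tf.contrib.slim

    def bottleneck(inputs, depth, depth_bottleneck, stride):
      """Toy ResNet-style bottleneck: projected shortcut + 1x1/3x3/1x1 residual."""
      shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride,
                             activation_fn=None, scope='shortcut')
      residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, scope='conv1')
      residual = slim.conv2d(residual, depth_bottleneck, [3, 3], stride=stride, scope='conv2')
      residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                             activation_fn=None, scope='conv3')
      return tf.nn.relu(shortcut + residual)

    x = tf.placeholder(tf.float32, [1, 56, 56, 64])
    with tf.variable_scope('unit_1'):
      y = bottleneck(x, depth=256, depth_bottleneck=64, stride=1)
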
resnet_utils.py 90 When stride > 1, then we do explicit zero-padding, followed by conv2d with
99 net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
105 net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
124 return layers_lib.conv2d(
139 return layers_lib.conv2d(
256 [layers_lib.conv2d],
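
The resnet_utils.py hits are the docstring and body of the "same-padding" convolution helper: with stride 1 an ordinary SAME conv2d is used, while with stride > 1 the input is explicitly zero-padded and then convolved with VALID padding, so that the effective padding no longer depends on the input size. A sketch of that technique, assuming TF 1.x with tf.contrib.slim; the dilation-rate handling of the real helper is omitted and the function name here is illustrative.

    import tensorflow as tf

    slim = tf.contrib.slim

    def conv2d_same_sketch(inputs, num_outputs, kernel_size, stride, scope=None):
      """SAME-style conv2d; the strided case uses explicit zero padding + VALID conv."""
      if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1,
                           padding='SAME', scope=scope)
      pad_total = kernel_size - 1
      pad_beg = pad_total // 2
      pad_end = pad_total - pad_beg
      inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
      return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
                         padding='VALID', scope=scope)

    x = tf.placeholder(tf.float32, [1, 225, 225, 8])
    y = conv2d_same_sketch(x, 16, kernel_size=3, stride=2, scope='conv')
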
  /external/tensorflow/tensorflow/contrib/receptive_field/python/util/
receptive_field_test.py 51 l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
54 l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
55 l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
79 l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
105 l1 = slim.conv2d(l1_pad, 1, [5, 5], stride=2, scope='L1', padding='VALID')
107 l2 = slim.conv2d(x, 1, [3, 3], stride=1, scope='L2', padding='VALID')
108 l3 = slim.conv2d(l2, 1, [3, 3], stride=1, scope='L3', padding='VALID')
132 l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
134 l2 = slim.conv2d(x, 1, [3, 3], stride=2, scope='L2', padding='SAME')
135 l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID'
    [all...]
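
The receptive-field tests above build small graphs from slim.conv2d layers with varying strides and paddings and then check the computed receptive field and effective stride. The same quantities can be worked out by hand: with kernel k and stride s per layer, the effective stride multiplies and the receptive field grows by (k - 1) times the accumulated stride. A small stand-alone helper illustrating that arithmetic; this is generic bookkeeping, not the contrib.receptive_field API.

    def receptive_field(layers):
      """layers: list of (kernel_size, stride) pairs. Returns (rf, effective_stride)."""
      rf, jump = 1, 1
      for k, s in layers:
        rf += (k - 1) * jump   # each layer widens the field by (k-1) input steps at the current jump
        jump *= s
      return rf, jump

    # A stack like the one in the test: 1x1/stride 4, then 3x3/stride 2, then 1x1/stride 2.
    print(receptive_field([(1, 4), (3, 2), (1, 2)]))   # -> (9, 16)
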
graph_compute_order_test.py 43 l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
46 l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
51 l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
53 l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
127 'L1/Conv2D': [224, 224],
129 'L2/Conv2D': [225, 225],
132 'L5/Conv2D': [56, 56],
133 'L6/Conv2D': [56, 56],
138 'L1/Conv2D': [56, 56],
140 'L2/Conv2D': [112, 112]
    [all...]
parse_layer_parameters_test.py 45 l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
48 l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
53 l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
55 l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
70 l1_node_name = 'L1/Conv2D'
84 l2_node_name = 'L2/Conv2D'
116 l5_node_name = 'L5/Conv2D'
123 l6_node_name = 'L6/Conv2D'
  /external/tensorflow/tensorflow/examples/tutorials/mnist/
mnist_deep.py 63 h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
73 h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
103 def conv2d(x, W): function
104 """conv2d returns a 2d convolution layer with full stride."""
105 return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
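
The mnist_deep.py hits show the tutorial's local conv2d helper: a thin wrapper around tf.nn.conv2d with unit stride in every dimension and SAME padding, composed with a ReLU and an explicit bias. A self-contained restatement of that first layer, assuming TF 1.x; the 5x5x1x32 weight shape is the tutorial's.

    import tensorflow as tf

    def conv2d(x, W):
      """2-D convolution with full (unit) stride and SAME padding."""
      return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    x_image = tf.placeholder(tf.float32, [None, 28, 28, 1])
    W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
    b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
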
  /external/tensorflow/tensorflow/python/ops/
conv2d_benchmark.py 15 """Benchmark for Conv2D op."""
36 """builds a graph containing a sequence of conv2d operations.
47 num_iters: number of iterations to run conv2d.
60 conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format="NHWC")
64 conv2d_op = nn_ops.conv2d(
69 warmup_conv2d_op = nn_ops.conv2d(
74 warmup_conv2d_op = nn_ops.conv2d(
82 """Benchmark conv2d!"""
98 num_iters: number of iterations to run conv2d.
141 print("conv2d benchmark:"
    [all...]
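
conv2d_benchmark.py builds a chain of conv2d ops, runs separate warmup ops, and reports timings. A much simpler sketch of the same idea, assuming TF 1.x and arbitrary shapes; the warmup runs are executed before timing so that one-time session and allocation costs are excluded from the measurement.

    import time
    import tensorflow as tf

    inp = tf.random_normal([8, 64, 64, 32])
    filt = tf.random_normal([3, 3, 32, 32])
    conv = tf.nn.conv2d(inp, filt, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC')

    with tf.Session() as sess:
      for _ in range(10):      # warmup: exclude one-time setup from the timing
        sess.run(conv)
      num_iters = 100
      start = time.time()
      for _ in range(num_iters):
        sess.run(conv)
      print('conv2d: %.3f ms/iter' % ((time.time() - start) * 1000.0 / num_iters))
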
  /external/tensorflow/tensorflow/python/profiler/internal/
model_analyzer_testlib.py 49 x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
54 x = nn_ops.conv2d(x, kernel, [1, 2, 2, 1], padding='SAME')
83 r1 = nn_ops.conv2d(image, kernel1, [1, 2, 2, 1], padding='SAME')
89 r2 = nn_ops.conv2d(image, kernel2, [1, 2, 2, 1], padding='SAME')
print_model_analysis_test.py 60 x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
  /external/tensorflow/tensorflow/python/layers/
maxout_test.py 50 graph = conv_layers.conv2d(inputs, 10, 3, padding="SAME")
56 graph = conv_layers.conv2d(inputs, 3, 10, strides=(1, 1))
  /external/tensorflow/tensorflow/contrib/specs/python/
specs_ops.py 80 Cx = Fun(layers.conv2d)
81 Cs = Fun(layers.conv2d, activation_fn=math_ops.sigmoid)
82 Ct = Fun(layers.conv2d, activation_fn=math_ops.tanh)
83 Cr = Fun(layers.conv2d, activation_fn=nn_ops.relu)
84 Cm = Fun(layers.conv2d, activation_fn=nn_ops.softmax)
85 Cl = Fun(layers.conv2d, activation_fn=None)
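
The specs_ops.py hits define one-letter combinators (Cx, Cs, Ct, Cr, Cm, Cl) that are layers.conv2d with a fixed activation_fn baked in via the module's Fun wrapper. Functionally this is close to a functools.partial over slim.conv2d; the approximation below is hedged, since Fun also does extra plumbing for the specs mini-language.

    import functools
    import tensorflow as tf

    slim = tf.contrib.slim

    # Roughly what Cx / Cr / Cs / Cl amount to: conv2d with a preset activation.
    Cx = functools.partial(slim.conv2d)                            # library default (ReLU)
    Cr = functools.partial(slim.conv2d, activation_fn=tf.nn.relu)  # explicit ReLU
    Cs = functools.partial(slim.conv2d, activation_fn=tf.sigmoid)  # sigmoid
    Cl = functools.partial(slim.conv2d, activation_fn=None)        # linear

    x = tf.placeholder(tf.float32, [1, 32, 32, 3])
    y = Cr(x, 16, [3, 3], scope='conv_r')
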
  /external/tensorflow/tensorflow/examples/learn/
resnet.py 58 net = tf.layers.conv2d(
71 net = tf.layers.conv2d(
85 conv = tf.layers.conv2d(
94 conv = tf.layers.conv2d(
105 conv = tf.layers.conv2d(
121 net = tf.layers.conv2d(
mnist.py 40 h_conv1 = tf.layers.conv2d(
51 h_conv2 = tf.layers.conv2d(
text_classification_cnn.py 53 conv1 = tf.layers.conv2d(
70 conv2 = tf.layers.conv2d(
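
The examples/learn hits (resnet.py, mnist.py, text_classification_cnn.py) use the core tf.layers.conv2d API rather than slim: filters, kernel_size, padding and activation are passed explicitly, with pooling done by tf.layers.max_pooling2d. A minimal MNIST-style pair of layers in that style, assuming TF 1.x; the filter counts are illustrative.

    import tensorflow as tf

    images = tf.placeholder(tf.float32, [None, 28, 28, 1])
    h_conv1 = tf.layers.conv2d(images, filters=32, kernel_size=[5, 5],
                               padding='same', activation=tf.nn.relu, name='conv1')
    h_pool1 = tf.layers.max_pooling2d(h_conv1, pool_size=[2, 2], strides=2)
    h_conv2 = tf.layers.conv2d(h_pool1, filters=64, kernel_size=[5, 5],
                               padding='same', activation=tf.nn.relu, name='conv2')
    h_pool2 = tf.layers.max_pooling2d(h_conv2, pool_size=[2, 2], strides=2)
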
  /external/tensorflow/tensorflow/python/kernel_tests/
conv2d_backprop_filter_grad_test.py 45 conv_out = nn_ops.conv2d(
79 conv_out = nn_ops.conv2d(
atrous_conv2d_test.py 80 y2 = nn_ops.conv2d(
99 net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
100 net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
102 net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
125 # y2: space_to_batch, three conv2d in a row, batch_to_space
130 y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
131 y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
132 y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
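
The atrous_conv2d_test.py hits check that a dilated (atrous) convolution matches an equivalent computation built from space_to_batch, plain conv2d, and batch_to_space, and that several conv2d ops in a row can share a single space_to_batch/batch_to_space pair (the "y2" comment above). As a small illustration using only the public API: at rate 1 an atrous convolution reduces to an ordinary conv2d, which is easy to verify numerically; the rate > 1 rewiring is what the test itself exercises.

    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.random.rand(1, 8, 8, 3).astype(np.float32))
    f = tf.constant(np.random.rand(3, 3, 3, 4).astype(np.float32))

    y_atrous = tf.nn.atrous_conv2d(x, f, rate=1, padding='SAME')        # dilation rate 1
    y_plain = tf.nn.conv2d(x, f, strides=[1, 1, 1, 1], padding='SAME')  # ordinary conv2d

    with tf.Session() as sess:
      a, b = sess.run([y_atrous, y_plain])
      print(np.allclose(a, b))   # expected: True
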
  /external/tensorflow/tensorflow/contrib/quantize/python/
quantize_test.py 32 conv2d = layers.conv2d variable
51 conv = conv2d(inputs, 32, [5, 5], stride=2, padding='SAME',
73 conv = conv2d(input1, 32, [5, 5], stride=2, padding='SAME',
  /external/tensorflow/tensorflow/examples/tutorials/layers/
cnn_mnist.py 38 conv1 = tf.layers.conv2d(
56 conv2 = tf.layers.conv2d(

