/external/tensorflow/tensorflow/contrib/model_pruning/examples/cifar10/ |
cifar10_input.py |
  121  images, label_batch = tf.train.shuffle_batch(
  128  images, label_batch = tf.train.batch(
  159  filename_queue = tf.train.string_input_producer(filenames)
  194  print('Filling queue with %d CIFAR images before starting to train. '
  210  eval_data: bool, indicating if one should use the train or eval data set.
  232  filename_queue = tf.train.string_input_producer(filenames)
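These hits all belong to the queue-based input pipeline. A minimal sketch of that pattern, assuming TF 1.x; the TFRecord filename and feature layout below are hypothetical stand-ins for the CIFAR-10 binary reader used in the real file:

    import tensorflow as tf

    # Hypothetical TFRecord file; the real example reads CIFAR-10 binary batches.
    filename_queue = tf.train.string_input_producer(["/tmp/cifar_toy.tfrecords"])
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized,
        features={
            "image": tf.FixedLenFeature([32 * 32 * 3], tf.float32),
            "label": tf.FixedLenFeature([], tf.int64),
        })
    image = tf.reshape(features["image"], [32, 32, 3])
    label = features["label"]

    # shuffle_batch for training; tf.train.batch (no shuffling) is the eval variant.
    images, label_batch = tf.train.shuffle_batch(
        [image, label], batch_size=128, num_threads=4,
        capacity=20000, min_after_dequeue=4000)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            imgs, labels = sess.run([images, label_batch])
            print(imgs.shape, labels.shape)
        finally:
            coord.request_stop()
            coord.join(threads)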
|
/external/tensorflow/tensorflow/examples/learn/ |
iris_custom_decay_dnn.py |
  53  if mode == tf.estimator.ModeKeys.TRAIN:
  54  global_step = tf.train.get_global_step()
  55  learning_rate = tf.train.exponential_decay(
  58  optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
  78  # Train.
  81  classifier.train(input_fn=train_input_fn, steps=1000)
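Taken together, these lines are the usual decay-schedule pattern inside a custom Estimator model_fn. A minimal sketch under that assumption (TF 1.x; the toy model, feature key "x", and decay constants are illustrative, not taken from the example):

    import tensorflow as tf

    def model_fn(features, labels, mode):
        # Toy linear classifier; the real example stacks a small DNN over iris.
        logits = tf.layers.dense(features["x"], 3)

        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(
                mode, predictions=tf.argmax(logits, axis=1))

        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

        if mode == tf.estimator.ModeKeys.TRAIN:
            global_step = tf.train.get_global_step()
            learning_rate = tf.train.exponential_decay(
                learning_rate=0.1, global_step=global_step,
                decay_steps=100, decay_rate=0.96)
            optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
            train_op = optimizer.minimize(loss, global_step=global_step)
            return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

        return tf.estimator.EstimatorSpec(mode, loss=loss)

Wiring it up would then look like `classifier = tf.estimator.Estimator(model_fn=model_fn)` followed by `classifier.train(input_fn=train_input_fn, steps=1000)`, matching the hit at line 81 above.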
|
iris_custom_model.py |
  54  if mode == tf.estimator.ModeKeys.TRAIN:
  55  optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
  56  train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
  75  # Train.
  78  classifier.train(input_fn=train_input_fn, steps=1000)
|
/external/tensorflow/tensorflow/examples/saved_model/integration_tests/ |
use_text_rnn_model.py |
  38  model.train(tf.constant(sentences))
|
/external/tensorflow/tensorflow/contrib/distribute/python/examples/ |
keras_model_with_estimator.py |
  51  optimizer = tf.train.GradientDescentOptimizer(0.2)
  65  # Train and evaluate the model.
  66  keras_estimator.train(input_fn=input_fn, steps=10)
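The surrounding pattern is: compile a Keras model with a `tf.train` optimizer, convert it to an Estimator, then call `train` with an input function. A rough sketch, assuming TF 1.x `tf.keras.estimator.model_to_estimator`; the model shape and the random-data input_fn are illustrative:

    import numpy as np
    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(16, activation="relu", input_shape=(10,)),
        tf.keras.layers.Dense(1),
    ])
    optimizer = tf.train.GradientDescentOptimizer(0.2)
    model.compile(loss="mse", optimizer=optimizer)

    # Wrap the compiled Keras model as a tf.estimator.Estimator.
    keras_estimator = tf.keras.estimator.model_to_estimator(keras_model=model)

    def input_fn():
        x = np.random.random((256, 10)).astype(np.float32)
        y = np.random.random((256, 1)).astype(np.float32)
        return tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(32)

    # Train and evaluate the wrapped model.
    keras_estimator.train(input_fn=input_fn, steps=10)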
|
/external/tensorflow/tensorflow/contrib/model_pruning/python/ |
learning.py |
  31   optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
  46   learning.train(train_op,
  61   def train(train_op, function
  86   """Wrapper around tf-slim's train function.
  139  sync_optimizer: an instance of tf.train.SyncReplicasOptimizer, or a list of
  165  total_loss, _ = _slim.learning.train(
|
/external/tensorflow/tensorflow/contrib/training/python/training/ |
training.py |
  36   optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
  42   tf.contrib.training.train(train_op, my_log_dir)
  48   In order to use the `train` function, one needs a train_op: an `Operation` that
  144  tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
  177  tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
  208  tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
  240  tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
  266  'train',
  392  variables_to_train: an optional list of variables to train. If None, it will
  476  def train(train_op function
  [all...]
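The docstring excerpts above describe the create-a-train_op-then-train flow. A minimal end-to-end sketch under the assumption of the TF 1.x contrib API; the loss, log directory, and step limit here are stand-ins:

    import tensorflow as tf

    # Stand-in loss over a single trainable variable.
    weight = tf.Variable(5.0)
    total_loss = tf.square(weight - 2.0)

    optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
    train_op = tf.contrib.training.create_train_op(total_loss, optimizer)

    # Runs train_op in a MonitoredSession until the hook stops it.
    final_loss = tf.contrib.training.train(
        train_op, "/tmp/my_log_dir",
        hooks=[tf.train.StopAtStepHook(last_step=100)])
    print("final loss:", final_loss)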
/external/tensorflow/tensorflow/examples/tutorials/mnist/ |
mnist_with_summaries.py |
  38   def train(): function
  127  with tf.name_scope('train'):
  128  train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
  141  train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
  145  # Train the model, and also write summaries.
  149  def feed_dict(train):
  151  if train or FLAGS.fake_data:
  152  xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
  164  else: # Record train set summaries, and train
  [all...]
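The skeleton of this example, with the model reduced to a single variable so only the optimizer-plus-summaries wiring remains (the log directory and step count are illustrative):

    import tensorflow as tf

    w = tf.Variable(0.0)
    loss = tf.square(w - 3.0)

    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

    tf.summary.scalar('loss', loss)
    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Write train-set summaries alongside the graph definition.
        train_writer = tf.summary.FileWriter('/tmp/mnist_logs/train', sess.graph)
        for step in range(100):
            summary, _ = sess.run([merged, train_step])
            train_writer.add_summary(summary, step)
        train_writer.close()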
mnist_softmax_xla.py |
  57  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
  69  # Train
  72  batch_xs, batch_ys = mnist.train.next_batch(100)
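For reference, the feed-dict training loop these three lines come from looks roughly like the following; random batches stand in for `mnist.train.next_batch(100)`, and the XLA jit configuration is omitted:

    import numpy as np
    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    logits = tf.matmul(x, w) + b

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(100):
            # Random stand-in batch for mnist.train.next_batch(100).
            batch_xs = np.random.rand(100, 784).astype(np.float32)
            batch_ys = np.eye(10)[np.random.randint(0, 10, 100)].astype(np.float32)
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})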
|
/external/tensorflow/tensorflow/contrib/layers/python/layers/ |
optimizers.py |
  37  from tensorflow.python.training import training as train
  40  "Adagrad": train.AdagradOptimizer,
  41  "Adam": train.AdamOptimizer,
  42  "Ftrl": train.FtrlOptimizer,
  43  "Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9),  # pylint: disable=line-too-long
  44  "RMSProp": train.RMSPropOptimizer,
  45  "SGD": train.GradientDescentOptimizer,
  79  optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5))`.
  82  optimizer=lambda: tf.train.MomentumOptimizer(0.5, momentum=0.5))`.
  86  optimizer=tf.train.AdagradOptimizer)`
  [all...]
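The table of names above is what lets `tf.contrib.layers.optimize_loss` take its optimizer as a plain string, and the docstring excerpts show the callable alternative. A small sketch of both spellings, assuming the TF 1.x contrib API; the loss is a stand-in:

    import tensorflow as tf

    w = tf.Variable(3.0)
    loss = tf.square(w - 1.0)
    global_step = tf.train.get_or_create_global_step()

    # Optimizer named by string key ("SGD", "Adam", "Adagrad", "Momentum", ...).
    train_op = tf.contrib.layers.optimize_loss(
        loss, global_step, learning_rate=0.1, optimizer="SGD")

    # Alternative spellings from the docstring: pass a callable that builds the
    # optimizer, e.g. optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5),
    # or pass an optimizer class directly, e.g. optimizer=tf.train.AdagradOptimizer.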
/external/tensorflow/tensorflow/contrib/eager/python/examples/l2hmc/ |
main.py |
  38   global_step = tf.train.get_or_create_global_step()
  56   learning_rate = tf.train.exponential_decay(
  58   optimizer = tf.train.AdamOptimizer(learning_rate)
  59   checkpointer = tf.train.Checkpoint(
  65   latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
  155  """Train the sampler for one iteration."""
|
/external/tensorflow/tensorflow/contrib/eager/python/examples/gan/ |
mnist_test.py |
  65  step_counter = tf.train.get_or_create_global_step()
  71  generator_optimizer = tf.train.AdamOptimizer(0.001)
  73  discriminator_optimizer = tf.train.AdamOptimizer(0.001)
  91  self._report('train', start, measure_batches, batch_size)
|
mnist.py |
  207  dataset: Dataset of images to train on.
  274  tf.data.Dataset.from_tensor_slices(data.train.images).shuffle(60000)
  281  'generator_optimizer': tf.train.AdamOptimizer(FLAGS.lr),
  282  'discriminator_optimizer': tf.train.AdamOptimizer(FLAGS.lr),
  283  'step_counter': tf.train.get_or_create_global_step(),
  290  latest_cpkt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
  293  checkpoint = tf.train.Checkpoint(**model_objects)
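Lines 281-293 are the object-based checkpointing idiom: collect the trainable objects in a dict, wrap them in `tf.train.Checkpoint`, and restore the newest checkpoint if one exists. A compact sketch with toy stand-ins for the generator and discriminator (the checkpoint directory is illustrative):

    import tensorflow as tf

    tf.enable_eager_execution()

    checkpoint_dir = "/tmp/gan_checkpoints"  # illustrative path
    tf.gfile.MakeDirs(checkpoint_dir)

    model_objects = {
        'generator': tf.keras.layers.Dense(10),      # stand-in for the generator
        'discriminator': tf.keras.layers.Dense(1),   # stand-in for the discriminator
        'generator_optimizer': tf.train.AdamOptimizer(0.001),
        'discriminator_optimizer': tf.train.AdamOptimizer(0.001),
        'step_counter': tf.train.get_or_create_global_step(),
    }
    checkpoint = tf.train.Checkpoint(**model_objects)

    # Resume from the newest checkpoint if there is one; otherwise start fresh.
    latest_cpkt = tf.train.latest_checkpoint(checkpoint_dir)
    if latest_cpkt:
        checkpoint.restore(latest_cpkt)

    # ... training loop would go here ...

    checkpoint.save(file_prefix=checkpoint_dir + "/ckpt")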
|
/external/tensorflow/tensorflow/contrib/predictor/ |
testing_common.py |
  81  def get_arithmetic_input_fn(core=True, train=False):
  90  if train:
  96  if train:
|
/external/tensorflow/tensorflow/contrib/session_bundle/example/ |
export_half_plus_two.py |
  63  save = tf.train.Saver(
  69  write_version=tf.train.SaverDef.V2 if use_checkpoint_v2 else
  70  tf.train.SaverDef.V1)
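This is a `tf.train.Saver` whose checkpoint format is selected at export time. A trimmed-down sketch of that choice (the variables and checkpoint path are placeholders, not the half-plus-two model's):

    import tensorflow as tf

    use_checkpoint_v2 = True
    a = tf.Variable(0.5, name="a")
    b = tf.Variable(2.0, name="b")

    # Pick the legacy V1 checkpoint format or the current V2 format at runtime.
    save = tf.train.Saver(
        {"a": a, "b": b},
        sharded=True,
        write_version=tf.train.SaverDef.V2 if use_checkpoint_v2 else
        tf.train.SaverDef.V1)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        save.save(sess, "/tmp/half_plus_two_export")  # illustrative path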
|
/external/tensorflow/tensorflow/examples/how_tos/reading_data/ |
fully_connected_reader.py |
  15   """Train and Eval the MNIST network.
  18   to a TFRecords file containing tf.train.Example protocol buffers.
  43   TRAIN_FILE = 'train.tfrecords'
  84   def inputs(train, batch_size, num_epochs):
  88   train: Selects between the training (True) and validation (False) data.
  91   train forever.
  107  if train else VALIDATION_FILE)
  134  """Train MNIST for a number of steps."""
  140  train=True, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs)
  148  # Add to the Graph operations that train the model
  [all...]
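The tutorial revolves around reading `tf.train.Example` protocol buffers out of a TFRecords file. A self-contained sketch of that round trip with tiny fake records instead of MNIST (the file path and feature names are illustrative):

    import numpy as np
    import tensorflow as tf

    path = "/tmp/toy_train.tfrecords"  # stand-in for train.tfrecords

    # Write a few tf.train.Example protos.
    with tf.python_io.TFRecordWriter(path) as writer:
        for label in range(3):
            pixels = np.random.randint(0, 256, 784)
            example = tf.train.Example(features=tf.train.Features(feature={
                'label': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[label])),
                'pixels': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=pixels.tolist())),
            }))
            writer.write(example.SerializeToString())

    # Read them back and parse each serialized proto.
    def decode(serialized):
        features = tf.parse_single_example(serialized, features={
            'label': tf.FixedLenFeature([], tf.int64),
            'pixels': tf.FixedLenFeature([784], tf.int64),
        })
        return features['pixels'], features['label']

    dataset = tf.data.TFRecordDataset(path).map(decode).shuffle(10).batch(2)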
/external/tensorflow/tensorflow/contrib/learn/python/learn/datasets/ |
mnist.py |
  246  train = fake()
  249  return base.Datasets(train=train, validation=validation, test=test)
  254  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  255  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  290  train = DataSet(train_images, train_labels, **options)
  294  return base.Datasets(train=train, validation=validation, test=test)
|
/external/tensorflow/tensorflow/examples/tf2_showcase/ |
mnist.py |
  54   name='train_epochs', default=10, help='Number of epochs to train')
  140  def train(model, optimizer, dataset, step_counter, log_interval=None, function
  197  optimizer = tf.train.MomentumOptimizer(
  201  train_dir = os.path.join(flags_obj.model_dir, 'summaries', 'train')
  211  step_counter = tf.train.get_or_create_global_step()
  212  checkpoint = tf.train.Checkpoint(
  215  checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
  217  # Train and evaluate for a set number of epochs.
  221  train(model, optimizer, train_ds, step_counter,
|
/external/tensorflow/tensorflow/lite/experimental/examples/lstm/ |
bidirectional_sequence_lstm_test.py |
  30   # Number of steps to train model.
  116  opt = tf.train.AdamOptimizer(
  123  batch_x, batch_y = self.mnist.train.next_batch(
  141  saver = tf.train.Saver()
  146  b1, _ = self.mnist.train.next_batch(batch_size=1)
  197  saver = tf.train.Saver()
  215  saver = tf.train.Saver()
|
unidirectional_sequence_lstm_test.py |
  29   # Number of steps to train model.
  107  opt = tf.train.AdamOptimizer(
  114  batch_x, batch_y = self.mnist.train.next_batch(
  130  saver = tf.train.Saver()
  135  b1, _ = self.mnist.train.next_batch(batch_size=1)
  182  saver = tf.train.Saver()
  200  saver = tf.train.Saver()
|
unidirectional_sequence_rnn_test.py |
  32   # Number of steps to train model.
  103  opt = tf.train.AdamOptimizer(
  109  batch_x, batch_y = self.mnist.train.next_batch(
  122  saver: saver created by tf.train.Saver()
  142  saver = tf.train.Saver()
  147  b1, _ = self.mnist.train.next_batch(batch_size=1)
  190  saver = tf.train.Saver()
  208  saver = tf.train.Saver()
|
bidirectional_sequence_rnn_test.py |
  34   # Number of steps to train model.
  135  opt = tf.train.AdamOptimizer(
  142  batch_x, batch_y = self.mnist.train.next_batch(
  165  saver = tf.train.Saver()
  170  b1, _ = self.mnist.train.next_batch(batch_size=1)
  218  saver = tf.train.Saver()
  239  saver = tf.train.Saver()
  262  saver = tf.train.Saver()
  288  saver = tf.train.Saver()
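The four sequence-model tests above share the same flow: train briefly with `tf.train.AdamOptimizer`, save with `tf.train.Saver`, then rebuild the graph and restore before conversion. Stripped of the LSTM/RNN specifics, the save-and-restore part looks like this (a single variable stands in for the model, and the checkpoint prefix is illustrative):

    import tensorflow as tf

    ckpt_prefix = "/tmp/sequence_test_model"  # illustrative checkpoint prefix

    # Build a (trivial) graph, "train" it, and save the variables.
    v = tf.Variable(1.0, name="v")
    update = v.assign(42.0)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(update)
        saver.save(sess, ckpt_prefix)

    # Rebuild the same variables in a fresh graph and restore the weights.
    tf.reset_default_graph()
    v = tf.Variable(0.0, name="v")
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, ckpt_prefix)
        print(sess.run(v))  # 42.0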
|
/external/tensorflow/tensorflow/examples/get_started/regression/ |
imports85.py |
  75   """Load the imports85 data as a (train,test) pair of `Dataset`.
  84   A (train,test) pair of `Datasets`
  141  train = (base_dataset
  152  return train, test
  196  # Split the data into train/test subsets.
|
/external/tensorflow/tensorflow/python/ops/ |
batch_norm_benchmark.py |
  68   def build_graph(device, input_shape, axes, num_layers, mode, scale, train):
  78   train: if true, also run backprop.
  98   if train:
  111  if train:
  127  train, num_iters):
  137  train: if true, also run backprop.
  146  train)
  154  print("%s shape:%d/%d #layers:%d mode:%s scale:%r train:%r - %f secs" %
  155  (device, len(input_shape), len(axes), num_layers, mode, scale, train,
  161  "train_{train}")
  [all...]
/external/tensorflow/tensorflow/contrib/eager/python/examples/revnet/ |
main_estimator.py |
  15   """Estimator workflow with RevNet train on CIFAR-10."""
  36   mode: One of `ModeKeys.TRAIN`, `ModeKeys.EVAL` or 'ModeKeys.PREDICT'
  50   if mode == tf.estimator.ModeKeys.TRAIN:
  51   global_step = tf.train.get_or_create_global_step()
  52   learning_rate = tf.train.piecewise_constant(
  54   optimizer = tf.train.MomentumOptimizer(
  98   split: One of `train`, `validation`, `train_all`, and `test`
  106  if split == "train_all" or split == "train":
  166  # Train and evaluate estimator
  167  revnet_estimator.train(input_fn=train_input_fn
  [all...]
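Lines 50-54 are a piecewise-constant schedule feeding a momentum optimizer inside the TRAIN branch. A minimal sketch of that pairing, with made-up step boundaries and a stand-in loss rather than the RevNet config's:

    import tensorflow as tf

    global_step = tf.train.get_or_create_global_step()

    # Learning rate of 0.1 until step 40k, then 0.01, then 0.001.
    learning_rate = tf.train.piecewise_constant(
        global_step, boundaries=[40000, 60000], values=[0.1, 0.01, 0.001])
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)

    # Stand-in loss; the real model_fn minimizes the RevNet loss here.
    w = tf.Variable(0.0)
    loss = tf.square(w - 1.0)
    train_op = optimizer.minimize(loss, global_step=global_step)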