# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
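"""Benchmarks for the eager execution MNIST GAN example.

Measures GAN training throughput and generator inference throughput
(reported as examples per second) for several batch sizes.
"""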

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tempfile
import time

import tensorflow as tf

import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.gan import mnist

NOISE_DIM = 100
# Big enough so that summaries are never recorded.
# Lower this value if you would like to benchmark with some summaries.
SUMMARY_INTERVAL = 10000
SUMMARY_FLUSH_MS = 100  # Flush summaries every 100ms.


def data_format():
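  """Returns the conv data format; 'channels_first' is faster on GPUs."""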
  return 'channels_first' if tf.test.is_gpu_available() else 'channels_last'


def device():
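  """Returns the device to benchmark on: first GPU if available, else CPU."""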
  return '/gpu:0' if tfe.num_gpus() else '/cpu:0'


class MnistEagerGanBenchmark(tf.test.Benchmark):

  def _report(self, test_name, start, num_iters, batch_size):
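    """Reports average step time and examples/sec for one benchmark run."""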
    avg_time = (time.time() - start) / num_iters
    dev = 'gpu' if tfe.num_gpus() else 'cpu'
    name = 'eager_%s_%s_batch_%d_%s' % (test_name, dev, batch_size,
                                        data_format())
    extras = {'examples_per_sec': batch_size / avg_time}
    self.report_benchmark(
        iters=num_iters, wall_time=avg_time, name=name, extras=extras)

  def benchmark_train(self):
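    """Benchmarks GAN training on random data for several batch sizes."""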
    for batch_size in [64, 128, 256]:
      # Generate some random data.
      burn_batches, measure_batches = (3, 100)
      burn_images = [tf.random_normal([batch_size, 784])
                     for _ in range(burn_batches)]
      burn_dataset = tf.data.Dataset.from_tensor_slices(burn_images)
      measure_images = [tf.random_normal([batch_size, 784])
                        for _ in range(measure_batches)]
      measure_dataset = tf.data.Dataset.from_tensor_slices(measure_images)

      tf.train.get_or_create_global_step()
      with tf.device(device()):
        # Create the models and optimizers.
        generator = mnist.Generator(data_format())
        discriminator = mnist.Discriminator(data_format())
        with tf.variable_scope('generator'):
          generator_optimizer = tf.train.AdamOptimizer(0.001)
        with tf.variable_scope('discriminator'):
          discriminator_optimizer = tf.train.AdamOptimizer(0.001)

        with tf.contrib.summary.create_file_writer(
            tempfile.mkdtemp(), flush_millis=SUMMARY_FLUSH_MS).as_default():

          # Warm up.
          mnist.train_one_epoch(generator, discriminator, generator_optimizer,
                                discriminator_optimizer,
                                burn_dataset, log_interval=SUMMARY_INTERVAL,
                                noise_dim=NOISE_DIM)
          # Measure.
          start = time.time()
          mnist.train_one_epoch(generator, discriminator, generator_optimizer,
                                discriminator_optimizer,
                                measure_dataset, log_interval=SUMMARY_INTERVAL,
                                noise_dim=NOISE_DIM)
          self._report('train', start, measure_batches, batch_size)

  def benchmark_generate(self):
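    """Benchmarks generator inference on random noise batches."""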
    for batch_size in [64, 128, 256]:
      with tf.device(device()):
        # The generator has random (untrained) weights, so its output is
        # garbage; that is fine for measuring inference throughput.
        generator = mnist.Generator(data_format())

        num_burn, num_iters = (30, 1000)
        for _ in range(num_burn):
          noise = tf.random_uniform(shape=[batch_size, NOISE_DIM],
                                    minval=-1., maxval=1.)
          generator(noise)

        start = time.time()
        for _ in range(num_iters):
          noise = tf.random_uniform(shape=[batch_size, NOISE_DIM],
                                    minval=-1., maxval=1.)
          generator(noise)
        self._report('generate', start, num_iters, batch_size)


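# Note: benchmarks are typically selected via TensorFlow's --benchmarks regex
# flag (handled by tf.test.main()), e.g. --benchmarks=MnistEagerGanBenchmark;
# the exact invocation depends on how this script is built and run.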
if __name__ == '__main__':
  tfe.enable_eager_execution()
  tf.test.main()