# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

     16 """GradientDescent for TensorFlow."""
     17 from __future__ import absolute_import
     18 from __future__ import division
     19 from __future__ import print_function
     20 
     21 from tensorflow.python.framework import ops
     22 from tensorflow.python.ops import math_ops
     23 from tensorflow.python.ops import resource_variable_ops
     24 from tensorflow.python.training import optimizer
     25 from tensorflow.python.training import training_ops
     26 from tensorflow.python.util.tf_export import tf_export
     27 
     28 
@tf_export("train.GradientDescentOptimizer")
class GradientDescentOptimizer(optimizer.Optimizer):
  """Optimizer that implements the gradient descent algorithm.
     32   """
     33 
  def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
    """Construct a new gradient descent optimizer.

    Args:
      learning_rate: A Tensor or a floating point value. The learning rate to
        use.
      use_locking: If True, use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "GradientDescent".
    """
    super(GradientDescentOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate

  def _apply_dense(self, grad, var):
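    # Dense path: delegate to the fused ApplyGradientDescent kernel, which
    # updates the variable in place as var -= learning_rate * grad. The
    # learning rate is cast to the variable's base dtype first.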
    return training_ops.apply_gradient_descent(
        var,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad,
        use_locking=self._use_locking).op

  def _resource_apply_dense(self, grad, handle):
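    # Resource-variable variant of the dense update; the kernel takes the raw
    # variable handle rather than a ref, and the learning rate is cast to the
    # gradient's dtype.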
    return training_ops.resource_apply_gradient_descent(
        handle.handle, math_ops.cast(self._learning_rate_tensor,
                                     grad.dtype.base_dtype),
        grad, use_locking=self._use_locking)

  def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
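    # `indices` may contain duplicates; scatter_add sums repeated rows, which
    # matches the dense update exactly because the step is linear in the
    # gradient.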
    return resource_variable_ops.resource_scatter_add(
        handle.handle, indices, -grad * self._learning_rate)

  def _apply_sparse_duplicate_indices(self, grad, var):
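    # Ref-variable sparse path: scale the gradient slices by the learning rate
    # and subtract them with scatter_sub. Duplicate indices accumulate, just as
    # in the resource path above.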
    delta = ops.IndexedSlices(
        grad.values *
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad.indices, grad.dense_shape)
    return var.scatter_sub(delta, use_locking=self._use_locking)

  def _prepare(self):
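    # Called by the base Optimizer before the _apply_* methods, so the learning
    # rate is converted to a tensor once per apply_gradients call and then cast
    # per-variable as needed.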
    self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
                                                       name="learning_rate")
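

if __name__ == "__main__":
  # A minimal usage sketch, not part of the original module: minimizing a
  # convex quadratic with the TF 1.x graph/session API. The variable `x`, the
  # loss, the learning rate, and the step count are illustrative assumptions.
  import tensorflow as tf

  x = tf.Variable(3.0, name="x")
  loss = tf.square(x - 2.0)  # minimized at x == 2.0
  train_op = GradientDescentOptimizer(learning_rate=0.1).minimize(loss)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(50):
      sess.run(train_op)  # each step applies x -= 0.1 * d(loss)/dx
    print(sess.run(x))  # converges toward 2.0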