# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras loss functions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import shutil

import numpy as np

from tensorflow.python import keras
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.platform import test

try:
  import h5py  # pylint:disable=g-import-not-at-top
except ImportError:
  h5py = None

ALL_LOSSES = [keras.losses.mean_squared_error,
              keras.losses.mean_absolute_error,
              keras.losses.mean_absolute_percentage_error,
              keras.losses.mean_squared_logarithmic_error,
              keras.losses.squared_hinge,
              keras.losses.hinge,
              keras.losses.categorical_crossentropy,
              keras.losses.binary_crossentropy,
              keras.losses.kullback_leibler_divergence,
              keras.losses.poisson,
              keras.losses.cosine_similarity,
              keras.losses.logcosh,
              keras.losses.categorical_hinge]
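
# A minimal usage sketch (editorial illustration, mirroring the shape tests
# below): each function in ALL_LOSSES maps (y_true, y_pred) tensors to a
# tensor whose last axis has been reduced, e.g.
#
#   y_a = keras.backend.variable(np.random.random((5, 6, 7)))
#   y_b = keras.backend.variable(np.random.random((5, 6, 7)))
#   out = keras.losses.mean_squared_error(y_a, y_b)  # shape (5, 6)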


class _MSEMAELoss(object):
  """Loss function with internal state, for testing serialization code."""

  def __init__(self, mse_fraction):
    self.mse_fraction = mse_fraction

  def __call__(self, y_true, y_pred, sample_weight=None):
    return (self.mse_fraction * keras.losses.mse(y_true, y_pred) +
            (1 - self.mse_fraction) * keras.losses.mae(y_true, y_pred))

  def get_config(self):
    return {'mse_fraction': self.mse_fraction}


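# A hedged round-trip sketch (editorial illustration of what the class above
# is for): under a custom object scope, an instance survives the losses
# serialize/deserialize helpers with its state intact, e.g.
#
#   with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
#     config = keras.losses.serialize(_MSEMAELoss(0.3))
#     restored = keras.losses.deserialize(config)
#   # restored.mse_fraction == 0.3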
class KerasLossesTest(test.TestCase):

  def test_objective_shapes_3d(self):
    with self.cached_session():
      y_a = keras.backend.variable(np.random.random((5, 6, 7)))
      y_b = keras.backend.variable(np.random.random((5, 6, 7)))
      for obj in ALL_LOSSES:
        objective_output = obj(y_a, y_b)
        self.assertListEqual(objective_output.get_shape().as_list(), [5, 6])

  def test_objective_shapes_2d(self):
    with self.cached_session():
      y_a = keras.backend.variable(np.random.random((6, 7)))
      y_b = keras.backend.variable(np.random.random((6, 7)))
      for obj in ALL_LOSSES:
        objective_output = obj(y_a, y_b)
        self.assertListEqual(objective_output.get_shape().as_list(), [6,])

  def test_cce_one_hot(self):
    with self.cached_session():
      y_a = keras.backend.variable(np.random.randint(0, 7, (5, 6)))
      y_b = keras.backend.variable(np.random.random((5, 6, 7)))
      objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
      assert keras.backend.eval(objective_output).shape == (5, 6)

      y_a = keras.backend.variable(np.random.randint(0, 7, (6,)))
      y_b = keras.backend.variable(np.random.random((6, 7)))
      objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
      assert keras.backend.eval(objective_output).shape == (6,)

  @test_util.run_in_graph_and_eager_modes
  def test_categorical_crossentropy_loss(self):
    target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
    logits = keras.backend.variable(np.random.random((5, 1)))
    softmax_output = keras.backend.softmax(logits)
    output_from_logit = keras.losses.categorical_crossentropy(
        target, logits, from_logits=True)
    output_from_softmax = keras.losses.categorical_crossentropy(
        target, softmax_output)
    np.testing.assert_allclose(
        keras.backend.eval(output_from_logit),
        keras.backend.eval(output_from_softmax), atol=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def test_sparse_categorical_crossentropy_loss(self):
    target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
    logits = keras.backend.variable(np.random.random((5, 1)))
    softmax_output = keras.backend.softmax(logits)
    output_from_logit = keras.losses.sparse_categorical_crossentropy(
        target, logits, from_logits=True)
    output_from_softmax = keras.losses.sparse_categorical_crossentropy(
        target, softmax_output)
    np.testing.assert_allclose(
        keras.backend.eval(output_from_logit),
        keras.backend.eval(output_from_softmax), atol=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def test_binary_crossentropy_loss(self):
    target = keras.backend.variable(np.random.randint(0, 1, (5, 1)))
    logits = keras.backend.variable(np.random.random((5, 1)))
    sigmoid_output = keras.backend.sigmoid(logits)
    output_from_logit = keras.losses.binary_crossentropy(
        target, logits, from_logits=True)
    output_from_sigmoid = keras.losses.binary_crossentropy(
        target, sigmoid_output)
    np.testing.assert_allclose(
        keras.backend.eval(output_from_logit),
        keras.backend.eval(output_from_sigmoid), atol=1e-5)

  def test_serialization(self):
    fn = keras.losses.get('mse')
    config = keras.losses.serialize(fn)
    new_fn = keras.losses.deserialize(config)
    self.assertEqual(fn, new_fn)

  def test_categorical_hinge(self):
    y_pred = keras.backend.variable(np.array([[0.3, 0.2, 0.1],
                                              [0.1, 0.2, 0.7]]))
    y_true = keras.backend.variable(np.array([[0, 1, 0], [1, 0, 0]]))
    expected_loss = ((0.3 - 0.2 + 1) + (0.7 - 0.1 + 1)) / 2.0
    loss = keras.backend.eval(keras.losses.categorical_hinge(y_true, y_pred))
    self.assertAllClose(expected_loss, np.mean(loss))

  def test_serializing_loss_class(self):
    orig_loss_class = _MSEMAELoss(0.3)
    with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
      serialized = keras.losses.serialize(orig_loss_class)

    with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
      deserialized = keras.losses.deserialize(serialized)
    assert isinstance(deserialized, _MSEMAELoss)
    assert deserialized.mse_fraction == 0.3

  def test_serializing_model_with_loss_class(self):
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir)
    model_filename = os.path.join(tmpdir, 'custom_loss.h5')

    with self.cached_session():
      with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
        loss = _MSEMAELoss(0.3)
        inputs = keras.layers.Input((2,))
        outputs = keras.layers.Dense(1, name='model_output')(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile(optimizer='sgd', loss={'model_output': loss})
        model.fit(np.random.rand(256, 2), np.random.rand(256, 1))

        if h5py is None:
          return

        model.save(model_filename)

      with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
        loaded_model = keras.models.load_model(model_filename)
        loaded_model.predict(np.random.rand(128, 2))

  def test_loss_wrapper(self):
    loss_fn = keras.losses.get('mse')
    mse_obj = keras.losses.LossFunctionWrapper(loss_fn, name=loss_fn.__name__)

    self.assertEqual(mse_obj.name, 'mean_squared_error')
    self.assertEqual(mse_obj.reduction,
                     losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)

    y_true = constant_op.constant([[1., 9.], [2., 5.]])
    y_pred = constant_op.constant([[4., 8.], [12., 3.]])
    sample_weight = constant_op.constant([1.2, 0.5])
    loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)

    # mse = [((4 - 1)^2 + (8 - 9)^2) / 2, ((12 - 2)^2 + (3 - 5)^2) / 2]
    # mse = [5, 52]
    # weighted_mse = [5 * 1.2, 52 * 0.5] = [6, 26]
    # reduced_weighted_mse = (6 + 26) / 2 = 16
    self.assertAllClose(self.evaluate(loss), 16, 1e-2)


@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredErrorTest(test.TestCase):

  def test_config(self):
    mse_obj = keras.losses.MeanSquaredError(
        reduction=losses_utils.ReductionV2.SUM, name='mse_1')
    self.assertEqual(mse_obj.name, 'mse_1')
    self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    loss = mse_obj(y_true, y_true)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mse_obj(y_true, y_pred)
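    # A worked check (editorial): squared errors are [9, 1, 100, 169, 9, 9],
    # and their mean over all 6 elements is 297 / 6 = 49.5.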
    self.assertAlmostEqual(self.evaluate(loss), 49.5, 3)

  def test_scalar_weighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mse_obj(y_true, y_pred, sample_weight=2.3)
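    # A worked check (editorial): a scalar weight scales the unweighted mean,
    # so the expected loss is 49.5 * 2.3 = 113.85.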
    self.assertAlmostEqual(self.evaluate(loss), 113.85, 3)

  def test_sample_weighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
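    # A worked check (editorial): per-row squared-error sums are 110 and 187;
    # weighting by [1.2, 3.4] gives 132 + 635.8 = 767.8 over 6 elements.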
    self.assertAlmostEqual(self.evaluate(loss), 767.8 / 6, 3)

  def test_timestep_weighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3, 1),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
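    # A worked check (editorial): per-timestep squared errors
    # [9, 1, 100, 169, 9, 9] times weights [3, 6, 5, 0, 4, 2] sum to
    # 27 + 6 + 500 + 0 + 36 + 18 = 587, spread over 6 elements.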
    self.assertAlmostEqual(self.evaluate(loss), 587 / 6, 3)

  def test_zero_weighted(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mse_obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_invalid_sample_weight(self):
    mse_obj = keras.losses.MeanSquaredError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
    sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
    with self.assertRaisesRegexp(
        ValueError, r'Shapes \(2, 2\) and \(2, 3\) are incompatible'):
      mse_obj(y_true, y_pred, sample_weight=sample_weight)

  def test_no_reduction(self):
    mse_obj = keras.losses.MeanSquaredError(
        reduction=losses_utils.ReductionV2.NONE)
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mse_obj(y_true, y_pred, sample_weight=2.3)
    loss = self.evaluate(loss)
    self.assertArrayNear(loss, [84.3333, 143.3666], 1e-3)

  def test_sum_reduction(self):
    mse_obj = keras.losses.MeanSquaredError(
        reduction=losses_utils.ReductionV2.SUM)
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mse_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 227.69998, 3)


@test_util.run_all_in_graph_and_eager_modes
class MeanAbsoluteErrorTest(test.TestCase):

  def test_config(self):
    mae_obj = keras.losses.MeanAbsoluteError(
        reduction=losses_utils.ReductionV2.SUM, name='mae_1')
    self.assertEqual(mae_obj.name, 'mae_1')
    self.assertEqual(mae_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    mae_obj = keras.losses.MeanAbsoluteError()
    y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    loss = mae_obj(y_true, y_true)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    mae_obj = keras.losses.MeanAbsoluteError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mae_obj(y_true, y_pred)
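    # A worked check (editorial): absolute errors are [3, 1, 10, 13, 3, 3],
    # averaging to 33 / 6 = 5.5.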
    self.assertAlmostEqual(self.evaluate(loss), 5.5, 3)

  def test_scalar_weighted(self):
    mae_obj = keras.losses.MeanAbsoluteError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mae_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 12.65, 3)

  def test_sample_weighted(self):
    mae_obj = keras.losses.MeanAbsoluteError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
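    # A worked check (editorial): per-row absolute-error sums are 14 and 19;
    # weighting by [1.2, 3.4] gives 16.8 + 64.6 = 81.4 over 6 elements.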
    self.assertAlmostEqual(self.evaluate(loss), 81.4 / 6, 3)

  def test_timestep_weighted(self):
    mae_obj = keras.losses.MeanAbsoluteError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3, 1),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
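    # A worked check (editorial): absolute errors [3, 1, 10, 13, 3, 3] times
    # weights [3, 6, 5, 0, 4, 2] sum to 9 + 6 + 50 + 0 + 12 + 6 = 83.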
    self.assertAlmostEqual(self.evaluate(loss), 83 / 6, 3)

  def test_zero_weighted(self):
    mae_obj = keras.losses.MeanAbsoluteError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mae_obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_invalid_sample_weight(self):
    mae_obj = keras.losses.MeanAbsoluteError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
    sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
    with self.assertRaisesRegexp(
        ValueError, r'Shapes \(2, 2\) and \(2, 3\) are incompatible'):
      mae_obj(y_true, y_pred, sample_weight=sample_weight)

  def test_no_reduction(self):
    mae_obj = keras.losses.MeanAbsoluteError(
        reduction=losses_utils.ReductionV2.NONE)
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mae_obj(y_true, y_pred, sample_weight=2.3)
    loss = self.evaluate(loss)
    self.assertArrayNear(loss, [10.7333, 14.5666], 1e-3)

  def test_sum_reduction(self):
    mae_obj = keras.losses.MeanAbsoluteError(
        reduction=losses_utils.ReductionV2.SUM)
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mae_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 25.29999, 3)


@test_util.run_all_in_graph_and_eager_modes
class MeanAbsolutePercentageErrorTest(test.TestCase):

  def test_config(self):
    mape_obj = keras.losses.MeanAbsolutePercentageError(
        reduction=losses_utils.ReductionV2.SUM, name='mape_1')
    self.assertEqual(mape_obj.name, 'mape_1')
    self.assertEqual(mape_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    mape_obj = keras.losses.MeanAbsolutePercentageError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mape_obj(y_true, y_pred)
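    # A worked check (editorial): 100 * mean(|err| / |y_true|)
    # = 100 * (3 + 1/9 + 5 + 2.6 + 1.5 + 0.5) / 6 ~= 211.8518.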
    self.assertAlmostEqual(self.evaluate(loss), 211.8518, 3)

  def test_scalar_weighted(self):
    mape_obj = keras.losses.MeanAbsolutePercentageError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mape_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 487.259, 3)

  def test_sample_weighted(self):
    mape_obj = keras.losses.MeanAbsolutePercentageError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 422.8888, 3)

  def test_timestep_weighted(self):
    mape_obj = keras.losses.MeanAbsolutePercentageError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3, 1),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 694.4445, 3)

  def test_zero_weighted(self):
    mape_obj = keras.losses.MeanAbsolutePercentageError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = mape_obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)


@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredLogarithmicErrorTest(test.TestCase):

  def test_config(self):
    msle_obj = keras.losses.MeanSquaredLogarithmicError(
        reduction=losses_utils.ReductionV2.SUM, name='msle_1')
    self.assertEqual(msle_obj.name, 'msle_1')
    self.assertEqual(msle_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    msle_obj = keras.losses.MeanSquaredLogarithmicError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = msle_obj(y_true, y_pred)
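    # A worked check (editorial; negative values are clipped to ~0 before the
    # log): squared log-differences are roughly
    # [0.840, 0.011, 2.150, 4.828, 0.480, 0.313], averaging to ~1.4370.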
    self.assertAlmostEqual(self.evaluate(loss), 1.4370, 3)

  def test_scalar_weighted(self):
    msle_obj = keras.losses.MeanSquaredLogarithmicError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = msle_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 3.3051, 3)

  def test_sample_weighted(self):
    msle_obj = keras.losses.MeanSquaredLogarithmicError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 3.7856, 3)

  def test_timestep_weighted(self):
    msle_obj = keras.losses.MeanSquaredLogarithmicError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3, 1),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 2.6473, 3)

  def test_zero_weighted(self):
    msle_obj = keras.losses.MeanSquaredLogarithmicError()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = msle_obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)


@test_util.run_all_in_graph_and_eager_modes
class CosineSimilarityTest(test.TestCase):

  def l2_norm(self, x, axis):
    epsilon = 1e-12
    square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
    x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
    return np.multiply(x, x_inv_norm)

  def setup(self, axis=1):
    self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
    self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)

    y_true = self.l2_norm(self.np_y_true, axis)
    y_pred = self.l2_norm(self.np_y_pred, axis)
    self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))

    self.y_true = constant_op.constant(self.np_y_true)
    self.y_pred = constant_op.constant(self.np_y_pred)
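    # Editorial note: with both inputs L2-normalized along `axis`, the summed
    # elementwise product above is exactly the per-sample cosine similarity,
    # which is the value this version of the loss returns.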

  def test_config(self):
    cosine_obj = keras.losses.CosineSimilarity(
        axis=2, reduction=losses_utils.ReductionV2.SUM, name='cosine_loss')
    self.assertEqual(cosine_obj.name, 'cosine_loss')
    self.assertEqual(cosine_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    self.setup()
    cosine_obj = keras.losses.CosineSimilarity()
    loss = cosine_obj(self.y_true, self.y_pred)
    expected_loss = np.mean(self.expected_loss)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_scalar_weighted(self):
    self.setup()
    cosine_obj = keras.losses.CosineSimilarity()
    sample_weight = 2.3
    loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    expected_loss = np.mean(self.expected_loss * sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_sample_weighted(self):
    self.setup()
    cosine_obj = keras.losses.CosineSimilarity()
    sample_weight = np.asarray([1.2, 3.4])
    loss = cosine_obj(
        self.y_true,
        self.y_pred,
        sample_weight=constant_op.constant(sample_weight))
    expected_loss = np.mean(self.expected_loss * sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_timestep_weighted(self):
    self.setup()
    cosine_obj = keras.losses.CosineSimilarity()
    np_y_true = self.np_y_true.reshape((2, 3, 1))
    np_y_pred = self.np_y_pred.reshape((2, 3, 1))
    sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape((2, 3))

    y_true = self.l2_norm(np_y_true, 2)
    y_pred = self.l2_norm(np_y_pred, 2)
    expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(2,))

    y_true = constant_op.constant(np_y_true)
    y_pred = constant_op.constant(np_y_pred)
    loss = cosine_obj(
        y_true, y_pred, sample_weight=constant_op.constant(sample_weight))

    expected_loss = np.mean(expected_loss * sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_zero_weighted(self):
    self.setup()
    cosine_obj = keras.losses.CosineSimilarity()
    loss = cosine_obj(self.y_true, self.y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)

  def test_axis(self):
    self.setup(axis=1)
    cosine_obj = keras.losses.CosineSimilarity(axis=1)
    loss = cosine_obj(self.y_true, self.y_pred)
    expected_loss = np.mean(self.expected_loss)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)


@test_util.run_all_in_graph_and_eager_modes
class BinaryCrossentropyTest(test.TestCase):

  def test_config(self):
    bce_obj = keras.losses.BinaryCrossentropy(
        reduction=losses_utils.ReductionV2.SUM, name='bce_1')
    self.assertEqual(bce_obj.name, 'bce_1')
    self.assertEqual(bce_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                                  dtype=dtypes.float32)
    bce_obj = keras.losses.BinaryCrossentropy()
    loss = bce_obj(y_true, y_true)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

    # Test with logits.
    logits = constant_op.constant([[100.0, -100.0, -100.0],
                                   [-100.0, 100.0, -100.0],
                                   [-100.0, -100.0, 100.0]])
    bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
    y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
    bce_obj = keras.losses.BinaryCrossentropy()
    loss = bce_obj(y_true, y_pred)

    # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

    # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
    #      = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
    #         -log(Y_MAX + EPSILON), -log(1)]
    #      = [0, 15.33, 0, 0]
    # Reduced loss = 15.33 / 4

    self.assertAlmostEqual(self.evaluate(loss), 3.833, 3)

    # Test with logits.
    y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
    logits = constant_op.constant([[100.0, -100.0, 100.0],
                                   [100.0, 100.0, -100.0]])
    bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits)

    # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #            (where x = logits and z = y_true)
    #      = [((100 - 100 * 1 + log(1 + exp(-100))) +
    #          (0 + 100 * 0 + log(1 + exp(-100))) +
    #          (100 - 100 * 1 + log(1 + exp(-100))),
    #         ((100 - 100 * 0 + log(1 + exp(-100))) +
    #          (100 - 100 * 1 + log(1 + exp(-100))) +
    #          (0 + 100 * 1 + log(1 + exp(-100))))]
    #      = [(0 + 0 + 0) / 3, 200 / 3]
    # Reduced loss = (0 + 66.666) / 2

    self.assertAlmostEqual(self.evaluate(loss), 33.333, 3)

  def test_scalar_weighted(self):
    bce_obj = keras.losses.BinaryCrossentropy()
    y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
    y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
    loss = bce_obj(y_true, y_pred, sample_weight=2.3)

    # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

    # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
    #      = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
    #         -log(Y_MAX + EPSILON), -log(1)]
    #      = [0, 15.33, 0, 0]
    # Weighted loss = [0, 15.33 * 2.3, 0, 0]
    # Reduced loss = 15.33 * 2.3 / 4

    self.assertAlmostEqual(self.evaluate(loss), 8.817, 3)

    # Test with logits.
    y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
    logits = constant_op.constant([[100.0, -100.0, 100.0],
                                   [100.0, 100.0, -100.0]])
    bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits, sample_weight=2.3)

    # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #            (where x = logits and z = y_true)
    # Loss = [(0 + 0 + 0) / 3, 200 / 3]
    # Weighted loss = [0 * 2.3, 66.666 * 2.3]
    # Reduced loss = (0 + 66.666 * 2.3) / 2

    self.assertAlmostEqual(self.evaluate(loss), 76.667, 3)

  def test_sample_weighted(self):
    bce_obj = keras.losses.BinaryCrossentropy()
    y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
    y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)

    # EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
    # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    # y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]

    # Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
    #      = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
    #         -log(Y_MAX + EPSILON), -log(1)]
    #      = [0, 15.33, 0, 0]
    # Reduced loss = 15.33 * 1.2 / 4

    self.assertAlmostEqual(self.evaluate(loss), 4.6, 3)

    # Test with logits.
    y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
    logits = constant_op.constant([[100.0, -100.0, 100.0],
                                   [100.0, 100.0, -100.0]])
    weights = constant_op.constant([4, 3])
    bce_obj = keras.losses.BinaryCrossentropy(from_logits=True)
    loss = bce_obj(y_true, logits, sample_weight=weights)

    # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #            (where x = logits and z = y_true)
    # Loss = [(0 + 0 + 0)/3, 200 / 3]
    # Weighted loss = [0 * 4, 66.666 * 3]
    # Reduced loss = (0 + 66.666 * 3) / 2

    self.assertAlmostEqual(self.evaluate(loss), 100, 3)

  def test_no_reduction(self):
    y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
    logits = constant_op.constant([[100.0, -100.0, 100.0],
                                   [100.0, 100.0, -100.0]])
    bce_obj = keras.losses.BinaryCrossentropy(
        from_logits=True, reduction=losses_utils.ReductionV2.NONE)
    loss = bce_obj(y_true, logits)

    # Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #            (where x = logits and z = y_true)
    # Loss = [(0 + 0 + 0)/3, (200)/3]

    self.assertAllClose((0., 66.6666), self.evaluate(loss), 3)

  def test_label_smoothing(self):
    logits = constant_op.constant([[100.0, -100.0, -100.0]])
    y_true = constant_op.constant([[1, 0, 1]])
    label_smoothing = 0.1
    # Loss: max(x, 0) - x * z + log(1 + exp(-abs(x)))
    #            (where x = logits and z = y_true)
    # Label smoothing: z' = z * (1 - L) + 0.5L
    #                  1  = 1 - 0.5L
    #                  0  = 0.5L
    # Applying the above two fns to the given input:
    # (100 - 100 * (1 - 0.5 L)  + 0 +
    #  0   + 100 * (0.5 L)      + 0 +
    #  0   + 100 * (1 - 0.5 L)  + 0) * (1/3)
    #  = (100 + 50L) * 1/3
    bce_obj = keras.losses.BinaryCrossentropy(
        from_logits=True, label_smoothing=label_smoothing)
    loss = bce_obj(y_true, logits)
    expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
    self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)


@test_util.run_all_in_graph_and_eager_modes
class CategoricalCrossentropyTest(test.TestCase):

  def test_config(self):
    cce_obj = keras.losses.CategoricalCrossentropy(
        reduction=losses_utils.ReductionV2.SUM, name='cce_1')
    self.assertEqual(cce_obj.name, 'cce_1')
    self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_all_correct_unweighted(self):
    y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                                  dtype=dtypes.int64)
    y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
                                  dtype=dtypes.float32)
    cce_obj = keras.losses.CategoricalCrossentropy()
    loss = cce_obj(y_true, y_pred)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

    # Test with logits.
    logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
    cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    loss = cce_obj(y_true, logits)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    cce_obj = keras.losses.CategoricalCrossentropy()
    y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    y_pred = constant_op.constant(
        [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    loss = cce_obj(y_true, y_pred)
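    # A worked check (editorial): predictions are renormalized to sum to 1 per
    # row, so the per-sample losses are -log of [0.9, 0.89/1.99, 0.94]
    # = [0.105, 0.805, 0.062]; their mean is ~0.3239.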
    784     self.assertAlmostEqual(self.evaluate(loss), .3239, 3)
    785 
    786     # Test with logits.
    787     logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    788     cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    789     loss = cce_obj(y_true, logits)
    790     self.assertAlmostEqual(self.evaluate(loss), .0573, 3)
    791 
    792   def test_scalar_weighted(self):
    793     cce_obj = keras.losses.CategoricalCrossentropy()
    794     y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    795     y_pred = constant_op.constant(
    796         [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    797     loss = cce_obj(y_true, y_pred, sample_weight=2.3)
    798     self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
    799 
    800     # Test with logits.
    801     logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    802     cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    803     loss = cce_obj(y_true, logits, sample_weight=2.3)
    804     self.assertAlmostEqual(self.evaluate(loss), .1317, 3)
    805 
    806   def test_sample_weighted(self):
    807     cce_obj = keras.losses.CategoricalCrossentropy()
    808     y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    809     y_pred = constant_op.constant(
    810         [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    811     sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
    812     loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
    813     self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
    814 
    815     # Test with logits.
    816     logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    817     cce_obj = keras.losses.CategoricalCrossentropy(from_logits=True)
    818     loss = cce_obj(y_true, logits, sample_weight=sample_weight)
    819     self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
    820 
    821   def test_no_reduction(self):
    822     y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    823     logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    824     cce_obj = keras.losses.CategoricalCrossentropy(
    825         from_logits=True, reduction=losses_utils.ReductionV2.NONE)
    826     loss = cce_obj(y_true, logits)
    827     self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)
    828 
    829   def test_label_smoothing(self):
    830     logits = constant_op.constant([[100.0, -100.0, -100.0]])
    831     y_true = constant_op.constant([[1, 0, 0]])
    832     label_smoothing = 0.1
    833     # Softmax Cross Entropy Loss: -\sum_i p_i \log q_i
    834     # where for a softmax activation
    835     # \log q_i = x_i - \log \sum_j \exp x_j
    836     #          = x_i - x_max - \log \sum_j \exp (x_j - x_max)
    837     # For our activations, [100, -100, -100]
    838     # \log ( exp(0) + exp(-200) + exp(-200) ) = 0
    839     # so our log softmaxes become: [0, -200, -200]
    840     # Label smoothing: z' = z * (1 - L) + L/n
    841     #                  1  = 1 - L + L/n
    842     #                  0  = L/n
    843     # Applying the above two fns to the given input:
    844     # -0 * (1 - L + L/n) + 200 * L/n + 200 * L/n = 400 L/n
    845     cce_obj = keras.losses.CategoricalCrossentropy(
    846         from_logits=True, label_smoothing=label_smoothing)
    847     loss = cce_obj(y_true, logits)
    848     expected_value = 400.0 * label_smoothing / 3.0
    849     self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
    850 
    851 
    852 @test_util.run_all_in_graph_and_eager_modes
    853 class SparseCategoricalCrossentropyTest(test.TestCase):
    854 
    855   def test_all_correct_unweighted(self):
    856     y_true = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
    857     y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
    858                                   dtype=dtypes.float32)
    859     cce_obj = keras.losses.SparseCategoricalCrossentropy()
    860     loss = cce_obj(y_true, y_pred)
    861     self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
    862 
    863     # Test with logits.
    864     logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
    865     cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    866     loss = cce_obj(y_true, logits)
    867     self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
    868 
    869   def test_unweighted(self):
    870     cce_obj = keras.losses.SparseCategoricalCrossentropy()
    871     y_true = constant_op.constant([0, 1, 2])
    872     y_pred = constant_op.constant(
    873         [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    874     loss = cce_obj(y_true, y_pred)
    875     self.assertAlmostEqual(self.evaluate(loss), .3239, 3)
    876 
    877     # Test with logits.
    878     logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    879     cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    880     loss = cce_obj(y_true, logits)
    881     self.assertAlmostEqual(self.evaluate(loss), .0573, 3)
    882 
    883   def test_scalar_weighted(self):
    884     cce_obj = keras.losses.SparseCategoricalCrossentropy()
    885     y_true = constant_op.constant([[0], [1], [2]])
    886     y_pred = constant_op.constant(
    887         [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    888     loss = cce_obj(y_true, y_pred, sample_weight=2.3)
    889     self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
    890 
    891     # Test with logits.
    892     logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    893     cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    894     loss = cce_obj(y_true, logits, sample_weight=2.3)
    895     self.assertAlmostEqual(self.evaluate(loss), .1317, 3)
    896 
    897   def test_sample_weighted(self):
    898     cce_obj = keras.losses.SparseCategoricalCrossentropy()
    899     y_true = constant_op.constant([[0], [1], [2]])
    900     y_pred = constant_op.constant(
    901         [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
    902     sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
    903     loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
    904     self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
    905 
    906     # Test with logits.
    907     logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    908     cce_obj = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    909     loss = cce_obj(y_true, logits, sample_weight=sample_weight)
    910     self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
    911 
    912   def test_no_reduction(self):
    913     y_true = constant_op.constant([[0], [1], [2]])
    914     logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
    915     cce_obj = keras.losses.SparseCategoricalCrossentropy(
    916         from_logits=True, reduction=losses_utils.ReductionV2.NONE)
    917     loss = cce_obj(y_true, logits)
    918     self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)
    919 
    920 
    921 @test_util.run_all_in_graph_and_eager_modes
    922 class HingeTest(test.TestCase):
    923 
    924   def test_config(self):
    925     hinge_obj = keras.losses.Hinge(
    926         reduction=losses_utils.ReductionV2.SUM, name='hinge_loss')
    927     self.assertEqual(hinge_obj.name, 'hinge_loss')
    928     self.assertEqual(hinge_obj.reduction, losses_utils.ReductionV2.SUM)
    929 
    930   def test_unweighted(self):
    931     hinge_obj = keras.losses.Hinge()
    932     y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    933     y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
    934                                    [-0.25, -1., 0.5, 0.6]])
    935 
    936     # loss = max(0, 1-y_true * y_pred), where y_true is -1/1
    937 
    938     # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    939     # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    940     # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    941     # loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
    942     #      = [0.6, 0.4125]
    943     # reduced loss = (0.6 + 0.4125) / 2
    944 
    945     loss = hinge_obj(y_true, y_pred)
    946     self.assertAllClose(0.506, self.evaluate(loss), atol=1e-3)
    947 
    948   def test_scalar_weighted(self):
    949     hinge_obj = keras.losses.Hinge()
    950     y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    951     y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
    952                                    [-0.25, -1., 0.5, 0.6]])
    953 
    954     # loss = max(0, 1-y_true * y_pred), where y_true is -1/1
    955 
    956     # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    957     # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    958     # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    959     # loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
    960     #      = [0.6, 0.4125]
    961     # weighted_loss = [0.6 * 2.3, 0.4125 * 2.3]
    962     # reduced loss = (0.6 + 0.4125) * 2.3 / 2
    963 
    964     loss = hinge_obj(y_true, y_pred, sample_weight=2.3)
    965     self.assertAlmostEqual(self.evaluate(loss), 1.164, 3)
    966 
    967     # Verify we get the same output when the same input is given
    968     loss_2 = hinge_obj(y_true, y_pred, sample_weight=2.3)
    969     self.assertAllClose(self.evaluate(loss), self.evaluate(loss_2), 1e-3)
    970 
    971   def test_sample_weighted(self):
    972     hinge_obj = keras.losses.Hinge()
    973     y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    974     y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
    975                                    [-0.25, -1., 0.5, 0.6]])
    976 
    977     # loss = max(0, 1-y_true * y_pred), where y_true is -1/1
    978 
    979     # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    980     # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    981     # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    982     # loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
    983     #      = [0.6, 0.4125]
    984     # weighted loss = [0.6 * 1.2, 0.4125 * 3.4]
    985     # reduced loss = (0.6 * 1.2 + 0.4125 * 3.4) / 2
    986 
    987     sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    988     loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    989     self.assertAllClose(self.evaluate(loss), 1.061, 1e-3)
    990 
    991   def test_timestep_weighted(self):
    992     hinge_obj = keras.losses.Hinge()
    993     y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1))
    994     y_pred = constant_op.constant(
    995         [[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]], shape=(2, 4, 1))
    996     sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4))
    997 
    998     # loss = max(0, 1-y_true * y_pred), where y_true is -1/1
    999 
   1000     # y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]]
   1001     # y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]],
   1002     #                    [[0.25], [1], [0.5], [0.6]]]
   1003     # 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]],
   1004     #                        [[0.75], [0], [0.5], [0.4]]]
   1005     # loss = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
   1006     # weighted loss    = [[2.1, 4.8, 4.5, 0], [3, 0, 0.5, 1.2]]
   1007     # reduced loss = (2.1 + 4.8 + 4.5 + 0 + 3 + 0 + 0.5 + 1.2) / 8
   1008 
   1009     loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
   1010     self.assertAllClose(self.evaluate(loss), 2.012, 1e-3)
   1011 
   1012   def test_zero_weighted(self):
   1013     hinge_obj = keras.losses.Hinge()
   1014     y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
   1015     y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
   1016                                    [-0.25, -1., 0.5, 0.6]])
   1017     loss = hinge_obj(y_true, y_pred, sample_weight=0)
   1018     self.assertAllClose(self.evaluate(loss), 0., 1e-3)
   1019 
   1020 
   1021 @test_util.run_all_in_graph_and_eager_modes
   1022 class SquaredHingeTest(test.TestCase):
   1023 
   1024   def test_config(self):
   1025     sq_hinge_obj = keras.losses.SquaredHinge(
   1026         reduction=losses_utils.ReductionV2.SUM, name='sq_hinge_loss')
   1027     self.assertEqual(sq_hinge_obj.name, 'sq_hinge_loss')
   1028     self.assertEqual(sq_hinge_obj.reduction, losses_utils.ReductionV2.SUM)
   1029 
   1030   def test_unweighted(self):
   1031     sq_hinge_obj = keras.losses.SquaredHinge()
   1032     y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
   1033     y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
   1034                                    [-0.25, -1., 0.5, 0.6]])
   1035 
   1036     # loss = max(0, 1-y_true * y_pred), where y_true is -1/1
   1037 
   1038     # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
   1039     # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
   1040     # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
   1041     # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
   1042     # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
   1043     #                                         [0.5625, 0, 0.25, 0.16]]
   1044     # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
   1045     #      = [0.485, 0.2431]
   1046     # reduced loss = (0.485 + 0.2431) / 2
   1047 
   1048     loss = sq_hinge_obj(y_true, y_pred)
   1049     self.assertAllClose(self.evaluate(loss), 0.364, 1e-3)
   1050 
   1051   def test_scalar_weighted(self):
   1052     sq_hinge_obj = keras.losses.SquaredHinge()
   1053     y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
   1054     y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
   1055                                    [-0.25, -1., 0.5, 0.6]])
   1056 
   1057     # loss = max(0, 1-y_true * y_pred), where y_true is -1/1
   1058 
   1059     # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
   1060     # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
   1061     # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
   1062     # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
   1063     # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
   1064     #                                         [0.5625, 0, 0.25, 0.16]]
   1065     # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
   1066     #      = [0.485, 0.2431]
   1067     # weighted loss = [0.485 * 2.3, 0.2431 * 2.3]
   1068     # reduced loss = (0.485 + 0.2431) * 2.3 / 2
   1069 
   1070     loss = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
   1071     self.assertAllClose(self.evaluate(loss), 0.837, 1e-3)
   1072 
   1073     # Verify we get the same output when the same input is given
   1074     loss_2 = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
   1075     self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
   1076 
  def test_sample_weighted(self):
    sq_hinge_obj = keras.losses.SquaredHinge()
    y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
                                   [-0.25, -1., 0.5, 0.6]])

    # loss = max(0, 1 - y_true * y_pred)**2, where the 0/1 labels in y_true
    # are first mapped to -1/1

    # y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
    # y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
    # 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
    # max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
    # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
    #                                         [0.5625, 0, 0.25, 0.16]]
    # loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
    #      = [0.485, 0.2431]
    # weighted loss = [0.485 * 1.2, 0.2431 * 3.4]
    # reduced loss = (0.485 * 1.2 + 0.2431 * 3.4) / 2

    sample_weight = constant_op.constant([1.2, 3.4])
    loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(self.evaluate(loss), 0.704, 1e-3)
  def test_timestep_weighted(self):
    sq_hinge_obj = keras.losses.SquaredHinge()
    y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1))
    y_pred = constant_op.constant(
        [[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]], shape=(2, 4, 1))
    sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4))

    # loss = max(0, 1 - y_true * y_pred)**2, where the 0/1 labels in y_true
    # are first mapped to -1/1

    # y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]]
    # y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]],
    #                    [[0.25], [1], [0.5], [0.6]]]
    # 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]],
    #                        [[0.75], [0], [0.5], [0.4]]]
    # squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
    #                                         [0.5625, 0, 0.25, 0.16]]
    # weighted loss = [[1.47, 3.84, 4.05, 0], [2.25, 0, 0.25, 0.48]]
    # reduced loss = (1.47 + 3.84 + 4.05 + 0 + 2.25 + 0 + 0.25 + 0.48) / 8

    loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(self.evaluate(loss), 1.542, 1e-3)
  def test_zero_weighted(self):
    sq_hinge_obj = keras.losses.SquaredHinge()
    y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
    y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
                                   [-0.25, -1., 0.5, 0.6]])
    loss = sq_hinge_obj(y_true, y_pred, sample_weight=0)
    self.assertAllClose(self.evaluate(loss), 0., 1e-3)

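
# The following helper is a minimal NumPy sketch, not part of the original
# test suite, added to illustrate the arithmetic the squared-hinge comments
# above walk through. The name is hypothetical; it assumes the same
# conventions as the tests with the default SUM_OVER_BATCH_SIZE reduction:
# 0/1 labels are mapped to -1/1, squared hinge values are averaged over the
# last axis, and the (optionally weighted) per-sample losses are averaged
# over the batch.
def _reference_squared_hinge(y_true, y_pred, sample_weight=None):
  y_true = 2. * np.asarray(y_true, dtype=float) - 1.  # {0, 1} -> {-1, 1}
  per_element = np.square(np.maximum(0., 1. - y_true * np.asarray(y_pred)))
  per_sample = per_element.mean(axis=-1)
  if sample_weight is not None:
    per_sample = per_sample * np.squeeze(np.asarray(sample_weight))
  return per_sample.mean()

# For the inputs in test_unweighted above this reproduces the asserted
# value: _reference_squared_hinge([[0, 1, 0, 1], [0, 0, 1, 1]],
# [[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]) ~= 0.364.
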

@test_util.run_all_in_graph_and_eager_modes
class CategoricalHingeTest(test.TestCase):

  def test_config(self):
    cat_hinge_obj = keras.losses.CategoricalHinge(
        reduction=losses_utils.ReductionV2.SUM, name='cat_hinge_loss')
    self.assertEqual(cat_hinge_obj.name, 'cat_hinge_loss')
    self.assertEqual(cat_hinge_obj.reduction, losses_utils.ReductionV2.SUM)
  def test_unweighted(self):
    cat_hinge_obj = keras.losses.CategoricalHinge()
    y_true = constant_op.constant([1, 9, 2, -5], shape=(2, 2))
    y_pred = constant_op.constant([4, 8, 12, 8],
                                  shape=(2, 2),
                                  dtype=dtypes.float32)
    loss = cat_hinge_obj(y_true, y_pred)

    # pos = reduce_sum(y_true * y_pred) = [1*4 + 9*8, 2*12 + (-5)*8] = [76, -16]
    # neg = reduce_max((1. - y_true) * y_pred) = max([[0, -64], [-12, 48]])
    #     = [0, 48]
    # cat_hinge = max(0., neg - pos + 1.) = [0, 65]
    # reduced_loss = (0 + 65) / 2 = 32.5
    self.assertAlmostEqual(self.evaluate(loss), 32.5, 3)
  def test_scalar_weighted(self):
    cat_hinge_obj = keras.losses.CategoricalHinge()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = cat_hinge_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), 83.95, 3)

    # Verify we get the same output when the same input is given
    loss_2 = cat_hinge_obj(y_true, y_pred, sample_weight=2.3)
    self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)

  def test_sample_weighted(self):
    cat_hinge_obj = keras.losses.CategoricalHinge()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 124.1, 3)

  def test_timestep_weighted(self):
    cat_hinge_obj = keras.losses.CategoricalHinge()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3, 1),
                                  dtype=dtypes.float32)
    sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
    loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 4.0, 3)

  def test_zero_weighted(self):
    cat_hinge_obj = keras.losses.CategoricalHinge()
    y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
    y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
                                  shape=(2, 3),
                                  dtype=dtypes.float32)
    loss = cat_hinge_obj(y_true, y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)

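
# A minimal NumPy sketch, not part of the original test suite, mirroring
# the categorical hinge arithmetic spelled out in the comments above (the
# helper name is hypothetical). pos sums the true-class scores, neg takes
# the best score among the remaining classes, and the per-sample losses are
# averaged over the batch as in the default SUM_OVER_BATCH_SIZE reduction.
def _reference_categorical_hinge(y_true, y_pred, sample_weight=None):
  y_true = np.asarray(y_true, dtype=float)
  y_pred = np.asarray(y_pred, dtype=float)
  pos = np.sum(y_true * y_pred, axis=-1)
  neg = np.max((1. - y_true) * y_pred, axis=-1)
  per_sample = np.maximum(0., neg - pos + 1.)
  if sample_weight is not None:
    per_sample = per_sample * np.squeeze(np.asarray(sample_weight))
  return per_sample.mean()

# E.g. _reference_categorical_hinge([[1, 9], [2, -5]], [[4, 8], [12, 8]])
# gives 32.5, matching test_unweighted above.
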

@test_util.run_all_in_graph_and_eager_modes
class LogCoshTest(test.TestCase):

  def setup(self):
    # Note the lowercase name: unittest does not run this automatically as
    # setUp, so each test calls self.setup() explicitly.
    y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))
    y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))

    self.batch_size = 6
    error = y_pred - y_true
    # log(cosh(error)), written out as log((e^x + e^-x) / 2).
    self.expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)

    self.y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
    self.y_true = constant_op.constant(y_true)

  def test_config(self):
    logcosh_obj = keras.losses.LogCosh(
        reduction=losses_utils.ReductionV2.SUM, name='logcosh_loss')
    self.assertEqual(logcosh_obj.name, 'logcosh_loss')
    self.assertEqual(logcosh_obj.reduction, losses_utils.ReductionV2.SUM)
  def test_unweighted(self):
    self.setup()
    logcosh_obj = keras.losses.LogCosh()

    loss = logcosh_obj(self.y_true, self.y_pred)
    expected_loss = np.sum(self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_scalar_weighted(self):
    self.setup()
    logcosh_obj = keras.losses.LogCosh()
    sample_weight = 2.3

    loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    expected_loss = sample_weight * np.sum(
        self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

    # Verify we get the same output when the same input is given
    loss_2 = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)

  def test_sample_weighted(self):
    self.setup()
    logcosh_obj = keras.losses.LogCosh()

    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)

    expected_loss = np.multiply(
        self.expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
    expected_loss = np.sum(expected_loss) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_timestep_weighted(self):
    self.setup()
    logcosh_obj = keras.losses.LogCosh()
    y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
    y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
    error = y_pred - y_true
    expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
    sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))

    y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
    y_true = constant_op.constant(y_true)
    loss = logcosh_obj(
        y_true,
        y_pred,
        sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
    expected_loss = np.sum(expected_losses * sample_weight) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_zero_weighted(self):
    self.setup()
    logcosh_obj = keras.losses.LogCosh()
    sample_weight = 0
    loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)

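
# A minimal NumPy sketch of the log-cosh computation the setup above
# expects, not part of the original test suite (the helper name is
# hypothetical). np.log(np.cosh(x)) is the same quantity as the
# np.log((np.exp(x) + np.exp(-x)) / 2) used in setup.
def _reference_logcosh(y_true, y_pred, sample_weight=None):
  error = np.asarray(y_pred, dtype=float) - np.asarray(y_true, dtype=float)
  per_element = np.log(np.cosh(error))
  per_sample = per_element.mean(axis=-1)
  if sample_weight is not None:
    per_sample = per_sample * np.squeeze(np.asarray(sample_weight))
  return per_sample.mean()
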

@test_util.run_all_in_graph_and_eager_modes
class PoissonTest(test.TestCase):

  def setup(self):
    self.np_y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
    self.np_y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))

    self.batch_size = 6
    self.expected_losses = self.np_y_pred - np.multiply(self.np_y_true,
                                                        np.log(self.np_y_pred))

    self.y_pred = constant_op.constant(self.np_y_pred, dtype=dtypes.float32)
    self.y_true = constant_op.constant(self.np_y_true)

  def test_config(self):
    poisson_obj = keras.losses.Poisson(
        reduction=losses_utils.ReductionV2.SUM, name='poisson')
    self.assertEqual(poisson_obj.name, 'poisson')
    self.assertEqual(poisson_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    self.setup()
    poisson_obj = keras.losses.Poisson()

    loss = poisson_obj(self.y_true, self.y_pred)
    expected_loss = np.sum(self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_scalar_weighted(self):
    self.setup()
    poisson_obj = keras.losses.Poisson()
    sample_weight = 2.3
    loss = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)

    expected_loss = sample_weight * np.sum(
        self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

    # Verify we get the same output when the same input is given
    loss_2 = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)

  def test_sample_weighted(self):
    self.setup()
    poisson_obj = keras.losses.Poisson()

    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)

    expected_loss = np.multiply(
        self.expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
    expected_loss = np.sum(expected_loss) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_timestep_weighted(self):
    self.setup()
    poisson_obj = keras.losses.Poisson()
    y_true = self.np_y_true.reshape(2, 3, 1)
    y_pred = self.np_y_pred.reshape(2, 3, 1)
    sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3, 1)
    expected_losses = y_pred - np.multiply(y_true, np.log(y_pred))

    y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
    y_true = constant_op.constant(y_true)

    loss = poisson_obj(
        y_true,
        y_pred,
        sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
    expected_loss = np.sum(expected_losses * sample_weight) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_zero_weighted(self):
    self.setup()
    poisson_obj = keras.losses.Poisson()
    loss = poisson_obj(self.y_true, self.y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)

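
# A minimal NumPy sketch of the Poisson loss used in the tests above, not
# part of the original test suite (the helper name is hypothetical). It
# ignores the small epsilon the backend adds inside the log for numerical
# stability, which is negligible for the strictly positive y_pred values
# used here.
def _reference_poisson(y_true, y_pred, sample_weight=None):
  y_true = np.asarray(y_true, dtype=float)
  y_pred = np.asarray(y_pred, dtype=float)
  per_element = y_pred - y_true * np.log(y_pred)
  per_sample = per_element.mean(axis=-1)
  if sample_weight is not None:
    per_sample = per_sample * np.squeeze(np.asarray(sample_weight))
  return per_sample.mean()
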

@test_util.run_all_in_graph_and_eager_modes
class KLDivergenceTest(test.TestCase):

  def setup(self):
    self.np_y_pred = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3))
    self.np_y_true = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3))

    self.batch_size = 2
    self.expected_losses = np.multiply(self.np_y_true,
                                       np.log(self.np_y_true / self.np_y_pred))

    self.y_pred = constant_op.constant(self.np_y_pred, dtype=dtypes.float32)
    self.y_true = constant_op.constant(self.np_y_true)

  def test_config(self):
    k_obj = keras.losses.KLDivergence(
        reduction=losses_utils.ReductionV2.SUM, name='kld')
    self.assertEqual(k_obj.name, 'kld')
    self.assertEqual(k_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_unweighted(self):
    self.setup()
    k_obj = keras.losses.KLDivergence()

    loss = k_obj(self.y_true, self.y_pred)
    expected_loss = np.sum(self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_scalar_weighted(self):
    self.setup()
    k_obj = keras.losses.KLDivergence()
    sample_weight = 2.3

    loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    expected_loss = sample_weight * np.sum(
        self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

    # Verify we get the same output when the same input is given
    loss_2 = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)

  def test_sample_weighted(self):
    self.setup()
    k_obj = keras.losses.KLDivergence()
    sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
    loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)

    expected_loss = np.multiply(
        self.expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(2, 3))
    expected_loss = np.sum(expected_loss) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_timestep_weighted(self):
    self.setup()
    k_obj = keras.losses.KLDivergence()
    y_true = self.np_y_true.reshape(2, 3, 1)
    y_pred = self.np_y_pred.reshape(2, 3, 1)
    sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3)
    expected_losses = np.sum(
        np.multiply(y_true, np.log(y_true / y_pred)), axis=-1)

    y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
    y_true = constant_op.constant(y_true)
    loss = k_obj(
        y_true, y_pred, sample_weight=constant_op.constant(sample_weight))

    num_timesteps = 3
    expected_loss = np.sum(expected_losses * sample_weight) / (
        self.batch_size * num_timesteps)
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_zero_weighted(self):
    self.setup()
    k_obj = keras.losses.KLDivergence()
    loss = k_obj(self.y_true, self.y_pred, sample_weight=0)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)

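
# A minimal NumPy sketch of the KL divergence reduction the tests above
# expect, not part of the original test suite (the helper name is
# hypothetical). Unlike the element-mean losses above, KL divergence sums
# over the last axis before the batch mean, which is why setup uses
# batch_size = 2 rather than 6.
def _reference_kld(y_true, y_pred, sample_weight=None):
  y_true = np.asarray(y_true, dtype=float)
  y_pred = np.asarray(y_pred, dtype=float)
  per_sample = np.sum(y_true * np.log(y_true / y_pred), axis=-1)
  if sample_weight is not None:
    per_sample = per_sample * np.squeeze(np.asarray(sample_weight))
  return per_sample.mean()
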

@test_util.run_all_in_graph_and_eager_modes
class HuberLossTest(test.TestCase):

  def huber_loss(self, y_true, y_pred, delta=1.0):
    # NumPy reference: 0.5 * e**2 where |e| <= delta, else
    # 0.5 * delta**2 + delta * (|e| - delta).
    error = y_pred - y_true
    abs_error = np.abs(error)

    quadratic = np.minimum(abs_error, delta)
    linear = np.subtract(abs_error, quadratic)
    return np.add(
        np.multiply(0.5, np.multiply(quadratic, quadratic)),
        np.multiply(delta, linear))

  def setup(self, delta=1.0):
    self.np_y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
    self.np_y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3))

    self.batch_size = 6
    self.expected_losses = self.huber_loss(self.np_y_true, self.np_y_pred,
                                           delta)

    self.y_pred = constant_op.constant(self.np_y_pred)
    self.y_true = constant_op.constant(self.np_y_true)

  def test_config(self):
    h_obj = keras.losses.Huber(
        reduction=losses_utils.ReductionV2.SUM, name='huber')
    self.assertEqual(h_obj.name, 'huber')
    self.assertEqual(h_obj.reduction, losses_utils.ReductionV2.SUM)

  def test_all_correct(self):
    self.setup()
    h_obj = keras.losses.Huber()
    loss = h_obj(self.y_true, self.y_true)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    self.setup()
    h_obj = keras.losses.Huber()
    loss = h_obj(self.y_true, self.y_pred)
    expected_loss = np.sum(self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_scalar_weighted(self):
    self.setup()
    h_obj = keras.losses.Huber()
    sample_weight = 2.3
    loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    expected_loss = sample_weight * np.sum(
        self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

    # Verify we get the same output when the same input is given
    loss_2 = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)

  def test_sample_weighted(self):
    self.setup()
    h_obj = keras.losses.Huber()
    sample_weight = constant_op.constant((1.2, 3.4), shape=(2, 1))

    loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    expected_loss = np.multiply(
        self.expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
    expected_loss = np.sum(expected_loss) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_timestep_weighted(self):
    self.setup()
    h_obj = keras.losses.Huber()
    y_pred = self.np_y_pred.reshape((2, 3, 1))
    y_true = self.np_y_true.reshape((2, 3, 1))
    expected_losses = self.huber_loss(y_true, y_pred)

    y_pred = constant_op.constant(y_pred)
    y_true = constant_op.constant(y_true)
    sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
    loss = h_obj(
        y_true,
        y_pred,
        sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
    expected_loss = np.multiply(expected_losses, sample_weight)
    expected_loss = np.sum(expected_loss) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

  def test_zero_weighted(self):
    self.setup()
    h_obj = keras.losses.Huber()
    sample_weight = 0
    loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)

  def test_non_default_delta(self):
    self.setup(delta=0.8)
    h_obj = keras.losses.Huber(delta=0.8)
    sample_weight = 2.3
    loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    expected_loss = sample_weight * np.sum(
        self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)

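
# A minimal NumPy sketch tying the huber_loss helper above to the reduced
# values the tests assert, not part of the original test suite (the helper
# name is hypothetical): element losses are averaged over the last axis and
# the (optionally weighted) per-sample losses over the batch.
def _reference_huber(y_true, y_pred, delta=1.0, sample_weight=None):
  error = np.asarray(y_pred, dtype=float) - np.asarray(y_true, dtype=float)
  abs_error = np.abs(error)
  quadratic = np.minimum(abs_error, delta)  # quadratic region, |e| <= delta
  linear = abs_error - quadratic            # linear overflow beyond delta
  per_element = 0.5 * quadratic**2 + delta * linear
  per_sample = per_element.mean(axis=-1)
  if sample_weight is not None:
    per_sample = per_sample * np.squeeze(np.asarray(sample_weight))
  return per_sample.mean()
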

if __name__ == '__main__':
  test.main()