1 # Copyright 2015 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================= 15 16 """Contains the convolutional layer classes and their functional aliases. 17 """ 18 from __future__ import absolute_import 19 from __future__ import division 20 from __future__ import print_function 21 22 from tensorflow.python.keras import layers as keras_layers 23 from tensorflow.python.layers import base 24 from tensorflow.python.ops import init_ops 25 from tensorflow.python.util import deprecation 26 from tensorflow.python.util.tf_export import tf_export 27 28 29 @tf_export(v1=['layers.Conv1D']) 30 class Conv1D(keras_layers.Conv1D, base.Layer): 31 """1D convolution layer (e.g. temporal convolution). 32 33 This layer creates a convolution kernel that is convolved 34 (actually cross-correlated) with the layer input to produce a tensor of 35 outputs. If `use_bias` is True (and a `bias_initializer` is provided), 36 a bias vector is created and added to the outputs. Finally, if 37 `activation` is not `None`, it is applied to the outputs as well. 38 39 Arguments: 40 filters: Integer, the dimensionality of the output space (i.e. the number 41 of filters in the convolution). 42 kernel_size: An integer or tuple/list of a single integer, specifying the 43 length of the 1D convolution window. 
44 strides: An integer or tuple/list of a single integer, 45 specifying the stride length of the convolution. 46 Specifying any stride value != 1 is incompatible with specifying 47 any `dilation_rate` value != 1. 48 padding: One of `"valid"` or `"same"` (case-insensitive). 49 data_format: A string, one of `channels_last` (default) or `channels_first`. 50 The ordering of the dimensions in the inputs. 51 `channels_last` corresponds to inputs with shape 52 `(batch, length, channels)` while `channels_first` corresponds to 53 inputs with shape `(batch, channels, length)`. 54 dilation_rate: An integer or tuple/list of a single integer, specifying 55 the dilation rate to use for dilated convolution. 56 Currently, specifying any `dilation_rate` value != 1 is 57 incompatible with specifying any `strides` value != 1. 58 activation: Activation function. Set it to None to maintain a 59 linear activation. 60 use_bias: Boolean, whether the layer uses a bias. 61 kernel_initializer: An initializer for the convolution kernel. 62 bias_initializer: An initializer for the bias vector. If None, the default 63 initializer will be used. 64 kernel_regularizer: Optional regularizer for the convolution kernel. 65 bias_regularizer: Optional regularizer for the bias vector. 66 activity_regularizer: Optional regularizer function for the output. 67 kernel_constraint: Optional projection function to be applied to the 68 kernel after being updated by an `Optimizer` (e.g. used to implement 69 norm constraints or value constraints for layer weights). The function 70 must take as input the unprojected variable and must return the 71 projected variable (which must have the same shape). Constraints are 72 not safe to use when doing asynchronous distributed training. 73 bias_constraint: Optional projection function to be applied to the 74 bias after being updated by an `Optimizer`. 
75 trainable: Boolean, if `True` also add variables to the graph collection 76 `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). 77 name: A string, the name of the layer. 78 """ 79 80 def __init__(self, filters, 81 kernel_size, 82 strides=1, 83 padding='valid', 84 data_format='channels_last', 85 dilation_rate=1, 86 activation=None, 87 use_bias=True, 88 kernel_initializer=None, 89 bias_initializer=init_ops.zeros_initializer(), 90 kernel_regularizer=None, 91 bias_regularizer=None, 92 activity_regularizer=None, 93 kernel_constraint=None, 94 bias_constraint=None, 95 trainable=True, 96 name=None, 97 **kwargs): 98 super(Conv1D, self).__init__( 99 filters=filters, 100 kernel_size=kernel_size, 101 strides=strides, 102 padding=padding, 103 data_format=data_format, 104 dilation_rate=dilation_rate, 105 activation=activation, 106 use_bias=use_bias, 107 kernel_initializer=kernel_initializer, 108 bias_initializer=bias_initializer, 109 kernel_regularizer=kernel_regularizer, 110 bias_regularizer=bias_regularizer, 111 activity_regularizer=activity_regularizer, 112 kernel_constraint=kernel_constraint, 113 bias_constraint=bias_constraint, 114 trainable=trainable, 115 name=name, **kwargs) 116 117 118 @deprecation.deprecated( 119 date=None, 120 instructions='Use `tf.keras.layers.Conv1D` instead.') 121 @tf_export(v1=['layers.conv1d']) 122 def conv1d(inputs, 123 filters, 124 kernel_size, 125 strides=1, 126 padding='valid', 127 data_format='channels_last', 128 dilation_rate=1, 129 activation=None, 130 use_bias=True, 131 kernel_initializer=None, 132 bias_initializer=init_ops.zeros_initializer(), 133 kernel_regularizer=None, 134 bias_regularizer=None, 135 activity_regularizer=None, 136 kernel_constraint=None, 137 bias_constraint=None, 138 trainable=True, 139 name=None, 140 reuse=None): 141 """Functional interface for 1D convolution layer (e.g. temporal convolution). 
142 143 This layer creates a convolution kernel that is convolved 144 (actually cross-correlated) with the layer input to produce a tensor of 145 outputs. If `use_bias` is True (and a `bias_initializer` is provided), 146 a bias vector is created and added to the outputs. Finally, if 147 `activation` is not `None`, it is applied to the outputs as well. 148 149 Arguments: 150 inputs: Tensor input. 151 filters: Integer, the dimensionality of the output space (i.e. the number 152 of filters in the convolution). 153 kernel_size: An integer or tuple/list of a single integer, specifying the 154 length of the 1D convolution window. 155 strides: An integer or tuple/list of a single integer, 156 specifying the stride length of the convolution. 157 Specifying any stride value != 1 is incompatible with specifying 158 any `dilation_rate` value != 1. 159 padding: One of `"valid"` or `"same"` (case-insensitive). 160 data_format: A string, one of `channels_last` (default) or `channels_first`. 161 The ordering of the dimensions in the inputs. 162 `channels_last` corresponds to inputs with shape 163 `(batch, length, channels)` while `channels_first` corresponds to 164 inputs with shape `(batch, channels, length)`. 165 dilation_rate: An integer or tuple/list of a single integer, specifying 166 the dilation rate to use for dilated convolution. 167 Currently, specifying any `dilation_rate` value != 1 is 168 incompatible with specifying any `strides` value != 1. 169 activation: Activation function. Set it to None to maintain a 170 linear activation. 171 use_bias: Boolean, whether the layer uses a bias. 172 kernel_initializer: An initializer for the convolution kernel. 173 bias_initializer: An initializer for the bias vector. If None, the default 174 initializer will be used. 175 kernel_regularizer: Optional regularizer for the convolution kernel. 176 bias_regularizer: Optional regularizer for the bias vector. 177 activity_regularizer: Optional regularizer function for the output. 
178 kernel_constraint: Optional projection function to be applied to the 179 kernel after being updated by an `Optimizer` (e.g. used to implement 180 norm constraints or value constraints for layer weights). The function 181 must take as input the unprojected variable and must return the 182 projected variable (which must have the same shape). Constraints are 183 not safe to use when doing asynchronous distributed training. 184 bias_constraint: Optional projection function to be applied to the 185 bias after being updated by an `Optimizer`. 186 trainable: Boolean, if `True` also add variables to the graph collection 187 `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). 188 name: A string, the name of the layer. 189 reuse: Boolean, whether to reuse the weights of a previous layer 190 by the same name. 191 192 Returns: 193 Output tensor. 194 195 Raises: 196 ValueError: if eager execution is enabled. 197 """ 198 layer = Conv1D( 199 filters=filters, 200 kernel_size=kernel_size, 201 strides=strides, 202 padding=padding, 203 data_format=data_format, 204 dilation_rate=dilation_rate, 205 activation=activation, 206 use_bias=use_bias, 207 kernel_initializer=kernel_initializer, 208 bias_initializer=bias_initializer, 209 kernel_regularizer=kernel_regularizer, 210 bias_regularizer=bias_regularizer, 211 activity_regularizer=activity_regularizer, 212 kernel_constraint=kernel_constraint, 213 bias_constraint=bias_constraint, 214 trainable=trainable, 215 name=name, 216 _reuse=reuse, 217 _scope=name) 218 return layer.apply(inputs) 219 220 221 @tf_export(v1=['layers.Conv2D']) 222 class Conv2D(keras_layers.Conv2D, base.Layer): 223 """2D convolution layer (e.g. spatial convolution over images). 224 225 This layer creates a convolution kernel that is convolved 226 (actually cross-correlated) with the layer input to produce a tensor of 227 outputs. If `use_bias` is True (and a `bias_initializer` is provided), 228 a bias vector is created and added to the outputs. 
Finally, if 229 `activation` is not `None`, it is applied to the outputs as well. 230 231 Arguments: 232 filters: Integer, the dimensionality of the output space (i.e. the number 233 of filters in the convolution). 234 kernel_size: An integer or tuple/list of 2 integers, specifying the 235 height and width of the 2D convolution window. 236 Can be a single integer to specify the same value for 237 all spatial dimensions. 238 strides: An integer or tuple/list of 2 integers, 239 specifying the strides of the convolution along the height and width. 240 Can be a single integer to specify the same value for 241 all spatial dimensions. 242 Specifying any stride value != 1 is incompatible with specifying 243 any `dilation_rate` value != 1. 244 padding: One of `"valid"` or `"same"` (case-insensitive). 245 data_format: A string, one of `channels_last` (default) or `channels_first`. 246 The ordering of the dimensions in the inputs. 247 `channels_last` corresponds to inputs with shape 248 `(batch, height, width, channels)` while `channels_first` corresponds to 249 inputs with shape `(batch, channels, height, width)`. 250 251 dilation_rate: An integer or tuple/list of 2 integers, specifying 252 the dilation rate to use for dilated convolution. 253 Can be a single integer to specify the same value for 254 all spatial dimensions. 255 Currently, specifying any `dilation_rate` value != 1 is 256 incompatible with specifying any stride value != 1. 257 activation: Activation function. Set it to None to maintain a 258 linear activation. 259 use_bias: Boolean, whether the layer uses a bias. 260 kernel_initializer: An initializer for the convolution kernel. 261 bias_initializer: An initializer for the bias vector. If None, the default 262 initializer will be used. 263 kernel_regularizer: Optional regularizer for the convolution kernel. 264 bias_regularizer: Optional regularizer for the bias vector. 265 activity_regularizer: Optional regularizer function for the output. 
266 kernel_constraint: Optional projection function to be applied to the 267 kernel after being updated by an `Optimizer` (e.g. used to implement 268 norm constraints or value constraints for layer weights). The function 269 must take as input the unprojected variable and must return the 270 projected variable (which must have the same shape). Constraints are 271 not safe to use when doing asynchronous distributed training. 272 bias_constraint: Optional projection function to be applied to the 273 bias after being updated by an `Optimizer`. 274 trainable: Boolean, if `True` also add variables to the graph collection 275 `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). 276 name: A string, the name of the layer. 277 """ 278 279 def __init__(self, filters, 280 kernel_size, 281 strides=(1, 1), 282 padding='valid', 283 data_format='channels_last', 284 dilation_rate=(1, 1), 285 activation=None, 286 use_bias=True, 287 kernel_initializer=None, 288 bias_initializer=init_ops.zeros_initializer(), 289 kernel_regularizer=None, 290 bias_regularizer=None, 291 activity_regularizer=None, 292 kernel_constraint=None, 293 bias_constraint=None, 294 trainable=True, 295 name=None, 296 **kwargs): 297 super(Conv2D, self).__init__( 298 filters=filters, 299 kernel_size=kernel_size, 300 strides=strides, 301 padding=padding, 302 data_format=data_format, 303 dilation_rate=dilation_rate, 304 activation=activation, 305 use_bias=use_bias, 306 kernel_initializer=kernel_initializer, 307 bias_initializer=bias_initializer, 308 kernel_regularizer=kernel_regularizer, 309 bias_regularizer=bias_regularizer, 310 activity_regularizer=activity_regularizer, 311 kernel_constraint=kernel_constraint, 312 bias_constraint=bias_constraint, 313 trainable=trainable, 314 name=name, **kwargs) 315 316 317 @deprecation.deprecated( 318 date=None, 319 instructions='Use `tf.keras.layers.Conv2D` instead.') 320 @tf_export(v1=['layers.conv2d']) 321 def conv2d(inputs, 322 filters, 323 kernel_size, 324 strides=(1, 1), 325 
padding='valid', 326 data_format='channels_last', 327 dilation_rate=(1, 1), 328 activation=None, 329 use_bias=True, 330 kernel_initializer=None, 331 bias_initializer=init_ops.zeros_initializer(), 332 kernel_regularizer=None, 333 bias_regularizer=None, 334 activity_regularizer=None, 335 kernel_constraint=None, 336 bias_constraint=None, 337 trainable=True, 338 name=None, 339 reuse=None): 340 """Functional interface for the 2D convolution layer. 341 342 This layer creates a convolution kernel that is convolved 343 (actually cross-correlated) with the layer input to produce a tensor of 344 outputs. If `use_bias` is True (and a `bias_initializer` is provided), 345 a bias vector is created and added to the outputs. Finally, if 346 `activation` is not `None`, it is applied to the outputs as well. 347 348 Arguments: 349 inputs: Tensor input. 350 filters: Integer, the dimensionality of the output space (i.e. the number 351 of filters in the convolution). 352 kernel_size: An integer or tuple/list of 2 integers, specifying the 353 height and width of the 2D convolution window. 354 Can be a single integer to specify the same value for 355 all spatial dimensions. 356 strides: An integer or tuple/list of 2 integers, 357 specifying the strides of the convolution along the height and width. 358 Can be a single integer to specify the same value for 359 all spatial dimensions. 360 Specifying any stride value != 1 is incompatible with specifying 361 any `dilation_rate` value != 1. 362 padding: One of `"valid"` or `"same"` (case-insensitive). 363 data_format: A string, one of `channels_last` (default) or `channels_first`. 364 The ordering of the dimensions in the inputs. 365 `channels_last` corresponds to inputs with shape 366 `(batch, height, width, channels)` while `channels_first` corresponds to 367 inputs with shape `(batch, channels, height, width)`. 368 369 dilation_rate: An integer or tuple/list of 2 integers, specifying 370 the dilation rate to use for dilated convolution. 
371 Can be a single integer to specify the same value for 372 all spatial dimensions. 373 Currently, specifying any `dilation_rate` value != 1 is 374 incompatible with specifying any stride value != 1. 375 activation: Activation function. Set it to None to maintain a 376 linear activation. 377 use_bias: Boolean, whether the layer uses a bias. 378 kernel_initializer: An initializer for the convolution kernel. 379 bias_initializer: An initializer for the bias vector. If None, the default 380 initializer will be used. 381 kernel_regularizer: Optional regularizer for the convolution kernel. 382 bias_regularizer: Optional regularizer for the bias vector. 383 activity_regularizer: Optional regularizer function for the output. 384 kernel_constraint: Optional projection function to be applied to the 385 kernel after being updated by an `Optimizer` (e.g. used to implement 386 norm constraints or value constraints for layer weights). The function 387 must take as input the unprojected variable and must return the 388 projected variable (which must have the same shape). Constraints are 389 not safe to use when doing asynchronous distributed training. 390 bias_constraint: Optional projection function to be applied to the 391 bias after being updated by an `Optimizer`. 392 trainable: Boolean, if `True` also add variables to the graph collection 393 `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). 394 name: A string, the name of the layer. 395 reuse: Boolean, whether to reuse the weights of a previous layer 396 by the same name. 397 398 Returns: 399 Output tensor. 400 401 Raises: 402 ValueError: if eager execution is enabled. 
403 """ 404 layer = Conv2D( 405 filters=filters, 406 kernel_size=kernel_size, 407 strides=strides, 408 padding=padding, 409 data_format=data_format, 410 dilation_rate=dilation_rate, 411 activation=activation, 412 use_bias=use_bias, 413 kernel_initializer=kernel_initializer, 414 bias_initializer=bias_initializer, 415 kernel_regularizer=kernel_regularizer, 416 bias_regularizer=bias_regularizer, 417 activity_regularizer=activity_regularizer, 418 kernel_constraint=kernel_constraint, 419 bias_constraint=bias_constraint, 420 trainable=trainable, 421 name=name, 422 _reuse=reuse, 423 _scope=name) 424 return layer.apply(inputs) 425 426 427 @tf_export(v1=['layers.Conv3D']) 428 class Conv3D(keras_layers.Conv3D, base.Layer): 429 """3D convolution layer (e.g. spatial convolution over volumes). 430 431 This layer creates a convolution kernel that is convolved 432 (actually cross-correlated) with the layer input to produce a tensor of 433 outputs. If `use_bias` is True (and a `bias_initializer` is provided), 434 a bias vector is created and added to the outputs. Finally, if 435 `activation` is not `None`, it is applied to the outputs as well. 436 437 Arguments: 438 filters: Integer, the dimensionality of the output space (i.e. the number 439 of filters in the convolution). 440 kernel_size: An integer or tuple/list of 3 integers, specifying the 441 depth, height and width of the 3D convolution window. 442 Can be a single integer to specify the same value for 443 all spatial dimensions. 444 strides: An integer or tuple/list of 3 integers, 445 specifying the strides of the convolution along the depth, 446 height and width. 447 Can be a single integer to specify the same value for 448 all spatial dimensions. 449 Specifying any stride value != 1 is incompatible with specifying 450 any `dilation_rate` value != 1. 451 padding: One of `"valid"` or `"same"` (case-insensitive). 452 data_format: A string, one of `channels_last` (default) or `channels_first`. 
453 The ordering of the dimensions in the inputs. 454 `channels_last` corresponds to inputs with shape 455 `(batch, depth, height, width, channels)` while `channels_first` 456 corresponds to inputs with shape 457 `(batch, channels, depth, height, width)`. 458 dilation_rate: An integer or tuple/list of 3 integers, specifying 459 the dilation rate to use for dilated convolution. 460 Can be a single integer to specify the same value for 461 all spatial dimensions. 462 Currently, specifying any `dilation_rate` value != 1 is 463 incompatible with specifying any stride value != 1. 464 activation: Activation function. Set it to None to maintain a 465 linear activation. 466 use_bias: Boolean, whether the layer uses a bias. 467 kernel_initializer: An initializer for the convolution kernel. 468 bias_initializer: An initializer for the bias vector. If None, the default 469 initializer will be used. 470 kernel_regularizer: Optional regularizer for the convolution kernel. 471 bias_regularizer: Optional regularizer for the bias vector. 472 activity_regularizer: Optional regularizer function for the output. 473 kernel_constraint: Optional projection function to be applied to the 474 kernel after being updated by an `Optimizer` (e.g. used to implement 475 norm constraints or value constraints for layer weights). The function 476 must take as input the unprojected variable and must return the 477 projected variable (which must have the same shape). Constraints are 478 not safe to use when doing asynchronous distributed training. 479 bias_constraint: Optional projection function to be applied to the 480 bias after being updated by an `Optimizer`. 481 trainable: Boolean, if `True` also add variables to the graph collection 482 `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). 483 name: A string, the name of the layer. 
484 """ 485 486 def __init__(self, filters, 487 kernel_size, 488 strides=(1, 1, 1), 489 padding='valid', 490 data_format='channels_last', 491 dilation_rate=(1, 1, 1), 492 activation=None, 493 use_bias=True, 494 kernel_initializer=None, 495 bias_initializer=init_ops.zeros_initializer(), 496 kernel_regularizer=None, 497 bias_regularizer=None, 498 activity_regularizer=None, 499 kernel_constraint=None, 500 bias_constraint=None, 501 trainable=True, 502 name=None, 503 **kwargs): 504 super(Conv3D, self).__init__( 505 filters=filters, 506 kernel_size=kernel_size, 507 strides=strides, 508 padding=padding, 509 data_format=data_format, 510 dilation_rate=dilation_rate, 511 activation=activation, 512 use_bias=use_bias, 513 kernel_initializer=kernel_initializer, 514 bias_initializer=bias_initializer, 515 kernel_regularizer=kernel_regularizer, 516 bias_regularizer=bias_regularizer, 517 activity_regularizer=activity_regularizer, 518 kernel_constraint=kernel_constraint, 519 bias_constraint=bias_constraint, 520 trainable=trainable, 521 name=name, **kwargs) 522 523 524 @deprecation.deprecated( 525 date=None, 526 instructions='Use `tf.keras.layers.Conv3D` instead.') 527 @tf_export(v1=['layers.conv3d']) 528 def conv3d(inputs, 529 filters, 530 kernel_size, 531 strides=(1, 1, 1), 532 padding='valid', 533 data_format='channels_last', 534 dilation_rate=(1, 1, 1), 535 activation=None, 536 use_bias=True, 537 kernel_initializer=None, 538 bias_initializer=init_ops.zeros_initializer(), 539 kernel_regularizer=None, 540 bias_regularizer=None, 541 activity_regularizer=None, 542 kernel_constraint=None, 543 bias_constraint=None, 544 trainable=True, 545 name=None, 546 reuse=None): 547 """Functional interface for the 3D convolution layer. 548 549 This layer creates a convolution kernel that is convolved 550 (actually cross-correlated) with the layer input to produce a tensor of 551 outputs. 
If `use_bias` is True (and a `bias_initializer` is provided), 552 a bias vector is created and added to the outputs. Finally, if 553 `activation` is not `None`, it is applied to the outputs as well. 554 555 Arguments: 556 inputs: Tensor input. 557 filters: Integer, the dimensionality of the output space (i.e. the number 558 of filters in the convolution). 559 kernel_size: An integer or tuple/list of 3 integers, specifying the 560 depth, height and width of the 3D convolution window. 561 Can be a single integer to specify the same value for 562 all spatial dimensions. 563 strides: An integer or tuple/list of 3 integers, 564 specifying the strides of the convolution along the depth, 565 height and width. 566 Can be a single integer to specify the same value for 567 all spatial dimensions. 568 Specifying any stride value != 1 is incompatible with specifying 569 any `dilation_rate` value != 1. 570 padding: One of `"valid"` or `"same"` (case-insensitive). 571 data_format: A string, one of `channels_last` (default) or `channels_first`. 572 The ordering of the dimensions in the inputs. 573 `channels_last` corresponds to inputs with shape 574 `(batch, depth, height, width, channels)` while `channels_first` 575 corresponds to inputs with shape 576 `(batch, channels, depth, height, width)`. 577 dilation_rate: An integer or tuple/list of 3 integers, specifying 578 the dilation rate to use for dilated convolution. 579 Can be a single integer to specify the same value for 580 all spatial dimensions. 581 Currently, specifying any `dilation_rate` value != 1 is 582 incompatible with specifying any stride value != 1. 583 activation: Activation function. Set it to None to maintain a 584 linear activation. 585 use_bias: Boolean, whether the layer uses a bias. 586 kernel_initializer: An initializer for the convolution kernel. 587 bias_initializer: An initializer for the bias vector. If None, the default 588 initializer will be used. 
589 kernel_regularizer: Optional regularizer for the convolution kernel. 590 bias_regularizer: Optional regularizer for the bias vector. 591 activity_regularizer: Optional regularizer function for the output. 592 kernel_constraint: Optional projection function to be applied to the 593 kernel after being updated by an `Optimizer` (e.g. used to implement 594 norm constraints or value constraints for layer weights). The function 595 must take as input the unprojected variable and must return the 596 projected variable (which must have the same shape). Constraints are 597 not safe to use when doing asynchronous distributed training. 598 bias_constraint: Optional projection function to be applied to the 599 bias after being updated by an `Optimizer`. 600 trainable: Boolean, if `True` also add variables to the graph collection 601 `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). 602 name: A string, the name of the layer. 603 reuse: Boolean, whether to reuse the weights of a previous layer 604 by the same name. 605 606 Returns: 607 Output tensor. 608 609 Raises: 610 ValueError: if eager execution is enabled. 611 """ 612 layer = Conv3D( 613 filters=filters, 614 kernel_size=kernel_size, 615 strides=strides, 616 padding=padding, 617 data_format=data_format, 618 dilation_rate=dilation_rate, 619 activation=activation, 620 use_bias=use_bias, 621 kernel_initializer=kernel_initializer, 622 bias_initializer=bias_initializer, 623 kernel_regularizer=kernel_regularizer, 624 bias_regularizer=bias_regularizer, 625 activity_regularizer=activity_regularizer, 626 kernel_constraint=kernel_constraint, 627 bias_constraint=bias_constraint, 628 trainable=trainable, 629 name=name, 630 _reuse=reuse, 631 _scope=name) 632 return layer.apply(inputs) 633 634 635 @tf_export(v1=['layers.SeparableConv1D']) 636 class SeparableConv1D(keras_layers.SeparableConv1D, base.Layer): 637 """Depthwise separable 1D convolution. 
638 639 This layer performs a depthwise convolution that acts separately on 640 channels, followed by a pointwise convolution that mixes channels. 641 If `use_bias` is True and a bias initializer is provided, 642 it adds a bias vector to the output. 643 It then optionally applies an activation function to produce the final output. 644 645 Arguments: 646 filters: Integer, the dimensionality of the output space (i.e. the number 647 of filters in the convolution). 648 kernel_size: A single integer specifying the spatial 649 dimensions of the filters. 650 strides: A single integer specifying the strides 651 of the convolution. 652 Specifying any `stride` value != 1 is incompatible with specifying 653 any `dilation_rate` value != 1. 654 padding: One of `"valid"` or `"same"` (case-insensitive). 655 data_format: A string, one of `channels_last` (default) or `channels_first`. 656 The ordering of the dimensions in the inputs. 657 `channels_last` corresponds to inputs with shape 658 `(batch, length, channels)` while `channels_first` corresponds to 659 inputs with shape `(batch, channels, length)`. 660 dilation_rate: A single integer, specifying 661 the dilation rate to use for dilated convolution. 662 Currently, specifying any `dilation_rate` value != 1 is 663 incompatible with specifying any stride value != 1. 664 depth_multiplier: The number of depthwise convolution output channels for 665 each input channel. The total number of depthwise convolution output 666 channels will be equal to `num_filters_in * depth_multiplier`. 667 activation: Activation function. Set it to None to maintain a 668 linear activation. 669 use_bias: Boolean, whether the layer uses a bias. 670 depthwise_initializer: An initializer for the depthwise convolution kernel. 671 pointwise_initializer: An initializer for the pointwise convolution kernel. 672 bias_initializer: An initializer for the bias vector. If None, the default 673 initializer will be used. 
674 depthwise_regularizer: Optional regularizer for the depthwise 675 convolution kernel. 676 pointwise_regularizer: Optional regularizer for the pointwise 677 convolution kernel. 678 bias_regularizer: Optional regularizer for the bias vector. 679 activity_regularizer: Optional regularizer function for the output. 680 depthwise_constraint: Optional projection function to be applied to the 681 depthwise kernel after being updated by an `Optimizer` (e.g. used for 682 norm constraints or value constraints for layer weights). The function 683 must take as input the unprojected variable and must return the 684 projected variable (which must have the same shape). Constraints are 685 not safe to use when doing asynchronous distributed training. 686 pointwise_constraint: Optional projection function to be applied to the 687 pointwise kernel after being updated by an `Optimizer`. 688 bias_constraint: Optional projection function to be applied to the 689 bias after being updated by an `Optimizer`. 690 trainable: Boolean, if `True` also add variables to the graph collection 691 `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). 692 name: A string, the name of the layer. 
693 """ 694 695 def __init__(self, filters, 696 kernel_size, 697 strides=1, 698 padding='valid', 699 data_format='channels_last', 700 dilation_rate=1, 701 depth_multiplier=1, 702 activation=None, 703 use_bias=True, 704 depthwise_initializer=None, 705 pointwise_initializer=None, 706 bias_initializer=init_ops.zeros_initializer(), 707 depthwise_regularizer=None, 708 pointwise_regularizer=None, 709 bias_regularizer=None, 710 activity_regularizer=None, 711 depthwise_constraint=None, 712 pointwise_constraint=None, 713 bias_constraint=None, 714 trainable=True, 715 name=None, 716 **kwargs): 717 super(SeparableConv1D, self).__init__( 718 filters=filters, 719 kernel_size=kernel_size, 720 strides=strides, 721 padding=padding, 722 data_format=data_format, 723 dilation_rate=dilation_rate, 724 depth_multiplier=depth_multiplier, 725 activation=activation, 726 use_bias=use_bias, 727 depthwise_initializer=depthwise_initializer, 728 pointwise_initializer=pointwise_initializer, 729 bias_initializer=bias_initializer, 730 depthwise_regularizer=depthwise_regularizer, 731 pointwise_regularizer=pointwise_regularizer, 732 bias_regularizer=bias_regularizer, 733 activity_regularizer=activity_regularizer, 734 depthwise_constraint=depthwise_constraint, 735 pointwise_constraint=pointwise_constraint, 736 bias_constraint=bias_constraint, 737 trainable=trainable, 738 name=name, 739 **kwargs) 740 741 742 @tf_export(v1=['layers.SeparableConv2D']) 743 class SeparableConv2D(keras_layers.SeparableConv2D, base.Layer): 744 """Depthwise separable 2D convolution. 745 746 This layer performs a depthwise convolution that acts separately on 747 channels, followed by a pointwise convolution that mixes channels. 748 If `use_bias` is True and a bias initializer is provided, 749 it adds a bias vector to the output. 750 It then optionally applies an activation function to produce the final output. 751 752 Arguments: 753 filters: Integer, the dimensionality of the output space (i.e. 
      the number of filters in the convolution).
    kernel_size: A tuple or list of 2 integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
    strides: A tuple or list of 2 positive integers specifying the strides
      of the convolution. Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any `stride` value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.
    dilation_rate: An integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    depthwise_initializer: An initializer for the depthwise convolution kernel.
    pointwise_initializer: An initializer for the pointwise convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    depthwise_regularizer: Optional regularizer for the depthwise
      convolution kernel.
    pointwise_regularizer: Optional regularizer for the pointwise
      convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
805 """ 806 807 def __init__(self, filters, 808 kernel_size, 809 strides=(1, 1), 810 padding='valid', 811 data_format='channels_last', 812 dilation_rate=(1, 1), 813 depth_multiplier=1, 814 activation=None, 815 use_bias=True, 816 depthwise_initializer=None, 817 pointwise_initializer=None, 818 bias_initializer=init_ops.zeros_initializer(), 819 depthwise_regularizer=None, 820 pointwise_regularizer=None, 821 bias_regularizer=None, 822 activity_regularizer=None, 823 depthwise_constraint=None, 824 pointwise_constraint=None, 825 bias_constraint=None, 826 trainable=True, 827 name=None, 828 **kwargs): 829 super(SeparableConv2D, self).__init__( 830 filters=filters, 831 kernel_size=kernel_size, 832 strides=strides, 833 padding=padding, 834 data_format=data_format, 835 dilation_rate=dilation_rate, 836 depth_multiplier=depth_multiplier, 837 activation=activation, 838 use_bias=use_bias, 839 depthwise_initializer=depthwise_initializer, 840 pointwise_initializer=pointwise_initializer, 841 bias_initializer=bias_initializer, 842 depthwise_regularizer=depthwise_regularizer, 843 pointwise_regularizer=pointwise_regularizer, 844 bias_regularizer=bias_regularizer, 845 activity_regularizer=activity_regularizer, 846 depthwise_constraint=depthwise_constraint, 847 pointwise_constraint=pointwise_constraint, 848 bias_constraint=bias_constraint, 849 trainable=trainable, 850 name=name, 851 **kwargs) 852 853 854 @deprecation.deprecated( 855 date=None, 856 instructions='Use `tf.keras.layers.SeparableConv1D` instead.') 857 @tf_export(v1=['layers.separable_conv1d']) 858 def separable_conv1d(inputs, 859 filters, 860 kernel_size, 861 strides=1, 862 padding='valid', 863 data_format='channels_last', 864 dilation_rate=1, 865 depth_multiplier=1, 866 activation=None, 867 use_bias=True, 868 depthwise_initializer=None, 869 pointwise_initializer=None, 870 bias_initializer=init_ops.zeros_initializer(), 871 depthwise_regularizer=None, 872 pointwise_regularizer=None, 873 bias_regularizer=None, 874 
                     activity_regularizer=None,
                     depthwise_constraint=None,
                     pointwise_constraint=None,
                     bias_constraint=None,
                     trainable=True,
                     name=None,
                     reuse=None):
  """Functional interface for the depthwise separable 1D convolution layer.

  This layer performs a depthwise convolution that acts separately on
  channels, followed by a pointwise convolution that mixes channels.
  If `use_bias` is True and a bias initializer is provided,
  it adds a bias vector to the output.
  It then optionally applies an activation function to produce the final output.

  Arguments:
    inputs: Input tensor.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A single integer specifying the spatial
      dimensions of the filters.
    strides: A single integer specifying the strides
      of the convolution.
      Specifying any `stride` value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, length, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, length)`.
    dilation_rate: A single integer, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    depthwise_initializer: An initializer for the depthwise convolution kernel.
    pointwise_initializer: An initializer for the pointwise convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    depthwise_regularizer: Optional regularizer for the depthwise
      convolution kernel.
    pointwise_regularizer: Optional regularizer for the pointwise
      convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
946 """ 947 layer = SeparableConv1D( 948 filters=filters, 949 kernel_size=kernel_size, 950 strides=strides, 951 padding=padding, 952 data_format=data_format, 953 dilation_rate=dilation_rate, 954 depth_multiplier=depth_multiplier, 955 activation=activation, 956 use_bias=use_bias, 957 depthwise_initializer=depthwise_initializer, 958 pointwise_initializer=pointwise_initializer, 959 bias_initializer=bias_initializer, 960 depthwise_regularizer=depthwise_regularizer, 961 pointwise_regularizer=pointwise_regularizer, 962 bias_regularizer=bias_regularizer, 963 activity_regularizer=activity_regularizer, 964 depthwise_constraint=depthwise_constraint, 965 pointwise_constraint=pointwise_constraint, 966 bias_constraint=bias_constraint, 967 trainable=trainable, 968 name=name, 969 _reuse=reuse, 970 _scope=name) 971 return layer.apply(inputs) 972 973 974 @deprecation.deprecated( 975 date=None, 976 instructions='Use `tf.keras.layers.SeparableConv2D` instead.') 977 @tf_export(v1=['layers.separable_conv2d']) 978 def separable_conv2d(inputs, 979 filters, 980 kernel_size, 981 strides=(1, 1), 982 padding='valid', 983 data_format='channels_last', 984 dilation_rate=(1, 1), 985 depth_multiplier=1, 986 activation=None, 987 use_bias=True, 988 depthwise_initializer=None, 989 pointwise_initializer=None, 990 bias_initializer=init_ops.zeros_initializer(), 991 depthwise_regularizer=None, 992 pointwise_regularizer=None, 993 bias_regularizer=None, 994 activity_regularizer=None, 995 depthwise_constraint=None, 996 pointwise_constraint=None, 997 bias_constraint=None, 998 trainable=True, 999 name=None, 1000 reuse=None): 1001 """Functional interface for the depthwise separable 2D convolution layer. 1002 1003 This layer performs a depthwise convolution that acts separately on 1004 channels, followed by a pointwise convolution that mixes channels. 1005 If `use_bias` is True and a bias initializer is provided, 1006 it adds a bias vector to the output. 
  It then optionally applies an activation function to produce the final output.

  Arguments:
    inputs: Input tensor.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A tuple or list of 2 integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
    strides: A tuple or list of 2 positive integers specifying the strides
      of the convolution. Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any `stride` value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.
    dilation_rate: An integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    depthwise_initializer: An initializer for the depthwise convolution kernel.
    pointwise_initializer: An initializer for the pointwise convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    depthwise_regularizer: Optional regularizer for the depthwise
      convolution kernel.
    pointwise_regularizer: Optional regularizer for the pointwise
      convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
1071 """ 1072 layer = SeparableConv2D( 1073 filters=filters, 1074 kernel_size=kernel_size, 1075 strides=strides, 1076 padding=padding, 1077 data_format=data_format, 1078 dilation_rate=dilation_rate, 1079 depth_multiplier=depth_multiplier, 1080 activation=activation, 1081 use_bias=use_bias, 1082 depthwise_initializer=depthwise_initializer, 1083 pointwise_initializer=pointwise_initializer, 1084 bias_initializer=bias_initializer, 1085 depthwise_regularizer=depthwise_regularizer, 1086 pointwise_regularizer=pointwise_regularizer, 1087 bias_regularizer=bias_regularizer, 1088 activity_regularizer=activity_regularizer, 1089 depthwise_constraint=depthwise_constraint, 1090 pointwise_constraint=pointwise_constraint, 1091 bias_constraint=bias_constraint, 1092 trainable=trainable, 1093 name=name, 1094 _reuse=reuse, 1095 _scope=name) 1096 return layer.apply(inputs) 1097 1098 1099 @tf_export(v1=['layers.Conv2DTranspose']) 1100 class Conv2DTranspose(keras_layers.Conv2DTranspose, base.Layer): 1101 """Transposed 2D convolution layer (sometimes called 2D Deconvolution). 1102 1103 The need for transposed convolutions generally arises 1104 from the desire to use a transformation going in the opposite direction 1105 of a normal convolution, i.e., from something that has the shape of the 1106 output of some convolution to something that has the shape of its input 1107 while maintaining a connectivity pattern that is compatible with 1108 said convolution. 1109 1110 Arguments: 1111 filters: Integer, the dimensionality of the output space (i.e. the number 1112 of filters in the convolution). 1113 kernel_size: A tuple or list of 2 positive integers specifying the spatial 1114 dimensions of the filters. Can be a single integer to specify the same 1115 value for all spatial dimensions. 1116 strides: A tuple or list of 2 positive integers specifying the strides 1117 of the convolution. Can be a single integer to specify the same value for 1118 all spatial dimensions. 
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
1145 """ 1146 1147 def __init__(self, filters, 1148 kernel_size, 1149 strides=(1, 1), 1150 padding='valid', 1151 data_format='channels_last', 1152 activation=None, 1153 use_bias=True, 1154 kernel_initializer=None, 1155 bias_initializer=init_ops.zeros_initializer(), 1156 kernel_regularizer=None, 1157 bias_regularizer=None, 1158 activity_regularizer=None, 1159 kernel_constraint=None, 1160 bias_constraint=None, 1161 trainable=True, 1162 name=None, 1163 **kwargs): 1164 super(Conv2DTranspose, self).__init__( 1165 filters=filters, 1166 kernel_size=kernel_size, 1167 strides=strides, 1168 padding=padding, 1169 data_format=data_format, 1170 activation=activation, 1171 use_bias=use_bias, 1172 kernel_initializer=kernel_initializer, 1173 bias_initializer=bias_initializer, 1174 kernel_regularizer=kernel_regularizer, 1175 bias_regularizer=bias_regularizer, 1176 activity_regularizer=activity_regularizer, 1177 kernel_constraint=kernel_constraint, 1178 bias_constraint=bias_constraint, 1179 trainable=trainable, 1180 name=name, 1181 **kwargs) 1182 1183 1184 @deprecation.deprecated( 1185 date=None, 1186 instructions='Use `tf.keras.layers.Conv2DTranspose` instead.') 1187 @tf_export(v1=['layers.conv2d_transpose']) 1188 def conv2d_transpose(inputs, 1189 filters, 1190 kernel_size, 1191 strides=(1, 1), 1192 padding='valid', 1193 data_format='channels_last', 1194 activation=None, 1195 use_bias=True, 1196 kernel_initializer=None, 1197 bias_initializer=init_ops.zeros_initializer(), 1198 kernel_regularizer=None, 1199 bias_regularizer=None, 1200 activity_regularizer=None, 1201 kernel_constraint=None, 1202 bias_constraint=None, 1203 trainable=True, 1204 name=None, 1205 reuse=None): 1206 """Functional interface for transposed 2D convolution layer. 

  The need for transposed convolutions generally arises
  from the desire to use a transformation going in the opposite direction
  of a normal convolution, i.e., from something that has the shape of the
  output of some convolution to something that has the shape of its input
  while maintaining a connectivity pattern that is compatible with
  said convolution.

  Arguments:
    inputs: Input tensor.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A tuple or list of 2 positive integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
    strides: A tuple or list of 2 positive integers specifying the strides
      of the convolution. Can be a single integer to specify the same value for
      all spatial dimensions.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.
    activation: Activation function. Set it to `None` to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If `None`, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Construct the layer, then apply it to `inputs` immediately.  The
  # leading-underscore kwargs are internal to the v1 layers framework.
  layer = Conv2DTranspose(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  return layer.apply(inputs)


@tf_export(v1=['layers.Conv3DTranspose'])
class Conv3DTranspose(keras_layers.Conv3DTranspose, base.Layer):
  """Transposed 3D convolution layer (sometimes called 3D Deconvolution).

  Arguments:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of 3 integers, specifying the
      depth, height and width of the 3D convolution window.
      Can be a single integer to specify the same value for all spatial
      dimensions.
    strides: An integer or tuple/list of 3 integers, specifying the strides
      of the convolution along the depth, height and width.
      Can be a single integer to specify the same value for all spatial
      dimensions.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, depth, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, depth, height, width)`.
    activation: Activation function. Set it to `None` to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If `None`, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
1324 """ 1325 1326 def __init__(self, 1327 filters, 1328 kernel_size, 1329 strides=(1, 1, 1), 1330 padding='valid', 1331 data_format='channels_last', 1332 activation=None, 1333 use_bias=True, 1334 kernel_initializer=None, 1335 bias_initializer=init_ops.zeros_initializer(), 1336 kernel_regularizer=None, 1337 bias_regularizer=None, 1338 activity_regularizer=None, 1339 kernel_constraint=None, 1340 bias_constraint=None, 1341 trainable=True, 1342 name=None, 1343 **kwargs): 1344 super(Conv3DTranspose, self).__init__( 1345 filters=filters, 1346 kernel_size=kernel_size, 1347 strides=strides, 1348 padding=padding, 1349 data_format=data_format, 1350 activation=activation, 1351 use_bias=use_bias, 1352 kernel_initializer=kernel_initializer, 1353 bias_initializer=bias_initializer, 1354 kernel_regularizer=kernel_regularizer, 1355 bias_regularizer=bias_regularizer, 1356 activity_regularizer=activity_regularizer, 1357 kernel_constraint=kernel_constraint, 1358 bias_constraint=bias_constraint, 1359 trainable=trainable, 1360 name=name, 1361 **kwargs) 1362 1363 1364 @deprecation.deprecated( 1365 date=None, 1366 instructions='Use `tf.keras.layers.Conv3DTranspose` instead.') 1367 @tf_export(v1=['layers.conv3d_transpose']) 1368 def conv3d_transpose(inputs, 1369 filters, 1370 kernel_size, 1371 strides=(1, 1, 1), 1372 padding='valid', 1373 data_format='channels_last', 1374 activation=None, 1375 use_bias=True, 1376 kernel_initializer=None, 1377 bias_initializer=init_ops.zeros_initializer(), 1378 kernel_regularizer=None, 1379 bias_regularizer=None, 1380 activity_regularizer=None, 1381 kernel_constraint=None, 1382 bias_constraint=None, 1383 trainable=True, 1384 name=None, 1385 reuse=None): 1386 """Functional interface for transposed 3D convolution layer. 1387 1388 Arguments: 1389 inputs: Input tensor. 1390 filters: Integer, the dimensionality of the output space (i.e. the number 1391 of filters in the convolution). 
    kernel_size: A tuple or list of 3 positive integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
    strides: A tuple or list of 3 positive integers specifying the strides
      of the convolution. Can be a single integer to specify the same value for
      all spatial dimensions.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, depth, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, depth, height, width)`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Construct the layer, then apply it to `inputs` immediately.  The
  # leading-underscore kwargs are internal to the v1 layers framework
  # and implement the `reuse` behavior documented above.
  layer = Conv3DTranspose(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  return layer.apply(inputs)


# Aliases: alternate public names for the classes and functions above.

Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Deconvolution2D = Deconv2D = Conv2DTranspose
Convolution3DTranspose = Deconvolution3D = Deconv3D = Conv3DTranspose
convolution1d = conv1d
convolution2d = conv2d
convolution3d = conv3d
separable_convolution2d = separable_conv2d
convolution2d_transpose = deconvolution2d = deconv2d = conv2d_transpose
convolution3d_transpose = deconvolution3d = deconv3d = conv3d_transpose