Searched full:layer (Results 1226 - 1250 of 6628) sorted by null
/external/tensorflow/tensorflow/contrib/layers/ |
README.md | 13 Functions that produce layer operations and associated weight & bias variables. Signatures will vary for different functions, but they will often take many of
|
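The contrib/layers README entry above describes functions that build a layer op together with its weight and bias variables in one call. As a hedged illustration only (a TF 1.x-style sketch, not code taken from that README or tree), such a function is typically used like this:

```python
# Sketch assuming the TF 1.x contrib API referenced in the result above.
import tensorflow as tf

inputs = tf.placeholder(tf.float32, shape=[None, 784])

# Each call produces the layer operation and implicitly creates the
# associated weight and bias variables for that layer.
hidden = tf.contrib.layers.fully_connected(inputs, num_outputs=256,
                                           activation_fn=tf.nn.relu)
logits = tf.contrib.layers.fully_connected(hidden, num_outputs=10,
                                           activation_fn=None)
```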
/external/tensorflow/tensorflow/contrib/metrics/python/ops/ |
set_ops.py | 15 """Python layer for set_ops."""
|
/external/tensorflow/tensorflow/contrib/tensorrt/ |
tensorrt_test.cc | 77 // Add the hidden layer. 78 auto layer = network->addFullyConnected(*input, 1, weights.get(), bias.get()); local 79 EXPECT_NE(layer, nullptr); 81 auto output = layer->getOutput(0);
|
/external/tensorflow/tensorflow/examples/learn/ |
mnist.py | 33 """2-layer convolution model.""" 38 # First conv layer will compute 32 features for each 5x5 patch 49 # Second conv layer will compute 64 features for each 5x5 patch. 62 # Densely connected layer with 1024 neurons.
|
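The mnist.py comments above outline a two-layer convolution model: 32 features per 5x5 patch, then 64 features per 5x5 patch, then a densely connected layer with 1024 neurons. The following is a rough Keras reconstruction of that architecture for illustration, not the actual code from examples/learn/mnist.py:

```python
# Illustrative sketch of the architecture described in the mnist.py snippets.
import tensorflow as tf

model = tf.keras.Sequential([
    # First conv layer: 32 features for each 5x5 patch.
    tf.keras.layers.Conv2D(32, (5, 5), padding='same', activation='relu',
                           input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D((2, 2)),
    # Second conv layer: 64 features for each 5x5 patch.
    tf.keras.layers.Conv2D(64, (5, 5), padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    # Densely connected layer with 1024 neurons.
    tf.keras.layers.Dense(1024, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax'),  # 10 digit classes
])
```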
/external/tensorflow/tensorflow/python/keras/_impl/keras/layers/ |
convolutional.py | 29 from tensorflow.python.keras._impl.keras.engine import Layer 45 class Conv1D(tf_convolutional_layers.Conv1D, Layer): 46 """1D convolution layer (e.g. temporal convolution). 48 This layer creates a convolution kernel that is convolved 49 with the layer input over a single spatial (or temporal) dimension 55 When using this layer as the first layer in a model, 83 use_bias: Boolean, whether the layer uses a bias vector. 90 the output of the layer (its "activation").. 159 class Conv2D(tf_convolutional_layers.Conv2D, Layer) [all...] |
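The Conv1D docstring above notes that the layer convolves a kernel over a single spatial or temporal dimension and that, when used as the first layer in a model, it needs an input shape. A minimal present-day tf.keras usage sketch (shapes chosen for illustration; this is not the _impl module code shown above):

```python
# Conv1D as the first layer of a model: 16 filters, kernel width 3,
# applied to sequences of 100 timesteps with 8 channels each.
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Conv1D(filters=16, kernel_size=3, activation='relu',
                           input_shape=(100, 8)),
])

# With default 'valid' padding, 100 timesteps shrink to 100 - 3 + 1 = 98.
x = tf.random.normal([4, 100, 8])   # batch of 4 sequences
print(model(x).shape)               # (4, 98, 16)
```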
/external/vulkan-validation-layers/tests/ |
vkvalidatelayerdoc.sh | 26 # Validate that layer database matches source contents
|
/external/webrtc/webrtc/base/objc/ |
RTCCameraPreviewView.m | 44 return (AVCaptureVideoPreviewLayer *)self.layer;
|
/external/webrtc/webrtc/ |
config.h | 88 // thresholds in between layers, we have one additional layer. One threshold 93 // get 100k in one temporal layer and 5k in the other, just that the bitrate 94 // in the first temporal layer should not exceed 100k. 95 // TODO(pbos): Apart from a special case for two-layer screencast these
|
/external/webrtc/webrtc/examples/objc/AppRTCDemo/ios/ |
ARDVideoCallView.m | 54 _cameraSwitchButton.layer.cornerRadius = kButtonSize / 2; 55 _cameraSwitchButton.layer.masksToBounds = YES; 65 _hangupButton.layer.cornerRadius = kButtonSize / 2; 66 _hangupButton.layer.masksToBounds = YES;
|
/external/webrtc/webrtc/modules/pacing/ |
packet_router.h | 32 // on the simulcast layer in RTPVideoHeader.
|
/external/webrtc/webrtc/modules/rtp_rtcp/source/rtcp_packet/ |
rtpfb.h | 21 // RTPFB: Transport layer feedback message.
|
/external/webrtc/webrtc/modules/video_coding/codecs/vp8/include/ |
vp8_common_types.h | 22 {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
|
/external/webrtc/webrtc/modules/video_coding/codecs/vp8/ |
realtime_temporal_layers.cc | 20 // input frame rate in order to avoid having the base layer being relaying at 236 // Regardless of pattern the frame after a base layer sync will always 237 // be a layer sync. 262 // Pattern of temporal layer ids.
|
/external/webrtc/webrtc/modules/video_coding/ |
decoding_state_unittest.cc | 95 // 1 layer up - still good. 106 // Lost non-base layer packet => should update sync parameter. 114 // Now insert the next non-base layer (belonging to a next tl0PicId). 126 // Next base layer (dropped interim non-base layers) - should update sync. 206 // Identify sync/non-sync when more than one layer. 248 // Insert next base layer 260 // A key frame is always a base layer. 274 // (continuous base layer is not enough ) 315 // Update base layer, lose packet 1 (sync flag on, layer 2), insert packet [all...] |
/external/webrtc/webrtc/video/ |
screenshare_loopback.cc | 72 "Temporal layer to show or analyze. -1 to disable filtering."); 140 "Spatial layer to show or analyze. -1 to disable filtering."); 161 "Comma separated values describing SpatialLayer for layer #0."); 168 "Comma separated values describing SpatialLayer for layer #1.");
|
video_loopback.cc | 74 "Temporal layer to show or analyze. -1 to disable filtering."); 142 "Spatial layer to show or analyze. -1 to disable filtering."); 163 "Comma separated values describing SpatialLayer for layer #0."); 170 "Comma separated values describing SpatialLayer for layer #1.");
|
/frameworks/base/core/java/android/os/ |
GraphicsEnvironment.java | 68 * Store the layer paths available to the loader. 81 * Set up layer search paths for all apps 110 // Prepend the debug layer path as a searchable path. 118 Log.i(TAG, "Debug layer list: " + layers);
|
/frameworks/base/libs/hwui/ |
GpuMemoryTracker.h | 37 Layer,
|
Snapshot.cpp | 31 , layer(nullptr) 48 , layer(s->layer) 105 // TODO: This is incorrect, when we start rendering into a new layer,
|
/frameworks/base/libs/hwui/pipeline/skia/ |
GLFunctorDrawable.cpp | 106 // create an offscreen layer and clear it 122 // the offscreen layer 128 // we are drawing into a (clipped) offscreen layer so we must update the clip and matrix 129 // from device coordinates to the layer's coordinates
|
/frameworks/base/libs/hwui/tests/common/scenes/ |
HwLayerAnimation.cpp | 24 "Tests the hardware layer codepath.",
|
/frameworks/base/libs/hwui/tests/unit/ |
RenderPropertiesTests.cpp | 41 // Too big - can't have layer bigger than max texture size
|
/frameworks/base/packages/SystemUI/src/com/android/keyguard/ |
AlphaOptimizedImageButton.java | 25 * layer when alpha is changed.
|
AlphaOptimizedLinearLayout.java | 25 * layer when alpha is changed.
|
AlphaOptimizedRelativeLayout.java | 25 * layer when alpha is changed.
|
Completed in 302 milliseconds