/prebuilts/go/darwin-x86/test/fixedbugs/ |
bug424.go |   9 // at which embedding level it is and in which order
             10 // embedding is done.
|
bug414.go | 7 // Issue 1743: test embedding of imported types with private methods.
|
/prebuilts/go/linux-x86/test/fixedbugs/ |
bug424.go |   9 // at which embedding level it is and in which order
             10 // embedding is done.
|
bug414.go | 7 // Issue 1743: test embedding of imported types with private methods.
|
/external/tensorflow/tensorflow/contrib/gan/python/features/python/ |
conditioning_utils_impl.py |  77 """Get a dense embedding vector from a one-hot encoding."""
                              80 embedding = variable_scope.get_variable(
                              81     'embedding', [num_tokens, embedding_size])
                              83     embedding, label_id, name='token_to_embedding')
                             100   embedding_size: The size of the class embedding.
|
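For orientation, the snippet above builds a [num_tokens, embedding_size] table and gathers one row per class label. A minimal NumPy sketch of that lookup (sizes and variable names here are illustrative, not taken from the file):

    import numpy as np

    # Illustrative sizes; the real utility derives them from its arguments.
    num_tokens, embedding_size = 10, 4
    rng = np.random.default_rng(0)

    # Stands in for the 'embedding' variable created via get_variable.
    embedding = rng.standard_normal((num_tokens, embedding_size)).astype(np.float32)

    one_hot = np.zeros(num_tokens, dtype=np.float32)
    one_hot[3] = 1.0                    # class 3, one-hot encoded
    label_id = int(np.argmax(one_hot))  # recover the integer class id
    dense = embedding[label_id]         # the embedding_lookup step
    assert dense.shape == (embedding_size,)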
/external/tensorflow/tensorflow/python/kernel_tests/ |
embedding_ops_test.py | 251 embedding = embedding_ops.embedding_lookup(p, ids)
                        253 tf_result = embedding.eval(feed_dict=feed_dict)
                        256 self.assertShapeEqual(np_result, embedding)
                        263 embedding = embedding_ops.embedding_lookup(
                        266 self.assertAllEqual(embedding.eval(), [[1.0]])
                        273 embedding = embedding_ops.embedding_lookup(
                        279 self.assertAllEqual(embedding.eval(), 2 * normalized.eval())
                        291 embedding = embedding_ops.embedding_lookup(p_variable, ids)
                        297 tf_result = embedding.eval(feed_dict=feed_dict)
                        301 self.assertShapeEqual(np_result, embedding)
                        [all...]
|
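These tests compare embedding_lookup against a plain NumPy gather, including the max_norm variant. A sketch of both behaviors (a NumPy stand-in, not the TensorFlow implementation):

    import numpy as np

    params = np.arange(12, dtype=np.float32).reshape(4, 3)  # 4 embeddings of size 3
    ids = np.array([0, 2, 2, 1])

    # embedding_lookup(params, ids) is a row gather: result[i] = params[ids[i]].
    result = params[ids]
    assert result.shape == (4, 3)

    # With max_norm set, each gathered row is clipped to that L2 norm; the
    # normalized-output assertion above exercises this kind of clipping.
    max_norm = 1.0
    norms = np.linalg.norm(result, axis=1, keepdims=True)
    clipped = result * np.minimum(1.0, max_norm / norms)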
/prebuilts/go/darwin-x86/test/fixedbugs/bug424.dir/ |
main.go |  7 // at which embedding level it is and in which order
           8 // embedding is done.
|
/prebuilts/go/linux-x86/test/fixedbugs/bug424.dir/ |
main.go |  7 // at which embedding level it is and in which order
           8 // embedding is done.
|
/external/tensorflow/tensorflow/contrib/losses/python/metric_learning/ |
metric_loss_ops_test.py |  99 embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
                          108 pdist_matrix = pairwise_distance_np(embedding, squared=True)
                          140 embeddings=ops.convert_to_tensor(embedding),
                          155 embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
                          163 pdist_matrix = pairwise_distance_np(embedding)
                          193 embeddings=ops.convert_to_tensor(embedding),
                          522 embedding, labels = blobs
                          523 embedding = (embedding - embedding.mean(axis=0)) / embedding.std(axis=0
                          [all...]
|
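pairwise_distance_np above is the reference the metric-learning losses are tested against: all-pairs Euclidean distances over the embedding rows. A self-contained sketch of such a helper (shapes assumed, not copied from the test):

    import numpy as np

    def pairwise_distance_np(embedding, squared=False):
        """All-pairs Euclidean distances between embedding rows."""
        # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, computed for every pair.
        sq_norms = np.sum(embedding ** 2, axis=1)
        d2 = sq_norms[:, None] - 2.0 * embedding @ embedding.T + sq_norms[None, :]
        d2 = np.maximum(d2, 0.0)  # guard against tiny negatives from rounding
        return d2 if squared else np.sqrt(d2)

    emb = np.random.rand(6, 3).astype(np.float32)
    pdist = pairwise_distance_np(emb, squared=True)
    assert pdist.shape == (6, 6) and np.allclose(np.diag(pdist), 0.0, atol=1e-5)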
/prebuilts/go/darwin-x86/src/vendor/golang_org/x/text/unicode/bidi/ |
bracket.go |  73 // takes the direction type for the start-of-sentence and the embedding level.
             196 // b If any strong type (either L or R) matching the embedding direction is
             197 // found, set the type for both brackets in the pair to match the embedding
             206 // c Otherwise, if a strong type (opposite the embedding direction) is
             210 // the embedding direction, then set the type for both brackets in the pair
             212 // the embedding direction.
             248 // assuming the given embedding direction.
             251 // it returns this type. Otherwise it returns the embedding direction.
             262 return dir // type matching embedding direction found
             299 // no strong opposite type found before - use embedding (c.2
             [all...]
|
core.go |  52 // level is the embedding level of a character. Even embedding levels indicate
          100 // close brackets, after normalization). The embedding levels are optional, but
          101 // may be supplied to encode embedding levels of styled text.
          133 // If no externally supplied paragraph embedding level, use default.
          138 // Initialize result levels to paragraph embedding level.
          167 // 5) resolving implicit embedding levels
          250 // default embedding level when no strong types found is 0.
          261 // This stack will store the embedding levels and override and isolated
          332 // Push new embedding level, override status, and isolate
          [all...]
|
bidi.go |  24 // This API tries to avoid dealing with embedding levels for now. Under the hood
           27 // embedding hierarchy, though.
           57 // ICU allows the user to define embedding levels. This may be used, for example,
|
/prebuilts/go/linux-x86/src/vendor/golang_org/x/text/unicode/bidi/ |
bracket.go |  73 // takes the direction type for the start-of-sentence and the embedding level.
             196 // b If any strong type (either L or R) matching the embedding direction is
             197 // found, set the type for both brackets in the pair to match the embedding
             206 // c Otherwise, if a strong type (opposite the embedding direction) is
             210 // the embedding direction, then set the type for both brackets in the pair
             212 // the embedding direction.
             248 // assuming the given embedding direction.
             251 // it returns this type. Otherwise it returns the embedding direction.
             262 return dir // type matching embedding direction found
             299 // no strong opposite type found before - use embedding (c.2
             [all...]
|
core.go |  52 // level is the embedding level of a character. Even embedding levels indicate
          100 // close brackets, after normalization). The embedding levels are optional, but
          101 // may be supplied to encode embedding levels of styled text.
          133 // If no externally supplied paragraph embedding level, use default.
          138 // Initialize result levels to paragraph embedding level.
          167 // 5) resolving implicit embedding levels
          250 // default embedding level when no strong types found is 0.
          261 // This stack will store the embedding levels and override and isolated
          332 // Push new embedding level, override status, and isolate
          [all...]
|
bidi.go |  24 // This API tries to avoid dealing with embedding levels for now. Under the hood
           27 // embedding hierarchy, though.
           57 // ICU allows the user to define embedding levels. This may be used, for example,
|
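Context for the bracket.go/core.go comments above: in UAX #9, which this bidi package implements, every character is assigned an embedding level, and even levels read left-to-right while odd levels read right-to-left. Python's unicodedata exposes the per-character bidi classes the algorithm starts from, which makes for a quick illustration:

    import unicodedata

    def is_rtl_level(level):
        # UAX #9: even embedding levels are LTR, odd levels are RTL.
        return level % 2 == 1

    assert not is_rtl_level(0) and is_rtl_level(1)

    # Strong types: 'L' is left-to-right, 'R'/'AL' right-to-left; 'EN' is a
    # European number, 'ON' an other-neutral such as a bracket.
    for ch in ['A', '\u05d0', '7', '(']:
        print(repr(ch), unicodedata.bidirectional(ch))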
/external/tensorflow/tensorflow/contrib/legacy_seq2seq/python/ops/ |
seq2seq.py |  34 - embedding_rnn_seq2seq: The basic model with input embedding.
              35 - embedding_tied_rnn_seq2seq: The tied model with input embedding.
              36 - embedding_attention_seq2seq: Advanced model with input embedding and
              40 - one2many_rnn_seq2seq: The embedding model with multiple decoders.
              82 def _extract_argmax_and_embed(embedding,
              88   embedding: embedding tensor for symbols.
             104   emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
             242   """RNN decoder with embedding and a pure-decoding option.
             248   num_symbols: Integer, how many symbols come into the embedding
             [all...]
|
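The _extract_argmax_and_embed helper above builds the greedy-decoding loop function: argmax the previous output, then feed that symbol's embedding as the next input. A NumPy sketch of the idea (simplified; the real helper also handles an optional output projection and stop_gradient):

    import numpy as np

    def extract_argmax_and_embed(embedding):
        """Return a loop function that greedily decodes the previous logits
        and feeds the chosen symbol's embedding as the next decoder input."""
        def loop_function(prev_logits):
            prev_symbol = np.argmax(prev_logits, axis=-1)  # greedy pick per row
            return embedding[prev_symbol]                  # embedding_lookup step
        return loop_function

    vocab, dim = 5, 3
    embedding = np.eye(vocab, dim, dtype=np.float32)  # toy embedding table
    loop_fn = extract_argmax_and_embed(embedding)
    next_input = loop_fn(np.array([[0.1, 2.0, 0.3, 0.0, 0.0]]))  # picks symbol 1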
/external/tensorflow/tensorflow/contrib/seq2seq/python/ops/ |
helper.py | 261 def __init__(self, inputs, sequence_length, embedding, sampling_probability,
            268   embedding: A callable that takes a vector tensor of `ids` (argmax ids),
            283     [embedding, sampling_probability]):
            284   if callable(embedding):
            285     self._embedding_fn = embedding
            288     lambda ids: embedding_ops.embedding_lookup(embedding, ids))
            495 result through an embedding layer to get the next input.
            498 def __init__(self, embedding, start_tokens, end_token):
            502   embedding: A callable that takes a vector tensor of `ids` (argmax ids),
            512   if callable(embedding)
            [all...]
|
beam_search_decoder.py | 170 embedding,
                         181   embedding: A callable that takes a vector tensor of `ids` (argmax ids),
                         207 if callable(embedding):
                         208   self._embedding_fn = embedding
                         211   lambda ids: embedding_ops.embedding_lookup(embedding, ids))
|
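Both files above normalize their embedding argument the same way: accept either a callable from ids to vectors or a raw embedding table, and wrap the table in a lookup. A sketch of that pattern (NumPy indexing stands in for embedding_ops.embedding_lookup):

    import numpy as np

    def make_embedding_fn(embedding):
        """Accept a callable mapping ids -> vectors, or a raw table to wrap."""
        if callable(embedding):
            return embedding
        return lambda ids: embedding[ids]  # embedding_lookup stand-in

    table = np.random.rand(8, 4).astype(np.float32)
    embed_fn = make_embedding_fn(table)
    start_inputs = embed_fn(np.array([2, 2, 2]))  # e.g., tiled start_tokens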
/external/tensorflow/tensorflow/contrib/eager/python/examples/rnn_ptb/ |
rnn_ptb.py |  70 class Embedding(tf.layers.Layer):
              71   """An Embedding layer."""
              74     super(Embedding, self).__init__(**kwargs)
              79     self.embedding = self.add_variable(
              87     return tf.nn.embedding_lookup(self.embedding, x)
             112     self.embedding = self.track_layer(Embedding(vocab_size, embedding_dim))
             136     y = self.embedding(input_seq)
             347     "--embedding-dim", type=int, default=200, help="Embedding dimension."
             [all...]
|
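The Embedding layer above owns a [vocab_size, embedding_dim] variable and applies tf.nn.embedding_lookup in its call. A dependency-free sketch of the same shape contract (the initializer and vocab size here are illustrative; the real layer uses add_variable):

    import numpy as np

    class Embedding:
        """A trainable [vocab_size, embedding_dim] table queried by integer ids."""
        def __init__(self, vocab_size, embedding_dim, seed=0):
            rng = np.random.default_rng(seed)
            self.embedding = rng.uniform(-0.1, 0.1, (vocab_size, embedding_dim))

        def __call__(self, x):
            return self.embedding[x]  # tf.nn.embedding_lookup equivalent

    layer = Embedding(vocab_size=10000, embedding_dim=200)  # 200 = the --embedding-dim default
    y = layer(np.array([[1, 5, 42]]))  # [batch, time] -> [batch, time, dim]
    assert y.shape == (1, 3, 200)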
/prebuilts/go/darwin-x86/test/ |
notinheap.go | 14 // Types embedding notinheap types must be notinheap.
|
/prebuilts/go/linux-x86/test/ |
notinheap.go | 14 // Types embedding notinheap types must be notinheap.
|
/external/tensorflow/tensorflow/python/ops/ |
control_flow_ops_test.py | 206 embedding = embedding_ops.embedding_lookup(embedding_matrix + 0.0, [0])
                           207 cost += math_ops.reduce_sum(embedding)
                           231 embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
                           232 cost += math_ops.reduce_sum(embedding)
                           253 embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
                           256 lambda: cost + math_ops.reduce_sum(embedding))
                           267 embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
                           269 math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
                           270 math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding
                           [all...]
|
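The pattern in these tests: perform a lookup inside a conditional or loop body and accumulate its sum into a cost, so gradients must flow through the control flow. A plain-Python sketch of the accumulation the graph version expresses (the for loop stands in for while_loop/cond):

    import numpy as np

    embedding_matrix = np.random.rand(4, 3).astype(np.float32)
    cost = 0.0
    for _ in range(5):                     # stands in for control_flow_ops.while_loop
        embedding = embedding_matrix[[0]]  # embedding_lookup(embedding_matrix, [0])
        cost += float(np.sum(embedding))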
/external/tensorflow/tensorflow/python/eager/ |
backprop_test.py | 126 embedding = resource_variable_ops.ResourceVariable(
                   127     initial_value=random_init, dtype=dtypes.float32, name='embedding')
                   130   tape.watch_variable(embedding)
                   131   embedded_x = embedding_ops.embedding_lookup(embedding, x)
                   153 opt.apply_gradients([(grad, embedding)])
                   154 self.assertAllClose(expected, embedding.read_value())
|
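The eager test above differentiates a lookup with respect to the embedding variable. For loss = sum(embedding[ids]), that gradient is a scatter-add of ones into the looked-up rows (TensorFlow returns it sparsely as IndexedSlices; this is the dense equivalent in NumPy):

    import numpy as np

    embedding = np.random.rand(5, 3).astype(np.float32)
    ids = np.array([1, 1, 4])

    grad = np.zeros_like(embedding)
    np.add.at(grad, ids, 1.0)  # row 1 gets 2.0 (looked up twice), row 4 gets 1.0
    assert grad[1, 0] == 2.0 and grad[4, 0] == 1.0 and grad[0, 0] == 0.0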
/external/tensorflow/tensorflow/contrib/seq2seq/python/kernel_tests/ |
beam_search_decoder_test.py | 340 embedding = np.random.randn(vocab_size, embedding_dim).astype(np.float32)
                              369 embedding=embedding,
|
/external/capstone/ |
config.mk | 79 # enable OS X kernel embedding support. If 'CAPSTONE_USE_SYS_DYN_MEM = yes',
|