OpenGrok
Home
Sort by relevance
Sort by last modified time
Full Search
Definition
Symbol
File Path
History
|
|
Help
Searched
refs:p_choose_i
(Results
1 - 2
of
2
) sorted by relevance
/external/tensorflow/tensorflow/contrib/seq2seq/python/kernel_tests/
attention_wrapper_test.py
435
def monotonic_attention_explicit(
p_choose_i
, previous_attention):
440
for j in range(1,
p_choose_i
.shape[0]):
441
out.append((1 -
p_choose_i
[j - 1])*out[j - 1] + previous_attention[j])
442
return
p_choose_i
*np.array(out)
445
p_choose_i
= np.random.uniform(size=(10, 20)).astype(np.float32)
453
for p, a in zip(
p_choose_i
, previous_attention)])
458
p_choose_i
, previous_attention, 'recursive').eval()
467
# Generate new
p_choose_i
for parallel, which is unstable when
p_choose_i
[n]
469
p_choose_i
= np.random.uniform(0, 0.9, size=(10, 20)).astype(np.float32
[
all
...]
/external/tensorflow/tensorflow/contrib/seq2seq/python/ops/
attention_wrapper.py
604
def monotonic_attention(
p_choose_i
, previous_attention, mode):
616
p_choose_i
: Probability of choosing input sequence/memory element i. Should
633
when input_sequence_length is long and/or
p_choose_i
has entries very
635
* 'hard' requires that the probabilities in
p_choose_i
are all either 0
646
p_choose_i
= ops.convert_to_tensor(
p_choose_i
, name="
p_choose_i
")
651
batch_size =
p_choose_i
.shape[0].value or array_ops.shape(
p_choose_i
)[0]
652
# Compute [1, 1 -
p_choose_i
[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]
[
all
...]
Completed in 177 milliseconds