    Searched refs:start_token (Results 1 - 9 of 9)

  /toolchain/binutils/binutils-2.27/binutils/
mclex.c 329 unichar *start_token; local
339 start_token = input_stream_pos;
355 yylval.ustr = get_diff (input_stream_pos, start_token);
371 start_token = input_stream_pos;
378 start_token++;
385 yylval.ustr = get_diff (input_stream_pos, start_token);
397 yylval.ustr = get_diff (input_stream_pos, start_token);
404 ++start_token;
409 yylval.ustr = get_diff (input_stream_pos, start_token);
431 ret = mc_token (start_token, (size_t) (input_stream_pos - start_token))
    [all...]
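The mclex.c hits suggest a hand-written scanner that remembers where a token began (start_token) and later copies the span up to the current read position (input_stream_pos) into the semantic value. A minimal sketch of that pattern follows; it is not the binutils implementation, and get_diff/scan_word here are illustrative stand-ins.

    #include <string>

    static const char *input_stream_pos;   // current read position in the input

    // Text of the token just scanned, i.e. [begin, end).
    static std::string get_diff(const char *end, const char *begin) {
      return std::string(begin, end);
    }

    static std::string scan_word() {
      const char *start_token = input_stream_pos;   // token starts here
      while (*input_stream_pos && *input_stream_pos != ' ')
        ++input_stream_pos;                         // advance to the delimiter
      return get_diff(input_stream_pos, start_token);
    }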
  /external/tensorflow/tensorflow/contrib/lite/testing/
tokenize.cc 28 auto start_token = [&](char c) { local
64 start_token(*it);
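In the TensorFlow Lite testing tokenizer, start_token is a local lambda rather than a pointer. A rough sketch of that shape, assuming a comma-separated input and hypothetical helper names:

    #include <string>
    #include <vector>

    std::vector<std::string> Tokenize(const std::string& input) {
      std::vector<std::string> tokens;
      std::string current;
      // start_token begins a new token with character c.
      auto start_token = [&](char c) { current.assign(1, c); };
      for (auto it = input.begin(); it != input.end(); ++it) {
        if (*it == ',') {
          if (!current.empty()) { tokens.push_back(current); current.clear(); }
        } else if (current.empty()) {
          start_token(*it);          // first character of a new token
        } else {
          current.push_back(*it);    // continue the current token
        }
      }
      if (!current.empty()) tokens.push_back(current);
      return tokens;
    }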
  /external/antlr/antlr-3.4/runtime/Ruby/lib/antlr3/tree/
debug.rb 96 def set_token_boundaries( tree, start_token, stop_token )
97 super( tree, start_token, stop_token )
98 return unless tree && start_token && stop_token
100 start_token.token_index, stop_token.token_index )
  /system/tools/hidl/c2hal/
c2hal_l.ll 53 extern int start_token;
102 if (start_token) {
103 int token = start_token;
104 start_token = 0;
249 int start_token;
276 start_token = START_HEADER;
297 start_token = START_EXPR;
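The c2hal_l.ll hits point at the common "initial pseudo-token" idiom: the driver sets start_token before parsing so one grammar can be entered at different start rules (START_HEADER vs. START_EXPR), and the lexer returns it exactly once. A minimal sketch under that assumption; token values and real_yylex are stand-ins, not the actual c2hal scanner:

    enum { START_HEADER = 1, START_EXPR = 2 };

    int start_token = 0;             // entry point selected by the caller

    int real_yylex() { return 0; }   // stand-in for the ordinary scanner

    int yylex() {
      if (start_token) {
        int token = start_token;
        start_token = 0;             // emit the pseudo-token only once
        return token;
      }
      return real_yylex();
    }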
  /external/mksh/src/
syn.c 30 int start_token; /* token than began nesting (eg, FOR) */ member in struct:nesting_state
868 if (nesting.start_token) {
869 c = nesting.start_token;
909 nesting.start_token = tok;
938 nesting.start_token = 0;
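In the mksh parser, start_token lives in a nesting_state and records which keyword opened the current construct (e.g. FOR), so later code can check or report it. A small illustrative sketch, with hypothetical helper names:

    struct NestingState {
      int start_token;   // token that began nesting (e.g. FOR), 0 if none
      int depth;
    };

    void EnterConstruct(NestingState* nesting, int tok) {
      nesting->start_token = tok;   // remember the opener
      ++nesting->depth;
    }

    void LeaveConstruct(NestingState* nesting) {
      nesting->start_token = 0;     // construct closed; nothing pending
      --nesting->depth;
    }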
  /external/protobuf/src/google/protobuf/compiler/
parser.cc 1442 io::Tokenizer::Token start_token; local
1518 io::Tokenizer::Token start_token; local
    [all...]
  /external/libtextclassifier/
feature-processor.cc 348 TokenIndex start_token = kInvalidIndex; local
360 if (start_token == kInvalidIndex) {
361 start_token = i;
366 return {start_token, end_token};
    [all...]
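The feature-processor.cc hits suggest scanning tokens for the first and last ones inside a selection and returning the pair as a token-index span, with kInvalidIndex marking "not found". A sketch of that pattern (not the libtextclassifier code; the types and the selection test are illustrative):

    #include <utility>
    #include <vector>

    using TokenIndex = int;
    constexpr TokenIndex kInvalidIndex = -1;

    std::pair<TokenIndex, TokenIndex> TokenSpan(const std::vector<bool>& selected) {
      TokenIndex start_token = kInvalidIndex;
      TokenIndex end_token = kInvalidIndex;
      for (TokenIndex i = 0; i < static_cast<TokenIndex>(selected.size()); ++i) {
        if (selected[i]) {
          if (start_token == kInvalidIndex) {
            start_token = i;      // first selected token
          }
          end_token = i + 1;      // exclusive end of the span
        }
      }
      return {start_token, end_token};
    }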
  /external/tensorflow/tensorflow/contrib/seq2seq/python/kernel_tests/
basic_decoder_test.py 512 start_token = 0
516 np.ones(batch_size) * start_token,
593 start_token = 0
597 np.ones(batch_size) * start_token,
beam_search_decoder_test.py 332 start_token = 0
370 start_tokens=array_ops.fill([batch_size_tensor], start_token),
