OpenGrok
Searched defs:Tokenize (Results 1 - 16 of 16)
/external/google-breakpad/src/processor/tokenize.cc
    45: bool Tokenize(char *line,
/external/chromium-trace/catapult/common/py_utils/py_utils/refactor/offset_token.py
    8: import tokenize
    55: def Tokenize(f):
    66: tokenize_tokens = tokenize.generate_tokens(f.readline)
    90: while offset_tokens[0].type == tokenize.NL:
    100: # Convert OffsetTokens to tokenize tokens.
    113: # tokenize can't handle whitespace before line continuations.
    115: return tokenize.untokenize(tokenize_tokens).replace('\\\n', ' \\\n')
/external/libtextclassifier/smartselect/tokenizer.cc
    67: std::vector<Token> Tokenizer::Tokenize(const std::string& utf8_text) const {
/external/libtextclassifier/smartselect/feature-processor.cc
    183: std::vector<Token> FeatureProcessor::Tokenize(
    187: return tokenizer_.Tokenize(utf8_text);
    204: return tokenizer_.Tokenize(utf8_text);
    546: *tokens = Tokenize(context);
    743: // There is no span to tokenize.
    760: std::vector<Token> tokens = tokenizer_.Tokenize(text);
/prebuilts/go/darwin-x86/src/cmd/asm/internal/lex/lex.go
    145: // Tokenize turns a string into a list of Tokens; used to parse the -D flag and in tests.
    146: func Tokenize(str string) []Token {
/prebuilts/go/linux-x86/src/cmd/asm/internal/lex/lex.go
    145: // Tokenize turns a string into a list of Tokens; used to parse the -D flag and in tests.
    146: func Tokenize(str string) []Token {
/system/tools/hidl/utils/StringHelper.cpp
    64: void StringHelper::Tokenize(const std::string &in,
    103: Tokenize(in, &components);
    118: Tokenize(in, &components);
    127: Tokenize(in, &components);
    136: Tokenize(in, &components);
/frameworks/base/tools/aapt2/util/Util.h
    272: inline Tokenizer Tokenize(const android::StringPiece& str, char sep) { return Tokenizer(str, sep); }
/external/google-breakpad/src/testing/gtest/scripts/pump.py
    382: def Tokenize(s):
    579: tokens = list(Tokenize(pump_src_text))
/external/googletest/googletest/scripts/pump.py
    382: def Tokenize(s):
    579: tokens = list(Tokenize(pump_src_text))
/external/protobuf/gtest/scripts/pump.py
    376: def Tokenize(s):
    571: for token in Tokenize(s):
/external/v8/testing/gtest/scripts/pump.py
    382: def Tokenize(s):
    579: tokens = list(Tokenize(pump_src_text))
/external/vulkan-validation-layers/tests/gtest-1.7.0/scripts/pump.py
    382: def Tokenize(s):
    579: tokens = list(Tokenize(pump_src_text))
/external/vixl/src/aarch64/debugger-aarch64.cc
    63: static Token* Tokenize(const char* arg);
    100: static Token* Tokenize(const char* arg);
    122: static Token* Tokenize(const char* arg);
    148: static Token* Tokenize(const char* arg);
    166: static Token* Tokenize(const char* arg);
    183: static Token* Tokenize(const char* arg);
    216: static Token* Tokenize(const char* arg);
    831: Token* Token::Tokenize(const char* arg) {
    836: // The order is important. For example Identifier::Tokenize would consider
    839: Token* token = RegisterToken::Tokenize(arg)
    [all ...]
/packages/apps/Test/connectivity/sl4n/rapidjson/include/rapidjson/reader.h
    [all ...]
/prebuilts/tools/common/m2/repository/net/sourceforge/saxon/saxon/9.1.0.8/saxon-9.1.0.8.jar
Completed in 212 milliseconds