OpenGrok
Searched refs:inference_type (Results 1 - 10 of 10)
/external/tensorflow/tensorflow/lite/python/

tflite_convert.py
  124  if flags.inference_type:
  125  converter.inference_type = _parse_inference_type(flags.inference_type,
  126  "inference_type")
  140  if converter.inference_type == lite_constants.QUANTIZED_UINT8:
  181  if converter.inference_type == lite_constants.QUANTIZED_UINT8:
  182  print("--post_training_quantize quantizes a graph of inference_type "
  184  converter.inference_type = lite_constants.FLOAT
  300  "--inference_type",
convert.py
  234  inference_type=lite_constants.FLOAT,
  259  inference_type: Target data type of real-number arrays in the output file.
  263  Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
  324  toco.inference_type = convert_dtype_to_tflite_type(inference_type)
  329  toco.inference_input_type = toco.inference_type
convert_test.py
  78   inference_type=lite_constants.QUANTIZED_UINT8,
  92   inference_type=lite_constants.QUANTIZED_UINT8)
  105  inference_type=lite_constants.FLOAT)
  141  inference_type=lite_constants.QUANTIZED_UINT8,
  186  inference_type=lite_constants.QUANTIZED_UINT8)
lite.py
  322  inference_type: Target data type of real-number arrays in the output file.
  326  Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
  431  self.inference_type = constants.FLOAT
  730  "inference_type": self.inference_type,
  747  if self.inference_type == constants.QUANTIZED_UINT8:
  [all ...]
lite_test.py
  158  converter.inference_type = lite_constants.QUANTIZED_UINT8
  203  converter.inference_type = lite_constants.QUANTIZED_UINT8
  461  converter.inference_type = lite_constants.QUANTIZED_UINT8
  [all ...]
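The Python hits above all revolve around the converter's inference_type attribute (FLOAT by default, optionally QUANTIZED_UINT8). As a point of reference, here is a minimal sketch of setting it through the TF 1.x-style TFLiteConverter API; the graph file name, tensor names, and quantization stats are illustrative assumptions, not values taken from the results above.

  # Minimal sketch: setting inference_type on a TF 1.x-style TFLiteConverter.
  # "model.pb", the tensor names, and the (mean, std_dev) stats are assumptions.
  import tensorflow as tf

  converter = tf.lite.TFLiteConverter.from_frozen_graph(
      "model.pb", input_arrays=["input"], output_arrays=["output"])
  # Request quantized (uint8) inference, mirroring the QUANTIZED_UINT8 checks above.
  converter.inference_type = tf.uint8
  # A quantized inference_type needs quantization stats for each input array.
  converter.quantized_input_stats = {"input": (127.5, 127.5)}  # (mean, std_dev)
  tflite_model = converter.convert()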
/external/tensorflow/tensorflow/lite/toco/

toco_cmdline_flags.cc
  79   Flag("inference_type", parsed_flags.inference_type.bind(),
  80   parsed_flags.inference_type.default_value(),
  87   "If not specified, inference_type is used. "
  252  PARSE_TOCO_FLAG(IODataType, inference_type, FlagRequirement::kNone);
  321  if (toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) {
  323  << "--post_training_quantize quantizes a graph of inference_type "
toco_tooling.cc
  163  type = ConvertIODataTypeToArrayDataType(toco_flags.inference_type());
  181  // The enum value QUANTIZED_UINT8 for --inference_type and
  189  // quantizing that model. In order to have --inference_type=QUANTIZED_UINT8
  191  // already quantized, if --inference_type is quantized (so we're not
  242  const IODataType inference_type = toco_flags.inference_type();  [local]
  246  (inference_type == QUANTIZED_UINT8 || inference_type == QUANTIZED_INT16);
args.h
  169  Arg<string> inference_type;  [member in struct:toco::ParsedTocoFlags]
/external/tensorflow/tensorflow/lite/toco/python/

toco_from_protos_test.py
  52   toco_flags.inference_type = types_pb2.FLOAT
/external/tensorflow/tensorflow/lite/testing/

generate_examples.py
  143  inference_type = "FLOAT"
  147  inference_type = "QUANTIZED_UINT8"
  149  " --inference_type=%s" % inference_type +
  [all ...]
Completed in 3863 milliseconds