/external/tensorflow/tensorflow/lite/toco/graph_transformations/

fuse_activation_functions.cc:
     21  #include "tensorflow/lite/toco/model.h"
     28  ::tensorflow::Status FuseActivationFunctions::Run(Model* model,
     32  const auto ac_it = model->operators.begin() + op_index;
     42  Operator* op = GetOpWithOutput(*model, ac_op->inputs[0]);
     46  if (CountTrueOutputs(*model, *op) > 1) {
     56  int count_ops_consuming_output = CountOpsWithInput(*model, ac_op->inputs[0]);
     66  if (!IsDiscardableArray(*model, op->outputs[0])) {
    101  model->EraseArray(ac_op->inputs[0]);
    103  model->operators.erase(ac_it)
    [all...]

read_array_minmax_and_narrow_range_from_fake_quant.cc:
     22  #include "tensorflow/lite/toco/model.h"
     30  bool ApplyAttrsToArray(GraphTransformation* transformation, Model* model,
     34  auto& annotated_array = model->GetArray(array_name);
     55  Model* model, std::size_t op_index, bool* modified) {
     57  const auto fakequant_it = model->operators.begin() + op_index;
     76  changed |= ApplyAttrsToArray(this, model, *fq_op, fq_op->inputs[0]);
     77  changed |= ApplyAttrsToArray(this, model, *fq_op, fq_op->outputs[0]);

remove_trivial_concatenation_input.cc:
     22  #include "tensorflow/lite/toco/model.h"
     28  ::tensorflow::Status RemoveTrivialConcatenationInput::Run(Model* model,
     39  const auto concat_it = model->operators.begin() + op_index;
     47  const auto& input_array = model->GetArray(input);
     63  if (IsDiscardableArray(*model, input) &&
     64  CountOpsWithInput(*model, input) == 1) {
     65  model->EraseArray(input);

remove_trivial_fake_quant.cc:
     23  #include "tensorflow/lite/toco/model.h"
     31  bool IsFakeQuantTrivial(GraphTransformation* transformation, const Model& model,
     42  auto* producing_op = GetOpWithOutput(model, fakequant_op.inputs[0]);
     67  ::tensorflow::Status RemoveTrivialFakeQuant::Run(Model* model,
     71  const auto op_it = model->operators.begin() + op_index;
     78  if (!IsFakeQuantTrivial(this, *model, *fakequant_op)) {
     86  *modified = RemoveTrivialPassthroughOp(this, model, op_index);

resolve_batch_normalization.cc:
     21  #include "tensorflow/lite/toco/model.h"
     28  ::tensorflow::Status ResolveBatchNormalization::Run(Model* model,
     32  auto bn_it = model->operators.begin() + op_index;
     39  auto& mean_array = model->GetArray(bn_op->inputs[1]);
     40  const auto& multiplier_array = model->GetArray(bn_op->inputs[2]);
     41  const auto& offset_array = model->GetArray(bn_op->inputs[3]);
     43  CHECK(IsConstantParameterArray(*model, bn_op->inputs[1]) &&
     44  IsConstantParameterArray(*model, bn_op->inputs[2]) &&
     45  IsConstantParameterArray(*model, bn_op->inputs[3])
    [all...]

resolve_constant_select.cc:
     19  #include "tensorflow/lite/toco/model.h"
     30  ::tensorflow::Status ResolveConstantSelect::Run(Model* model,
     34  auto it = model->operators.begin() + op_index;
     43  auto& output_array = model->GetArray(op->outputs[0]);
     54  if (!IsConstantParameterArray(*model, op->inputs[0])) {
     57  const Array& cond_array = model->GetArray(op->inputs[0]);
     79  RemoveTrivialPassthroughOp(this, model, op_index, cond_value ? 1 : 2);

resolve_slice_attributes.cc:
     21  #include "tensorflow/lite/toco/model.h"
     27  ::tensorflow::Status ResolveSliceAttributes::Run(Model* model,
     31  const auto slice_it = model->operators.begin() + op_index;
     39  if (!IsConstantParameterArray(*model, op->inputs[1]))
     41  if (!IsConstantParameterArray(*model, op->inputs[2]))
     44  const auto& begin_array = model->GetArray(op->inputs[1]);
     47  const auto& size_array = model->GetArray(op->inputs[2]);

move_binary_operator_before_reshape.cc:
     18  #include "tensorflow/lite/toco/model.h"
     52  // collapsing of some reshapes. The WaveNet model in particular benefits from
     57  ::tensorflow::Status MoveBinaryOperatorBeforeReshape::Run(Model* model,
     61  const auto binary_it = model->operators.begin() + op_index;
     81  IsConstantParameterArray(*model, binary_op->inputs[0]),
     82  IsConstantParameterArray(*model, binary_op->inputs[1]),
     99  model->GetArray(binary_op->inputs[variable_input_idx]);
    107  model->GetArray(binary_op->inputs[constant_input_idx]).shape(),
    108  model->GetArray(binary_op->inputs[variable_input_idx]).shape()))
    [all...]
/external/u-boot/arch/arm/dts/

am335x-bone.dts:
     14  model = "TI AM335x BeagleBone";

armada-388.dtsi:
     50  model = "Marvell Armada 388 family SoC";

at91sam9g15ek.dts:
     15  model = "Atmel AT91SAM9G15-EK";

at91sam9g20ek.dts:
     12  model = "Atmel at91sam9g20ek";

at91sam9x25ek.dts:
     14  model = "Atmel AT91SAM9X25-EK";

salvator-x.dtsi:
     14  model = "Renesas Salvator-X board";
/external/tensorflow/tensorflow/lite/toco/

toco_tooling.cc:
     37  // CHECK-fails if the model contains a kUnsupported operation.
     38  void CheckUnsupportedOperations(const Model& model) {
     40  for (auto& op : model.operators) {
    154  void SetFinalDataTypeOnInputs(const TocoFlags& toco_flags, Model* model) {
    169  for (int i = 0; i < model->flags.input_arrays_size(); i++) {
    170  string const& array_name = model->flags.input_arrays(i).name();
    171  auto* array = &model->GetArray(array_name);
    186  // already mixed 8-bit / 16-bit quantized model in TFLITE format an
    208  std::unique_ptr<Model> model;    (local)
    [all...]
/external/tensorflow/tensorflow/lite/tools/optimize/

quantize_model.cc:
     28  #include "tensorflow/lite/model.h"
     36  ModelT* model, ErrorReporter* error_reporter) {
     37  for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs.size();
     39  SubGraphT* subgraph = model->subgraphs.at(subgraph_idx).get();
     40  internal::SubgraphQuantizer quantizer(model, subgraph, error_reporter);
     46  model->operator_codes[op->opcode_index]->builtin_code;
     55  flatbuffers::Offset<Model> output_model_location =
     56  Model::Pack(*builder, model);
quantize_weights.cc:
     27  #include "tensorflow/lite/model.h"
     49  std::vector<ConsumerOpInfo> GetTensorConsumers(const ModelT* model,
     53  // instead doing one sweep for the entire model.
    149  const ModelT* model, const OperatorT* op, uint64_t weights_min_num_elements,
    151  SubGraphT* subgraph = model->subgraphs.at(0).get();
    153  model->operator_codes[op->opcode_index]->builtin_code;
    182  if (model->buffers[tensor->buffer]->data.data() == nullptr) {
    196  int32_t GetOrInsertDequantizeOpCodeIndex(ModelT* model) {
    197  for (size_t i = 0; i < model->operator_codes.size(); ++i) {
    198  if (model->operator_codes[i]->builtin_code == BuiltinOperator_DEQUANTIZE)
    257  std::unique_ptr<ModelT> model;    (local)
    [all...]
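The two passes above (quantize_model.cc, quantize_weights.cc) rewrite the unpacked flatbuffer (ModelT) in place: walk the subgraphs, look up each operator's builtin_code in model->operator_codes, adjust tensors and buffers, then Pack() the result back. From Python they are normally reached through the TF Lite converter's optimization flags rather than called directly. A minimal sketch using only the public tf.lite API; the SavedModel path is a placeholder, and the mapping onto these exact C++ files is an assumption.

    # Sketch only: invoking TFLite quantization from the public Python API.
    # Assumptions: TensorFlow with the TF Lite converter available, and
    # "saved_model_dir" is a placeholder path to an existing SavedModel.
    import tensorflow as tf

    converter = tf.lite.TFLiteConverter.from_saved_model("saved_model_dir")
    # Optimize.DEFAULT without a representative dataset requests weight-only
    # quantization, the kind of rewrite implemented by quantize_weights.cc.
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model = converter.convert()

    with open("model_quant.tflite", "wb") as f:
        f.write(tflite_model)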
/external/tensorflow/tensorflow/contrib/timeseries/python/timeseries/state_space_models/

test_utils.py:
     34  def transition_power_test_template(test_case, model, num_steps):
     35  """Tests the transition_to_powers function of a state space model."""
     37  model.get_state_transition(), dtype=model.dtype)
     44  model_output_tensor = model.transition_to_powers(powers=array_ops.stack(
     61  def noise_accumulator_test_template(test_case, model, num_steps):
     62  """Tests `model`'s transition_power_noise_accumulator."""
     64  model.get_state_transition(), dtype=model.dtype)
     66  model.get_noise_transform(), dtype=model.dtype
    [all...]
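transition_power_test_template checks that a model's transition_to_powers(powers) agrees with explicitly exponentiating the state transition matrix, and noise_accumulator_test_template does the analogous check for accumulated process noise. Below is a plain-NumPy sketch of the first property; the transition matrix is an arbitrary example, not one taken from a real state space model.

    # Sketch of the property transition_power_test_template asserts:
    # transition_to_powers(k) should equal the state transition matrix raised
    # to the k-th power. "transition" here is an arbitrary example matrix.
    import numpy as np

    transition = np.array([[1.0, 1.0],
                           [0.0, 1.0]])  # e.g. a local linear trend transition

    def transition_to_powers_reference(transition, powers):
        """Reference implementation: explicit matrix powers."""
        return np.stack(
            [np.linalg.matrix_power(transition, int(p)) for p in powers])

    powers = np.arange(1, 5)
    expected = transition_to_powers_reference(transition, powers)
    # A state space model's transition_to_powers(powers=...) would be compared
    # against `expected`, element-wise, by the test template.
    assert np.allclose(expected[2], transition @ transition @ transition)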
/external/tensorflow/tensorflow/python/keras/layers/

cudnn_recurrent_test.py:
     88  model = keras.models.Model(inputs, state[0])
     89  model.run_eagerly = testing_utils.should_run_eagerly()
     92  state = model.predict(inputs)
    107  model = keras.models.Sequential()
    108  model.add(
    111  model.add(layer)
    112  model.add(
    114  model.compile(loss='categorical_crossentropy',
    116  model.fit
    [all...]
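The test above builds a functional model whose output is the recurrent layer's returned state (keras.models.Model(inputs, state[0])) and reads the state back with predict(). A sketch of the same pattern using the public tf.keras LSTM layer instead of the CuDNN-specific layers the test exercises; shapes and sizes are arbitrary placeholders.

    # Sketch of the "model over returned state" pattern with a public layer.
    import numpy as np
    import tensorflow as tf

    num_samples, timesteps, input_dim, units = 2, 3, 4, 5

    inputs = tf.keras.Input(shape=(timesteps, input_dim))
    outputs, state_h, state_c = tf.keras.layers.LSTM(
        units, return_state=True)(inputs)

    # Equivalent of `keras.models.Model(inputs, state[0])` in the snippet:
    # the model's prediction is the layer's final hidden state.
    state_model = tf.keras.Model(inputs, state_h)
    h = state_model.predict(np.random.random((num_samples, timesteps, input_dim)))
    print(h.shape)  # (num_samples, units)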
wrappers_test.py:
     77  model = keras.models.Sequential()
     78  model.add(
     81  model.compile(optimizer='rmsprop', loss='mse')
     82  model.fit(
     89  model.get_config()
     91  # check whether the model variables are present in the
     94  trackable_util.list_objects(model))
     95  for v in model.variables:
     99  model = keras.models.Sequential()
    100  model.add
    [all...]
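The wrapper test follows the usual Keras test recipe: build a small Sequential model around the wrapped layer, compile, fit on random data, then round-trip get_config() and inspect the tracked variables. A self-contained sketch of that recipe with TimeDistributed(Dense); the shapes are placeholders, not necessarily the ones used in the test.

    # Sketch of the wrapper-test recipe: wrap a Dense layer in TimeDistributed,
    # compile, fit on random data, then look at the config and variables.
    import numpy as np
    import tensorflow as tf

    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.TimeDistributed(
        tf.keras.layers.Dense(2), input_shape=(3, 4)))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 2)),
              epochs=1, batch_size=5, verbose=0)

    config = model.get_config()   # serializable layer configuration
    print(len(model.variables))   # kernel + bias of the wrapped Dense layer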
/external/tensorflow/tensorflow/python/keras/saving/

saving_utils_test.py:
     61  model = testing_utils.get_small_mlp(10, 3, input_dim)
     67  saving_utils.trace_model_call(model)
     68  model._set_inputs(inputs)
     70  fn = saving_utils.trace_model_call(model)
     72  expected_outputs = {model.output_names[0]: model(inputs)}
     80  model = testing_utils.get_small_mlp(10, 3, input_dim)
     81  model.compile(optimizer='sgd', loss='mse')
     82  model.fit(x=np.random.random((8, 5)),
     87  fn = saving_utils.trace_model_call(model)
    [all...]
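saving_utils.trace_model_call wraps the model's call in a traced function whose outputs are keyed by model.output_names; the test checks that tracing fails before the model knows its inputs and that the traced outputs match model(inputs) afterwards. The helper itself is internal, so the sketch below shows the same idea with public APIs only (tf.function plus get_concrete_function); the small MLP is a stand-in for testing_utils.get_small_mlp, and the fixed 'output' key is a simplification.

    # Public-API sketch of what trace_model_call does: trace the model's call
    # as a tf.function bound to a fixed input signature.
    import numpy as np
    import tensorflow as tf

    input_dim = 5
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(10, activation='relu', input_shape=(input_dim,)),
        tf.keras.layers.Dense(3),
    ])

    @tf.function
    def serving_fn(x):
        # The real helper keys outputs by model.output_names; a fixed key is
        # used here to keep the sketch simple.
        return {'output': model(x)}

    concrete_fn = serving_fn.get_concrete_function(
        tf.TensorSpec([None, input_dim], tf.float32))

    inputs = np.random.random((8, input_dim)).astype(np.float32)
    traced = concrete_fn(tf.constant(inputs))['output'].numpy()
    np.testing.assert_allclose(traced, model.predict(inputs), rtol=1e-5)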
/external/tensorflow/tensorflow/python/keras/

models_test.py:
     15  """Tests for `models.py` (model cloning, mainly)."""
     44  class TestModel(keras.Model):
     45  """A model subclass."""
    112  model = models.Sequential(_get_layers(input_shape, add_input_layer))
    115  isinstance(model._layers[0], keras.layers.InputLayer),
    117  self.assertEqual(model._is_graph_network, add_input_layer)
    119  # With placeholder creation -- clone model should have an InputLayer
    120  # if the original model has one.
    121  new_model = clone_fn(model)
    125  self.assertEqual(new_model._is_graph_network, model._is_graph_network
    [all...]
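models_test.py exercises model cloning: the clone should reproduce the original's topology (including an InputLayer when the original had one, and the same _is_graph_network flag) while starting from freshly initialized weights. A sketch of that behaviour using the public clone_model API; layer sizes are arbitrary placeholders.

    # Sketch of cloning behaviour with tf.keras.models.clone_model: same
    # architecture, independent weights until copied across explicitly.
    import numpy as np
    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(4,)),
        tf.keras.layers.Dense(3, activation='relu'),
        tf.keras.layers.Dense(1),
    ])

    clone = tf.keras.models.clone_model(model)

    x = np.random.random((2, 4)).astype(np.float32)
    print(model.predict(x).shape == clone.predict(x).shape)  # same topology
    clone.set_weights(model.get_weights())                   # now same weights
    np.testing.assert_allclose(model.predict(x), clone.predict(x), rtol=1e-6)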
/external/dagger2/compiler/src/main/java/dagger/internal/codegen/writer/

TypeNames.java:
     20  import javax.lang.model.element.TypeElement;
     21  import javax.lang.model.type.ArrayType;
     22  import javax.lang.model.type.DeclaredType;
     23  import javax.lang.model.type.NoType;
     24  import javax.lang.model.type.NullType;
     25  import javax.lang.model.type.PrimitiveType;
     26  import javax.lang.model.type.TypeMirror;
     27  import javax.lang.model.type.TypeVariable;
     28  import javax.lang.model.type.WildcardType;
     29  import javax.lang.model.util.SimpleTypeVisitor6
    [all...]
/external/libxkbcommon/xkbcommon/test/

rules-file.c:
     33  const char *model;    (member in struct test_data)
     53  data->rules, data->model, data->layout, data->variant, data->options
     58  data->model, data->layout, data->variant, data->options);
     98  .model = "my_model", .layout = "my_layout", .variant = "my_variant",
    110  .model = "", .layout = "", .variant = "", .options = "",
    120  .model = "pc104", .layout = "foo", .variant = "", .options = "",
    130  .model = "foo", .layout = "ar", .variant = "bar", .options = "",
    140  .model = NULL, .layout = "my_layout,second_layout", .variant = "my_variant",
    150  .model = "", .layout = "br,al,cn,az", .variant = "",
    162  .model = "my_model", .layout = "my_layout", .variant = "my_variant"
    [all...]
/external/llvm/utils/

schedcover.py:
     16  def add(instr, model, resource=None):
     20  entry[model] = resource
     21  models.add(model)
     39  for model in ordered_models:
     40  if not model: model = "default"
     41  sys.stdout.write(", {}".format(model))
     46  for model in ordered_models:
     47  if model in mapping:
     48  sys.stdout.write(", {}".format(mapping[model]))
    [all...]
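schedcover.py builds a table mapping each instruction to the scheduling resource it uses under each machine model, then prints it as CSV with one column per model (an empty model name is reported as "default"). A self-contained sketch of that table-building pattern; the instruction, model, and resource names below are made up for illustration.

    # Sketch of the schedcover.py pattern: record, per instruction, which
    # resource each scheduling model assigns, then emit one CSV row per
    # instruction with a column per model.
    import sys
    from collections import defaultdict

    table = defaultdict(dict)   # instruction -> {model: resource}
    models = set()

    def add(instr, model, resource=None):
        entry = table[instr]
        entry[model] = resource
        models.add(model)

    add("ADDrr", "SandyBridgeModel", "SBPort015")
    add("ADDrr", "HaswellModel", "HWPort0156")
    add("MULrr", "SandyBridgeModel", "SBPort1")

    ordered_models = sorted(models)
    sys.stdout.write("instruction")
    for model in ordered_models:
        sys.stdout.write(", {}".format(model))
    sys.stdout.write("\n")

    for instr in sorted(table):
        sys.stdout.write(instr)
        mapping = table[instr]
        for model in ordered_models:
            sys.stdout.write(", {}".format(mapping.get(model, "")))
        sys.stdout.write("\n")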