    Searched defs:tensors (Results 1 - 23 of 23)

  /external/tensorflow/tensorflow/core/framework/
unique_tensor_references_test.cc 32 TensorReferenceVector tensors; local
33 refs.FreezeAndReturnReferences(&tensors);
34 EXPECT_EQ(2, tensors.size());
35 if (tensors[0].SharesBufferWith(a)) {
36 EXPECT_TRUE(tensors[1].SharesBufferWith(b));
38 EXPECT_TRUE(tensors[1].SharesBufferWith(a));
39 EXPECT_TRUE(tensors[0].SharesBufferWith(b));
41 for (auto& t : tensors) {
55 TensorReferenceVector tensors; local
56 refs.FreezeAndReturnReferences(&tensors);
91 TensorReferenceVector tensors; local
116 TensorReferenceVector tensors; local
    [all...]
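
The pattern exercised above, sketched as a rough illustration: UniqueTensorReferences deduplicates references to tensor buffers, and FreezeAndReturnReferences hands the unique references back to the caller. Only FreezeAndReturnReferences, SharesBufferWith, and the loop over the returned references appear in the snippet; the Add calls and the wrapper function are assumptions.

    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_reference.h"
    #include "tensorflow/core/framework/unique_tensor_references.h"

    // Deduplicate references to tensor buffers, then release them.
    void HoldUniqueRefs(const tensorflow::Tensor& a, const tensorflow::Tensor& b) {
      tensorflow::UniqueTensorReferences refs;
      refs.Add(a);
      refs.Add(b);
      refs.Add(a);  // same buffer as the first Add, so it is not duplicated

      tensorflow::TensorReferenceVector tensors;
      refs.FreezeAndReturnReferences(&tensors);  // one reference per unique buffer

      for (auto& t : tensors) {
        t.Unref();  // the caller owns the returned references
      }
    }
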
variant_tensor_data.cc 33 const Tensor& VariantTensorData::tensors(int index) const { function in class:tensorflow::VariantTensorData
37 const std::vector<Tensor>& VariantTensorData::tensors() const { function in class:tensorflow::VariantTensorData
58 for (const auto& tensor : proto.tensors()) {
89 strings::StrCat(repeated_field, " tensors: ", t.DebugString());
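
A minimal sketch of reading those accessors back, assuming only what the definitions above show: tensors() returns the whole vector and tensors(int) indexes a single element. The logging wrapper itself is hypothetical.

    #include "tensorflow/core/framework/variant_tensor_data.h"
    #include "tensorflow/core/platform/logging.h"

    // Log every tensor held by a VariantTensorData value.
    void LogTensors(const tensorflow::VariantTensorData& data) {
      for (size_t i = 0; i < data.tensors().size(); ++i) {
        LOG(INFO) << "tensors: " << data.tensors(static_cast<int>(i)).DebugString();
      }
    }
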
  /external/tensorflow/tensorflow/contrib/lite/toco/tflite/
export_test.cc 58 details::TensorsMap tensors; local
59 details::LoadTensorsMap(input_model_, &tensors);
60 EXPECT_EQ(0, tensors["tensor_one"]);
61 EXPECT_EQ(1, tensors["tensor_two"]);
107 // TODO(ahentz): tests for tensors, inputs, outputs, opcodes and operators.
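
For context, LoadTensorsMap assigns each array in a toco::Model a dense index keyed by name, which is what the expectations above check. A sketch under the assumption that details::TensorsMap behaves like an ordinary name-to-index map and that the namespaces follow the contrib/lite/toco/tflite layout; the printing helper is hypothetical.

    #include <cstdio>
    #include "tensorflow/contrib/lite/toco/tflite/export.h"

    // Print the index assigned to every array in the model.
    void PrintTensorIndices(const toco::Model& model) {
      toco::tflite::details::TensorsMap tensors;
      toco::tflite::details::LoadTensorsMap(model, &tensors);
      for (const auto& entry : tensors) {
        std::printf("%s -> %d\n", entry.first.c_str(), entry.second);
      }
    }
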
import_test.cc 43 // The tensors
65 auto tensors = builder_.CreateVector( local
77 auto subgraph = ::tflite::CreateSubGraph(builder_, tensors, 0, 0, 0);
99 details::TensorsTable tensors; local
100 details::LoadTensorsTable(*input_model_, &tensors);
101 EXPECT_THAT(tensors, ElementsAre("tensor_one", "tensor_two"));
112 TEST_F(ImportTest, Tensors) {
import.cc 31 auto tensors = (*input_model.subgraphs())[0]->tensors(); local
32 if (!tensors) return;
33 for (const auto* tensor : *tensors) {
54 auto tensors = (*input_model.subgraphs())[0]->tensors(); local
56 // auto tensors = input_model.tensors();
57 if (!tensors) return;
58 for (const auto* input_tensor : *tensors) {
    [all...]
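
The same subgraph walk as the import.cc snippets above, written out as a standalone sketch. The flatbuffer accessors return nullable pointers, hence the checks; the header path is assumed from the contrib/lite layout and the function itself is hypothetical.

    #include <cstdio>
    #include "tensorflow/contrib/lite/schema/schema_generated.h"

    // List the names of all tensors in the first subgraph of a TFLite model.
    void ListTensorNames(const ::tflite::Model& input_model) {
      if (!input_model.subgraphs() || input_model.subgraphs()->size() == 0) return;
      auto tensors = (*input_model.subgraphs())[0]->tensors();
      if (!tensors) return;
      for (const auto* tensor : *tensors) {
        if (tensor->name()) std::printf("%s\n", tensor->name()->c_str());
      }
    }
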
export.cc 96 // tensors in the tensors_map.
296 auto tensors = ExportTensors(model, tensors_map, &builder, &buffers_to_write); local
316 auto subgraph = CreateSubGraph(builder, tensors, inputs, outputs, ops);
  /external/tensorflow/tensorflow/contrib/lite/java/src/main/java/org/tensorflow/lite/
Interpreter.java 112 Tensor[] tensors = wrapper.run(inputs); local
113 if (outputs == null || tensors == null || outputs.size() > tensors.length) {
116 final int size = tensors.length;
122 tensors[idx].copyTo(outputs.get(idx));
  /external/tensorflow/tensorflow/contrib/lite/tools/
verifier_test.cc 190 auto tensors = builder.CreateVector(std::vector<Offset<Tensor>>{ local
194 {CreateSubGraph(builder, tensors, /*inputs=*/0, /*outputs=*/0,
  /external/tensorflow/tensorflow/core/grappler/costs/
graph_memory_test.cc 58 std::set<string> tensors; local
60 tensors.insert(strings::StrCat(t.node, ":", t.output_id));
64 // the order in which this takes place, in the worst case the 3 tensors are in
70 EXPECT_EQ(expected, tensors);
88 std::set<string> tensors; local
90 tensors.insert(strings::StrCat(t.node, ":", t.output_id));
96 EXPECT_EQ(expected, tensors);
215 std::set<string> tensors; local
217 tensors.insert(strings::StrCat(t.node, ":", t.output_id));
223 EXPECT_EQ(expected, tensors);
    [all...]
utils.cc 63 std::vector<TensorProto> tensors; local
66 tensors.push_back(attr_value.tensor());
71 tensors.push_back(tensor_proto);
77 return tensors;
119 std::vector<TensorProto> tensors = ExtractTensors(attr_value); local
120 if (tensors.empty()) continue;
122 const TensorProto& t = tensors[0];
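
The ExtractTensors helper above covers both the single-tensor and list-valued cases of an AttrValue; here is a self-contained sketch of that logic, with proto field names taken from attr_value.proto and the function renamed so it is not mistaken for the exact upstream implementation.

    #include <vector>
    #include "tensorflow/core/framework/attr_value.pb.h"

    // Collect every TensorProto stored in an AttrValue, whether it holds a
    // single tensor or a list of tensors.
    std::vector<tensorflow::TensorProto> ExtractTensorProtos(
        const tensorflow::AttrValue& attr_value) {
      std::vector<tensorflow::TensorProto> tensors;
      switch (attr_value.value_case()) {
        case tensorflow::AttrValue::kTensor:
          tensors.push_back(attr_value.tensor());
          break;
        case tensorflow::AttrValue::kList:
          for (const auto& tensor_proto : attr_value.list().tensor()) {
            tensors.push_back(tensor_proto);
          }
          break;
        default:
          break;
      }
      return tensors;
    }
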
  /external/tensorflow/tensorflow/contrib/lite/
arena_planner_test.cc 100 std::vector<TfLiteTensor>* tensors() { return &tensors_; } function in class:tflite::__anon39182::TestGraph
116 size_t num_tensors() const override { return graph_->tensors()->size(); }
118 return &graph_->tensors()->at(index);
161 const TfLiteTensor& tensor = (*graph_->tensors())[tensor_index];
168 const TfLiteTensor& tensor = (*graph_->tensors())[tensor_index];
196 EXPECT_TRUE((*graph.tensors())[5].data.raw == nullptr);
197 EXPECT_TRUE((*graph.tensors())[11].data.raw == nullptr);
210 (*graph.tensors())[1].bytes = 0;
293 (*graph.tensors())[1].bytes = 40;
318 (*graph.tensors())[1].allocation_type = kTfLiteArenaRwPersistent
    [all...]
model.cc 622 const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors,
634 for (int i = 0; i < tensors->Length(); ++i) {
635 const auto* tensor = tensors->Get(i);
762 // Construct interpreter with correct number of tensors and operators.
771 auto tensors = subgraph->tensors(); local
772 if (!operators || !tensors || !buffers) {
774 "Did not get operators, tensors, or buffers in input flat buffer.\n");
778 if ((**interpreter).AddTensors(tensors->Length()) != kTfLiteOk) {
786 // Finally, set up nodes and tensors
    [all...]
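
model.cc is the machinery behind InterpreterBuilder; from the caller's side the tensor table ends up addressable through the Interpreter. A sketch of that round trip, assuming the contrib/lite-era headers and the public BuildFromFile / InterpreterBuilder / AllocateTensors entry points; the dumping function is hypothetical.

    #include <cstdio>
    #include <memory>
    #include "tensorflow/contrib/lite/interpreter.h"
    #include "tensorflow/contrib/lite/kernels/register.h"
    #include "tensorflow/contrib/lite/model.h"

    // Build an interpreter from a flatbuffer file and print its tensor table.
    void DumpTensors(const char* filename) {
      auto model = tflite::FlatBufferModel::BuildFromFile(filename);
      if (!model) return;
      tflite::ops::builtin::BuiltinOpResolver resolver;
      std::unique_ptr<tflite::Interpreter> interpreter;
      if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk) return;
      if (interpreter->AllocateTensors() != kTfLiteOk) return;
      for (int i = 0; i < static_cast<int>(interpreter->tensors_size()); ++i) {
        const TfLiteTensor* t = interpreter->tensor(i);
        std::printf("%d: %s (%zu bytes)\n", i, t->name ? t->name : "?", t->bytes);
      }
    }
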
context.h 23 // TfLiteContext - allows an op to access the tensors
64 // Check if two tensors are equal. Returns 1 if they are equal, 0 otherwise.
156 // data. kTfLiteDynamic is for tensors that are allocated during evaluation.
215 // Inputs to this node expressed as indices into the simulator's tensors.
218 // Outputs to this node expressed as indices into the simulator's tensors.
221 // Temporary tensors used during the computations. This usually contains no
222 // tensors, but ops are allowed to change that if they need scratch space of
240 // Number of tensors in the context.
261 // An array of tensors in the interpreter context (of length `tensors_size`)
262 TfLiteTensor* tensors; member in struct:TfLiteContext
    [all...]
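
In other words, an op addresses its operands as indices into the context's flat tensor array. A minimal Prepare sketch under the TfLiteContext/TfLiteNode contract described above; the function name is hypothetical.

    #include "tensorflow/contrib/lite/context.h"

    // node->inputs/outputs hold indices into context->tensors, an array of
    // length context->tensors_size.
    TfLiteStatus PrepareCopyShape(TfLiteContext* context, TfLiteNode* node) {
      const TfLiteTensor* input = &context->tensors[node->inputs->data[0]];
      TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
      // Give the output the input's shape; ResizeTensor takes ownership of the
      // copied dims array.
      return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims));
    }
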
  /external/tensorflow/tensorflow/core/debug/
debug_grpc_io_utils_test.cc 182 // Prepare the tensors to sent.
183 std::vector<Tensor> tensors; local
187 tensors.push_back(tensor);
200 // Set up the concurrent tasks of sending Tensors via an Event stream to the
202 auto fn = [this, &mu, &tensor_count, &tensors, &statuses, &all_done,
210 // Different concurrent tasks will send different tensors.
216 tensors[this_count], wall_time, urls);
245 // One prep tensor plus kSends concurrent tensors are expected.
  /external/tensorflow/tensorflow/core/kernels/
list_kernels.h 37 // Variant compatible type for a list of tensors. This is mutable but instances
54 std::vector<Tensor> tensors; member in struct:tensorflow::TensorList
88 OP_REQUIRES(c, l->tensors.size() == num_elements_,
92 l->tensors.size(), " elements."));
95 resulting_shape.AddDim(l->tensors.size());
106 inputs_flat.reserve(l->tensors.size());
107 for (const auto& t : l->tensors) {
155 output_list.tensors.reserve(t.shape().dim_size(0));
163 output_list.tensors.push_back(tmp);
169 output_list.tensors.push_back(aligned)
    [all...]
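
TensorList, as defined above, is essentially a wrapper around std::vector<Tensor> that can be stored in a Variant. A small sketch of populating one; only the `tensors` member comes from the snippet, the rest is ordinary TensorFlow API and the helper itself is hypothetical.

    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/kernels/list_kernels.h"

    // Build a two-element TensorList of scalar floats.
    tensorflow::TensorList MakeList() {
      tensorflow::TensorList list;
      list.tensors.reserve(2);
      for (float v : {1.0f, 2.0f}) {
        tensorflow::Tensor t(tensorflow::DT_FLOAT, tensorflow::TensorShape({}));
        t.scalar<float>()() = v;
        list.tensors.push_back(t);
      }
      return list;
    }
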
serialize_sparse_op.cc 413 std::vector<SparseTensor> tensors; variable
414 tensors.reserve(num_sparse_tensors);
416 tensors.emplace_back(indices[i], values[i], shape, std_order);
422 maybe_output = SparseTensor::Concat<T>(tensors); \
batch_kernels.cc 48 // Note that we reduce the concat of k-dimensional tensors into a two
58 "Ranks of all input tensors should match: shape[0] = ",
97 // tensors along the zeroth dimension, with the ith split having zeroth-
98 // dimension size 'sizes[i]'. They allocate the output tensors using 'context',
206 // A class encapsulating the state and logic for batching tensors.
241 OpInputList tensors; local
242 TF_RETURN_IF_ERROR(context->input_list("in_tensors", &tensors));
243 for (int i = 0; i < tensors.size(); ++i) {
244 const Tensor& tensor = tensors[i];
247 "Batching input tensors must have at least one dimension")
832 std::vector<Tensor> tensors; local
    [all...]
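
The validation loop above reads a variadic input through OpInputList; here it is in isolation. The "in_tensors" name and the error text come from the snippet, while the surrounding function is a hypothetical sketch rather than the kernel's actual structure.

    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/lib/core/errors.h"

    // Reject zero-dimensional inputs, as the batching kernel does.
    tensorflow::Status ValidateBatchInputs(tensorflow::OpKernelContext* context) {
      tensorflow::OpInputList tensors;
      TF_RETURN_IF_ERROR(context->input_list("in_tensors", &tensors));
      for (int i = 0; i < tensors.size(); ++i) {
        const tensorflow::Tensor& tensor = tensors[i];
        if (tensor.dims() < 1) {
          return tensorflow::errors::InvalidArgument(
              "Batching input tensors must have at least one dimension");
        }
      }
      return tensorflow::Status::OK();
    }
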
  /external/tensorflow/tensorflow/contrib/lite/kernels/
test_util.cc 83 // Initialize buffers list with empty buffer to allow for non-const tensors.
159 auto tensors = builder_.CreateVector(tensors_); local
164 auto subgraph = CreateSubGraph(builder_, tensors, inputs, outputs, operators);
194 << "Cannot allocate tensors";
  /external/tensorflow/tensorflow/core/grappler/optimizers/
memory_optimizer_test.cc 390 auto tensors = EvaluateNodes(output, fetch); local
391 EXPECT_EQ(4, tensors.size());
393 for (int i = 0; i < tensors[0].NumElements(); ++i) {
394 float actual = tensors[3].flat<float>()(i);
397 expected += tensors[j].flat<float>()(i);
constant_folding_test.cc 59 auto tensors = EvaluateNodes(output, fetch); local
61 EXPECT_EQ(1, tensors.size());
62 test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
147 auto tensors = EvaluateNodes(output, fetch); local
148 EXPECT_EQ(fetch.size(), tensors.size());
150 test::ExpectTensorEqual<float>(tensor_expected[i], tensors[i]);
368 auto tensors = EvaluateNodes(output, fetch); local
369 EXPECT_EQ(fetch.size(), tensors.size());
371 test::ExpectTensorEqual<float>(tensor_expected[0], tensors[i]);
579 auto tensors = EvaluateNodes(output, fetch) local
1168 auto tensors = EvaluateNodes(output, fetch); local
    [all...]
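
These grappler tests share one idiom: evaluate the fetch nodes before and after optimization and compare the resulting tensors element-wise. A fragment of that idiom as it would sit inside a test derived from the grappler test fixture; EvaluateNodes and ExpectTensorEqual come from the snippets, the fixture and surrounding variable names are assumed.

    // Inside a TEST_F body of a GrapplerTest-derived fixture:
    auto tensors_expected = EvaluateNodes(item.graph, fetch);  // original graph
    auto tensors = EvaluateNodes(output, fetch);               // optimized graph
    ASSERT_EQ(tensors_expected.size(), tensors.size());
    for (int i = 0; i < static_cast<int>(tensors.size()); ++i) {
      test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]);
    }
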
  /external/tensorflow/tensorflow/c/eager/
tape.h 63 // Operations the tape needs to perform on tensors to do backpropagation. Named
90 // Consumes references to the tensors in the gradient_tensors list and returns
123 // functions (and hence the tensors they keep alive). Instead, everything
146 // once) and produces the gradient of the target tensors with respect to the
147 // source tensors. The output gradients are used if not empty and not
203 std::vector<TapeTensor> tensors; local
204 tensors.reserve(output_tensors.size());
210 tensors.push_back(o);
213 op_type, tensors, ids, backward_function, backward_function_deleter};
233 // Do not delete watched tensors
    [all...]
  /external/tensorflow/tensorflow/core/kernels/data/
tensor_queue_dataset_op.cc 282 std::vector<Tensor> tensors; local
285 input_impl_->GetNext(ctx, &tensors, &input_end));
287 batch->push_back(std::move(tensors));
317 Status Insert(const std::vector<Tensor>& tensors) {
318 if (tensors.size() != dtypes_.size()) {
320 "TensorQueue::Insert: mismatched number of tensors. Queue "
322 dtypes_.size(), " tensors but tried to insert ", tensors.size());
324 for (int i = 0; i < tensors.size(); ++i) {
325 if (tensors[i].dtype() != dtypes_[i])
626 std::vector<Tensor> tensors; variable
    [all...]
  /external/tensorflow/tensorflow/contrib/lite/schema/
schema_generated.h 4039 std::vector<std::unique_ptr<TensorT>> tensors; member in struct:tflite::SubGraphT
4057 const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *tensors() const { function in struct:tflite::FLATBUFFERS_FINAL_CLASS
    [all...]
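
The generated code exposes the tensor table twice: the packed SubGraph returns a flatbuffers::Vector through tensors(), while the unpacked SubGraphT owns its tensors as unique_ptrs. A short sketch of the object API, with TensorT field names assumed from the generated schema and the builder function itself hypothetical.

    #include <memory>
    #include "tensorflow/contrib/lite/schema/schema_generated.h"

    // Build a one-tensor subgraph with the unpacked (object) API.
    tflite::SubGraphT MakeSubGraph() {
      tflite::SubGraphT subgraph;
      auto tensor = std::unique_ptr<tflite::TensorT>(new tflite::TensorT());
      tensor->name = "tensor_one";
      tensor->type = tflite::TensorType_FLOAT32;
      tensor->shape = {1, 4};
      subgraph.tensors.push_back(std::move(tensor));
      return subgraph;
    }
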
