
Lines Matching refs:context_

73 context_.impl_ = static_cast<void*>(this);
74 context_.ResizeTensor = ResizeTensor;
75 context_.ReportError = ReportError;
76 context_.AddTensors = AddTensors;
77 context_.tensors = nullptr;
78 context_.tensors_size = 0;
79 context_.gemm_context = nullptr;
82 context_.GetNodeAndRegistration = nullptr;
83 context_.ReplaceSubgraphsWithDelegateKernels = nullptr;
84 context_.GetExecutionPlan = nullptr;
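
The matches at file lines 73-84 are the constructor wiring the C-style context struct back to its C++ owner: impl_ stores `this`, and static member functions are installed as plain function pointers so C code can call back into the object. A minimal sketch of that trampoline pattern follows; HostContext and Host are hypothetical names chosen for illustration, not the real TfLiteContext API.

    // Minimal sketch of the callback-registration pattern shown above.
    // HostContext/Host are hypothetical stand-ins, not the TFLite types.
    #include <cstdio>

    struct HostContext {
      void* impl_;  // back-pointer to the owning C++ object
      void (*ReportError)(HostContext* ctx, const char* msg);
    };

    class Host {
     public:
      Host() {
        context_.impl_ = static_cast<void*>(this);     // stash `this` for the trampoline
        context_.ReportError = ReportErrorTrampoline;  // static member fn fits a plain fn pointer
      }
      HostContext* context() { return &context_; }

     private:
      static void ReportErrorTrampoline(HostContext* ctx, const char* msg) {
        // Recover the owner from impl_ and forward to the member function.
        static_cast<Host*>(ctx->impl_)->ReportErrorImpl(msg);
      }
      void ReportErrorImpl(const char* msg) { std::fprintf(stderr, "%s\n", msg); }

      HostContext context_{};
    };

    int main() {
      Host host;
      host.context()->ReportError(host.context(), "example error");
      return 0;
    }

The same idea covers every function pointer assigned in this block: C kernels only ever see the plain struct, and each callback recovers the interpreter object through impl_.
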
104 for (int i = 0; i < context_.tensors_size; i++) {
105 TfLiteTensorFree(&context_.tensors[i]);
182 TF_LITE_ENSURE_OK(&context_,
190 &context_, CheckTensorIndices("outputs", outputs.data(), outputs.size()));
202 if (index < kOptionalTensor || index >= context_.tensors_size) {
203 ReportError(&context_, "Invalid tensor index %d in %s\n", index, label);
215 TF_LITE_ENSURE(&context_, bytes != nullptr);
232 ReportError(&context_,
254 ReportError(&context_, "AllocateTensors() called on inconsistent model.");
272 TF_LITE_ENSURE_OK(&context_, CheckTensorIndices("node inputs", inputs.data(),
275 &context_,
312 TF_LITE_ENSURE(&context_,
313 tensor_index < context_.tensors_size && tensor_index >= 0);
316 return ResizeTensorImpl(&context_.tensors[tensor_index], dims_lite);
348 if (HasDynamicTensor(context_, node.outputs)) {
358 &context_, std::unique_ptr<GraphInfo>(new InterpreterInfo(this))));
375 ReportError(&context_, "Invoke called on model that is not consistent.");
379 ReportError(&context_, "Invoke called on model that is not ready.");
386 TF_LITE_ENSURE_OK(&context_, nnapi_delegate_->Invoke(this));
391 ReportError(&context_,
409 TF_LITE_ENSURE(&context_, next_execution_plan_index_to_prepare_ >=
456 context_.tensors = tensors_.data();
457 context_.tensors_size = tensors_.size();
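
File lines 456-457 re-publish the raw tensor view after the backing storage may have grown: a vector's data() pointer can change on reallocation, so both the pointer and the size in the C struct are refreshed together. A small sketch of that pattern, using hypothetical Tensor/Context/Owner names rather than the TFLite source, is:

    // Sketch of re-publishing a raw C view after vector-backed storage grows.
    // Tensor/Context/Owner are hypothetical names, not the TFLite types.
    #include <vector>

    struct Tensor { int bytes = 0; };

    struct Context {
      Tensor* tensors = nullptr;
      int tensors_size = 0;
    };

    class Owner {
     public:
      void AddTensors(int count) {
        tensors_.resize(tensors_.size() + count);
        // resize() may reallocate, which invalidates the previously published
        // data() pointer, so both fields of the C view are refreshed here.
        context_.tensors = tensors_.data();
        context_.tensors_size = static_cast<int>(tensors_.size());
      }
      const Context& context() const { return context_; }

     private:
      std::vector<Tensor> tensors_;
      Context context_;
    };

    int main() {
      Owner owner;
      owner.AddTensors(4);  // grow storage, then refresh the raw view
      return owner.context().tensors_size == 4 ? 0 : 1;
    }
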
472 TF_LITE_ENSURE(&context_, node_index < nodes_size() && node_index >= 0);
473 TF_LITE_ENSURE(&context_, node != nullptr && registration != nullptr);
490 TF_LITE_ENSURE(&context_,
491 tensor_index < context_.tensors_size && tensor_index >= 0);
497 TF_LITE_ENSURE_OK(&context_, BytesRequired(type, dims.data(), dims.size(),
499 TF_LITE_ENSURE_EQ(&context_, required_bytes, bytes);
504 kTfLiteMmapRo, allocation, &context_.tensors[tensor_index]);
516 TF_LITE_ENSURE(&context_,
517 tensor_index < context_.tensors_size && tensor_index >= 0);
524 TF_LITE_ENSURE_OK(&context_, BytesRequired(type, dims.data(), dims.size(),
531 nullptr, &context_.tensors[tensor_index]);
537 TF_LITE_ENSURE(&context_, node_index >= 0 && node_index < nodes_size());
571 ReportError(&context_, "Attempting to resize a fixed-size tensor.");
593 tflite::gemm_support::SetMaxNumThreads(&context_, num_threads);
599 context_.GetNodeAndRegistration = GetNodeAndRegistration;
600 context_.ReplaceSubgraphsWithDelegateKernels =
602 context_.GetExecutionPlan = GetExecutionPlan;
604 TfLiteStatus status = delegate->Prepare(&context_, delegate->data_);
606 context_.GetNodeAndRegistration = nullptr;
607 context_.ReplaceSubgraphsWithDelegateKernels = nullptr;
608 context_.GetExecutionPlan = nullptr;
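
File lines 599-608 expose the delegate-only hooks (GetNodeAndRegistration, ReplaceSubgraphsWithDelegateKernels, GetExecutionPlan) just for the duration of delegate->Prepare and reset them to nullptr afterwards, so ordinary kernels never see them. A reduced sketch of that scoping pattern, with hypothetical Context/Delegate names and a single hook standing in for all three, is:

    // Sketch of scoping delegate-only hooks to the Prepare call.
    // Context/Delegate and the callback names are hypothetical stand-ins.
    struct Context {
      int (*GetExecutionPlan)(Context* ctx) = nullptr;
    };

    struct Delegate {
      int (*Prepare)(Context* ctx, void* data);
      void* data_;
    };

    static int GetExecutionPlanImpl(Context* /*ctx*/) { return 0; }

    int ModifyGraphWithDelegate(Context* context, Delegate* delegate) {
      // Expose the hook only while the delegate's Prepare runs...
      context->GetExecutionPlan = GetExecutionPlanImpl;
      int status = delegate->Prepare(context, delegate->data_);
      // ...then hide it again so ordinary kernels never see it.
      context->GetExecutionPlan = nullptr;
      return status;
    }

    int main() {
      Context context;
      Delegate delegate{
          [](Context* ctx, void*) {
            // Inside Prepare the hook is available; afterwards it is nullptr.
            return ctx->GetExecutionPlan ? ctx->GetExecutionPlan(ctx) : 1;
          },
          nullptr};
      return ModifyGraphWithDelegate(&context, &delegate);
    }

Nulling the pointers after Prepare returns keeps the graph-rewriting surface out of reach of regular kernel callbacks, which only need the always-present context fields.
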