//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn_delegate.hpp>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/core/c/c_api.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

#include <iostream>
#include <memory>
int main()
{
    // Load the FlatBuffer model from disk.
    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile("./simple_conv2d_1_op.tflite");
    if (!model)
    {
        std::cout << "Failed to load TfLite model from: ./simple_conv2d_1_op.tflite" << std::endl;
        return -1;
    }
    // Build the interpreter; InterpreterBuilder allocates it, so there is no
    // need to construct one up front.
    std::unique_ptr<tflite::Interpreter> tfLiteInterpreter;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    tflite::InterpreterBuilder builder(*model, resolver);
    if (builder(&tfLiteInterpreter) != kTfLiteOk)
    {
        std::cout << "Error loading the model into the TfLiteInterpreter." << std::endl;
        return -1;
    }

    // Create the Arm NN delegate, here configured to run on the CpuRef backend.
    armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuRef);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)> theArmnnDelegate(
        armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions), armnnDelegate::TfLiteArmnnDelegateDelete);
    // Register the Arm NN delegate with the TfLiteInterpreter; ModifyGraphWithDelegate
    // takes ownership of the delegate when passed a unique_ptr.
    auto result = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
    if (result != kTfLiteOk)
    {
        std::cout << "Could not register the ArmNN TfLite Delegate with the TfLiteInterpreter." << std::endl;
        return -1;
    }
    if (tfLiteInterpreter->AllocateTensors() != kTfLiteOk)
    {
        std::cout << "Failed to allocate tensors in the TfLiteInterpreter." << std::endl;
        return -1;
    }

    // The input tensors really should be populated before running inference,
    // though this test will run without it; a minimal sketch is below.

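    // Minimal sketch of populating the first input, assuming a single float32
    // input tensor (the type check guards that assumption). Zero-filling is
    // enough to make the inference deterministic.
    TfLiteTensor* inputTensor = tfLiteInterpreter->input_tensor(0);
    if (inputTensor != nullptr && inputTensor->type == kTfLiteFloat32)
    {
        float* inputData = tfLiteInterpreter->typed_input_tensor<float>(0);
        for (size_t i = 0; i < inputTensor->bytes / sizeof(float); ++i)
        {
            inputData[i] = 0.0f;
        }
    }
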
    // Run inference; Invoke() returns a TfLiteStatus rather than a plain int.
    TfLiteStatus status = tfLiteInterpreter->Invoke();
    if (status != kTfLiteOk)
    {
        std::cout << "Inference failed." << std::endl;
        return -1;
    }
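
    // A minimal sketch of reading the result back, again assuming a single
    // float32 output tensor; typed_output_tensor returns nullptr if the type
    // does not match. The original program stops after Invoke().
    const float* outputData = tfLiteInterpreter->typed_output_tensor<float>(0);
    if (outputData != nullptr)
    {
        std::cout << "First output value: " << outputData[0] << std::endl;
    }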
    return 0;
}