//
// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

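// Builds a FlatBuffer model containing a single UNPACK operator with one input
// tensor and 'outputTensorNum' output tensors, splitting along 'axis'.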
std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperatorCode,
                                          tflite::TensorType tensorType,
                                          std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const int32_t outputTensorNum,
                                          unsigned int axis = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // Buffer 0 is the empty sentinel buffer
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // Buffer 1 backs the input tensor

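    // Quantization parameters shared by the input tensor and all output tensors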
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    const std::vector<int32_t> operatorInputs{ 0 };
    std::vector<int32_t> operatorOutputs{};
    const std::vector<int> subgraphInputs{ 0 };
    std::vector<int> subgraphOutputs{};

    std::vector<flatbuffers::Offset<Tensor>> tensors(outputTensorNum + 1);

    // Create input tensor
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                      inputTensorShape.size()),
                              tensorType,
                              1,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

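    // Create one output tensor per unpacked slice, each backed by its own buffer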
    for (int i = 0; i < outputTensorNum; ++i)
    {
        tensors[i + 1] = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                              outputTensorShape.size()),
                                      tensorType,
                                      (i + 2),
                                      flatBufferBuilder.CreateString("output" + std::to_string(i)),
                                      quantizationParameters);

        buffers.push_back(CreateBuffer(flatBufferBuilder));
        operatorOutputs.push_back(i + 1);
        subgraphOutputs.push_back(i + 1);
    }

    // Create operator
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_UnpackOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions =
        CreateUnpackOptions(flatBufferBuilder, outputTensorNum, axis).Union();

    flatbuffers::Offset<Operator> unpackOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&unpackOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Unpack Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, unpackOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

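// Runs the generated Unpack model through both a plain TfLite interpreter and an
// interpreter with the Arm NN delegate applied, then checks that both produce the
// expected output values and shapes.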
template <typename T>
void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
                tflite::TensorType tensorType,
                std::vector<armnn::BackendId>& backends,
                std::vector<int32_t>& inputShape,
                std::vector<int32_t>& expectedOutputShape,
                std::vector<T>& inputValues,
                std::vector<std::vector<T>>& expectedOutputValues,
                unsigned int axis = 0,
                float quantScale = 1.0f,
                int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateUnpackTfLiteModel(unpackOperatorCode,
                                                            tensorType,
                                                            inputShape,
                                                            expectedOutputShape,
                                                            expectedOutputValues.size(),
                                                            axis,
                                                            quantScale,
                                                            quantOffset);

    // Set up an interpreter with just the TfLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);

    // Compare output data
    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
    {
        std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(i);

        std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
        std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(i);

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
    }

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
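
// A minimal usage sketch with hypothetical values (the backend choice is an
// assumption): unpacking a 2x3 float input along axis 0 into two outputs of
// shape { 3 }.
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> inputShape { 2, 3 };
//     std::vector<int32_t> expectedOutputShape { 3 };
//     std::vector<float> inputValues { 1, 2, 3, 4, 5, 6 };
//     std::vector<std::vector<float>> expectedOutputValues { { 1, 2, 3 }, { 4, 5, 6 } };
//     UnpackTest<float>(tflite::BuiltinOperator_UNPACK, tflite::TensorType_FLOAT32, backends,
//                       inputShape, expectedOutputShape, inputValues, expectedOutputValues, 0);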

} // anonymous namespace