//
// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <tensorflow/lite/version.h>

namespace
{
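    // Builds a FlatBuffer model containing a single BROADCAST_TO operator with a
    // data input, a constant shape input and one output tensor.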
    std::vector<char> CreateBroadcastToTfLiteModel(tflite::BuiltinOperator operatorCode,
                                                   tflite::TensorType inputTensorType,
                                                   const std::vector<int32_t>& inputTensorShape,
                                                   const std::vector<int32_t>& shapeTensorShape,
                                                   const std::vector<int32_t>& shapeTensorData,
                                                   const std::vector<int32_t>& outputTensorShape)
    {
        using namespace tflite;
        flatbuffers::FlatBufferBuilder flatBufferBuilder;

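        // Buffer 0 is the empty sentinel buffer required by the TFLite schema.
        // Buffers 1 and 3 back the input and output tensors (filled at runtime),
        // while buffer 2 holds the constant broadcast shape data.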
        std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
        buffers.push_back(CreateBuffer(flatBufferBuilder));
        buffers.push_back(CreateBuffer(flatBufferBuilder));
        buffers.push_back(CreateBuffer(flatBufferBuilder,
                                       flatBufferBuilder.CreateVector(
                                           reinterpret_cast<const uint8_t*>(shapeTensorData.data()),
                                           sizeof(int32_t) * shapeTensorData.size())));
        buffers.push_back(CreateBuffer(flatBufferBuilder));

        float qScale = 1.0f;
        int32_t qOffset = 0;

        auto quantizationParameters =
            CreateQuantizationParameters(flatBufferBuilder,
                                         0,
                                         0,
                                         flatBufferBuilder.CreateVector<float>({ qScale }),
                                         flatBufferBuilder.CreateVector<int64_t>({ qOffset }));

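        // Tensor 0 is the data input, tensor 1 the INT32 broadcast shape, tensor 2 the output.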
        std::array<flatbuffers::Offset<Tensor>, 3> tensors;
        tensors[0] = CreateTensor(flatBufferBuilder,
                                  flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                          inputTensorShape.size()),
                                  inputTensorType,
                                  1,
                                  flatBufferBuilder.CreateString("input_tensor"),
                                  quantizationParameters);

        tensors[1] = CreateTensor(flatBufferBuilder,
                                  flatBufferBuilder.CreateVector<int32_t>(shapeTensorShape.data(),
                                                                          shapeTensorShape.size()),
                                  TensorType_INT32,
                                  2,
                                  flatBufferBuilder.CreateString("shape_input_tensor"),
                                  quantizationParameters);

        tensors[2] = CreateTensor(flatBufferBuilder,
                                  flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                          outputTensorShape.size()),
                                  inputTensorType,
                                  3,
                                  flatBufferBuilder.CreateString("output_tensor"),
                                  quantizationParameters);

        // Create Operator
        tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_BroadcastToOptions;
        flatbuffers::Offset<void> operatorBuiltinOption = 0;

        const std::vector<int> operatorInputs {0, 1};
        const std::vector<int> operatorOutputs {2};

        flatbuffers::Offset<Operator> broadcastOperator =
            CreateOperator(flatBufferBuilder,
                           0,
                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                           operatorBuiltinOptionsType,
                           operatorBuiltinOption);

        const std::vector<int> subgraphInputs{0, 1};
        const std::vector<int> subgraphOutputs{2};
        flatbuffers::Offset<SubGraph> subgraph =
            CreateSubGraph(flatBufferBuilder,
                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                           flatBufferBuilder.CreateVector(&broadcastOperator, 1));

        flatbuffers::Offset<flatbuffers::String> modelDescription =
            flatBufferBuilder.CreateString("ArmnnDelegate: BroadcastTo Operator Model");
        flatbuffers::Offset<OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder,
                                                                      0,
                                                                      0,
                                                                      2,
                                                                      tflite::BuiltinOperator_BROADCAST_TO);

        flatbuffers::Offset<Model> flatbufferModel =
            CreateModel(flatBufferBuilder,
                        TFLITE_SCHEMA_VERSION,
                        flatBufferBuilder.CreateVector(&opCode, 1),
                        flatBufferBuilder.CreateVector(&subgraph, 1),
                        modelDescription,
                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

        flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

        return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                                 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
    }

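    // Runs the generated model through the plain TFLite runtime and through the
    // Arm NN delegate, then checks that both produce the expected output values and shape.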
    template<typename T>
    void BroadcastToTestImpl(tflite::TensorType inputTensorType,
                             tflite::BuiltinOperator operatorCode,
                             std::vector<T>& inputValues,
                             std::vector<int32_t> inputShape,
                             std::vector<int32_t> shapeShapes,
                             std::vector<int32_t> shapeData,
                             std::vector<T>& expectedOutputValues,
                             std::vector<int32_t> expectedOutputShape,
                             const std::vector<armnn::BackendId>& backends)
    {
        using namespace delegateTestInterpreter;

        std::vector<char> modelBuffer = CreateBroadcastToTfLiteModel(operatorCode,
                                                                     inputTensorType,
                                                                     inputShape,
                                                                     shapeShapes,
                                                                     shapeData,
                                                                     expectedOutputShape);

        // Setup interpreter with just TFLite Runtime.
        auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
        CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(shapeData, 1) == kTfLiteOk);
        CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
        std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

        // Setup interpreter with Arm NN Delegate applied.
        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<int32_t>(shapeData, 1) == kTfLiteOk);
        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
        std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

        tfLiteInterpreter.Cleanup();
        armnnInterpreter.Cleanup();
    }

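    // Illustrative usage sketch (hypothetical values, not taken from an existing test;
    // the CpuRef backend below is an assumption):
    //
    //     std::vector<float>   inputValues         { 1.f, 2.f, 3.f };
    //     std::vector<int32_t> inputShape          { 1, 3 };
    //     std::vector<int32_t> shapeShape          { 2 };
    //     std::vector<int32_t> shapeData           { 2, 3 };
    //     std::vector<float>   expectedOutput      { 1.f, 2.f, 3.f, 1.f, 2.f, 3.f };
    //     std::vector<int32_t> expectedOutputShape { 2, 3 };
    //
    //     BroadcastToTestImpl<float>(tflite::TensorType_FLOAT32,
    //                                tflite::BuiltinOperator_BROADCAST_TO,
    //                                inputValues, inputShape, shapeShape, shapeData,
    //                                expectedOutput, expectedOutputShape,
    //                                { armnn::Compute::CpuRef });
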
} // anonymous namespace