//
// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <tensorflow/lite/version.h>

namespace
{
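// Builds a FlatBuffer for a single-operator TFLite model: one Softmax or LogSoftmax node
// with a single input tensor and a single output tensor of the given shape and type.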
std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
                                           tflite::TensorType tensorType,
                                           const std::vector<int32_t>& tensorShape,
                                           float beta)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              1);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              2);

    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> softmaxOperator;
    flatbuffers::Offset<flatbuffers::String> modelDescription;
    flatbuffers::Offset<OperatorCode> operatorCode;

    switch (softmaxOperatorCode)
    {
        case tflite::BuiltinOperator_SOFTMAX:
            softmaxOperator =
                CreateOperator(flatBufferBuilder,
                               0,
                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                               BuiltinOptions_SoftmaxOptions,
                               CreateSoftmaxOptions(flatBufferBuilder, beta).Union());
            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Softmax Operator Model");
            operatorCode = CreateOperatorCode(flatBufferBuilder,
                                              tflite::BuiltinOperator_SOFTMAX);
            break;
        case tflite::BuiltinOperator_LOG_SOFTMAX:
            softmaxOperator =
                CreateOperator(flatBufferBuilder,
                               0,
                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                               BuiltinOptions_LogSoftmaxOptions,
                               CreateLogSoftmaxOptions(flatBufferBuilder).Union());
            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Log-Softmax Operator Model");
            operatorCode = CreateOperatorCode(flatBufferBuilder,
                                              tflite::BuiltinOperator_LOG_SOFTMAX);
            break;
        default:
            break;
    }
    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&softmaxOperator, 1));
    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

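// Runs the generated Softmax/LogSoftmax model twice, once on the reference TFLite runtime and
// once with the Arm NN delegate applied, and checks that both runs produce the expected output
// values and output shape.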
void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
                 tflite::TensorType tensorType,
                 std::vector<int32_t>& shape,
                 std::vector<float>& inputValues,
                 std::vector<float>& expectedOutputValues,
                 const std::vector<armnn::BackendId>& backends = {},
                 float beta = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
                                                             tensorType,
                                                             shape,
                                                             beta);

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Setup interpreter with Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}

/// Convenience function to run softmax and log-softmax test cases
/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
/// \param beta multiplicative parameter to the softmax function
/// \param expectedOutput expected output values to be checked against the transformed input
/// \param backends Arm NN backends to target
void SoftmaxTestCase(tflite::BuiltinOperator operatorCode, float beta,
                     std::vector<float> expectedOutput, const std::vector<armnn::BackendId> backends = {})
{
    std::vector<float> input = {
        1.0, 2.5, 3.0, 4.5, 5.0,
        -1.0, -2.5, -3.0, -4.5, -5.0};
    std::vector<int32_t> shape = {2, 5};

    SoftmaxTest(operatorCode,
                tflite::TensorType_FLOAT32,
                shape,
                input,
                expectedOutput,
                backends,
                beta);
}
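
// A minimal usage sketch for the helper above (illustrative only, assuming a doctest TEST_CASE
// in a separate test source file; the test name and expected values are placeholders, not
// reference data):
//
//     TEST_CASE("Softmax_Fp32_Test")
//     {
//         std::vector<float> expectedOutput = { /* pre-computed softmax of the fixed input */ };
//         SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, 1.0f, expectedOutput);
//     }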

} // anonymous namespace