//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
#include <armnnUtils/FloatingPointComparison.hpp>
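#include <cmath> // std::exp, used only by the illustrative SoftmaxReference sketch below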

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

namespace
{
std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
                                           tflite::TensorType tensorType,
                                           const std::vector<int32_t>& tensorShape,
                                           float beta)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // Buffer 0: TfLite's reserved empty buffer.
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // Buffer 1: backs the input tensor (no constant data).
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // Buffer 2: backs the output tensor (no constant data).

    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              1);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              2);

    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> softmaxOperator;
    flatbuffers::Offset<flatbuffers::String> modelDescription;
    flatbuffers::Offset<OperatorCode> operatorCode;

    switch (softmaxOperatorCode)
    {
        case tflite::BuiltinOperator_SOFTMAX:
            softmaxOperator =
                CreateOperator(flatBufferBuilder,
                               0,
                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                               BuiltinOptions_SoftmaxOptions,
                               CreateSoftmaxOptions(flatBufferBuilder, beta).Union());
            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Softmax Operator Model");
            operatorCode = CreateOperatorCode(flatBufferBuilder,
                                              tflite::BuiltinOperator_SOFTMAX);
            break;
        case tflite::BuiltinOperator_LOG_SOFTMAX:
            softmaxOperator =
                CreateOperator(flatBufferBuilder,
                               0,
                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                               BuiltinOptions_LogSoftmaxOptions,
                               CreateLogSoftmaxOptions(flatBufferBuilder).Union());
            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Log-Softmax Operator Model");
            operatorCode = CreateOperatorCode(flatBufferBuilder,
                                              tflite::BuiltinOperator_LOG_SOFTMAX);
            break;
        default:
            break;
    }
    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&softmaxOperator, 1));
    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
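
// Optional sanity check, shown as a sketch (it is not part of the original
// helpers and is not called by the tests below): verifies that the buffer
// returned by CreateSoftmaxTfLiteModel parses as a structurally valid TfLite
// flatbuffer, using the verifier generated in schema_generated.h.
bool VerifySoftmaxModelBuffer(const std::vector<char>& modelBuffer)
{
    flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t*>(modelBuffer.data()),
                                   modelBuffer.size());
    return tflite::VerifyModelBuffer(verifier);
}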
void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
                 tflite::TensorType tensorType,
                 std::vector<armnn::BackendId>& backends,
                 std::vector<int32_t>& shape,
                 std::vector<float>& inputValues,
                 std::vector<float>& expectedOutputValues,
                 float beta = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
                                                             tensorType,
                                                             shape,
                                                             beta);

    // Set up an interpreter using the TfLite runtime only.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
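
// Reference softmax over contiguous slices of length innerSize, matching how
// TfLite applies SOFTMAX over the innermost axis of a tensor. This helper is an
// illustrative sketch (it is not part of the original helpers and is not called
// by the tests): callers could use it to derive expectedOutputValues rather
// than hard-coding them. TfLite LOG_SOFTMAX has no beta parameter, so for that
// operator the expected values would be the element-wise log of this result
// with beta = 1. No max-subtraction is performed before exponentiating, which
// is acceptable for the small input magnitudes used in these tests.
std::vector<float> SoftmaxReference(const std::vector<float>& input,
                                    size_t innerSize,
                                    float beta)
{
    std::vector<float> output(input.size());
    for (size_t start = 0; start < input.size(); start += innerSize)
    {
        float sum = 0.0f;
        for (size_t i = 0; i < innerSize; ++i)
        {
            output[start + i] = std::exp(beta * input[start + i]);
            sum += output[start + i];
        }
        for (size_t i = 0; i < innerSize; ++i)
        {
            output[start + i] /= sum;
        }
    }
    return output;
}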
141
Keith Davis7c67fab2021-04-08 11:47:23 +0100142
/// Convenience function to run softmax and log-softmax test cases.
/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
/// \param backends Arm NN backends to target
/// \param beta multiplicative parameter of the softmax function (not used by TfLite LOG_SOFTMAX)
/// \param expectedOutput expected values, checked against the outputs of both runtimes
void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
                     std::vector<armnn::BackendId> backends,
                     float beta,
                     std::vector<float> expectedOutput)
{
    std::vector<float> input = {
        1.0, 2.5, 3.0, 4.5, 5.0,
        -1.0, -2.5, -3.0, -4.5, -5.0};
    std::vector<int32_t> shape = {2, 5};

    SoftmaxTest(operatorCode,
                tflite::TensorType_FLOAT32,
                backends,
                shape,
                input,
                expectedOutput,
                beta);
}
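
// A minimal usage sketch (hypothetical; the real test cases live in the
// corresponding *.cpp test files): it derives the expected output with the
// SoftmaxReference helper above, using the same fixed 2x5 input that
// SoftmaxTestCase feeds to the model, and targets the CpuRef backend.
//
// TEST_CASE ("Softmax_Standalone_Sketch")
// {
//     std::vector<float> input = { 1.0f, 2.5f, 3.0f, 4.5f, 5.0f,
//                                  -1.0f, -2.5f, -3.0f, -4.5f, -5.0f };
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX,
//                     backends,
//                     1.0f,
//                     SoftmaxReference(input, 5, 1.0f));
// }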
} // anonymous namespace