//
// Copyright © 2020, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
#include <tensorflow/lite/version.h>
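
// Helpers for testing TfLite activation operators through the Arm NN delegate:
// a model containing a single activation operator is built, run on both the
// plain TFLite runtime and the Arm NN delegate, and the two results are
// compared against the expected values.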
namespace
{
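
// Builds a FlatBuffer model with one subgraph containing a single activation
// operator: input (tensor 0) -> activation -> output (tensor 1). For
// LEAKY_RELU the alpha parameter is stored as the operator's builtin options.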
std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode,
                                              tflite::TensorType tensorType,
                                              const std::vector<int32_t>& tensorShape,
                                              float alpha = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // A single empty buffer, referenced by both tensors.
    std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);

    // Input (tensor 0) and output (tensor 1) share the same shape and type.
    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType);
    // Create the operator: tensor 0 in, tensor 1 out.
    const std::vector<int> operatorInputs{0};
    const std::vector<int> operatorOutputs{1};

    // Builtin options: only LEAKY_RELU carries a parameter (alpha).
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
    flatbuffers::Offset<void> operatorBuiltinOption = 0;
    if (activationOperatorCode == tflite::BuiltinOperator_LEAKY_RELU)
    {
        operatorBuiltinOptionsType = tflite::BuiltinOptions_LeakyReluOptions;
        operatorBuiltinOption = CreateLeakyReluOptions(flatBufferBuilder, alpha).Union();
    }
    flatbuffers::Offset<Operator> unaryOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOption);
    const std::vector<int> subgraphInputs{0};
    const std::vector<int> subgraphOutputs{1};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&unaryOperator, 1));
    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Activation Operator Model");

    // Write the operator code to both the deprecated int8 builtin code field
    // and its int32 replacement, so older schema parsers can still read it.
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                        activationOperatorCode,
                                                                        0,
                                                                        1,
                                                                        activationOperatorCode);
    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
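
// Runs a float32 activation model of shape { 4, 1, 4 } on the reference TFLite
// runtime and on the Arm NN delegate, then checks that both produce the
// expected output values and shape.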
void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
                    std::vector<float>& inputValues,
                    std::vector<float>& expectedOutputValues,
                    float alpha = 0,
                    const std::vector<armnn::BackendId>& backends = {})
{
    using namespace delegateTestInterpreter;
    std::vector<int32_t> inputShape { 4, 1, 4 };
    std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
                                                                ::tflite::TensorType_FLOAT32,
                                                                inputShape,
                                                                alpha);
    // Set up an interpreter using just the TFLite runtime as the reference.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends));
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    // Both runs must agree with each other and with the expected values;
    // an activation never changes the tensor shape.
    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, inputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
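
// Example usage, as a sketch only: a doctest TEST_CASE is assumed, and the
// test name and data below are illustrative rather than taken from an
// existing test file.
//
// TEST_CASE("Relu_Fp32_Test")
// {
//     // 16 values to match the { 4, 1, 4 } input shape used by ActivationTest.
//     std::vector<float> inputValues { -1.0f, -0.5f, 0.0f, 0.5f,
//                                       1.0f,  2.0f, -2.0f, 3.0f,
//                                      -3.0f,  4.0f, -4.0f, 5.0f,
//                                      -5.0f,  6.0f, -6.0f, 7.0f };
//     // RELU clamps negatives to zero: expected = max(0, x).
//     std::vector<float> expectedOutputValues { 0.0f, 0.0f, 0.0f, 0.5f,
//                                               1.0f, 2.0f, 0.0f, 3.0f,
//                                               0.0f, 4.0f, 0.0f, 5.0f,
//                                               0.0f, 6.0f, 0.0f, 7.0f };
//     ActivationTest(tflite::BuiltinOperator_RELU, inputValues, expectedOutputValues);
// }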
} // anonymous namespace