blob: a5e2faccc97aa105c6f934522a1036ed7f7dc939 [file] [log] [blame]
Narumol Prangnawarat8c7324d2019-05-31 16:42:11 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
#include "CommonTestUtils.hpp"

#include <armnn/INetwork.hpp>
#include <ResolveType.hpp>

#include <doctest/doctest.h>

#include <map>
#include <utility>
#include <vector>
14
Narumol Prangnawarat8c7324d2019-05-31 16:42:11 +010015namespace
16{
17
18template<typename T>
19armnn::INetworkPtr CreateDequantizeNetwork(const armnn::TensorInfo& inputInfo,
20 const armnn::TensorInfo& outputInfo)
21{
22 armnn::INetworkPtr net(armnn::INetwork::Create());
23
24 armnn::IConnectableLayer* inputLayer = net->AddInputLayer(0);
25 armnn::IConnectableLayer* dequantizeLayer = net->AddDequantizeLayer("Dequantize");
26 armnn::IConnectableLayer* outputLayer = net->AddOutputLayer(0, "output");
27 Connect(inputLayer, dequantizeLayer, inputInfo, 0, 0);
28 Connect(dequantizeLayer, outputLayer, outputInfo, 0, 0);
29
30 return net;
31}
32
33template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
34void DequantizeEndToEndLayerTestImpl(const std::vector<BackendId>& backends,
35 const armnn::TensorShape& tensorShape,
36 const std::vector<T>& input,
37 const std::vector<float>& expectedOutput,
38 float scale,
39 int32_t offset)
40{
41 armnn::TensorInfo inputInfo(tensorShape, ArmnnType);
42 armnn::TensorInfo outputInfo(tensorShape, armnn::DataType::Float32);
43
44 inputInfo.SetQuantizationScale(scale);
45 inputInfo.SetQuantizationOffset(offset);
46
47 // Builds up the structure of the network
48 armnn::INetworkPtr net = CreateDequantizeNetwork<T>(inputInfo, outputInfo);
49
Sadik Armagan1625efc2021-06-10 18:24:34 +010050 CHECK(net);
Narumol Prangnawarat8c7324d2019-05-31 16:42:11 +010051
52 std::map<int, std::vector<T>> inputTensorData = { { 0, input } };
53 std::map<int, std::vector<float>> expectedOutputData = { { 0, expectedOutput } };
54
55 EndToEndLayerTestImpl<ArmnnType, armnn::DataType::Float32>(
56 move(net), inputTensorData, expectedOutputData, backends);
57}
58
59template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
60void DequantizeEndToEndSimple(const std::vector<BackendId>& backends)
61{
62 const armnn::TensorShape tensorShape({ 1, 2, 2, 4 });
63 std::vector<T> inputData = std::vector<T>(
64 {
65 2, 4, 6, 8,
66 10, 12, 14, 16,
67 18, 20, 22, 24,
68 26, 28, 30, 32
69 });
70
71 std::vector<float> expectedOutputData = std::vector<float>(
72 {
73 1.0f, 2.0f, 3.0f, 4.0f,
74 5.0f, 6.0f, 7.0f, 8.0f,
75 9.0f, 10.0f, 11.0f, 12.0f,
76 13.0f, 14.0f, 15.0f, 16.0f
77 });
78 DequantizeEndToEndLayerTestImpl<ArmnnType>(backends, tensorShape, inputData, expectedOutputData, 0.5f, 0);
79};
80
81template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
82void DequantizeEndToEndOffset(const std::vector<BackendId>& backends)
83{
84 const armnn::TensorShape tensorShape({ 1, 2, 2, 4 });
85 std::vector<T> inputData = std::vector<T>(
86 {
87 3, 5, 7, 9,
88 11, 13, 15, 17,
89 19, 21, 23, 25,
90 27, 29, 31, 33
91 });
92
93 std::vector<float> expectedOutputData = std::vector<float>(
94 {
95 1.0f, 2.0f, 3.0f, 4.0f,
96 5.0f, 6.0f, 7.0f, 8.0f,
97 9.0f, 10.0f, 11.0f, 12.0f,
98 13.0f, 14.0f, 15.0f, 16.0f
99 });
100 DequantizeEndToEndLayerTestImpl<ArmnnType>(backends, tensorShape, inputData, expectedOutputData, 0.5f, 1);
101};
102
103} // anonymous namespace