//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "BaseIterator.hpp"
#include "FloatingPointConverter.hpp"
#include "TensorUtils.hpp"

#include <boost/assert.hpp>

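// NOTE: Factory helpers that build the Decoder<T> implementations declared in BaseIterator.hpp,
// selecting a decoder from the tensor's data type and quantization parameters.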
namespace armnn
{

namespace
{

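// NOTE: armnnUtils::GetPerAxisParams(info) yields the per-axis quantization scales (second)
// together with the axis-related parameter (first) that ScaledInt32PerAxisDecoder consumes.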
inline std::unique_ptr<Decoder<float>> MakeSigned32PerAxisDecoder(const TensorInfo& info, const void* data)
{
    auto params = armnnUtils::GetPerAxisParams(info);
    return std::make_unique<ScaledInt32PerAxisDecoder>(
        static_cast<const int32_t*>(data),
        params.second,
        params.first);
}

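// NOTE: Signed32 data is decoded according to its quantization info: per-axis scales or a set
// quantization dimension select a ScaledInt32PerAxisDecoder, a single non-zero scale selects a
// ScaledInt32Decoder, and no scale at all selects a plain Int32Decoder.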
inline std::unique_ptr<Decoder<float>> MakeSigned32Decoder(const TensorInfo& info, const void* data)
{
    if(info.HasMultipleQuantizationScales())
    {
        // NOTE: If we have multiple quantization scales, we create a ScaledInt32PerAxisDecoder.
        // This will be used to decode per-axis quantized convolution biases.
        return MakeSigned32PerAxisDecoder(info, data);
    }
    else
    {
        if (info.GetQuantizationDim().has_value())
        {
            // NOTE: Even though we only have a single quantization scale, if the quantization
            // dimension is set, the tensor has per-axis quantization and we need to create a
            // ScaledInt32PerAxisDecoder.
            return MakeSigned32PerAxisDecoder(info, data);
        }

        const float scale = info.GetQuantizationScale();
        if (scale == 0.f)
        {
            // NOTE: If no quantization scale is set, we create an Int32Decoder, which simply
            // casts the int value to float. This will be used for any INT32 data other than
            // convolution biases.
            return std::make_unique<Int32Decoder>(static_cast<const int32_t*>(data));
        }

        // NOTE: If we only have a single (non-zero) quantization scale and no quantization
        // dimension is specified, we need to create a ScaledInt32Decoder. This will be used
        // to decode per-tensor quantized convolution biases.
        return std::make_unique<ScaledInt32Decoder>(static_cast<const int32_t*>(data), scale);
    }
}

} // anonymous namespace

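// NOTE: Generic factory for Decoder<T>: the returned decoder reads raw tensor data and converts
// each element to T, dequantizing where the TensorInfo carries quantization parameters.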
template<typename T>
inline std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data = nullptr);

template<>
inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const void* data)
{
    switch(info.GetDataType())
    {
        case armnn::DataType::QuantizedSymm8PerAxis:
        {
            std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
            return std::make_unique<QSymm8PerAxisDecoder>(
                static_cast<const int8_t*>(data),
                params.second,
                params.first);
        }
        case DataType::QuantisedAsymm8:
        {
            return std::make_unique<QASymm8Decoder>(
                static_cast<const uint8_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case DataType::QuantisedSymm16:
        {
            return std::make_unique<QSymm16Decoder>(
                static_cast<const int16_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case DataType::Float16:
        {
            return std::make_unique<Float16Decoder>(static_cast<const Half*>(data));
        }
        case DataType::Float32:
        {
            return std::make_unique<Float32Decoder>(static_cast<const float*>(data));
        }
        case DataType::Signed32:
        {
            return MakeSigned32Decoder(info, data);
        }
        default:
        {
            BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
            break;
        }
    }
    return nullptr;
}
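
// Usage sketch (illustrative only; the TensorInfo and buffer below are hypothetical):
//
//     TensorInfo info({ 1, 4 }, DataType::QuantisedAsymm8, 0.5f, 10); // scale 0.5, offset 10
//     const uint8_t quantized[] = { 10, 12, 14, 16 };
//     std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(info, quantized);
//     float first = decoder->Get(); // (10 - 10) * 0.5f == 0.0f
//
// Subsequent elements are reached through the iterator interface declared in BaseIterator.hpp.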

} //namespace armnn