blob: 26f67f95b259de61e916c93b7bb86e30e2028221 [file] [log] [blame]
Matteo Martincigha8d572d2019-02-07 17:51:09 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Nattapat Chaimanowong7ac07f32019-03-20 11:51:14 +00008#include "NetworkQuantizationScheme.hpp"
9
Matteo Martincigha8d572d2019-02-07 17:51:09 +000010#include <armnn/Tensor.hpp>
11#include <armnn/TypesUtils.hpp>
12#include <armnn/ILayerVisitor.hpp>
13
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>
16
17#include <boost/assert.hpp>
18
19namespace armnn
20{
21
Matteo Martincigha8d572d2019-02-07 17:51:09 +000022template<typename srcType>
Nattapat Chaimanowong7ac07f32019-03-20 11:51:14 +000023void QuantizeConstant(const srcType* src, uint8_t* dst, size_t numElements, float& scale, int& offset)
Matteo Martincigha8d572d2019-02-07 17:51:09 +000024{
25 BOOST_ASSERT(src);
26 BOOST_ASSERT(dst);
27
28 float min = std::numeric_limits<srcType>::max();
29 float max = std::numeric_limits<srcType>::lowest();
30 for (size_t i = 0; i < numElements; ++i)
31 {
32 min = std::min(min, src[i]);
33 max = std::max(max, src[i]);
34 }
35
Nattapat Chaimanowong7ac07f32019-03-20 11:51:14 +000036 QAsymm8QuantizationScheme quantizationScheme;
37 OffsetScalePair qParams = quantizationScheme.ComputeScheme(min, max);
Ferran Balaguerb2845652019-02-27 09:42:06 +000038 scale = qParams.first;
39 offset = qParams.second;
Nattapat Chaimanowong7ac07f32019-03-20 11:51:14 +000040
Matteo Martincigha8d572d2019-02-07 17:51:09 +000041 for (size_t i = 0; i < numElements; ++i)
42 {
43 dst[i] = armnn::Quantize<uint8_t>(src[i], scale, offset);
44 }
45}
46
47ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>& backing);
48
49template <typename LayerContainer>
50void VisitLayers(const LayerContainer& layerContainer, ILayerVisitor& visitor)
51{
52 for (auto layer : layerContainer)
53 {
54 layer->Accept(visitor);
55 }
56}
57
58} // namespace armnn