//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/ILayerVisitor.hpp>

#include <algorithm>
#include <limits>
#include <utility>
#include <vector>

#include <boost/assert.hpp>
namespace armnn
{

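// Computes the asymmetric quantization parameters (scale, zero-point offset) that map the
// floating-point range [min, max] onto a numBits-bit unsigned integer range.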
std::pair<float, int> ComputeQAsymmParams(int numBits, double min, double max);

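// Quantizes numElements values from src into dst as unsigned 8-bit data. The quantization
// scale and offset are derived from the minimum and maximum of the source values and are
// returned through the scale and offset output parameters.
//
// A minimal usage sketch (the buffers below are illustrative, not part of this header):
//
//     std::vector<float>   values = { -1.0f, 0.0f, 1.0f };
//     std::vector<uint8_t> quantized(values.size());
//     float scale  = 0.0f;
//     int   offset = 0;
//     Quantize(values.data(), quantized.data(), values.size(), scale, offset);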
template<typename srcType>
void Quantize(const srcType* src, uint8_t* dst, size_t numElements, float& scale, int& offset)
{
    BOOST_ASSERT(src);
    BOOST_ASSERT(dst);

    // Scan the source data to find its minimum and maximum values.
    float min = std::numeric_limits<srcType>::max();
    float max = std::numeric_limits<srcType>::lowest();
    for (size_t i = 0; i < numElements; ++i)
    {
        min = std::min(min, src[i]);
        max = std::max(max, src[i]);
    }

    // Derive the 8-bit quantization parameters from the observed range, then quantize each value.
    auto qParams = ComputeQAsymmParams(8, min, max);
    scale = qParams.first;
    offset = qParams.second;
    for (size_t i = 0; i < numElements; ++i)
    {
        dst[i] = armnn::Quantize<uint8_t>(src[i], scale, offset);
    }
}

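// Creates a quantized copy of the given tensor's data in the supplied backing store and
// returns a ConstTensor referencing that quantized data (behaviour inferred from the
// signature and the Quantize helper above; see the corresponding .cpp for exact semantics).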
ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>& backing);

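// Applies the given visitor to every layer in the container by calling Accept on each layer.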
template <typename LayerContainer>
void VisitLayers(const LayerContainer& layerContainer, ILayerVisitor& visitor)
{
    for (auto layer : layerContainer)
    {
        layer->Accept(visitor);
    }
}

} // namespace armnn