//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "../InferenceTestImage.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnnUtils/Permute.hpp>

#include <algorithm>
#include <fstream>
#include <iterator>
#include <string>

// Parameters used in normalizing images
struct NormalizationParameters
{
    float scale{ 1.0 };
    std::array<float, 3> mean{ { 0.0, 0.0, 0.0 } };
    std::array<float, 3> stddev{ { 1.0, 1.0, 1.0 } };
};

enum class SupportedFrontend
{
    TFLite = 0,
};

/** Get normalization parameters.
 * Note that different flavours of models and different model data types have different normalization methods.
 * This tool currently only supports TFLite models.
 *
 * @param[in] modelFormat One of the supported frontends
 * @param[in] outputType  Output type of the image tensor, also the type of the intended model
 */
NormalizationParameters GetNormalizationParameters(const SupportedFrontend& modelFormat,
                                                   const armnn::DataType& outputType)
{
    NormalizationParameters normParams;
    // Explicitly set default parameters
    normParams.scale  = 1.0;
    normParams.mean   = { 0.0, 0.0, 0.0 };
    normParams.stddev = { 1.0, 1.0, 1.0 };
    switch (modelFormat)
    {
        case SupportedFrontend::TFLite:
        default:
            switch (outputType)
            {
                case armnn::DataType::Float32:
                    normParams.scale = 127.5;
                    normParams.mean  = { 1.0, 1.0, 1.0 };
                    break;
                case armnn::DataType::Signed32:
                    normParams.mean = { 128.0, 128.0, 128.0 };
                    break;
                case armnn::DataType::QAsymmU8:
                    break;
                case armnn::DataType::QAsymmS8:
                    normParams.mean = { 128.0, 128.0, 128.0 };
                    break;
                default:
                    break;
            }
            break;
    }
    return normParams;
}
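
// Illustrative usage sketch (not part of the tool itself): query the normalization parameters
// for a float32 TFLite model. The function name below is a hypothetical addition for clarity.
inline NormalizationParameters GetExampleTfLiteFloat32NormParams()
{
    // Per the switch above, this yields scale = 127.5 and mean = { 1.0, 1.0, 1.0 },
    // with stddev left at its default of { 1.0, 1.0, 1.0 }.
    return GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::Float32);
}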

/** Prepare raw image tensor data by loading the image from imagePath and preprocessing it.
 *
 * @param[in] imagePath    Path to the image file
 * @param[in] newWidth     The new width of the output image tensor
 * @param[in] newHeight    The new height of the output image tensor
 * @param[in] normParams   Normalization parameters for the normalization of the image
 * @param[in] batchSize    Batch size
 * @param[in] outputLayout Data layout of the output image tensor
 */
template <typename ElemType>
std::vector<ElemType> PrepareImageTensor(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize = 1,
                                         const armnn::DataLayout& outputLayout = armnn::DataLayout::NHWC);

// Prepare float32 image tensor
template <>
std::vector<float> PrepareImageTensor<float>(const std::string& imagePath,
                                             unsigned int newWidth,
                                             unsigned int newHeight,
                                             const NormalizationParameters& normParams,
                                             unsigned int batchSize,
                                             const armnn::DataLayout& outputLayout)
{
    // Generate image tensor
    std::vector<float> imageData;
    InferenceTestImage testImage(imagePath.c_str());
    if (newWidth == 0)
    {
        newWidth = testImage.GetWidth();
    }
    if (newHeight == 0)
    {
        newHeight = testImage.GetHeight();
    }
    // Resize the image to the new width and height, or keep the original dimensions if the new width and height
    // are specified as 0. Then centre/normalise the image.
    imageData = testImage.Resize(newWidth, newHeight, CHECK_LOCATION(),
                                 InferenceTestImage::ResizingMethods::BilinearAndNormalized, normParams.mean,
                                 normParams.stddev, normParams.scale);
    if (outputLayout == armnn::DataLayout::NCHW)
    {
        // Convert to NCHW format
        const armnn::PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
        armnn::TensorShape dstShape({ batchSize, 3, newHeight, newWidth });
        std::vector<float> tempImage(imageData.size());
        armnnUtils::Permute(dstShape, NHWCToArmNN, imageData.data(), tempImage.data(), sizeof(float));
        imageData.swap(tempImage);
    }
    return imageData;
}
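
// Illustrative usage sketch (not part of the tool): prepare a float32 image tensor in NCHW layout.
// The 224x224 target size and the function name are hypothetical placeholders.
inline std::vector<float> PrepareExampleFloat32Nchw(const std::string& imagePath)
{
    const NormalizationParameters normParams =
        GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::Float32);
    // Resize to 224x224, normalize with the parameters above, then permute NHWC -> NCHW.
    return PrepareImageTensor<float>(imagePath, 224, 224, normParams, 1, armnn::DataLayout::NCHW);
}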

// Prepare int32 image tensor
template <>
std::vector<int> PrepareImageTensor<int>(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize,
                                         const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    // Convert to int32 image tensor with static cast
    std::vector<int> imageDataInt;
    imageDataInt.reserve(imageDataFloat.size());
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataInt),
                   [](float val) { return static_cast<int>(val); });
    return imageDataInt;
}

// Prepare qasymmu8 image tensor
template <>
std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
                                                 unsigned int newWidth,
                                                 unsigned int newHeight,
                                                 const NormalizationParameters& normParams,
                                                 unsigned int batchSize,
                                                 const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    std::vector<uint8_t> imageDataQasymm8;
    imageDataQasymm8.reserve(imageDataFloat.size());
    // Convert to uint8 image tensor with static cast
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymm8),
                   [](float val) { return static_cast<uint8_t>(val); });
    return imageDataQasymm8;
}

// Prepare qasymms8 image tensor
template <>
std::vector<int8_t> PrepareImageTensor<int8_t>(const std::string& imagePath,
                                               unsigned int newWidth,
                                               unsigned int newHeight,
                                               const NormalizationParameters& normParams,
                                               unsigned int batchSize,
                                               const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    std::vector<int8_t> imageDataQasymms8;
    imageDataQasymms8.reserve(imageDataFloat.size());
    // Convert to int8 image tensor with static cast
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymms8),
                   [](float val) { return static_cast<int8_t>(val); });
    return imageDataQasymms8;
}

/** Write image tensor to ofstream
 *
 * @param[in] imageData       Image tensor data
 * @param[in] imageTensorFile Output filestream (ofstream) to which the image tensor data is written
 */
template <typename ElemType>
void WriteImageTensorImpl(const std::vector<ElemType>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<ElemType>(imageTensorFile, " "));
}

// For uint8_t image tensor, cast it to int before writing it to prevent writing data as characters instead of
// numerical values
template <>
void WriteImageTensorImpl<uint8_t>(const std::vector<uint8_t>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
}

// For int8_t image tensor, cast it to int before writing it to prevent writing data as characters instead of
// numerical values
template <>
void WriteImageTensorImpl<int8_t>(const std::vector<int8_t>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
}
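
// Illustrative end-to-end sketch (not part of the tool): prepare a quantised uint8 tensor and write
// it to a text file as space-separated values. The paths, size, and function name are placeholders.
inline void WriteExampleQAsymmU8Tensor(const std::string& imagePath, const std::string& outputPath)
{
    const NormalizationParameters normParams =
        GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::QAsymmU8);
    const std::vector<uint8_t> imageData =
        PrepareImageTensor<uint8_t>(imagePath, 224, 224, normParams, 1, armnn::DataLayout::NHWC);
    std::ofstream imageTensorFile(outputPath);
    WriteImageTensorImpl<uint8_t>(imageData, imageTensorFile);
}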