//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "../InferenceTestImage.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnnUtils/TContainer.hpp>
#include <armnnUtils/Permute.hpp>

#include <algorithm>
#include <array>
#include <cstdint>
#include <fstream>
#include <iterator>
#include <string>
#include <vector>

// Parameters used in normalizing images
struct NormalizationParameters
{
    float scale{ 1.0 };
    std::array<float, 3> mean{ { 0.0, 0.0, 0.0 } };
    std::array<float, 3> stddev{ { 1.0, 1.0, 1.0 } };
};

enum class SupportedFrontend
{
    TFLite = 0,
};

/** Get normalization parameters.
 * Note that different flavours of models and different model data types have different normalization methods.
 * This tool currently only supports TFLite models.
 *
 * @param[in] modelFormat One of the supported frontends
 * @param[in] outputType  Output type of the image tensor, also the type of the intended model
 */
NormalizationParameters GetNormalizationParameters(const SupportedFrontend& modelFormat,
                                                   const armnn::DataType& outputType)
{
    NormalizationParameters normParams;
    // Explicitly set default parameters
    normParams.scale  = 1.0;
    normParams.mean   = { 0.0, 0.0, 0.0 };
    normParams.stddev = { 1.0, 1.0, 1.0 };
    switch (modelFormat)
    {
        case SupportedFrontend::TFLite:
        default:
            switch (outputType)
            {
                case armnn::DataType::Float32:
                    normParams.scale = 127.5;
                    normParams.mean  = { 1.0, 1.0, 1.0 };
                    break;
                case armnn::DataType::Signed32:
                    normParams.mean = { 128.0, 128.0, 128.0 };
                    break;
                case armnn::DataType::QAsymmU8:
                    break;
                case armnn::DataType::QAsymmS8:
                    normParams.mean = { 128.0, 128.0, 128.0 };
                    break;
                default:
                    break;
            }
            break;
    }
    return normParams;
}

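// Illustrative usage sketch (not part of the original tool): obtain the normalization parameters for a
// float32 TFLite model. For that combination the switch above selects scale = 127.5 and
// mean = { 1.0, 1.0, 1.0 }; the frontend/data type choice here is an example only.
inline NormalizationParameters GetExampleTfLiteFloat32NormParams()
{
    return GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::Float32);
}
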
/** Prepare raw image tensor data by loading the image from imagePath and preprocessing it.
 *
 * @param[in] imagePath    Path to the image file
 * @param[in] newWidth     The new width of the output image tensor
 * @param[in] newHeight    The new height of the output image tensor
 * @param[in] normParams   Normalization parameters for the normalization of the image
 * @param[in] batchSize    Batch size
 * @param[in] outputLayout Data layout of the output image tensor
 */
template <typename ElemType>
std::vector<ElemType> PrepareImageTensor(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize = 1,
                                         const armnn::DataLayout& outputLayout = armnn::DataLayout::NHWC);

// Prepare float32 image tensor
template <>
std::vector<float> PrepareImageTensor<float>(const std::string& imagePath,
                                             unsigned int newWidth,
                                             unsigned int newHeight,
                                             const NormalizationParameters& normParams,
                                             unsigned int batchSize,
                                             const armnn::DataLayout& outputLayout)
{
    // Generate image tensor
    std::vector<float> imageData;
    InferenceTestImage testImage(imagePath.c_str());
    if (newWidth == 0)
    {
        newWidth = testImage.GetWidth();
    }
    if (newHeight == 0)
    {
        newHeight = testImage.GetHeight();
    }
    // Resize the image to the new width and height, or keep it at its original dimensions if the new width and
    // height are specified as 0, then centre/normalise the image.
    imageData = testImage.Resize(newWidth, newHeight, CHECK_LOCATION(),
                                 InferenceTestImage::ResizingMethods::BilinearAndNormalized, normParams.mean,
                                 normParams.stddev, normParams.scale);
    if (outputLayout == armnn::DataLayout::NCHW)
    {
        // Convert to NCHW format
        const armnn::PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
        armnn::TensorShape dstShape({ batchSize, 3, newHeight, newWidth });
        std::vector<float> tempImage(imageData.size());
        armnnUtils::Permute(dstShape, NHWCToArmNN, imageData.data(), tempImage.data(), sizeof(float));
        imageData.swap(tempImage);
    }
    return imageData;
}

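// Illustrative usage sketch (not part of the original tool): prepare a float32, NCHW image tensor from a
// file on disk. The 224x224 target size is an example value only; passing 0 keeps the image's original
// dimensions, as handled above.
inline std::vector<float> PrepareExampleFloat32Tensor(const std::string& imagePath)
{
    const NormalizationParameters normParams =
        GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::Float32);
    return PrepareImageTensor<float>(imagePath, 224, 224, normParams, 1, armnn::DataLayout::NCHW);
}
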
// Prepare int32 image tensor
template <>
std::vector<int> PrepareImageTensor<int>(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize,
                                         const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    // Convert to int32 image tensor with static cast
    std::vector<int> imageDataInt;
    imageDataInt.reserve(imageDataFloat.size());
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataInt),
                   [](float val) { return static_cast<int>(val); });
    return imageDataInt;
}

// Prepare qasymmu8 image tensor
template <>
std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
                                                 unsigned int newWidth,
                                                 unsigned int newHeight,
                                                 const NormalizationParameters& normParams,
                                                 unsigned int batchSize,
                                                 const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    std::vector<uint8_t> imageDataQasymm8;
    imageDataQasymm8.reserve(imageDataFloat.size());
    // Convert to uint8 image tensor with static cast
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymm8),
                   [](float val) { return static_cast<uint8_t>(val); });
    return imageDataQasymm8;
}

// Prepare qasymms8 image tensor
template <>
std::vector<int8_t> PrepareImageTensor<int8_t>(const std::string& imagePath,
                                               unsigned int newWidth,
                                               unsigned int newHeight,
                                               const NormalizationParameters& normParams,
                                               unsigned int batchSize,
                                               const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    std::vector<int8_t> imageDataQasymms8;
    imageDataQasymms8.reserve(imageDataFloat.size());
    // Convert to int8 image tensor with static cast
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymms8),
                   [](float val) { return static_cast<int8_t>(val); });
    return imageDataQasymms8;
}

/** Write image tensor to ofstream
 *
 * @param[in] imageData       Image tensor data
 * @param[in] imageTensorFile Output filestream (ofstream) to which the image tensor data is written
 */
template <typename ElemType>
void WriteImageTensorImpl(const std::vector<ElemType>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<ElemType>(imageTensorFile, " "));
}

// For uint8_t image tensors, cast the data to int before writing to prevent the values being written as
// characters instead of numbers
template <>
void WriteImageTensorImpl<uint8_t>(const std::vector<uint8_t>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
}

// For int8_t image tensors, cast the data to int before writing to prevent the values being written as
// characters instead of numbers
template <>
void WriteImageTensorImpl<int8_t>(const std::vector<int8_t>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
}
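
// Illustrative end-to-end sketch (not part of the original tool): load an image, convert it to a QAsymmU8
// tensor and write the values to a text file, combining the helpers above. The 224x224 size and the file
// paths supplied by the caller are example values only.
inline void WriteExampleQAsymmU8Tensor(const std::string& imagePath, const std::string& outputPath)
{
    const NormalizationParameters normParams =
        GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::QAsymmU8);
    const std::vector<uint8_t> imageData =
        PrepareImageTensor<uint8_t>(imagePath, 224, 224, normParams, 1, armnn::DataLayout::NHWC);
    std::ofstream imageTensorFile(outputPath, std::ofstream::trunc);
    WriteImageTensorImpl<uint8_t>(imageData, imageTensorFile);
}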