blob: f2ee470a7a088bfcab15d66126ecbf3bd5fb7450 [file] [log] [blame]
SiCong Li39f46392019-06-21 12:00:04 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "../InferenceTestImage.hpp"
Matteo Martincighe011d202019-11-28 11:35:47 +00007
SiCong Li39f46392019-06-21 12:00:04 +01008#include <armnn/TypesUtils.hpp>
9
Matteo Martincighe011d202019-11-28 11:35:47 +000010#include <armnnUtils/Permute.hpp>
11
SiCong Li39f46392019-06-21 12:00:04 +010012#include <algorithm>
13#include <fstream>
14#include <iterator>
15#include <string>
16
/** Per-channel parameters used when normalizing an image during pre-processing.
 *
 *  Defaults are the identity transform: scale 1, zero mean, unit stddev.
 *  NOTE(review): the exact normalization formula is applied inside
 *  InferenceTestImage::Resize — confirm the semantics there.
 */
struct NormalizationParameters
{
    float scale{ 1.0f };                                 // Divisor applied to raw pixel values
    std::array<float, 3> mean{ { 0.0f, 0.0f, 0.0f } };   // Per-channel mean (RGB)
    std::array<float, 3> stddev{ { 1.0f, 1.0f, 1.0f } }; // Per-channel standard deviation (RGB)
};
24
/// Model frontends (file formats) this tool can prepare image tensors for.
enum class SupportedFrontend
{
    TensorFlow = 0,
    TFLite     = 1,
};
30
SiCong Li588973f2019-07-18 16:33:42 +010031/** Get normalization parameters.
32 * Note that different flavours of models and different model data types have different normalization methods.
Nikhil Raj6dd178f2021-04-02 22:04:39 +010033 * This tool currently only supports TF and TFLite models
SiCong Li588973f2019-07-18 16:33:42 +010034 *
35 * @param[in] modelFormat One of the supported frontends
36 * @param[in] outputType Output type of the image tensor, also the type of the intended model
37 */
SiCong Li39f46392019-06-21 12:00:04 +010038NormalizationParameters GetNormalizationParameters(const SupportedFrontend& modelFormat,
39 const armnn::DataType& outputType)
40{
41 NormalizationParameters normParams;
42 // Explicitly set default parameters
43 normParams.scale = 1.0;
44 normParams.mean = { 0.0, 0.0, 0.0 };
45 normParams.stddev = { 1.0, 1.0, 1.0 };
46 switch (modelFormat)
47 {
SiCong Li39f46392019-06-21 12:00:04 +010048 case SupportedFrontend::TensorFlow:
49 case SupportedFrontend::TFLite:
50 default:
51 switch (outputType)
52 {
53 case armnn::DataType::Float32:
54 normParams.scale = 127.5;
55 normParams.mean = { 1.0, 1.0, 1.0 };
56 break;
57 case armnn::DataType::Signed32:
58 normParams.mean = { 128.0, 128.0, 128.0 };
59 break;
Derek Lambertif90c56d2020-01-10 17:14:08 +000060 case armnn::DataType::QAsymmU8:
SiCong Li39f46392019-06-21 12:00:04 +010061 default:
62 break;
63 }
64 break;
65 }
66 return normParams;
67}
68
SiCong Li588973f2019-07-18 16:33:42 +010069/** Prepare raw image tensor data by loading the image from imagePath and preprocessing it.
70 *
71 * @param[in] imagePath Path to the image file
72 * @param[in] newWidth The new width of the output image tensor
73 * @param[in] newHeight The new height of the output image tensor
74 * @param[in] normParams Normalization parameters for the normalization of the image
75 * @param[in] batchSize Batch size
76 * @param[in] outputLayout Data layout of the output image tensor
77 */
SiCong Li39f46392019-06-21 12:00:04 +010078template <typename ElemType>
79std::vector<ElemType> PrepareImageTensor(const std::string& imagePath,
80 unsigned int newWidth,
81 unsigned int newHeight,
82 const NormalizationParameters& normParams,
83 unsigned int batchSize = 1,
84 const armnn::DataLayout& outputLayout = armnn::DataLayout::NHWC);
85
86// Prepare float32 image tensor
87template <>
88std::vector<float> PrepareImageTensor<float>(const std::string& imagePath,
89 unsigned int newWidth,
90 unsigned int newHeight,
91 const NormalizationParameters& normParams,
92 unsigned int batchSize,
93 const armnn::DataLayout& outputLayout)
94{
95 // Generate image tensor
96 std::vector<float> imageData;
97 InferenceTestImage testImage(imagePath.c_str());
98 if (newWidth == 0)
99 {
100 newWidth = testImage.GetWidth();
101 }
102 if (newHeight == 0)
103 {
104 newHeight = testImage.GetHeight();
105 }
106 // Resize the image to new width and height or keep at original dimensions if the new width and height are specified
107 // as 0 Centre/Normalise the image.
108 imageData = testImage.Resize(newWidth, newHeight, CHECK_LOCATION(),
109 InferenceTestImage::ResizingMethods::BilinearAndNormalized, normParams.mean,
110 normParams.stddev, normParams.scale);
111 if (outputLayout == armnn::DataLayout::NCHW)
112 {
113 // Convert to NCHW format
114 const armnn::PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
115 armnn::TensorShape dstShape({ batchSize, 3, newHeight, newWidth });
116 std::vector<float> tempImage(imageData.size());
117 armnnUtils::Permute(dstShape, NHWCToArmNN, imageData.data(), tempImage.data(), sizeof(float));
118 imageData.swap(tempImage);
119 }
120 return imageData;
121}
122
123// Prepare int32 image tensor
124template <>
125std::vector<int> PrepareImageTensor<int>(const std::string& imagePath,
126 unsigned int newWidth,
127 unsigned int newHeight,
128 const NormalizationParameters& normParams,
129 unsigned int batchSize,
130 const armnn::DataLayout& outputLayout)
131{
132 // Get float32 image tensor
133 std::vector<float> imageDataFloat =
134 PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
135 // Convert to int32 image tensor with static cast
136 std::vector<int> imageDataInt;
137 imageDataInt.reserve(imageDataFloat.size());
138 std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataInt),
139 [](float val) { return static_cast<int>(val); });
140 return imageDataInt;
141}
142
143// Prepare qasymm8 image tensor
144template <>
145std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
146 unsigned int newWidth,
147 unsigned int newHeight,
148 const NormalizationParameters& normParams,
149 unsigned int batchSize,
150 const armnn::DataLayout& outputLayout)
151{
152 // Get float32 image tensor
153 std::vector<float> imageDataFloat =
154 PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
155 std::vector<uint8_t> imageDataQasymm8;
156 imageDataQasymm8.reserve(imageDataFloat.size());
157 // Convert to uint8 image tensor with static cast
158 std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymm8),
159 [](float val) { return static_cast<uint8_t>(val); });
160 return imageDataQasymm8;
161}
162
/** Write image tensor data to an output file stream as space-separated values.
 *
 * @param[in] imageData       Image tensor data
 * @param[in] imageTensorFile Output filestream (ofstream) to which the image tensor data is written
 */
template <typename ElemType>
void WriteImageTensorImpl(const std::vector<ElemType>& imageData, std::ofstream& imageTensorFile)
{
    // Each element is followed by a single space, including the last one.
    for (const ElemType& element : imageData)
    {
        imageTensorFile << element << " ";
    }
}
173
174// For uint8_t image tensor, cast it to int before writing it to prevent writing data as characters instead of
175// numerical values
176template <>
177void WriteImageTensorImpl<uint8_t>(const std::vector<uint8_t>& imageData, std::ofstream& imageTensorFile)
178{
179 std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
Matteo Martincighe011d202019-11-28 11:35:47 +0000180}