//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "../InferenceTestImage.hpp"
#include "Permute.hpp"
#include <armnn/TypesUtils.hpp>

#include <algorithm>
#include <array>
#include <fstream>
#include <iterator>
#include <string>

// Parameters used in normalizing images
struct NormalizationParameters
{
    float scale{ 1.0 };
    std::array<float, 3> mean{ { 0.0, 0.0, 0.0 } };
    std::array<float, 3> stddev{ { 1.0, 1.0, 1.0 } };
};

enum class SupportedFrontend
{
    Caffe      = 0,
    TensorFlow = 1,
    TFLite     = 2,
};

/** Get normalization parameters.
 * Note that different flavours of models and different model data types have different normalization methods.
 * This tool currently only supports Caffe, TF and TFLite models.
 *
 * @param[in] modelFormat   One of the supported frontends
 * @param[in] outputType    Output type of the image tensor, also the type of the intended model
 */
NormalizationParameters GetNormalizationParameters(const SupportedFrontend& modelFormat,
                                                    const armnn::DataType& outputType)
{
    NormalizationParameters normParams;
    // Explicitly set default parameters
    normParams.scale  = 1.0;
    normParams.mean   = { 0.0, 0.0, 0.0 };
    normParams.stddev = { 1.0, 1.0, 1.0 };
    switch (modelFormat)
    {
        case SupportedFrontend::Caffe:
            break;
        case SupportedFrontend::TensorFlow:
        case SupportedFrontend::TFLite:
        default:
            switch (outputType)
            {
                case armnn::DataType::Float32:
                    normParams.scale = 127.5;
                    normParams.mean  = { 1.0, 1.0, 1.0 };
                    break;
                case armnn::DataType::Signed32:
                    normParams.mean = { 128.0, 128.0, 128.0 };
                    break;
                case armnn::DataType::QuantisedAsymm8:
                default:
                    break;
            }
            break;
    }
    return normParams;
}

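// Minimal illustrative sketch (not part of the tool itself): querying the parameters for a float32 TFLite
// model. Following the switch above, this returns scale = 127.5 and mean = { 1.0, 1.0, 1.0 } (so 8-bit pixel
// values are intended to land roughly in [-1, 1] after normalization), whereas a Caffe model keeps the defaults.
inline NormalizationParameters GetTfLiteFloat32NormalizationParametersExample()
{
    return GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::Float32);
}
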
/** Prepare raw image tensor data by loading the image from imagePath and preprocessing it.
 *
 * @param[in] imagePath     Path to the image file
 * @param[in] newWidth      The new width of the output image tensor
 * @param[in] newHeight     The new height of the output image tensor
 * @param[in] normParams    Normalization parameters for the normalization of the image
 * @param[in] batchSize     Batch size
 * @param[in] outputLayout  Data layout of the output image tensor
 */
template <typename ElemType>
std::vector<ElemType> PrepareImageTensor(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize = 1,
                                         const armnn::DataLayout& outputLayout = armnn::DataLayout::NHWC);

// Prepare float32 image tensor
template <>
std::vector<float> PrepareImageTensor<float>(const std::string& imagePath,
                                             unsigned int newWidth,
                                             unsigned int newHeight,
                                             const NormalizationParameters& normParams,
                                             unsigned int batchSize,
                                             const armnn::DataLayout& outputLayout)
{
    // Generate image tensor
    std::vector<float> imageData;
    InferenceTestImage testImage(imagePath.c_str());
    if (newWidth == 0)
    {
        newWidth = testImage.GetWidth();
    }
    if (newHeight == 0)
    {
        newHeight = testImage.GetHeight();
    }
    // Resize the image to the new width and height, or keep it at its original dimensions if the new width and
    // height are specified as 0. Centre/Normalise the image.
    imageData = testImage.Resize(newWidth, newHeight, CHECK_LOCATION(),
                                 InferenceTestImage::ResizingMethods::BilinearAndNormalized, normParams.mean,
                                 normParams.stddev, normParams.scale);
    if (outputLayout == armnn::DataLayout::NCHW)
    {
        // Convert to NCHW format
        const armnn::PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
        armnn::TensorShape dstShape({ batchSize, 3, newHeight, newWidth });
        std::vector<float> tempImage(imageData.size());
        armnnUtils::Permute(dstShape, NHWCToArmNN, imageData.data(), tempImage.data(), sizeof(float));
        imageData.swap(tempImage);
    }
    return imageData;
}

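// Illustrative sketch of the intended call sequence (the file name and the 224x224 target size are placeholder
// values, not something the tool mandates): fetch the model's normalization parameters, then load, resize,
// normalize and lay out the image in one call.
inline std::vector<float> PrepareExampleFloat32Tensor()
{
    const NormalizationParameters normParams =
        GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::Float32);
    // Yields a 1 x 3 x 224 x 224 tensor; passing armnn::DataLayout::NHWC (the default) would skip the permute.
    return PrepareImageTensor<float>("example.jpg", 224, 224, normParams, 1, armnn::DataLayout::NCHW);
}
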
// Prepare int32 image tensor
template <>
std::vector<int> PrepareImageTensor<int>(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize,
                                         const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    // Convert to int32 image tensor with static cast
    std::vector<int> imageDataInt;
    imageDataInt.reserve(imageDataFloat.size());
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataInt),
                   [](float val) { return static_cast<int>(val); });
    return imageDataInt;
}

// Prepare qasymm8 image tensor
template <>
std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
                                                 unsigned int newWidth,
                                                 unsigned int newHeight,
                                                 const NormalizationParameters& normParams,
                                                 unsigned int batchSize,
                                                 const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    std::vector<uint8_t> imageDataQasymm8;
    imageDataQasymm8.reserve(imageDataFloat.size());
    // Convert to uint8 image tensor with static cast
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymm8),
                   [](float val) { return static_cast<uint8_t>(val); });
    return imageDataQasymm8;
}

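// Sketch of the quantized path (illustrative only; file name and size are placeholders): for a QuantisedAsymm8
// model, GetNormalizationParameters above keeps the defaults (scale = 1, mean = 0, stddev = 1), so no extra
// scaling is requested before the values are cast to uint8_t.
inline std::vector<uint8_t> PrepareExampleQasymm8Tensor()
{
    const NormalizationParameters normParams =
        GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::QuantisedAsymm8);
    return PrepareImageTensor<uint8_t>("example.jpg", 224, 224, normParams, 1, armnn::DataLayout::NHWC);
}
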
/** Write image tensor to ofstream
 *
 * @param[in] imageData         Image tensor data
 * @param[in] imageTensorFile   Output filestream (ofstream) to which the image tensor data is written
 */
template <typename ElemType>
void WriteImageTensorImpl(const std::vector<ElemType>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<ElemType>(imageTensorFile, " "));
}

// For uint8_t image tensors, cast the values to int before writing them so that they are written as numerical
// values rather than characters
template <>
void WriteImageTensorImpl<uint8_t>(const std::vector<uint8_t>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " "));
}
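
// Illustrative sketch (the output file name is a placeholder): writing a prepared qasymm8 tensor to a text file
// as space-separated values. The uint8_t specialization above ensures the bytes are written as numbers rather
// than as raw characters.
inline void WriteExampleImageTensor(const std::vector<uint8_t>& imageData)
{
    std::ofstream imageTensorFile("example_tensor.txt", std::ofstream::trunc);
    WriteImageTensorImpl<uint8_t>(imageData, imageTensorFile);
}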