//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "../InferenceTestImage.hpp"
#include "Permute.hpp"
#include <armnn/TypesUtils.hpp>

#include <algorithm>
#include <array>
#include <fstream>
#include <iterator>
#include <string>

struct NormalizationParameters
{
    float scale{ 1.0 };
    std::array<float, 3> mean{ 0.0, 0.0, 0.0 };
    std::array<float, 3> stddev{ 1.0, 1.0, 1.0 };
};

enum class SupportedFrontend
{
    Caffe      = 0,
    TensorFlow = 1,
    TFLite     = 2,
};

// Get normalization parameters.
// Note that different flavours of models have different normalization methods.
// This tool currently only supports Caffe, TF and TFLite models.
NormalizationParameters GetNormalizationParameters(const SupportedFrontend& modelFormat,
                                                   const armnn::DataType& outputType)
{
    NormalizationParameters normParams;
    // Explicitly set default parameters
    normParams.scale  = 1.0;
    normParams.mean   = { 0.0, 0.0, 0.0 };
    normParams.stddev = { 1.0, 1.0, 1.0 };
    switch (modelFormat)
    {
        case SupportedFrontend::Caffe:
            break;
        case SupportedFrontend::TensorFlow:
        case SupportedFrontend::TFLite:
        default:
            switch (outputType)
            {
                case armnn::DataType::Float32:
                    normParams.scale = 127.5;
                    normParams.mean  = { 1.0, 1.0, 1.0 };
                    break;
                case armnn::DataType::Signed32:
                    normParams.mean = { 128.0, 128.0, 128.0 };
                    break;
                case armnn::DataType::QuantisedAsymm8:
                default:
                    break;
            }
            break;
    }
    return normParams;
}
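
// Illustrative usage sketch (not part of the original tool): querying the
// preprocessing parameters for a float32 TFLite model.
//
//     NormalizationParameters params =
//         GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::Float32);
//     // params.scale  == 127.5
//     // params.mean   == { 1.0, 1.0, 1.0 }
//     // params.stddev == { 1.0, 1.0, 1.0 }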

// Prepare raw image tensor data by loading the image from imagePath and preprocessing it.
template <typename ElemType>
std::vector<ElemType> PrepareImageTensor(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize = 1,
                                         const armnn::DataLayout& outputLayout = armnn::DataLayout::NHWC);

// Prepare float32 image tensor
template <>
std::vector<float> PrepareImageTensor<float>(const std::string& imagePath,
                                             unsigned int newWidth,
                                             unsigned int newHeight,
                                             const NormalizationParameters& normParams,
                                             unsigned int batchSize,
                                             const armnn::DataLayout& outputLayout)
{
    // Generate image tensor
    std::vector<float> imageData;
    InferenceTestImage testImage(imagePath.c_str());
    if (newWidth == 0)
    {
        newWidth = testImage.GetWidth();
    }
    if (newHeight == 0)
    {
        newHeight = testImage.GetHeight();
    }
    // Resize the image to the new width and height, or keep it at its original dimensions if the new width and
    // height are specified as 0. Centre/normalise the image.
    imageData = testImage.Resize(newWidth, newHeight, CHECK_LOCATION(),
                                 InferenceTestImage::ResizingMethods::BilinearAndNormalized, normParams.mean,
                                 normParams.stddev, normParams.scale);
    if (outputLayout == armnn::DataLayout::NCHW)
    {
        // Convert to NCHW format
        const armnn::PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
        armnn::TensorShape dstShape({ batchSize, 3, newHeight, newWidth });
        std::vector<float> tempImage(imageData.size());
        armnnUtils::Permute(dstShape, NHWCToArmNN, imageData.data(), tempImage.data(), sizeof(float));
        imageData.swap(tempImage);
    }
    return imageData;
}
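
// Illustrative usage sketch (not part of the original tool): producing a
// 1x3x224x224 NCHW float32 tensor from a hypothetical image file.
//
//     const NormalizationParameters params =
//         GetNormalizationParameters(SupportedFrontend::TFLite, armnn::DataType::Float32);
//     std::vector<float> tensor = PrepareImageTensor<float>(
//         "/path/to/image.jpg",      // hypothetical input image
//         224, 224,                  // target width/height; 0 keeps the original dimensions
//         params,
//         1,                         // batch size
//         armnn::DataLayout::NCHW);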

// Prepare int32 image tensor
template <>
std::vector<int> PrepareImageTensor<int>(const std::string& imagePath,
                                         unsigned int newWidth,
                                         unsigned int newHeight,
                                         const NormalizationParameters& normParams,
                                         unsigned int batchSize,
                                         const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    // Convert to int32 image tensor with static cast
    std::vector<int> imageDataInt;
    imageDataInt.reserve(imageDataFloat.size());
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataInt),
                   [](float val) { return static_cast<int>(val); });
    return imageDataInt;
}

// Prepare qasymm8 image tensor
template <>
std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath,
                                                 unsigned int newWidth,
                                                 unsigned int newHeight,
                                                 const NormalizationParameters& normParams,
                                                 unsigned int batchSize,
                                                 const armnn::DataLayout& outputLayout)
{
    // Get float32 image tensor
    std::vector<float> imageDataFloat =
        PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout);
    std::vector<uint8_t> imageDataQasymm8;
    imageDataQasymm8.reserve(imageDataFloat.size());
    // Convert to uint8 image tensor with static cast
    std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymm8),
                   [](float val) { return static_cast<uint8_t>(val); });
    return imageDataQasymm8;
}

// Write image tensor to ofstream
template <typename ElemType>
void WriteImageTensorImpl(const std::vector<ElemType>& imageData, std::ofstream& imageTensorFile)
{
    std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<ElemType>(imageTensorFile, " "));
}
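
// Illustrative usage sketch (not part of the original tool): dumping a
// prepared tensor to a space-separated text file.
//
//     std::ofstream outFile("image_tensor.txt");    // hypothetical output path
//     WriteImageTensorImpl(tensor, outFile);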