blob: 58f77d55c57b7a90229828e16a7a217c9903f131 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "WorkloadData.hpp"
6
7#include "CpuTensorHandle.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
Matteo Martincigh21350152018-11-28 16:22:22 +00009#include <DataLayoutIndexed.hpp>
Matthew Bentham8800c002018-11-19 13:19:28 +000010
telsoa014fcda012018-03-09 14:13:49 +000011#include <algorithm>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <iomanip>
telsoa014fcda012018-03-09 14:13:49 +000013#include <string>
14#include <sstream>
telsoa014fcda012018-03-09 14:13:49 +000015
16#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010017#include <boost/numeric/conversion/cast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Matteo Martincigh21350152018-11-28 16:22:22 +000019using namespace armnnUtils;
20
telsoa014fcda012018-03-09 14:13:49 +000021namespace armnn
22{
23
24//---------------------------------------------------------------
25DataType GetBiasDataType(DataType inputDataType)
26{
27 switch (inputDataType)
28 {
telsoa01c577f2c2018-08-31 09:22:23 +010029 case DataType::Float16:
30 return DataType::Float16;
telsoa014fcda012018-03-09 14:13:49 +000031 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QuantisedAsymm8:
34 return DataType::Signed32;
35 default:
36 BOOST_ASSERT_MSG(false, "Invalid input data type");
37 return DataType::Float32;
38 }
39}
40
41namespace
42{
43
44//---------------------------------------------------------------
// Local replacement for std::to_string, which is unavailable in the Android NDK.
// Streams the value through an ostringstream so any streamable type is accepted.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
53
54//---------------------------------------------------------------
55void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
56{
57 if (!ptr)
58 {
59 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
60 paramName + " parameter must be set.");
61 }
62}
63
64//---------------------------------------------------------------
65void ValidateTensorShapesMatch(const TensorInfo& first,
66 const TensorInfo& second,
67 std::string const& descName,
68 std::string const& firstName,
69 std::string const& secondName)
70{
71 if (first.GetShape() != second.GetShape())
72 {
73 throw InvalidArgumentException(descName + ": "
74 + firstName + " & " + secondName + " must have identical shapes");
75 }
76}
77
78//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010079void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000080{
Sadik Armaganeff363d2019-04-05 15:25:46 +010081 if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000082 {
83 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010084 ": Requires exactly " + to_string(expectedSize) + "input(s). " +
telsoa014fcda012018-03-09 14:13:49 +000085 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
86 }
87}
88
89//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010090void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000091{
Sadik Armaganeff363d2019-04-05 15:25:46 +010092 if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000093 {
94 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010095 ": Requires exactly " + to_string(expectedSize) + " output(s). " +
telsoa014fcda012018-03-09 14:13:49 +000096 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
97 }
98}
99
100//---------------------------------------------------------------
101void ValidateTensorNumDimensions(const TensorInfo& tensor,
102 std::string const& descName,
103 unsigned int numDimensions,
104 std::string const& tensorName)
105{
106 if (tensor.GetNumDimensions() != numDimensions)
107 {
108 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
109 to_string(tensor.GetNumDimensions()) + " dimensions for " +
110 tensorName + " tensor.");
111 }
112}
113
114//---------------------------------------------------------------
115void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
116 const std::string& descName, std::string const& tensorName)
117{
118 if (tensor.GetDataType() != dataType)
119 {
120 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
121 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
122 }
123}
124
125//---------------------------------------------------------------
126void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
127 const TensorInfo& weightsTensorInfo, const std::string& descName)
128{
129 if (biasTensor.GetQuantizationOffset() != 0)
130 {
131 throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
132 to_string(biasTensor.GetQuantizationOffset()));
133 }
134 const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
kevmay016c46dd32018-12-17 15:32:45 +0000135 if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
telsoa014fcda012018-03-09 14:13:49 +0000136 {
137 // Print the float values with extra precision to see very small differences
138 std::stringstream msg;
139 msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
140 " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
141 biasTensor.GetQuantizationScale();
142 throw InvalidArgumentException(msg.str());
143 }
144}
145
146//---------------------------------------------------------------
147void ValidateTensors(const std::vector<ITensorHandle*>& vec,
148 unsigned int numExpected,
149 const std::string& descName,
150 const std::string& varName)
151{
152 if (vec.empty() && numExpected > 0)
153 {
154 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
155 }
156
157 for (unsigned int i = 0; i < numExpected; ++i)
158 {
159 if (!vec[i])
160 {
161 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
162 }
163 }
164}
165
166//---------------------------------------------------------------
// Validates that 'first' and 'second' are broadcast-compatible and that the shape produced
// by broadcasting them equals the shape of 'output'.
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName + ": Tensors "
            + firstName + " & " + secondName
            + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        // Two extents are broadcast-compatible when they are equal or when either one is 1.
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        // The broadcast result takes the larger extent of each dimension pair.
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        // NOTE(review): the message says "adding", but this helper is also used by other
        // element-wise workloads (e.g. MultiplicationQueueDescriptor) — consider neutral wording.
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
            + firstName + " & " + secondName
            + " does not match the output shape");
    }
}
202
203//---------------------------------------------------------------
204/// Validates that the output tensor's quantization scale is greater than the product
205/// of the two input tensors' quantization scales. This is a requirement of the implementation of
206/// the quantized multiplication.
207void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
208 const TensorInfo& outputTensorInfo, std::string const& descName,
209 const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
210{
211 if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
212 {
213 if (outputTensorInfo.GetQuantizationScale() <=
214 inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
215 {
216 std::stringstream msg;
217 msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
218 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
219 throw InvalidArgumentException(msg.str());
220 }
221 }
222}
223
Sadik Armaganeff363d2019-04-05 15:25:46 +0100224//---------------------------------------------------------------
225void ValidateDataTypes(const TensorInfo& info,
226 const std::vector<armnn::DataType>& supportedTypes,
227 std::string const& descName)
228{
229 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
230 if (iterator == supportedTypes.end())
231 {
232 throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
233 }
234}
235
telsoa014fcda012018-03-09 14:13:49 +0000236} //namespace
237
// Checks that this descriptor carries the expected number of non-null input and output
// tensor handles. Inputs are checked before outputs, so an input error is reported first.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
244
245//---------------------------------------------------------------
// Validates a memory-copy workload: matching input/output counts, element counts per
// tensor pair, and non-null handles on both sides.
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemCopyQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemCopyQueueDescriptor" , 1);

    // Tensor infos must pair up one-to-one between inputs and outputs.
    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
            % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    // Each copy destination must hold exactly as many elements as its source.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                % i ));
        }
    }

    // The handle arrays must also pair up one-to-one.
    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
            % m_Inputs.size() % m_Outputs.size()));
    }

    // No null handles allowed on either side.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}
289
290//---------------------------------------------------------------
291void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
292{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100293 ValidateNumInputs(workloadInfo, "ActivationQueueDescriptor", 1);
294 ValidateNumOutputs(workloadInfo, "ActivationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000295 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
296 workloadInfo.m_OutputTensorInfos[0],
297 "ActivationQueueDescriptor",
298 "input",
299 "output");
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100300
301 std::vector<DataType> supportedTypes = {
302 DataType::Float32,
303 DataType::Float16,
Teresa Charlin18515e22019-04-24 10:17:46 +0100304 DataType::QuantisedAsymm8,
305 DataType::QuantisedSymm16
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100306 };
307
308 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
309 supportedTypes,
310 "ActivationQueueDescriptor");
311
312 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
313 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
314 "ActivationQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000315}
316
317//---------------------------------------------------------------
318void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
319{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100320 ValidateNumInputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
321 ValidateNumOutputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000322
323 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
324 workloadInfo.m_OutputTensorInfos[0],
325 "SoftmaxQueueDescriptor",
326 "input",
327 "output");
328}
329
330//---------------------------------------------------------------
// Validates a splitter workload: one input, at least one output, one view origin per
// output, matching window dimensionality, and every window fully inside the input.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "SplitterQueueDescriptor", 1);

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "SplitterQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }
        // Each window (origin + output extent) must lie fully inside the input tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
377
378//---------------------------------------------------------------
// Validates a merger (concatenation) workload: non-empty handle and tensor-info arrays,
// a concatenation axis inside the input rank, one view origin per input, and every
// merge window fully inside the output tensor.
void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumOutputs(workloadInfo, "MergerQueueDescriptor", 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
    }

    // NOTE(review): this uses '>' so an axis equal to the rank passes — confirm whether '>=' was intended.
    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException("Invalid Concatenation Axis provided");
    }

    // NOTE(review): when concatenating along the innermost dimension the view-origin checks
    // below are skipped entirely — presumably origins are derived implicitly by the backend; verify.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "MergerQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }
}
449
450//---------------------------------------------------------------
// Validates a fully-connected workload: 2D output, 2D or 4D input, a present 2D weight
// tensor, an optional 1D bias of the matching bias type/quantization, and a valid
// output quantization scale.
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");

    // 4D inputs are accepted as well (flattened by the implementation — TODO confirm against backends).
    if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
          workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
    }

    if (m_Weight == nullptr)
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
    }

    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        if (m_Bias == nullptr)
        {
            throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
                                           "bias value tensor descriptor is missing.");
        }

        // Validates type and quantization values.
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "FullyConnectedQueueDescriptor", "bias");

        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
    }

    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
}
492
493//---------------------------------------------------------------
494void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
495{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100496 ValidateNumInputs(workloadInfo, "NormalizationQueueDescriptor", 1);
497 ValidateNumOutputs(workloadInfo, "NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000498 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
499 workloadInfo.m_OutputTensorInfos[0],
500 "NormalizationQueueDescriptor",
501 "input",
502 "output");
503}
504
505void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
506{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100507 ValidateNumInputs(workloadInfo, "AdditionQueueDescriptor", 2);
508 ValidateNumOutputs(workloadInfo, "AdditionQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000509
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100510 std::vector<DataType> supportedTypes = {
511 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100512 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100513 DataType::QuantisedSymm16,
514 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100515 };
516
517 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
518 supportedTypes,
519 "AdditionQueueDescriptor");
520
521 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
522 supportedTypes,
523 "AdditionQueueDescriptor");
524
525 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
526 supportedTypes,
527 "AdditionQueueDescriptor");
528
telsoa014fcda012018-03-09 14:13:49 +0000529 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
530 workloadInfo.m_InputTensorInfos[1],
531 workloadInfo.m_OutputTensorInfos[0],
532 "AdditionQueueDescriptor",
533 "first input",
534 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000535}
536
537//---------------------------------------------------------------
538void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
539{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100540 ValidateNumInputs(workloadInfo, "MultiplicationQueueDescriptor", 2);
541 ValidateNumOutputs(workloadInfo, "MultiplicationQueueDescriptor", 1);
surmeh01bceff2f2018-03-29 16:29:27 +0100542
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100543 std::vector<DataType> supportedTypes = {
544 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100545 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100546 DataType::QuantisedSymm16,
547 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100548 };
549
550 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
551 supportedTypes,
552 "MultiplicationQueueDescriptor");
553
554 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
555 supportedTypes,
556 "MultiplicationQueueDescriptor");
557
558 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
559 supportedTypes,
560 "MultiplicationQueueDescriptor");
561
surmeh01bceff2f2018-03-29 16:29:27 +0100562 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
563 workloadInfo.m_InputTensorInfos[1],
564 workloadInfo.m_OutputTensorInfos[0],
565 "MultiplicationQueueDescriptor",
566 "first input",
567 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000568}
569
// Validates a batch-normalization workload: one input and one output with identical shapes,
// plus present 1D mean/variance/beta/gamma tensors that all share the mean's shape.
void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "BatchNormalizationQueueDescriptor",
                              "input",
                              "output");
    // All four parameter tensors are mandatory.
    ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
    ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
    ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
    ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");


    ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
    ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
    ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
    ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");

    // The mean's shape is the reference the other three parameter tensors must match.
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
}
597
598void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
599{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100600 ValidateNumInputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
601 ValidateNumOutputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000602
603 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
604 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
605
606 ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
607 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
608 ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
609 "Convolution2dQueueDescriptor", "weight");
610 if (m_Parameters.m_BiasEnabled)
611 {
612 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
613 ValidateTensorDataType(m_Bias->GetTensorInfo(),
614 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
615 "Convolution2dQueueDescriptor", "bias");
616 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
617 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
618 }
619
620 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
621 workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
622}
623
624void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
625{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100626 ValidateNumInputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
627 ValidateNumOutputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000628
629 ValidateTensorNumDimensions(
630 workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
631 ValidateTensorNumDimensions(
632 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");
633
634 ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
635 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");
636
Bruno Goncalves22972f02019-04-26 21:03:24 -0300637 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
638 {
639 throw InvalidArgumentException(
640 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: dilationX (provided %1%) "
641 "and dilationY (provided %2%) cannot be smaller than 1.")
642 % m_Parameters.m_DilationX % m_Parameters.m_DilationX));
643 }
644
Nikhil Rajcec6b652018-10-12 13:51:57 +0100645 const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
646
Matteo Martincigh747ef822018-12-18 09:26:39 +0000647 // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
648 // inputChannels * channelMultiplier should be equal to outputChannels.
telsoa014fcda012018-03-09 14:13:49 +0000649 const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000650 const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
Nikhil Rajcec6b652018-10-12 13:51:57 +0100651 const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelIndex];
telsoa014fcda012018-03-09 14:13:49 +0000652 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
653 {
654 throw InvalidArgumentException(
655 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
656 "equal to input_channels (provided %2%) multiplied by channel_multiplier "
657 "(provided %3%).")
658 % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
659 }
660
661 if (m_Parameters.m_BiasEnabled)
662 {
663 ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
664 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
665 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
666 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");
667
668 ValidateTensorDataType(m_Bias->GetTensorInfo(),
669 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
670 "DepthwiseConvolution2dQueueDescriptor", "bias");
671 }
672
673 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
674 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
675}
676
677void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
678{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100679 ValidateNumInputs(workloadInfo, "PermuteQueueDescriptor", 1);
680 ValidateNumOutputs(workloadInfo, "PermuteQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000681
682 const PermutationVector& mapping = m_Parameters.m_DimMappings;
683
684 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
685 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
686
687 ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
688 ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
689
690 for (unsigned int i = 0; i < mapping.GetSize(); ++i)
691 {
692 if (input.GetShape()[i] != output.GetShape()[mapping[i]])
693 {
694 throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
695 " (=" + to_string(input.GetShape()[i]) + ") " +
696 "must match dst dimension " + to_string(mapping[i]) +
697 " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
698 }
699 }
700}
701
702void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
703{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100704 ValidateNumInputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
705 ValidateNumOutputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000706
707 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
708 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
709}
710
// Validates a bilinear-resize workload: one 4D input, one 4D output, and because the
// operation only rescales the spatial dimensions, the batch and channel counts must
// be identical between input and output.
void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");

    // Resizes bilinear only changes width and height: batch and channel count must match.
    {
        // Batch is always dimension 0 regardless of data layout.
        const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
        const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
        if (inputBatchSize != outputBatchSize)
        {
            throw InvalidArgumentException(
                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
                    "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
        }
    }

    {
        // The channel dimension's position depends on the configured data layout (NCHW/NHWC).
        DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
        const unsigned int inputChannelCount =
            workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
        const unsigned int outputChannelCount =
            workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
        if (inputChannelCount != outputChannelCount)
        {
            throw InvalidArgumentException(
                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
                    "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
        }
    }
}
745
746void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
747{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100748 ValidateNumInputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
749 ValidateNumOutputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000750
751 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
752 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
753 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
754 workloadInfo.m_OutputTensorInfos[0],
755 "FakeQuantizationQueueDescriptor",
756 "input",
757 "output");
758 if (m_Parameters.m_Min > m_Parameters.m_Max)
759 {
760 throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
761 }
762
763}
764
765void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
766{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100767 ValidateNumInputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
768 ValidateNumOutputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000769
770 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
771 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
772 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
773 workloadInfo.m_OutputTensorInfos[0],
774 "L2NormalizationQueueDescriptor",
775 "input",
776 "output");
777}
778
779void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
780{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100781 ValidateNumInputs(workloadInfo, "ConstantQueueDescriptor", 0);
782 ValidateNumOutputs(workloadInfo, "ConstantQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000783
784 if (!m_LayerOutput)
785 {
786 throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
787 }
788
789 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
790 workloadInfo.m_OutputTensorInfos[0],
791 "ConstantQueueDescriptor",
792 "constant",
793 "output");
794}
795
796void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
797{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100798 ValidateNumInputs(workloadInfo, "ReshapeQueueDescriptor", 1);
799 ValidateNumOutputs(workloadInfo, "ReshapeQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000800
801 if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
802 {
803 throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
804 to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
805 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
806 }
807}
808
// Validates a space-to-batch workload: checks tensor counts and 4D ranks, that the
// block-shape/pad-list parameters are consistent, that the padded input volume matches
// the output element count, and that the padded spatial extents divide evenly by the
// block shape.
void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "output");

    // Only the 2D (height/width) form of the operation is supported here.
    if (m_Parameters.m_BlockShape.size() != 2)
    {
        throw InvalidArgumentException("Block Shape must contain 2 spatial dimensions");
    }

    if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
    {
        throw InvalidArgumentException("Pad List must contain the same number of dimensions as Block Shape.");
    }

    const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();

    // Pad list entries are (before, after) pairs: [0] pads height, [1] pads width.
    std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];

    // Spatial extents after padding; dimension positions depend on the data layout.
    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
                               + heightPad.first + heightPad.second;

    unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
                              + widthPad.first + widthPad.second;

    // Total element count of the padded input (batch * H * W * channels); space-to-batch
    // only rearranges data, so the output must hold exactly this many elements.
    unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
                                    * inputShape[dimensionIndices.GetChannelsIndex()];

    if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
    {
        throw InvalidArgumentException("SpaceToBatchNdQueueDescriptor: Input tensor has " +
            to_string(numInputElements) + " after padding but output tensor has " +
            to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
    }

    // Each padded spatial dimension must split evenly into blocks.
    if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
    {
        throw InvalidArgumentException(
            "Input shape after padding must be divisible by Block Shape in all spatial dimensions");
    }
}
855
telsoa014fcda012018-03-09 14:13:49 +0000856void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
857{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100858 ValidateNumInputs(workloadInfo, "FloorQueueDescriptor", 1);
859 ValidateNumOutputs(workloadInfo, "FlootQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000860
861 if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
862 {
863 throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
864 }
865}
866
// Validates an LSTM workload. Only the rank and data type of the primary input and
// output tensors are checked here; the descriptor's weight/bias tensors are not
// validated in this function.
void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Both the main input and output are expected as 2D tensors
    // (presumably [batch, features] - confirm against the layer implementation).
    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "LstmQueueDescriptor", 2, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "LstmQueueDescriptor", 2, "output");

    // Data types accepted for both the input and the output tensor.
    std::vector<DataType> supportedTypes = {
        DataType::Float16,
        DataType::Float32,
        DataType::QuantisedSymm16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                      supportedTypes,
                      "LstmQueueDescriptor");

    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      supportedTypes,
                      "LstmQueueDescriptor");
}
886
887void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
888{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100889 ValidateNumInputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
890 ValidateNumOutputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100891
892 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
893 {
894 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Input tensor type must be Float32.");
895 }
896
897 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float16)
898 {
899 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Output tensor type must be Float16.");
900 }
901
902 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
903 workloadInfo.m_OutputTensorInfos[0],
904 "ConvertFp32ToFp16QueueDescriptor",
905 "input",
906 "output");
907}
908
909void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
910{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100911 ValidateNumInputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
912 ValidateNumOutputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100913
914 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float16)
915 {
916 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Input tensor type must be Float16.");
917 }
918 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
919 {
920 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Output tensor type must be Float32.");
921 }
922
923 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
924 workloadInfo.m_OutputTensorInfos[0],
925 "ConvertFp16ToFp32QueueDescriptor",
926 "input",
927 "output");
928}
929
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100930void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
931{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100932 ValidateNumInputs(workloadInfo, "DivisionQueueDescriptor", 2);
933 ValidateNumOutputs(workloadInfo, "DivisionQueueDescriptor", 1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100934
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100935 std::vector<DataType> supportedTypes = {
936 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100937 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100938 DataType::QuantisedSymm16,
939 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100940 };
941
942 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
943 supportedTypes,
944 "DivisionQueueDescriptor");
945
946 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
947 supportedTypes,
948 "DivisionQueueDescriptor");
949
950 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
951 supportedTypes,
952 "DivisionQueueDescriptor");
953
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100954 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
955 workloadInfo.m_InputTensorInfos[1],
956 workloadInfo.m_OutputTensorInfos[0],
957 "DivisionQueueDescriptor",
958 "first input",
959 "second input");
960}
961
David Beckc2044fe2018-09-05 15:00:38 +0100962void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
963{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100964 ValidateNumInputs(workloadInfo, "SubtractionQueueDescriptor", 2);
965 ValidateNumOutputs(workloadInfo, "SubtractionQueueDescriptor", 1);
David Beckc2044fe2018-09-05 15:00:38 +0100966
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100967 std::vector<DataType> supportedTypes = {
968 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100969 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100970 DataType::QuantisedSymm16,
971 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100972 };
973
974 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
975 supportedTypes,
976 "SubtractionQueueDescriptor");
977
978 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
979 supportedTypes,
980 "SubtractionQueueDescriptor");
981
982 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
983 supportedTypes,
984 "SubtractionQueueDescriptor");
985
David Beckc2044fe2018-09-05 15:00:38 +0100986 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
987 workloadInfo.m_InputTensorInfos[1],
988 workloadInfo.m_OutputTensorInfos[0],
989 "SubtractionQueueDescriptor",
990 "first input",
991 "second input");
992}
993
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000994void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
995{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100996 ValidateNumInputs(workloadInfo, "MaximumQueueDescriptor", 2);
997 ValidateNumOutputs(workloadInfo, "MaximumQueueDescriptor", 1);
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000998
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100999 std::vector<DataType> supportedTypes = {
1000 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001001 DataType::QuantisedAsymm8,
1002 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001003 };
1004
1005 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1006 supportedTypes,
1007 "MaximumQueueDescriptor");
1008
1009 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1010 supportedTypes,
1011 "MaximumQueueDescriptor");
1012
1013 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1014 supportedTypes,
1015 "MaximumQueueDescriptor");
1016
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001017 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1018 workloadInfo.m_InputTensorInfos[1],
1019 workloadInfo.m_OutputTensorInfos[0],
1020 "MaximumQueueDescriptor",
1021 "first input",
1022 "second input");
1023}
1024
// Validates a Mean (reduction) workload: the expected output rank depends on whether
// reduced dimensions are kept and on how many axes are reduced.
void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MeanQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MeanQueueDescriptor", 1);

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];

    if (m_Parameters.m_KeepDims)
    {
        // Reduced dimensions are retained (as size 1), so the rank is unchanged.
        ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
    }
    else if (m_Parameters.m_Axis.empty())
    {
        // No axes specified: reduce across all dimensions down to a rank-1 result.
        ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
    }
    else
    {
        // One dimension is removed per reduced axis, but the output keeps at least rank 1.
        auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
        ValidateTensorNumDimensions(output,
                                    "MeanQueueDescriptor",
                                    outputDim > 0 ? outputDim : 1,
                                    "output");
    }
}
1050
// Validates a Pad workload: the output must have the same rank as the input and the
// pad list must supply one (before, after) entry per input dimension.
void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "PadQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "PadQueueDescriptor", 1);

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];

    // input and output should have the same number of dimensions
    ValidateTensorNumDimensions(output, "PadQueueDescriptor", input.GetNumDimensions(), "output");
    // there should be entry in the pad list for each dimension in the input tensor
    if (m_Parameters.m_PadList.size() != input.GetNumDimensions()) {
        throw InvalidArgumentException("Pad List should contain the same number of entries as there"
                                       " are dimensions in the input tensor that is " +
                                       to_string(input.GetNumDimensions()) + " entries " +
                                       " not " + to_string(m_Parameters.m_PadList.size()) + " entries.");
    }
}
1069
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001070void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1071{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001072 ValidateNumInputs(workloadInfo, "QuantizeQueueDescriptor", 1);
1073 ValidateNumOutputs(workloadInfo, "QuantizeQueueDescriptor", 1);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001074
1075
1076 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
1077 {
1078 throw InvalidArgumentException("Quantize only accepts Float32 inputs.");
1079 }
1080
1081 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1082 workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1083 {
1084 throw InvalidArgumentException("Output of quantized layer must be quantized type.");
1085 }
1086}
1087
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001088void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1089{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001090 ValidateNumInputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
1091 ValidateNumOutputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001092}
1093
// Validates a StridedSlice workload: input rank is capped at 4, the begin/end/stride
// parameter vectors must each have one entry per input dimension, and zero strides are
// rejected (a zero stride would never advance along its dimension).
void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "StridedSliceQueueDescriptor", 1);

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const uint32_t rank = input.GetNumDimensions();

    if (rank > 4)
    {
        throw InvalidArgumentException(
            "StridedSliceLayer: Input tensors with rank greater than 4 are not supported");
    }

    // Begin, End & Stride length must be of rank(input0)
    if (m_Parameters.m_Begin.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: Begin length must be of rank input0("
                                       + to_string(rank) + ")");
    }

    if (m_Parameters.m_End.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: End length must be of rank input0("
                                       + to_string(rank) + ")");
    }

    if (m_Parameters.m_Stride.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: Stride length must be of rank input0("
                                       + to_string(rank) + ")");
    }

    // Stride entries must be non-zero
    for (auto& stride : m_Parameters.m_Stride)
    {
        if (stride == 0)
        {
            throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
        }
    }
}
1136
kevmay0190539692018-11-29 08:40:19 +00001137void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1138{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001139 ValidateNumInputs(workloadInfo, "MinimumQueueDescriptor", 2);
1140 ValidateNumOutputs(workloadInfo, "MinimumQueueDescriptor", 1);
kevmay0190539692018-11-29 08:40:19 +00001141
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001142 std::vector<DataType> supportedTypes = {
1143 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001144 DataType::QuantisedAsymm8,
1145 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001146 };
1147
1148 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1149 supportedTypes,
1150 "MinimumQueueDescriptor");
1151
1152 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1153 supportedTypes,
1154 "MinimumQueueDescriptor");
1155
1156 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1157 supportedTypes,
1158 "MinimumQueueDescriptor");
1159
kevmay0190539692018-11-29 08:40:19 +00001160 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1161 workloadInfo.m_InputTensorInfos[1],
1162 workloadInfo.m_OutputTensorInfos[0],
1163 "MinimumQueueDescriptor",
1164 "first input",
1165 "second input");
1166}
1167
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001168void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1169{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001170 ValidateNumInputs(workloadInfo, "DebugQueueDescriptor", 1);
1171 ValidateNumOutputs(workloadInfo, "DebugQueueDescriptor", 1);
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001172}
1173
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001174void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1175{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001176 ValidateNumInputs(workloadInfo, "EqualQueueDescriptor", 2);
1177 ValidateNumOutputs(workloadInfo, "EqualQueueDescriptor", 1);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001178
1179 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1180 workloadInfo.m_InputTensorInfos[1],
1181 workloadInfo.m_OutputTensorInfos[0],
1182 "EqualQueueDescriptor",
1183 "first input",
1184 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001185
1186 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1187 {
1188 throw InvalidArgumentException("EqualQueueDescriptor: Output tensor type must be Boolean.");
1189 }
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001190}
1191
FrancisMurtagh878f0232018-12-19 10:56:15 +00001192void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1193{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001194 ValidateNumInputs(workloadInfo, "GreaterQueueDescriptor", 2);
1195 ValidateNumOutputs(workloadInfo, "GreaterQueueDescriptor", 1);
FrancisMurtagh878f0232018-12-19 10:56:15 +00001196
1197 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1198 workloadInfo.m_InputTensorInfos[1],
1199 workloadInfo.m_OutputTensorInfos[0],
1200 "GreaterQueueDescriptor",
1201 "first input",
1202 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001203
1204 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1205 {
1206 throw InvalidArgumentException("GreaterQueueDescriptor: Output tensor type must be Boolean.");
1207 }
FrancisMurtagh878f0232018-12-19 10:56:15 +00001208}
1209
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001210void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1211{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001212 ValidateNumInputs(workloadInfo, "RsqrtQueueDescriptor", 1);
1213 ValidateNumOutputs(workloadInfo, "RsqrtQueueDescriptor", 1);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001214 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1215 workloadInfo.m_OutputTensorInfos[0],
1216 "RsqrtQueueDescriptor",
1217 "input",
1218 "output");
1219}
1220
// Validates a Gather workload: input 0 is the params tensor, input 1 the int32 indices
// tensor; the output rank must equal (params rank - 1) + indices rank.
void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "GatherQueueDescriptor", 2);
    ValidateNumOutputs(workloadInfo, "GatherQueueDescriptor", 1);

    const TensorInfo& indices = workloadInfo.m_InputTensorInfos[1];

    if (indices.GetDataType() != DataType::Signed32)
    {
        throw InvalidArgumentException("GatherQueueDescriptor: Indices tensor type must be int32.");
    }

    const TensorInfo& params = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
    unsigned int paramsDim = params.GetNumDimensions();
    unsigned int indicesDim = indices.GetNumDimensions();
    // The gathered axis of params is replaced by the full shape of the indices tensor.
    unsigned int outputDim = paramsDim - 1 + indicesDim;

    ValidateTensorNumDimensions(output, "GatherQueueDescriptor", outputDim, "output");
}
1241
// Validates a DetectionPostProcess workload: two inputs (box encodings and class
// scores), an anchors tensor supplied via the descriptor, and exactly four Float32
// outputs (detection boxes, classes, scores and the number of detections).
void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "DetectionPostProcessQueueDescriptor", 2);

    // The output count is checked by hand (rather than ValidateNumOutputs) to produce
    // a message that includes the actual number provided.
    if (workloadInfo.m_OutputTensorInfos.size() != 4)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Requires exactly four outputs. " +
                                       to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
    }

    if (m_Anchors == nullptr)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Anchors tensor descriptor is missing.");
    }

    const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
    const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
    const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
    const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];

    // Rank checks on inputs and the anchors tensor.
    ValidateTensorNumDimensions(boxEncodingsInfo, "DetectionPostProcessQueueDescriptor", 3, "box encodings");
    ValidateTensorNumDimensions(scoresInfo, "DetectionPostProcessQueueDescriptor", 3, "scores");
    ValidateTensorNumDimensions(anchorsInfo, "DetectionPostProcessQueueDescriptor", 2, "anchors");

    // Rank checks on the four outputs.
    ValidateTensorNumDimensions(detectionBoxesInfo, "DetectionPostProcessQueueDescriptor", 3, "detection boxes");
    ValidateTensorNumDimensions(detectionScoresInfo, "DetectionPostProcessQueueDescriptor", 2, "detection scores");
    ValidateTensorNumDimensions(detectionClassesInfo, "DetectionPostProcessQueueDescriptor", 2, "detection classes");
    ValidateTensorNumDimensions(numDetectionsInfo, "DetectionPostProcessQueueDescriptor", 1, "num detections");

    // All four outputs are produced as Float32.
    ValidateTensorDataType(detectionBoxesInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection boxes");
    ValidateTensorDataType(detectionScoresInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection scores");
    ValidateTensorDataType(detectionClassesInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection classes");
    ValidateTensorDataType(numDetectionsInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "num detections");

    // The non-max-suppression IoU threshold must lie in (0, 1].
    if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }
    // The scores tensor's last dimension carries one extra entry for the background class.
    if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Number of classes with background "
                                       "should be equal to number of classes + 1.");
    }
}
1294
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001295void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1296{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001297 ValidateNumInputs(workloadInfo, "DequantizeQueueDescriptor", 1);
1298 ValidateNumOutputs(workloadInfo, "DequantizeQueueDescriptor", 1);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001299
1300 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1301 workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1302 {
1303 throw InvalidArgumentException("Input to dequantize layer must be quantized type.");
1304 }
1305
1306 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
1307 {
1308 throw InvalidArgumentException("Output of dequantize layer must be Float32 type.");
1309 }
1310}
1311
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001312void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1313{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001314 ValidateNumInputs(workloadInfo, "MergeQueueDescriptor", 2);
1315 ValidateNumOutputs(workloadInfo, "MergeQueueDescriptor", 1);
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001316
1317 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1318 workloadInfo.m_InputTensorInfos[1],
1319 "MergeQueueDescriptor",
1320 "input0",
1321 "input1");
1322
1323 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1324 workloadInfo.m_OutputTensorInfos[0],
1325 "MergeQueueDescriptor",
1326 "input0",
1327 "output");
1328
1329 const DataType dataType = workloadInfo.m_InputTensorInfos[0].GetDataType();
1330 ValidateTensorDataType(workloadInfo.m_InputTensorInfos[1], dataType, "MergeQueueDescriptor", "input1");
1331 ValidateTensorDataType(workloadInfo.m_OutputTensorInfos[0], dataType, "MergeQueueDescriptor", "output");
1332}
1333
Sadik Armaganeff363d2019-04-05 15:25:46 +01001334void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1335{
1336 ValidateNumInputs(workloadInfo, "SwitchQueueDescriptor", 2);
1337 ValidateNumOutputs(workloadInfo, "SwitchQueueDescriptor", 2);
1338
1339 std::vector<DataType> supportedTypes = {
1340 DataType::Float32,
1341 DataType::QuantisedAsymm8,
1342 DataType::QuantisedSymm16
1343 };
1344
1345 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1346 supportedTypes,
1347 "SwitchQueueDescriptor");
1348
1349 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1350 supportedTypes,
1351 "SwitchQueueDescriptor");
1352
1353 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1354 supportedTypes,
1355 "SwitchQueueDescriptor");
1356
1357 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1358 workloadInfo.m_OutputTensorInfos[0],
1359 "SwitchQueueDescriptor",
1360 "input0",
1361 "output0");
1362
1363 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1364 workloadInfo.m_OutputTensorInfos[1],
1365 "SwitchQueueDescriptor",
1366 "input0",
1367 "output1");
1368}
1369
// A pre-compiled workload descriptor is produced internally by a backend, so it is
// trusted and no validation is performed here.
void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // This is internally generated so it should not need validation.
}
1374
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001375} //namespace armnn