blob: 2cf99371d2ccb526549b83ee544b4673fd9e22ae [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "WorkloadData.hpp"
6
7#include "CpuTensorHandle.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
Matteo Martincigh21350152018-11-28 16:22:22 +00009#include <DataLayoutIndexed.hpp>
Matthew Bentham8800c002018-11-19 13:19:28 +000010
telsoa014fcda012018-03-09 14:13:49 +000011#include <algorithm>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <iomanip>
telsoa014fcda012018-03-09 14:13:49 +000013#include <string>
14#include <sstream>
telsoa014fcda012018-03-09 14:13:49 +000015
16#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010017#include <boost/numeric/conversion/cast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Matteo Martincigh21350152018-11-28 16:22:22 +000019using namespace armnnUtils;
20
telsoa014fcda012018-03-09 14:13:49 +000021namespace armnn
22{
23
24//---------------------------------------------------------------
25DataType GetBiasDataType(DataType inputDataType)
26{
27 switch (inputDataType)
28 {
telsoa01c577f2c2018-08-31 09:22:23 +010029 case DataType::Float16:
30 return DataType::Float16;
telsoa014fcda012018-03-09 14:13:49 +000031 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QuantisedAsymm8:
34 return DataType::Signed32;
35 default:
36 BOOST_ASSERT_MSG(false, "Invalid input data type");
37 return DataType::Float32;
38 }
39}
40
41namespace
42{
43
44//---------------------------------------------------------------
45//android ndk does not support std::to_string function.
/// Formats any streamable value as a string.
/// Exists because the Android NDK does not provide std::to_string.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
53
54//---------------------------------------------------------------
55void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
56{
57 if (!ptr)
58 {
59 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
60 paramName + " parameter must be set.");
61 }
62}
63
64//---------------------------------------------------------------
65void ValidateTensorShapesMatch(const TensorInfo& first,
66 const TensorInfo& second,
67 std::string const& descName,
68 std::string const& firstName,
69 std::string const& secondName)
70{
71 if (first.GetShape() != second.GetShape())
72 {
73 throw InvalidArgumentException(descName + ": "
74 + firstName + " & " + secondName + " must have identical shapes");
75 }
76}
77
78//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010079void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000080{
Sadik Armaganeff363d2019-04-05 15:25:46 +010081 if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000082 {
83 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010084 ": Requires exactly " + to_string(expectedSize) + "input(s). " +
telsoa014fcda012018-03-09 14:13:49 +000085 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
86 }
87}
88
89//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010090void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000091{
Sadik Armaganeff363d2019-04-05 15:25:46 +010092 if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000093 {
94 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010095 ": Requires exactly " + to_string(expectedSize) + " output(s). " +
telsoa014fcda012018-03-09 14:13:49 +000096 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
97 }
98}
99
100//---------------------------------------------------------------
101void ValidateTensorNumDimensions(const TensorInfo& tensor,
102 std::string const& descName,
103 unsigned int numDimensions,
104 std::string const& tensorName)
105{
106 if (tensor.GetNumDimensions() != numDimensions)
107 {
108 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
109 to_string(tensor.GetNumDimensions()) + " dimensions for " +
110 tensorName + " tensor.");
111 }
112}
113
114//---------------------------------------------------------------
115void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
116 const std::string& descName, std::string const& tensorName)
117{
118 if (tensor.GetDataType() != dataType)
119 {
120 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
121 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
122 }
123}
124
125//---------------------------------------------------------------
126void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
127 const TensorInfo& weightsTensorInfo, const std::string& descName)
128{
129 if (biasTensor.GetQuantizationOffset() != 0)
130 {
131 throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
132 to_string(biasTensor.GetQuantizationOffset()));
133 }
134 const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
kevmay016c46dd32018-12-17 15:32:45 +0000135 if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
telsoa014fcda012018-03-09 14:13:49 +0000136 {
137 // Print the float values with extra precision to see very small differences
138 std::stringstream msg;
139 msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
140 " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
141 biasTensor.GetQuantizationScale();
142 throw InvalidArgumentException(msg.str());
143 }
144}
145
146//---------------------------------------------------------------
147void ValidateTensors(const std::vector<ITensorHandle*>& vec,
148 unsigned int numExpected,
149 const std::string& descName,
150 const std::string& varName)
151{
152 if (vec.empty() && numExpected > 0)
153 {
154 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
155 }
156
157 for (unsigned int i = 0; i < numExpected; ++i)
158 {
159 if (!vec[i])
160 {
161 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
162 }
163 }
164}
165
166//---------------------------------------------------------------
167void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
168 const TensorInfo& second,
169 const TensorInfo& output,
170 std::string const& descName,
171 std::string const& firstName,
172 std::string const& secondName)
173{
174 // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
175 // broadcasted.
176 if (first.GetNumDimensions() != second.GetNumDimensions())
177 {
178 throw InvalidArgumentException(descName + ": Tensors "
179 + firstName + " & " + secondName
180 + " must have the same number of dimensions in order to be broadcasted");
181 }
182 uint32_t numDims = first.GetNumDimensions();
183 std::vector<uint32_t> outputDims(numDims, 0u);
184 for (uint32_t i = 0; i < numDims; i++)
185 {
186 const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
187 const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
188 if (dimsNotEqual && dimsNotOne)
189 {
190 throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
191 }
192 outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
193 }
194 TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
195 if (broadcastShape != output.GetShape())
196 {
197 throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
198 + firstName + " & " + secondName
199 + " does not match the output shape");
200 }
201}
202
203//---------------------------------------------------------------
204/// Validates that the output tensor's quantization scale is greater than the product
205/// of the two input tensors' quantization scales. This is a requirement of the implementation of
206/// the quantized multiplication.
207void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
208 const TensorInfo& outputTensorInfo, std::string const& descName,
209 const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
210{
211 if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
212 {
213 if (outputTensorInfo.GetQuantizationScale() <=
214 inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
215 {
216 std::stringstream msg;
217 msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
218 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
219 throw InvalidArgumentException(msg.str());
220 }
221 }
222}
223
Sadik Armaganeff363d2019-04-05 15:25:46 +0100224//---------------------------------------------------------------
225void ValidateDataTypes(const TensorInfo& info,
226 const std::vector<armnn::DataType>& supportedTypes,
227 std::string const& descName)
228{
229 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
230 if (iterator == supportedTypes.end())
231 {
232 throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
233 }
234}
235
telsoa014fcda012018-03-09 14:13:49 +0000236} //namespace
237
/// Validates the descriptor's raw input/output tensor-handle arrays: each must
/// contain the expected number of entries and no null handles
/// (see ValidateTensors). Inputs are checked before outputs.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
244
245//---------------------------------------------------------------
246void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
247{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100248 ValidateNumInputs(workloadInfo, "MemCopyQueueDescriptor", 1);
249 ValidateNumOutputs(workloadInfo, "MemCopyQueueDescriptor" , 1);
telsoa014fcda012018-03-09 14:13:49 +0000250
251 if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
252 {
253 throw InvalidArgumentException(boost::str(
254 boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
255 % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
256 }
257
258 for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
259 {
260 if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
261 workloadInfo.m_OutputTensorInfos[i].GetNumElements())
262 {
263 throw InvalidArgumentException(boost::str(
264 boost::format("Number of elements for tensor input and output %1% does not match")
265 % i ));
266 }
267 }
268
269 if (m_Inputs.size() != m_Outputs.size())
270 {
271 throw InvalidArgumentException(boost::str(
272 boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
273 % m_Inputs.size() % m_Outputs.size()));
274 }
275
276 for (unsigned int i = 0; i < m_Inputs.size(); ++i)
277 {
278 if (!m_Inputs[i])
279 {
280 throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
281 }
282
283 if (!m_Outputs[i])
284 {
285 throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
286 }
287 }
288}
289
290//---------------------------------------------------------------
291void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
292{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100293 ValidateNumInputs(workloadInfo, "ActivationQueueDescriptor", 1);
294 ValidateNumOutputs(workloadInfo, "ActivationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000295 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
296 workloadInfo.m_OutputTensorInfos[0],
297 "ActivationQueueDescriptor",
298 "input",
299 "output");
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100300
301 std::vector<DataType> supportedTypes = {
302 DataType::Float32,
303 DataType::Float16,
Teresa Charlin18515e22019-04-24 10:17:46 +0100304 DataType::QuantisedAsymm8,
305 DataType::QuantisedSymm16
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100306 };
307
308 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
309 supportedTypes,
310 "ActivationQueueDescriptor");
311
312 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
313 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
314 "ActivationQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000315}
316
317//---------------------------------------------------------------
318void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
319{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100320 ValidateNumInputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
321 ValidateNumOutputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000322
323 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
324 workloadInfo.m_OutputTensorInfos[0],
325 "SoftmaxQueueDescriptor",
326 "input",
327 "output");
328}
329
330//---------------------------------------------------------------
331void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
332{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100333 ValidateNumInputs(workloadInfo, "SplitterQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000334
335 if (workloadInfo.m_OutputTensorInfos.size() <= 0)
336 {
337 throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
338 }
339
340 if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
341 {
342 throw InvalidArgumentException(
343 "SplitterQueueDescriptor: Number of split windows "
344 "has to match number of workloadInfo.m_OutputTensorInfos. "
345 "Number of windows: " +
346 to_string(m_ViewOrigins.size()) +
347 ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
348 }
349
telsoa01c577f2c2018-08-31 09:22:23 +0100350 //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
telsoa014fcda012018-03-09 14:13:49 +0000351 std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
352 for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
353 {
telsoa01c577f2c2018-08-31 09:22:23 +0100354 //Checks that the dimensionality of input is same as the split windows.
telsoa014fcda012018-03-09 14:13:49 +0000355 ViewOrigin const& e = m_ViewOrigins[w];
356 if (e.m_Origin.size() != inputDims)
357 {
358 throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
359 "have the same dimensionality as the input tensor. "
360 "Window origin (index: " +
361 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
362 " dimensions, the input "
363 "tensor has " +
364 to_string(inputDims) + " dimensions.");
365 }
366 for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
367 {
368 if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
369 workloadInfo.m_InputTensorInfos[0].GetShape()[i])
370 {
371 throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
372 "be smaller or equal than the size of the input in that coord.");
373 }
374 }
375 }
376}
377
378//---------------------------------------------------------------
/// Validates a merger (concatenation) workload: at least one input and output
/// (both handles and tensor infos), a concat axis within the input's rank, one
/// view origin per input, each window with the output's dimensionality, and
/// every window fully contained within the output tensor.
void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumOutputs(workloadInfo, "MergerQueueDescriptor", 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
    }

    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException("Invalid Concatenation Axis provided");
    }

    // NOTE(review): when concatenating along the innermost dimension the
    // window checks below are skipped entirely — presumably the view origins
    // are not used in that case; confirm against the merger workload
    // implementations before relying on this.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "MergerQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }
}
449
450//---------------------------------------------------------------
/// Validates a fully-connected workload: one input (rank 2 or 4), one rank-2
/// output, a rank-2 weight tensor, and — when bias is enabled — a rank-1 bias
/// of the correct data type and quantization parameters.
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");

    // Rank-4 inputs are accepted as well as rank-2 (presumably flattened by
    // the workload itself — confirm against the backend implementations).
    if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
          workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
    }

    if (m_Weight == nullptr)
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
    }

    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        if (m_Bias == nullptr)
        {
            throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
                                           "bias value tensor descriptor is missing.");
        }

        // Validates type and quantization values.
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "FullyConnectedQueueDescriptor", "bias");

        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
    }

    // For quantized outputs, the output scale must exceed input*weight scale.
    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
}
492
493//---------------------------------------------------------------
494void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
495{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100496 ValidateNumInputs(workloadInfo, "NormalizationQueueDescriptor", 1);
497 ValidateNumOutputs(workloadInfo, "NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000498 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
499 workloadInfo.m_OutputTensorInfos[0],
500 "NormalizationQueueDescriptor",
501 "input",
502 "output");
503}
504
505void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
506{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100507 ValidateNumInputs(workloadInfo, "AdditionQueueDescriptor", 2);
508 ValidateNumOutputs(workloadInfo, "AdditionQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000509
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100510 std::vector<DataType> supportedTypes = {
511 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100512 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100513 DataType::QuantisedSymm16,
514 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100515 };
516
517 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
518 supportedTypes,
519 "AdditionQueueDescriptor");
520
521 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
522 supportedTypes,
523 "AdditionQueueDescriptor");
524
525 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
526 supportedTypes,
527 "AdditionQueueDescriptor");
528
telsoa014fcda012018-03-09 14:13:49 +0000529 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
530 workloadInfo.m_InputTensorInfos[1],
531 workloadInfo.m_OutputTensorInfos[0],
532 "AdditionQueueDescriptor",
533 "first input",
534 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000535}
536
537//---------------------------------------------------------------
538void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
539{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100540 ValidateNumInputs(workloadInfo, "MultiplicationQueueDescriptor", 2);
541 ValidateNumOutputs(workloadInfo, "MultiplicationQueueDescriptor", 1);
surmeh01bceff2f2018-03-29 16:29:27 +0100542
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100543 std::vector<DataType> supportedTypes = {
544 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100545 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100546 DataType::QuantisedSymm16,
547 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100548 };
549
550 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
551 supportedTypes,
552 "MultiplicationQueueDescriptor");
553
554 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
555 supportedTypes,
556 "MultiplicationQueueDescriptor");
557
558 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
559 supportedTypes,
560 "MultiplicationQueueDescriptor");
561
surmeh01bceff2f2018-03-29 16:29:27 +0100562 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
563 workloadInfo.m_InputTensorInfos[1],
564 workloadInfo.m_OutputTensorInfos[0],
565 "MultiplicationQueueDescriptor",
566 "first input",
567 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000568}
569
/// Validates a batch-normalization workload: one input and one output with
/// identical shapes, and four rank-1 parameter tensors (mean, variance, beta,
/// gamma) that are all present and all the same shape.
void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "BatchNormalizationQueueDescriptor",
                              "input",
                              "output");
    // All four parameter tensors are mandatory.
    ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
    ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
    ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
    ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");


    // Each parameter tensor must be 1-dimensional.
    ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
    ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
    ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
    ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");

    // All parameter tensors must share the mean tensor's shape.
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
}
597
598void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
599{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100600 ValidateNumInputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
601 ValidateNumOutputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000602
603 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
604 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
605
606 ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
607 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
608 ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
609 "Convolution2dQueueDescriptor", "weight");
610 if (m_Parameters.m_BiasEnabled)
611 {
612 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
613 ValidateTensorDataType(m_Bias->GetTensorInfo(),
614 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
615 "Convolution2dQueueDescriptor", "bias");
616 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
617 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
618 }
619
620 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
621 workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
622}
623
/// Validates a depthwise 2D convolution workload: one rank-4 input and output,
/// a rank-4 weight tensor whose channel multiplier and input-channel count
/// multiply to the output channel count, and — when bias is enabled — a rank-1
/// bias of the correct type and quantization parameters.
void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);

    ValidateTensorNumDimensions(
        workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");

    ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");

    // Channel axis of the OUTPUT tensor: 1 for NCHW, 3 for NHWC.
    const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;

    // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
    // inputChannels * channelMultiplier should be equal to outputChannels.
    const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
    const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
    const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelIndex];
    if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
                                     "equal to input_channels (provided %2%) multiplied by channel_multiplier "
                                     "(provided %3%).")
                                     % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
    }

    if (m_Parameters.m_BiasEnabled)
    {
        // Bias must be present, 1-D, correctly quantized and of the bias type
        // implied by the input's data type.
        ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "DepthwiseConvolution2dQueueDescriptor", "bias");
    }

    // For quantized outputs, the output scale must exceed input*weight scale.
    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
}
668
669void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
670{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100671 ValidateNumInputs(workloadInfo, "PermuteQueueDescriptor", 1);
672 ValidateNumOutputs(workloadInfo, "PermuteQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000673
674 const PermutationVector& mapping = m_Parameters.m_DimMappings;
675
676 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
677 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
678
679 ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
680 ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
681
682 for (unsigned int i = 0; i < mapping.GetSize(); ++i)
683 {
684 if (input.GetShape()[i] != output.GetShape()[mapping[i]])
685 {
686 throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
687 " (=" + to_string(input.GetShape()[i]) + ") " +
688 "must match dst dimension " + to_string(mapping[i]) +
689 " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
690 }
691 }
692}
693
694void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
695{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100696 ValidateNumInputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
697 ValidateNumOutputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000698
699 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
700 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
701}
702
// Validates a bilinear-resize workload: one 4D input, one 4D output, and the
// resize may only change the spatial (width/height) extents.
void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");

    // Resizes bilinear only changes width and height: batch and channel count must match.
    {
        // Batch is always dimension 0 regardless of data layout.
        const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
        const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
        if (inputBatchSize != outputBatchSize)
        {
            throw InvalidArgumentException(
                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
                    "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
        }
    }

    {
        // DataLayoutIndexed resolves the channel axis for the configured layout
        // (e.g. index 1 for NCHW, index 3 for NHWC).
        DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
        const unsigned int inputChannelCount =
            workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
        const unsigned int outputChannelCount =
            workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
        if (inputChannelCount != outputChannelCount)
        {
            throw InvalidArgumentException(
                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
                    "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
        }
    }
}
737
738void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
739{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100740 ValidateNumInputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
741 ValidateNumOutputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000742
743 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
744 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
745 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
746 workloadInfo.m_OutputTensorInfos[0],
747 "FakeQuantizationQueueDescriptor",
748 "input",
749 "output");
750 if (m_Parameters.m_Min > m_Parameters.m_Max)
751 {
752 throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
753 }
754
755}
756
757void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
758{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100759 ValidateNumInputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
760 ValidateNumOutputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000761
762 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
763 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
764 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
765 workloadInfo.m_OutputTensorInfos[0],
766 "L2NormalizationQueueDescriptor",
767 "input",
768 "output");
769}
770
771void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
772{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100773 ValidateNumInputs(workloadInfo, "ConstantQueueDescriptor", 0);
774 ValidateNumOutputs(workloadInfo, "ConstantQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000775
776 if (!m_LayerOutput)
777 {
778 throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
779 }
780
781 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
782 workloadInfo.m_OutputTensorInfos[0],
783 "ConstantQueueDescriptor",
784 "constant",
785 "output");
786}
787
788void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
789{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100790 ValidateNumInputs(workloadInfo, "ReshapeQueueDescriptor", 1);
791 ValidateNumOutputs(workloadInfo, "ReshapeQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000792
793 if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
794 {
795 throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
796 to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
797 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
798 }
799}
800
// Validates a SpaceToBatchNd workload: 4D input/output, a 2D block shape with
// matching pad list, and an output sized to hold the padded input exactly.
void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "output");

    // Only the 2D (height, width) case is supported.
    if (m_Parameters.m_BlockShape.size() != 2)
    {
        throw InvalidArgumentException("Block Shape must contain 2 spatial dimensions");
    }

    // One (before, after) pad pair is required per spatial dimension.
    if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
    {
        throw InvalidArgumentException("Pad List must contain the same number of dimensions as Block Shape.");
    }

    const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();

    // PadList[0] pads height, PadList[1] pads width; .first/.second are the
    // amounts added before/after the dimension.
    std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];

    // Resolve H/W/C axis positions for the configured data layout (NCHW/NHWC).
    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
                               + heightPad.first + heightPad.second;

    unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
                              + widthPad.first + widthPad.second;

    // Element count of the input after padding; SpaceToBatchNd only rearranges
    // data, so the output must hold exactly this many elements.
    unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
                                    * inputShape[dimensionIndices.GetChannelsIndex()];

    if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
    {
        throw InvalidArgumentException("SpaceToBatchNdQueueDescriptor: Input tensor has " +
            to_string(numInputElements) + " after padding but output tensor has " +
            to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
    }

    // Each padded spatial extent must split evenly into blocks.
    if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
    {
        throw InvalidArgumentException(
            "Input shape after padding must be divisible by Block Shape in all spatial dimensions");
    }
}
847
telsoa014fcda012018-03-09 14:13:49 +0000848void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
849{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100850 ValidateNumInputs(workloadInfo, "FloorQueueDescriptor", 1);
851 ValidateNumOutputs(workloadInfo, "FlootQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000852
853 if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
854 {
855 throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
856 }
857}
858
telsoa01c577f2c2018-08-31 09:22:23 +0100859void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
860{
861 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "LstmQueueDescriptor", 2, "input");
862 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "LstmQueueDescriptor", 2, "output");
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100863
864 std::vector<DataType> supportedTypes = {
Conor Kennedyb9971c92019-05-07 07:14:23 +0100865 DataType::Float16,
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100866 DataType::Float32,
Conor Kennedyb9971c92019-05-07 07:14:23 +0100867 DataType::QuantisedSymm16
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100868 };
869
870 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
871 supportedTypes,
872 "LstmQueueDescriptor");
873
874 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
875 supportedTypes,
876 "LstmQueueDescriptor");
telsoa01c577f2c2018-08-31 09:22:23 +0100877}
878
879void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
880{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100881 ValidateNumInputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
882 ValidateNumOutputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100883
884 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
885 {
886 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Input tensor type must be Float32.");
887 }
888
889 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float16)
890 {
891 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Output tensor type must be Float16.");
892 }
893
894 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
895 workloadInfo.m_OutputTensorInfos[0],
896 "ConvertFp32ToFp16QueueDescriptor",
897 "input",
898 "output");
899}
900
901void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
902{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100903 ValidateNumInputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
904 ValidateNumOutputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100905
906 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float16)
907 {
908 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Input tensor type must be Float16.");
909 }
910 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
911 {
912 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Output tensor type must be Float32.");
913 }
914
915 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
916 workloadInfo.m_OutputTensorInfos[0],
917 "ConvertFp16ToFp32QueueDescriptor",
918 "input",
919 "output");
920}
921
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100922void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
923{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100924 ValidateNumInputs(workloadInfo, "DivisionQueueDescriptor", 2);
925 ValidateNumOutputs(workloadInfo, "DivisionQueueDescriptor", 1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100926
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100927 std::vector<DataType> supportedTypes = {
928 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100929 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100930 DataType::QuantisedSymm16,
931 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100932 };
933
934 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
935 supportedTypes,
936 "DivisionQueueDescriptor");
937
938 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
939 supportedTypes,
940 "DivisionQueueDescriptor");
941
942 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
943 supportedTypes,
944 "DivisionQueueDescriptor");
945
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100946 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
947 workloadInfo.m_InputTensorInfos[1],
948 workloadInfo.m_OutputTensorInfos[0],
949 "DivisionQueueDescriptor",
950 "first input",
951 "second input");
952}
953
David Beckc2044fe2018-09-05 15:00:38 +0100954void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
955{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100956 ValidateNumInputs(workloadInfo, "SubtractionQueueDescriptor", 2);
957 ValidateNumOutputs(workloadInfo, "SubtractionQueueDescriptor", 1);
David Beckc2044fe2018-09-05 15:00:38 +0100958
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100959 std::vector<DataType> supportedTypes = {
960 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100961 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100962 DataType::QuantisedSymm16,
963 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100964 };
965
966 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
967 supportedTypes,
968 "SubtractionQueueDescriptor");
969
970 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
971 supportedTypes,
972 "SubtractionQueueDescriptor");
973
974 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
975 supportedTypes,
976 "SubtractionQueueDescriptor");
977
David Beckc2044fe2018-09-05 15:00:38 +0100978 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
979 workloadInfo.m_InputTensorInfos[1],
980 workloadInfo.m_OutputTensorInfos[0],
981 "SubtractionQueueDescriptor",
982 "first input",
983 "second input");
984}
985
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000986void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
987{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100988 ValidateNumInputs(workloadInfo, "MaximumQueueDescriptor", 2);
989 ValidateNumOutputs(workloadInfo, "MaximumQueueDescriptor", 1);
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000990
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100991 std::vector<DataType> supportedTypes = {
992 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100993 DataType::QuantisedAsymm8,
994 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100995 };
996
997 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
998 supportedTypes,
999 "MaximumQueueDescriptor");
1000
1001 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1002 supportedTypes,
1003 "MaximumQueueDescriptor");
1004
1005 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1006 supportedTypes,
1007 "MaximumQueueDescriptor");
1008
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001009 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1010 workloadInfo.m_InputTensorInfos[1],
1011 workloadInfo.m_OutputTensorInfos[0],
1012 "MaximumQueueDescriptor",
1013 "first input",
1014 "second input");
1015}
1016
narpra01a6bf9122018-09-10 09:50:09 +01001017void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1018{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001019 ValidateNumInputs(workloadInfo, "MeanQueueDescriptor", 1);
1020 ValidateNumOutputs(workloadInfo, "MeanQueueDescriptor", 1);
narpra01eb061912018-09-10 17:35:27 +01001021
1022 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
1023 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1024
narpra0132b90462018-09-13 11:07:48 +01001025 if (m_Parameters.m_KeepDims)
narpra01eb061912018-09-10 17:35:27 +01001026 {
1027 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
1028 }
narpra0132b90462018-09-13 11:07:48 +01001029 else if (m_Parameters.m_Axis.empty())
narpra01eb061912018-09-10 17:35:27 +01001030 {
1031 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
1032 }
1033 else
1034 {
narpra0132b90462018-09-13 11:07:48 +01001035 auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
narpra01eb061912018-09-10 17:35:27 +01001036 ValidateTensorNumDimensions(output,
1037 "MeanQueueDescriptor",
1038 outputDim > 0 ? outputDim : 1,
1039 "output");
1040 }
narpra01a6bf9122018-09-10 09:50:09 +01001041}
1042
jimfly012c9322a2018-09-19 10:59:49 +01001043void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1044{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001045 ValidateNumInputs(workloadInfo, "PadQueueDescriptor", 1);
1046 ValidateNumOutputs(workloadInfo, "PadQueueDescriptor", 1);
jimfly012c9322a2018-09-19 10:59:49 +01001047
1048 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
Nina Drozd661dfa72018-10-02 11:14:17 +01001049 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1050
jimfly012c9322a2018-09-19 10:59:49 +01001051 // input and output should have the same number of dimensions
1052 ValidateTensorNumDimensions(output, "PadQueueDescriptor", input.GetNumDimensions(), "output");
1053 // there should be entry in the pad list for each dimension in the input tensor
1054 if (m_Parameters.m_PadList.size() != input.GetNumDimensions()) {
1055 throw InvalidArgumentException("Pad List should contain the same number of entries as there"
1056 " are dimensions in the input tensor that is " +
1057 to_string(input.GetNumDimensions()) + " entries " +
1058 " not " + to_string(m_Parameters.m_PadList.size()) + " entries.");
1059 }
1060}
1061
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001062void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1063{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001064 ValidateNumInputs(workloadInfo, "QuantizeQueueDescriptor", 1);
1065 ValidateNumOutputs(workloadInfo, "QuantizeQueueDescriptor", 1);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001066
1067
1068 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
1069 {
1070 throw InvalidArgumentException("Quantize only accepts Float32 inputs.");
1071 }
1072
1073 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1074 workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1075 {
1076 throw InvalidArgumentException("Output of quantized layer must be quantized type.");
1077 }
1078}
1079
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001080void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1081{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001082 ValidateNumInputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
1083 ValidateNumOutputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001084}
1085
// Validates a strided-slice workload: one input of rank <= 4, one output, and
// begin/end/stride vectors that each have one entry per input dimension.
void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "StridedSliceQueueDescriptor", 1);

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const uint32_t rank = input.GetNumDimensions();

    // Backends only implement strided slice for up to 4D tensors.
    if (rank > 4)
    {
        throw InvalidArgumentException(
            "StridedSliceLayer: Input tensors with rank greater than 4 are not supported");
    }

    // Begin, End & Stride length must be of rank(input0)
    if (m_Parameters.m_Begin.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: Begin length must be of rank input0("
                                       + to_string(rank) + ")");
    }

    if (m_Parameters.m_End.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: End length must be of rank input0("
                                       + to_string(rank) + ")");
    }

    if (m_Parameters.m_Stride.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: Stride length must be of rank input0("
                                       + to_string(rank) + ")");
    }

    // Stride entries must be non-zero: a zero stride would make no progress
    // along its axis.
    for (auto& stride : m_Parameters.m_Stride)
    {
        if (stride == 0)
        {
            throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
        }
    }
}
1128
kevmay0190539692018-11-29 08:40:19 +00001129void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1130{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001131 ValidateNumInputs(workloadInfo, "MinimumQueueDescriptor", 2);
1132 ValidateNumOutputs(workloadInfo, "MinimumQueueDescriptor", 1);
kevmay0190539692018-11-29 08:40:19 +00001133
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001134 std::vector<DataType> supportedTypes = {
1135 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001136 DataType::QuantisedAsymm8,
1137 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001138 };
1139
1140 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1141 supportedTypes,
1142 "MinimumQueueDescriptor");
1143
1144 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1145 supportedTypes,
1146 "MinimumQueueDescriptor");
1147
1148 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1149 supportedTypes,
1150 "MinimumQueueDescriptor");
1151
kevmay0190539692018-11-29 08:40:19 +00001152 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1153 workloadInfo.m_InputTensorInfos[1],
1154 workloadInfo.m_OutputTensorInfos[0],
1155 "MinimumQueueDescriptor",
1156 "first input",
1157 "second input");
1158}
1159
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001160void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1161{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001162 ValidateNumInputs(workloadInfo, "DebugQueueDescriptor", 1);
1163 ValidateNumOutputs(workloadInfo, "DebugQueueDescriptor", 1);
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001164}
1165
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001166void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1167{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001168 ValidateNumInputs(workloadInfo, "EqualQueueDescriptor", 2);
1169 ValidateNumOutputs(workloadInfo, "EqualQueueDescriptor", 1);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001170
1171 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1172 workloadInfo.m_InputTensorInfos[1],
1173 workloadInfo.m_OutputTensorInfos[0],
1174 "EqualQueueDescriptor",
1175 "first input",
1176 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001177
1178 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1179 {
1180 throw InvalidArgumentException("EqualQueueDescriptor: Output tensor type must be Boolean.");
1181 }
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001182}
1183
FrancisMurtagh878f0232018-12-19 10:56:15 +00001184void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1185{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001186 ValidateNumInputs(workloadInfo, "GreaterQueueDescriptor", 2);
1187 ValidateNumOutputs(workloadInfo, "GreaterQueueDescriptor", 1);
FrancisMurtagh878f0232018-12-19 10:56:15 +00001188
1189 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1190 workloadInfo.m_InputTensorInfos[1],
1191 workloadInfo.m_OutputTensorInfos[0],
1192 "GreaterQueueDescriptor",
1193 "first input",
1194 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001195
1196 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1197 {
1198 throw InvalidArgumentException("GreaterQueueDescriptor: Output tensor type must be Boolean.");
1199 }
FrancisMurtagh878f0232018-12-19 10:56:15 +00001200}
1201
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001202void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1203{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001204 ValidateNumInputs(workloadInfo, "RsqrtQueueDescriptor", 1);
1205 ValidateNumOutputs(workloadInfo, "RsqrtQueueDescriptor", 1);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001206 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1207 workloadInfo.m_OutputTensorInfos[0],
1208 "RsqrtQueueDescriptor",
1209 "input",
1210 "output");
1211}
1212
narpra01b89b05f2019-01-16 09:53:09 +00001213void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1214{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001215 ValidateNumInputs(workloadInfo, "GatherQueueDescriptor", 2);
1216 ValidateNumOutputs(workloadInfo, "GatherQueueDescriptor", 1);
narpra014951d842019-01-18 16:53:53 +00001217
1218 const TensorInfo& indices = workloadInfo.m_InputTensorInfos[1];
1219
1220 if (indices.GetDataType() != DataType::Signed32)
1221 {
1222 throw InvalidArgumentException("GatherQueueDescriptor: Indices tensor type must be int32.");
1223 }
1224
1225 const TensorInfo& params = workloadInfo.m_InputTensorInfos[0];
1226 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1227 unsigned int paramsDim = params.GetNumDimensions();
1228 unsigned int indicesDim = indices.GetNumDimensions();
1229 unsigned int outputDim = paramsDim - 1 + indicesDim;
1230
1231 ValidateTensorNumDimensions(output, "GatherQueueDescriptor", outputDim, "output");
narpra01b89b05f2019-01-16 09:53:09 +00001232}
1233
// Validates a detection post-processing (NMS) workload: box encodings and
// scores as inputs, an anchors tensor supplied via the descriptor, and four
// Float32 outputs (boxes, classes, scores, num detections).
void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "DetectionPostProcessQueueDescriptor", 2);

    if (workloadInfo.m_OutputTensorInfos.size() != 4)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Requires exactly four outputs. " +
                                       to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
    }

    // Anchors are passed as a constant tensor handle on the descriptor rather
    // than as a workload input.
    if (m_Anchors == nullptr)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Anchors tensor descriptor is missing.");
    }

    const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
    const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
    const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
    const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];

    // Expected ranks for inputs and anchors.
    ValidateTensorNumDimensions(boxEncodingsInfo, "DetectionPostProcessQueueDescriptor", 3, "box encodings");
    ValidateTensorNumDimensions(scoresInfo, "DetectionPostProcessQueueDescriptor", 3, "scores");
    ValidateTensorNumDimensions(anchorsInfo, "DetectionPostProcessQueueDescriptor", 2, "anchors");

    // Expected ranks for the four outputs.
    ValidateTensorNumDimensions(detectionBoxesInfo, "DetectionPostProcessQueueDescriptor", 3, "detection boxes");
    ValidateTensorNumDimensions(detectionScoresInfo, "DetectionPostProcessQueueDescriptor", 2, "detection scores");
    ValidateTensorNumDimensions(detectionClassesInfo, "DetectionPostProcessQueueDescriptor", 2, "detection classes");
    ValidateTensorNumDimensions(numDetectionsInfo, "DetectionPostProcessQueueDescriptor", 1, "num detections");

    // All four outputs are Float32 (class indices and counts included).
    ValidateTensorDataType(detectionBoxesInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection boxes");
    ValidateTensorDataType(detectionScoresInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection scores");
    ValidateTensorDataType(detectionClassesInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection classes");
    ValidateTensorDataType(numDetectionsInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "num detections");

    // The IoU threshold for non-maximum suppression must lie in (0, 1].
    if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }
    // The scores tensor carries one extra column for the background class.
    if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Number of classes with background "
                                       "should be equal to number of classes + 1.");
    }
}
1286
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001287void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1288{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001289 ValidateNumInputs(workloadInfo, "DequantizeQueueDescriptor", 1);
1290 ValidateNumOutputs(workloadInfo, "DequantizeQueueDescriptor", 1);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001291
1292 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1293 workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1294 {
1295 throw InvalidArgumentException("Input to dequantize layer must be quantized type.");
1296 }
1297
1298 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
1299 {
1300 throw InvalidArgumentException("Output of dequantize layer must be Float32 type.");
1301 }
1302}
1303
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001304void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1305{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001306 ValidateNumInputs(workloadInfo, "MergeQueueDescriptor", 2);
1307 ValidateNumOutputs(workloadInfo, "MergeQueueDescriptor", 1);
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001308
1309 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1310 workloadInfo.m_InputTensorInfos[1],
1311 "MergeQueueDescriptor",
1312 "input0",
1313 "input1");
1314
1315 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1316 workloadInfo.m_OutputTensorInfos[0],
1317 "MergeQueueDescriptor",
1318 "input0",
1319 "output");
1320
1321 const DataType dataType = workloadInfo.m_InputTensorInfos[0].GetDataType();
1322 ValidateTensorDataType(workloadInfo.m_InputTensorInfos[1], dataType, "MergeQueueDescriptor", "input1");
1323 ValidateTensorDataType(workloadInfo.m_OutputTensorInfos[0], dataType, "MergeQueueDescriptor", "output");
1324}
1325
Sadik Armaganeff363d2019-04-05 15:25:46 +01001326void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1327{
1328 ValidateNumInputs(workloadInfo, "SwitchQueueDescriptor", 2);
1329 ValidateNumOutputs(workloadInfo, "SwitchQueueDescriptor", 2);
1330
1331 std::vector<DataType> supportedTypes = {
1332 DataType::Float32,
1333 DataType::QuantisedAsymm8,
1334 DataType::QuantisedSymm16
1335 };
1336
1337 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1338 supportedTypes,
1339 "SwitchQueueDescriptor");
1340
1341 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1342 supportedTypes,
1343 "SwitchQueueDescriptor");
1344
1345 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1346 supportedTypes,
1347 "SwitchQueueDescriptor");
1348
1349 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1350 workloadInfo.m_OutputTensorInfos[0],
1351 "SwitchQueueDescriptor",
1352 "input0",
1353 "output0");
1354
1355 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1356 workloadInfo.m_OutputTensorInfos[1],
1357 "SwitchQueueDescriptor",
1358 "input0",
1359 "output1");
1360}
1361
void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // This is internally generated so it should not need validation.
    // Pre-compiled workloads are produced by the backend itself (not by user
    // input), so the descriptor is trusted and intentionally left unchecked.
}
1366
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001367} //namespace armnn