blob: ca9d7d9c5e806f841f1ad8402f65cb36651b1109 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "WorkloadData.hpp"
6
7#include "CpuTensorHandle.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
Matteo Martincigh21350152018-11-28 16:22:22 +00009#include <DataLayoutIndexed.hpp>
Matthew Bentham8800c002018-11-19 13:19:28 +000010
telsoa014fcda012018-03-09 14:13:49 +000011#include <algorithm>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <iomanip>
telsoa014fcda012018-03-09 14:13:49 +000013#include <string>
14#include <sstream>
telsoa014fcda012018-03-09 14:13:49 +000015
16#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010017#include <boost/numeric/conversion/cast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Matteo Martincigh21350152018-11-28 16:22:22 +000019using namespace armnnUtils;
20
telsoa014fcda012018-03-09 14:13:49 +000021namespace armnn
22{
23
24//---------------------------------------------------------------
25DataType GetBiasDataType(DataType inputDataType)
26{
27 switch (inputDataType)
28 {
telsoa01c577f2c2018-08-31 09:22:23 +010029 case DataType::Float16:
30 return DataType::Float16;
telsoa014fcda012018-03-09 14:13:49 +000031 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QuantisedAsymm8:
34 return DataType::Signed32;
35 default:
36 BOOST_ASSERT_MSG(false, "Invalid input data type");
37 return DataType::Float32;
38 }
39}
40
41namespace
42{
43
44//---------------------------------------------------------------
45//android ndk does not support std::to_string function.
// Stream-based fallback for std::to_string, which the Android NDK does not provide.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
53
54//---------------------------------------------------------------
55void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
56{
57 if (!ptr)
58 {
59 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
60 paramName + " parameter must be set.");
61 }
62}
63
64//---------------------------------------------------------------
65void ValidateTensorShapesMatch(const TensorInfo& first,
66 const TensorInfo& second,
67 std::string const& descName,
68 std::string const& firstName,
69 std::string const& secondName)
70{
71 if (first.GetShape() != second.GetShape())
72 {
73 throw InvalidArgumentException(descName + ": "
74 + firstName + " & " + secondName + " must have identical shapes");
75 }
76}
77
78//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010079void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000080{
Sadik Armaganeff363d2019-04-05 15:25:46 +010081 if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000082 {
83 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010084 ": Requires exactly " + to_string(expectedSize) + "input(s). " +
telsoa014fcda012018-03-09 14:13:49 +000085 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
86 }
87}
88
89//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010090void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000091{
Sadik Armaganeff363d2019-04-05 15:25:46 +010092 if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000093 {
94 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010095 ": Requires exactly " + to_string(expectedSize) + " output(s). " +
telsoa014fcda012018-03-09 14:13:49 +000096 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
97 }
98}
99
100//---------------------------------------------------------------
101void ValidateTensorNumDimensions(const TensorInfo& tensor,
102 std::string const& descName,
103 unsigned int numDimensions,
104 std::string const& tensorName)
105{
106 if (tensor.GetNumDimensions() != numDimensions)
107 {
108 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
109 to_string(tensor.GetNumDimensions()) + " dimensions for " +
110 tensorName + " tensor.");
111 }
112}
113
114//---------------------------------------------------------------
115void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
116 const std::string& descName, std::string const& tensorName)
117{
118 if (tensor.GetDataType() != dataType)
119 {
120 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
121 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
122 }
123}
124
125//---------------------------------------------------------------
126void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
127 const TensorInfo& weightsTensorInfo, const std::string& descName)
128{
129 if (biasTensor.GetQuantizationOffset() != 0)
130 {
131 throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
132 to_string(biasTensor.GetQuantizationOffset()));
133 }
134 const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
kevmay016c46dd32018-12-17 15:32:45 +0000135 if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
telsoa014fcda012018-03-09 14:13:49 +0000136 {
137 // Print the float values with extra precision to see very small differences
138 std::stringstream msg;
139 msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
140 " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
141 biasTensor.GetQuantizationScale();
142 throw InvalidArgumentException(msg.str());
143 }
144}
145
146//---------------------------------------------------------------
147void ValidateTensors(const std::vector<ITensorHandle*>& vec,
148 unsigned int numExpected,
149 const std::string& descName,
150 const std::string& varName)
151{
152 if (vec.empty() && numExpected > 0)
153 {
154 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
155 }
156
157 for (unsigned int i = 0; i < numExpected; ++i)
158 {
159 if (!vec[i])
160 {
161 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
162 }
163 }
164}
165
166//---------------------------------------------------------------
167void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
168 const TensorInfo& second,
169 const TensorInfo& output,
170 std::string const& descName,
171 std::string const& firstName,
172 std::string const& secondName)
173{
174 // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
175 // broadcasted.
176 if (first.GetNumDimensions() != second.GetNumDimensions())
177 {
178 throw InvalidArgumentException(descName + ": Tensors "
179 + firstName + " & " + secondName
180 + " must have the same number of dimensions in order to be broadcasted");
181 }
182 uint32_t numDims = first.GetNumDimensions();
183 std::vector<uint32_t> outputDims(numDims, 0u);
184 for (uint32_t i = 0; i < numDims; i++)
185 {
186 const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
187 const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
188 if (dimsNotEqual && dimsNotOne)
189 {
190 throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
191 }
192 outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
193 }
194 TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
195 if (broadcastShape != output.GetShape())
196 {
197 throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
198 + firstName + " & " + secondName
199 + " does not match the output shape");
200 }
201}
202
203//---------------------------------------------------------------
204/// Validates that the output tensor's quantization scale is greater than the product
205/// of the two input tensors' quantization scales. This is a requirement of the implementation of
206/// the quantized multiplication.
207void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
208 const TensorInfo& outputTensorInfo, std::string const& descName,
209 const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
210{
211 if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
212 {
213 if (outputTensorInfo.GetQuantizationScale() <=
214 inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
215 {
216 std::stringstream msg;
217 msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
218 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
219 throw InvalidArgumentException(msg.str());
220 }
221 }
222}
223
Sadik Armaganeff363d2019-04-05 15:25:46 +0100224//---------------------------------------------------------------
225void ValidateDataTypes(const TensorInfo& info,
226 const std::vector<armnn::DataType>& supportedTypes,
227 std::string const& descName)
228{
229 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
230 if (iterator == supportedTypes.end())
231 {
232 throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
233 }
234}
235
telsoa014fcda012018-03-09 14:13:49 +0000236} //namespace
237
238void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
239 unsigned int numExpectedIn, unsigned int numExpectedOut) const
240{
241 ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
242 ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
243}
244
245//---------------------------------------------------------------
246void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
247{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100248 ValidateNumInputs(workloadInfo, "MemCopyQueueDescriptor", 1);
249 ValidateNumOutputs(workloadInfo, "MemCopyQueueDescriptor" , 1);
telsoa014fcda012018-03-09 14:13:49 +0000250
251 if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
252 {
253 throw InvalidArgumentException(boost::str(
254 boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
255 % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
256 }
257
258 for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
259 {
260 if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
261 workloadInfo.m_OutputTensorInfos[i].GetNumElements())
262 {
263 throw InvalidArgumentException(boost::str(
264 boost::format("Number of elements for tensor input and output %1% does not match")
265 % i ));
266 }
267 }
268
269 if (m_Inputs.size() != m_Outputs.size())
270 {
271 throw InvalidArgumentException(boost::str(
272 boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
273 % m_Inputs.size() % m_Outputs.size()));
274 }
275
276 for (unsigned int i = 0; i < m_Inputs.size(); ++i)
277 {
278 if (!m_Inputs[i])
279 {
280 throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
281 }
282
283 if (!m_Outputs[i])
284 {
285 throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
286 }
287 }
288}
289
290//---------------------------------------------------------------
291void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
292{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100293 ValidateNumInputs(workloadInfo, "ActivationQueueDescriptor", 1);
294 ValidateNumOutputs(workloadInfo, "ActivationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000295 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
296 workloadInfo.m_OutputTensorInfos[0],
297 "ActivationQueueDescriptor",
298 "input",
299 "output");
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100300
301 std::vector<DataType> supportedTypes = {
302 DataType::Float32,
303 DataType::Float16,
Teresa Charlin18515e22019-04-24 10:17:46 +0100304 DataType::QuantisedAsymm8,
305 DataType::QuantisedSymm16
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100306 };
307
308 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
309 supportedTypes,
310 "ActivationQueueDescriptor");
311
312 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
313 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
314 "ActivationQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000315}
316
317//---------------------------------------------------------------
318void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
319{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100320 ValidateNumInputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
321 ValidateNumOutputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000322
323 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
324 workloadInfo.m_OutputTensorInfos[0],
325 "SoftmaxQueueDescriptor",
326 "input",
327 "output");
328}
329
330//---------------------------------------------------------------
331void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
332{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100333 ValidateNumInputs(workloadInfo, "SplitterQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000334
335 if (workloadInfo.m_OutputTensorInfos.size() <= 0)
336 {
337 throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
338 }
339
340 if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
341 {
342 throw InvalidArgumentException(
343 "SplitterQueueDescriptor: Number of split windows "
344 "has to match number of workloadInfo.m_OutputTensorInfos. "
345 "Number of windows: " +
346 to_string(m_ViewOrigins.size()) +
347 ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
348 }
349
telsoa01c577f2c2018-08-31 09:22:23 +0100350 //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
telsoa014fcda012018-03-09 14:13:49 +0000351 std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
352 for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
353 {
telsoa01c577f2c2018-08-31 09:22:23 +0100354 //Checks that the dimensionality of input is same as the split windows.
telsoa014fcda012018-03-09 14:13:49 +0000355 ViewOrigin const& e = m_ViewOrigins[w];
356 if (e.m_Origin.size() != inputDims)
357 {
358 throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
359 "have the same dimensionality as the input tensor. "
360 "Window origin (index: " +
361 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
362 " dimensions, the input "
363 "tensor has " +
364 to_string(inputDims) + " dimensions.");
365 }
366 for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
367 {
368 if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
369 workloadInfo.m_InputTensorInfos[0].GetShape()[i])
370 {
371 throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
372 "be smaller or equal than the size of the input in that coord.");
373 }
374 }
375 }
376}
377
378//---------------------------------------------------------------
379void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
380{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100381 ValidateNumOutputs(workloadInfo, "MergerQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000382
383 if (m_Inputs.size() <= 0)
384 {
385 throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
386 }
387 if (m_Outputs.size() <= 0)
388 {
389 throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
390 }
391
392 if (workloadInfo.m_InputTensorInfos.size() <= 0)
393 {
394 throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
395 }
396 if (workloadInfo.m_OutputTensorInfos.size() <= 0)
397 {
398 throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
399 }
400
Nikhil Raj8599a412018-11-19 14:51:07 +0000401 if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
402 {
403 throw InvalidArgumentException("Invalid Concatenation Axis provided");
404 }
405
406 if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
407 {
408 return;
409 }
410
telsoa014fcda012018-03-09 14:13:49 +0000411 if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
412 {
413 throw InvalidArgumentException(
414 "MergerQueueDescriptor: Number of split windows "
415 "has to match number of workloadInfo.m_InputTensorInfos. "
416 "Number of windows: " +
417 to_string(m_ViewOrigins.size()) +
418 ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
419 }
420
telsoa01c577f2c2018-08-31 09:22:23 +0100421 //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
telsoa014fcda012018-03-09 14:13:49 +0000422 std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
423 for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
424 {
telsoa01c577f2c2018-08-31 09:22:23 +0100425 //Checks that the dimensionality of output is same as the split windows.
telsoa014fcda012018-03-09 14:13:49 +0000426 ViewOrigin const& e = m_ViewOrigins[w];
427 if (e.m_Origin.size() != outputDims)
428 {
429 throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
430 "have the same dimensionality as the output tensor. "
431 "Window origin (index: " +
432 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
433 " dimensions, the output "
434 "tensor has " +
435 to_string(outputDims) + " dimensions.");
436 }
telsoa01c577f2c2018-08-31 09:22:23 +0100437 //Checks that the merge windows are within the output tensor.
telsoa014fcda012018-03-09 14:13:49 +0000438 for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
439 {
440 if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
441 > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
442 {
443 throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
444 "be smaller or equal than the size of the output in that coord.");
445 }
446 }
447 }
448}
449
450//---------------------------------------------------------------
451void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
452{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100453 ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
454 ValidateNumOutputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000455 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");
456
457 if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
458 workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
459 {
460 throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
461 }
462
463 if (m_Weight == nullptr)
464 {
465 throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
466 }
467
468 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");
469
470 if (m_Parameters.m_BiasEnabled)
471 {
472 if (m_Bias == nullptr)
473 {
474 throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
475 "bias value tensor descriptor is missing.");
476 }
477
telsoa01c577f2c2018-08-31 09:22:23 +0100478 // Validates type and quantization values.
telsoa014fcda012018-03-09 14:13:49 +0000479 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
480 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");
481
482 ValidateTensorDataType(m_Bias->GetTensorInfo(),
483 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
484 "FullyConnectedQueueDescriptor", "bias");
485
486 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
487 }
488
489 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
490 workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
491}
492
493//---------------------------------------------------------------
494void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
495{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100496 ValidateNumInputs(workloadInfo, "NormalizationQueueDescriptor", 1);
497 ValidateNumOutputs(workloadInfo, "NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000498 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
499 workloadInfo.m_OutputTensorInfos[0],
500 "NormalizationQueueDescriptor",
501 "input",
502 "output");
503}
504
505void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
506{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100507 ValidateNumInputs(workloadInfo, "AdditionQueueDescriptor", 2);
508 ValidateNumOutputs(workloadInfo, "AdditionQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000509
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100510 std::vector<DataType> supportedTypes = {
511 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100512 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100513 DataType::QuantisedSymm16,
514 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100515 };
516
517 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
518 supportedTypes,
519 "AdditionQueueDescriptor");
520
521 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
522 supportedTypes,
523 "AdditionQueueDescriptor");
524
525 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
526 supportedTypes,
527 "AdditionQueueDescriptor");
528
telsoa014fcda012018-03-09 14:13:49 +0000529 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
530 workloadInfo.m_InputTensorInfos[1],
531 workloadInfo.m_OutputTensorInfos[0],
532 "AdditionQueueDescriptor",
533 "first input",
534 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000535}
536
537//---------------------------------------------------------------
538void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
539{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100540 ValidateNumInputs(workloadInfo, "MultiplicationQueueDescriptor", 2);
541 ValidateNumOutputs(workloadInfo, "MultiplicationQueueDescriptor", 1);
surmeh01bceff2f2018-03-29 16:29:27 +0100542
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100543 std::vector<DataType> supportedTypes = {
544 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100545 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100546 DataType::QuantisedSymm16,
547 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100548 };
549
550 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
551 supportedTypes,
552 "MultiplicationQueueDescriptor");
553
554 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
555 supportedTypes,
556 "MultiplicationQueueDescriptor");
557
558 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
559 supportedTypes,
560 "MultiplicationQueueDescriptor");
561
surmeh01bceff2f2018-03-29 16:29:27 +0100562 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
563 workloadInfo.m_InputTensorInfos[1],
564 workloadInfo.m_OutputTensorInfos[0],
565 "MultiplicationQueueDescriptor",
566 "first input",
567 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000568}
569
570void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
571{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100572 ValidateNumInputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
573 ValidateNumOutputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000574 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
575 workloadInfo.m_OutputTensorInfos[0],
576 "BatchNormalizationQueueDescriptor",
577 "input",
578 "output");
579 ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
580 ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
581 ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
582 ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");
583
584
585 ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
586 ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
587 ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
588 ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");
589
590 ValidateTensorShapesMatch(
591 m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
592 ValidateTensorShapesMatch(
593 m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
594 ValidateTensorShapesMatch(
595 m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
596}
597
598void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
599{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100600 ValidateNumInputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
601 ValidateNumOutputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000602
603 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
604 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
605
606 ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
607 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
608 ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
609 "Convolution2dQueueDescriptor", "weight");
610 if (m_Parameters.m_BiasEnabled)
611 {
612 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
613 ValidateTensorDataType(m_Bias->GetTensorInfo(),
614 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
615 "Convolution2dQueueDescriptor", "bias");
616 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
617 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
618 }
619
620 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
621 workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
622}
623
624void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
625{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100626 ValidateNumInputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
627 ValidateNumOutputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000628
629 ValidateTensorNumDimensions(
630 workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
631 ValidateTensorNumDimensions(
632 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");
633
634 ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
635 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");
636
Nikhil Rajcec6b652018-10-12 13:51:57 +0100637 const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
638
Matteo Martincigh747ef822018-12-18 09:26:39 +0000639 // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
640 // inputChannels * channelMultiplier should be equal to outputChannels.
telsoa014fcda012018-03-09 14:13:49 +0000641 const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000642 const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
Nikhil Rajcec6b652018-10-12 13:51:57 +0100643 const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelIndex];
telsoa014fcda012018-03-09 14:13:49 +0000644 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
645 {
646 throw InvalidArgumentException(
647 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
648 "equal to input_channels (provided %2%) multiplied by channel_multiplier "
649 "(provided %3%).")
650 % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
651 }
652
653 if (m_Parameters.m_BiasEnabled)
654 {
655 ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
656 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
657 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
658 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");
659
660 ValidateTensorDataType(m_Bias->GetTensorInfo(),
661 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
662 "DepthwiseConvolution2dQueueDescriptor", "bias");
663 }
664
665 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
666 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
667}
668
669void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
670{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100671 ValidateNumInputs(workloadInfo, "PermuteQueueDescriptor", 1);
672 ValidateNumOutputs(workloadInfo, "PermuteQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000673
674 const PermutationVector& mapping = m_Parameters.m_DimMappings;
675
676 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
677 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
678
679 ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
680 ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
681
682 for (unsigned int i = 0; i < mapping.GetSize(); ++i)
683 {
684 if (input.GetShape()[i] != output.GetShape()[mapping[i]])
685 {
686 throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
687 " (=" + to_string(input.GetShape()[i]) + ") " +
688 "must match dst dimension " + to_string(mapping[i]) +
689 " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
690 }
691 }
692}
693
694void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
695{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100696 ValidateNumInputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
697 ValidateNumOutputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000698
699 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
700 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
701}
702
// Validates a resize-bilinear workload: one 4D input, one 4D output, and the
// batch and channel dimensions must be unchanged (only width/height may differ).
void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");

    // Resizes bilinear only changes width and height: batch and channel count must match.
    {
        // Batch is always dimension 0, regardless of data layout.
        const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
        const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
        if (inputBatchSize != outputBatchSize)
        {
            throw InvalidArgumentException(
                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
                    "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
        }
    }

    {
        // The channel axis position depends on the data layout (NCHW vs NHWC),
        // so look it up through DataLayoutIndexed.
        DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
        const unsigned int inputChannelCount =
            workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
        const unsigned int outputChannelCount =
            workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
        if (inputChannelCount != outputChannelCount)
        {
            throw InvalidArgumentException(
                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
                    "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
        }
    }
}
737
738void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
739{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100740 ValidateNumInputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
741 ValidateNumOutputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000742
743 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
744 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
745 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
746 workloadInfo.m_OutputTensorInfos[0],
747 "FakeQuantizationQueueDescriptor",
748 "input",
749 "output");
750 if (m_Parameters.m_Min > m_Parameters.m_Max)
751 {
752 throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
753 }
754
755}
756
757void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
758{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100759 ValidateNumInputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
760 ValidateNumOutputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000761
762 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
763 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
764 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
765 workloadInfo.m_OutputTensorInfos[0],
766 "L2NormalizationQueueDescriptor",
767 "input",
768 "output");
769}
770
771void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
772{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100773 ValidateNumInputs(workloadInfo, "ConstantQueueDescriptor", 0);
774 ValidateNumOutputs(workloadInfo, "ConstantQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000775
776 if (!m_LayerOutput)
777 {
778 throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
779 }
780
781 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
782 workloadInfo.m_OutputTensorInfos[0],
783 "ConstantQueueDescriptor",
784 "constant",
785 "output");
786}
787
788void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
789{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100790 ValidateNumInputs(workloadInfo, "ReshapeQueueDescriptor", 1);
791 ValidateNumOutputs(workloadInfo, "ReshapeQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000792
793 if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
794 {
795 throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
796 to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
797 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
798 }
799}
800
// Validates a SpaceToBatchNd workload: 4D input/output, a 2-element block
// shape with matching pad list, padded element count preserved, and padded
// spatial dimensions divisible by the block shape.
void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "output");

    // Only 2D (height, width) block shapes are supported here.
    if (m_Parameters.m_BlockShape.size() != 2)
    {
        throw InvalidArgumentException("Block Shape must contain 2 spatial dimensions");
    }

    // One (before, after) pad pair is required per block-shape entry.
    if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
    {
        throw InvalidArgumentException("Pad List must contain the same number of dimensions as Block Shape.");
    }

    const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();

    // PadList[0] pads height, PadList[1] pads width; .first/.second are the
    // before/after padding amounts.
    std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];

    // The height/width axis positions depend on the data layout (NCHW/NHWC).
    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
                               + heightPad.first + heightPad.second;

    unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
                              + widthPad.first + widthPad.second;

    // Element count of the input after padding; SpaceToBatchNd only moves
    // elements between the spatial and batch dimensions, so the output must
    // hold exactly this many elements.
    unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
                                    * inputShape[dimensionIndices.GetChannelsIndex()];

    if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
    {
        throw InvalidArgumentException("SpaceToBatchNdQueueDescriptor: Input tensor has " +
            to_string(numInputElements) + " after padding but output tensor has " +
            to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
    }

    // The padded spatial extent must divide evenly into blocks.
    if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
    {
        throw InvalidArgumentException(
            "Input shape after padding must be divisible by Block Shape in all spatial dimensions");
    }
}
847
telsoa014fcda012018-03-09 14:13:49 +0000848void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
849{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100850 ValidateNumInputs(workloadInfo, "FloorQueueDescriptor", 1);
851 ValidateNumOutputs(workloadInfo, "FlootQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000852
853 if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
854 {
855 throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
856 }
857}
858
// Validates an LSTM workload: only checks that the first input and first
// output tensors are 2D.
void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // NOTE(review): unlike the other Validate() methods in this file there are
    // no ValidateNumInputs/ValidateNumOutputs calls, and only the first
    // input/output is checked — confirm whether the remaining LSTM tensors
    // should also be validated here.
    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "LstmQueueDescriptor", 2, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "LstmQueueDescriptor", 2, "output");
}
864
865void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
866{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100867 ValidateNumInputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
868 ValidateNumOutputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100869
870 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
871 {
872 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Input tensor type must be Float32.");
873 }
874
875 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float16)
876 {
877 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Output tensor type must be Float16.");
878 }
879
880 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
881 workloadInfo.m_OutputTensorInfos[0],
882 "ConvertFp32ToFp16QueueDescriptor",
883 "input",
884 "output");
885}
886
887void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
888{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100889 ValidateNumInputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
890 ValidateNumOutputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100891
892 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float16)
893 {
894 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Input tensor type must be Float16.");
895 }
896 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
897 {
898 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Output tensor type must be Float32.");
899 }
900
901 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
902 workloadInfo.m_OutputTensorInfos[0],
903 "ConvertFp16ToFp32QueueDescriptor",
904 "input",
905 "output");
906}
907
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100908void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
909{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100910 ValidateNumInputs(workloadInfo, "DivisionQueueDescriptor", 2);
911 ValidateNumOutputs(workloadInfo, "DivisionQueueDescriptor", 1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100912
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100913 std::vector<DataType> supportedTypes = {
914 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100915 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100916 DataType::QuantisedSymm16,
917 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100918 };
919
920 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
921 supportedTypes,
922 "DivisionQueueDescriptor");
923
924 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
925 supportedTypes,
926 "DivisionQueueDescriptor");
927
928 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
929 supportedTypes,
930 "DivisionQueueDescriptor");
931
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100932 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
933 workloadInfo.m_InputTensorInfos[1],
934 workloadInfo.m_OutputTensorInfos[0],
935 "DivisionQueueDescriptor",
936 "first input",
937 "second input");
938}
939
David Beckc2044fe2018-09-05 15:00:38 +0100940void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
941{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100942 ValidateNumInputs(workloadInfo, "SubtractionQueueDescriptor", 2);
943 ValidateNumOutputs(workloadInfo, "SubtractionQueueDescriptor", 1);
David Beckc2044fe2018-09-05 15:00:38 +0100944
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100945 std::vector<DataType> supportedTypes = {
946 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100947 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100948 DataType::QuantisedSymm16,
949 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100950 };
951
952 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
953 supportedTypes,
954 "SubtractionQueueDescriptor");
955
956 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
957 supportedTypes,
958 "SubtractionQueueDescriptor");
959
960 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
961 supportedTypes,
962 "SubtractionQueueDescriptor");
963
David Beckc2044fe2018-09-05 15:00:38 +0100964 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
965 workloadInfo.m_InputTensorInfos[1],
966 workloadInfo.m_OutputTensorInfos[0],
967 "SubtractionQueueDescriptor",
968 "first input",
969 "second input");
970}
971
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000972void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
973{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100974 ValidateNumInputs(workloadInfo, "MaximumQueueDescriptor", 2);
975 ValidateNumOutputs(workloadInfo, "MaximumQueueDescriptor", 1);
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000976
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100977 std::vector<DataType> supportedTypes = {
978 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100979 DataType::QuantisedAsymm8,
980 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100981 };
982
983 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
984 supportedTypes,
985 "MaximumQueueDescriptor");
986
987 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
988 supportedTypes,
989 "MaximumQueueDescriptor");
990
991 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
992 supportedTypes,
993 "MaximumQueueDescriptor");
994
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000995 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
996 workloadInfo.m_InputTensorInfos[1],
997 workloadInfo.m_OutputTensorInfos[0],
998 "MaximumQueueDescriptor",
999 "first input",
1000 "second input");
1001}
1002
// Validates a Mean (reduction) workload. The expected output rank depends on
// whether reduced dimensions are kept and on how many axes are reduced.
void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MeanQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MeanQueueDescriptor", 1);

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];

    if (m_Parameters.m_KeepDims)
    {
        // Reduced axes are kept as size-1 dimensions, so the rank is unchanged.
        ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
    }
    else if (m_Parameters.m_Axis.empty())
    {
        // No axes given: reduce over everything, producing a 1D result.
        ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
    }
    else
    {
        // Each reduced axis removes one dimension; clamp to at least rank 1.
        // NOTE(review): this is unsigned arithmetic — if m_Axis held more
        // entries than the input has dimensions, outputDim would wrap rather
        // than go negative; confirm callers guarantee Axis.size() <= rank.
        auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
        ValidateTensorNumDimensions(output,
                                    "MeanQueueDescriptor",
                                    outputDim > 0 ? outputDim : 1,
                                    "output");
    }
}
1028
// Validates a Pad workload: the output rank must match the input rank, and the
// pad list must provide one (before, after) pair per input dimension.
void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "PadQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "PadQueueDescriptor", 1);

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];

    // input and output should have the same number of dimensions
    ValidateTensorNumDimensions(output, "PadQueueDescriptor", input.GetNumDimensions(), "output");
    // there should be entry in the pad list for each dimension in the input tensor
    if (m_Parameters.m_PadList.size() != input.GetNumDimensions()) {
        throw InvalidArgumentException("Pad List should contain the same number of entries as there"
                                       " are dimensions in the input tensor that is " +
                                       to_string(input.GetNumDimensions()) + " entries " +
                                       " not " + to_string(m_Parameters.m_PadList.size()) + " entries.");
    }
}
1047
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001048void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1049{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001050 ValidateNumInputs(workloadInfo, "QuantizeQueueDescriptor", 1);
1051 ValidateNumOutputs(workloadInfo, "QuantizeQueueDescriptor", 1);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001052
1053
1054 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
1055 {
1056 throw InvalidArgumentException("Quantize only accepts Float32 inputs.");
1057 }
1058
1059 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1060 workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1061 {
1062 throw InvalidArgumentException("Output of quantized layer must be quantized type.");
1063 }
1064}
1065
// Validates a BatchToSpaceNd workload.
void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Only the tensor counts are checked here.
    // NOTE(review): no shape, rank, or block-shape validation is performed,
    // unlike SpaceToBatchNdQueueDescriptor::Validate — confirm this is intended.
    ValidateNumInputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
}
1071
// Validates a StridedSlice workload: input rank at most 4, and the begin/end/
// stride vectors must each have one entry per input dimension, with all
// strides non-zero.
void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "StridedSliceQueueDescriptor", 1);

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const uint32_t rank = input.GetNumDimensions();

    if (rank > 4)
    {
        throw InvalidArgumentException(
            "StridedSliceLayer: Input tensors with rank greater than 4 are not supported");
    }

    // Begin, End & Stride length must be of rank(input0)
    if (m_Parameters.m_Begin.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: Begin length must be of rank input0("
                                       + to_string(rank) + ")");
    }

    if (m_Parameters.m_End.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: End length must be of rank input0("
                                       + to_string(rank) + ")");
    }

    if (m_Parameters.m_Stride.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: Stride length must be of rank input0("
                                       + to_string(rank) + ")");
    }

    // Stride entries must be non-zero
    // (a zero stride would make the slice's step undefined).
    for (auto& stride : m_Parameters.m_Stride)
    {
        if (stride == 0)
        {
            throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
        }
    }
}
1114
kevmay0190539692018-11-29 08:40:19 +00001115void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1116{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001117 ValidateNumInputs(workloadInfo, "MinimumQueueDescriptor", 2);
1118 ValidateNumOutputs(workloadInfo, "MinimumQueueDescriptor", 1);
kevmay0190539692018-11-29 08:40:19 +00001119
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001120 std::vector<DataType> supportedTypes = {
1121 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001122 DataType::QuantisedAsymm8,
1123 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001124 };
1125
1126 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1127 supportedTypes,
1128 "MinimumQueueDescriptor");
1129
1130 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1131 supportedTypes,
1132 "MinimumQueueDescriptor");
1133
1134 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1135 supportedTypes,
1136 "MinimumQueueDescriptor");
1137
kevmay0190539692018-11-29 08:40:19 +00001138 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1139 workloadInfo.m_InputTensorInfos[1],
1140 workloadInfo.m_OutputTensorInfos[0],
1141 "MinimumQueueDescriptor",
1142 "first input",
1143 "second input");
1144}
1145
// Validates a Debug workload: only the tensor counts are checked; no shape or
// data-type constraints are imposed here.
void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "DebugQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "DebugQueueDescriptor", 1);
}
1151
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001152void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1153{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001154 ValidateNumInputs(workloadInfo, "EqualQueueDescriptor", 2);
1155 ValidateNumOutputs(workloadInfo, "EqualQueueDescriptor", 1);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001156
1157 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1158 workloadInfo.m_InputTensorInfos[1],
1159 workloadInfo.m_OutputTensorInfos[0],
1160 "EqualQueueDescriptor",
1161 "first input",
1162 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001163
1164 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1165 {
1166 throw InvalidArgumentException("EqualQueueDescriptor: Output tensor type must be Boolean.");
1167 }
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001168}
1169
FrancisMurtagh878f0232018-12-19 10:56:15 +00001170void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1171{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001172 ValidateNumInputs(workloadInfo, "GreaterQueueDescriptor", 2);
1173 ValidateNumOutputs(workloadInfo, "GreaterQueueDescriptor", 1);
FrancisMurtagh878f0232018-12-19 10:56:15 +00001174
1175 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1176 workloadInfo.m_InputTensorInfos[1],
1177 workloadInfo.m_OutputTensorInfos[0],
1178 "GreaterQueueDescriptor",
1179 "first input",
1180 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001181
1182 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1183 {
1184 throw InvalidArgumentException("GreaterQueueDescriptor: Output tensor type must be Boolean.");
1185 }
FrancisMurtagh878f0232018-12-19 10:56:15 +00001186}
1187
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001188void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1189{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001190 ValidateNumInputs(workloadInfo, "RsqrtQueueDescriptor", 1);
1191 ValidateNumOutputs(workloadInfo, "RsqrtQueueDescriptor", 1);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001192 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1193 workloadInfo.m_OutputTensorInfos[0],
1194 "RsqrtQueueDescriptor",
1195 "input",
1196 "output");
1197}
1198
// Validates a Gather workload: int32 indices, and an output whose rank equals
// rank(params) - 1 + rank(indices).
void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Two inputs: the params tensor (input 0) and the indices tensor (input 1).
    ValidateNumInputs(workloadInfo, "GatherQueueDescriptor", 2);
    ValidateNumOutputs(workloadInfo, "GatherQueueDescriptor", 1);

    const TensorInfo& indices = workloadInfo.m_InputTensorInfos[1];

    if (indices.GetDataType() != DataType::Signed32)
    {
        throw InvalidArgumentException("GatherQueueDescriptor: Indices tensor type must be int32.");
    }

    const TensorInfo& params = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
    unsigned int paramsDim = params.GetNumDimensions();
    unsigned int indicesDim = indices.GetNumDimensions();
    // Gather replaces the indexed axis of params with the shape of indices.
    // NOTE(review): unsigned arithmetic — assumes paramsDim >= 1; a rank-0
    // params tensor would wrap. Confirm rank-0 tensors cannot reach here.
    unsigned int outputDim = paramsDim - 1 + indicesDim;

    ValidateTensorNumDimensions(output, "GatherQueueDescriptor", outputDim, "output");
}
1219
// Validates a DetectionPostProcess workload: two inputs (box encodings and
// scores), an anchors tensor supplied via the descriptor, and exactly four
// Float32 outputs (boxes, classes, scores, number of detections).
void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "DetectionPostProcessQueueDescriptor", 2);

    // Checked by hand (rather than via ValidateNumOutputs) to produce a
    // message that includes the actual count supplied.
    if (workloadInfo.m_OutputTensorInfos.size() != 4)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Requires exactly four outputs. " +
                                       to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
    }

    // Anchors come in through the descriptor, not through the workload inputs.
    if (m_Anchors == nullptr)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Anchors tensor descriptor is missing.");
    }

    const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
    const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
    const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
    const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];

    // Rank checks for inputs and anchors...
    ValidateTensorNumDimensions(boxEncodingsInfo, "DetectionPostProcessQueueDescriptor", 3, "box encodings");
    ValidateTensorNumDimensions(scoresInfo, "DetectionPostProcessQueueDescriptor", 3, "scores");
    ValidateTensorNumDimensions(anchorsInfo, "DetectionPostProcessQueueDescriptor", 2, "anchors");

    // ...and for each of the four outputs.
    ValidateTensorNumDimensions(detectionBoxesInfo, "DetectionPostProcessQueueDescriptor", 3, "detection boxes");
    ValidateTensorNumDimensions(detectionScoresInfo, "DetectionPostProcessQueueDescriptor", 2, "detection scores");
    ValidateTensorNumDimensions(detectionClassesInfo, "DetectionPostProcessQueueDescriptor", 2, "detection classes");
    ValidateTensorNumDimensions(numDetectionsInfo, "DetectionPostProcessQueueDescriptor", 1, "num detections");

    // All four outputs are Float32.
    ValidateTensorDataType(detectionBoxesInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection boxes");
    ValidateTensorDataType(detectionScoresInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection scores");
    ValidateTensorDataType(detectionClassesInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection classes");
    ValidateTensorDataType(numDetectionsInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "num detections");

    // The IoU threshold for non-max suppression must be in (0, 1].
    if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }
    // The scores tensor carries one extra class slot for the background class.
    if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Number of classes with background "
                                       "should be equal to number of classes + 1.");
    }
}
1272
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001273void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1274{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001275 ValidateNumInputs(workloadInfo, "DequantizeQueueDescriptor", 1);
1276 ValidateNumOutputs(workloadInfo, "DequantizeQueueDescriptor", 1);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001277
1278 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1279 workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1280 {
1281 throw InvalidArgumentException("Input to dequantize layer must be quantized type.");
1282 }
1283
1284 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
1285 {
1286 throw InvalidArgumentException("Output of dequantize layer must be Float32 type.");
1287 }
1288}
1289
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001290void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1291{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001292 ValidateNumInputs(workloadInfo, "MergeQueueDescriptor", 2);
1293 ValidateNumOutputs(workloadInfo, "MergeQueueDescriptor", 1);
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001294
1295 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1296 workloadInfo.m_InputTensorInfos[1],
1297 "MergeQueueDescriptor",
1298 "input0",
1299 "input1");
1300
1301 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1302 workloadInfo.m_OutputTensorInfos[0],
1303 "MergeQueueDescriptor",
1304 "input0",
1305 "output");
1306
1307 const DataType dataType = workloadInfo.m_InputTensorInfos[0].GetDataType();
1308 ValidateTensorDataType(workloadInfo.m_InputTensorInfos[1], dataType, "MergeQueueDescriptor", "input1");
1309 ValidateTensorDataType(workloadInfo.m_OutputTensorInfos[0], dataType, "MergeQueueDescriptor", "output");
1310}
1311
Sadik Armaganeff363d2019-04-05 15:25:46 +01001312void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1313{
1314 ValidateNumInputs(workloadInfo, "SwitchQueueDescriptor", 2);
1315 ValidateNumOutputs(workloadInfo, "SwitchQueueDescriptor", 2);
1316
1317 std::vector<DataType> supportedTypes = {
1318 DataType::Float32,
1319 DataType::QuantisedAsymm8,
1320 DataType::QuantisedSymm16
1321 };
1322
1323 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1324 supportedTypes,
1325 "SwitchQueueDescriptor");
1326
1327 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1328 supportedTypes,
1329 "SwitchQueueDescriptor");
1330
1331 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1332 supportedTypes,
1333 "SwitchQueueDescriptor");
1334
1335 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1336 workloadInfo.m_OutputTensorInfos[0],
1337 "SwitchQueueDescriptor",
1338 "input0",
1339 "output0");
1340
1341 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1342 workloadInfo.m_OutputTensorInfos[1],
1343 "SwitchQueueDescriptor",
1344 "input0",
1345 "output1");
1346}
1347
// Validates a PreCompiled workload.
void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // This is internally generated so it should not need validation.
    // Intentionally empty: the descriptor is produced by the backend's own
    // pre-compilation step rather than from user input.
}
1352
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001353} //namespace armnn