blob: a3470ad601170663f2aa0071ed3debaa87f9c591 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "WorkloadData.hpp"
6
7#include "CpuTensorHandle.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
Matteo Martincigh21350152018-11-28 16:22:22 +00009#include <DataLayoutIndexed.hpp>
Matthew Bentham8800c002018-11-19 13:19:28 +000010
telsoa014fcda012018-03-09 14:13:49 +000011#include <algorithm>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <iomanip>
telsoa014fcda012018-03-09 14:13:49 +000013#include <string>
14#include <sstream>
telsoa014fcda012018-03-09 14:13:49 +000015
16#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010017#include <boost/numeric/conversion/cast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Matteo Martincigh21350152018-11-28 16:22:22 +000019using namespace armnnUtils;
20
telsoa014fcda012018-03-09 14:13:49 +000021namespace armnn
22{
23
24//---------------------------------------------------------------
25DataType GetBiasDataType(DataType inputDataType)
26{
27 switch (inputDataType)
28 {
telsoa01c577f2c2018-08-31 09:22:23 +010029 case DataType::Float16:
30 return DataType::Float16;
telsoa014fcda012018-03-09 14:13:49 +000031 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QuantisedAsymm8:
34 return DataType::Signed32;
35 default:
36 BOOST_ASSERT_MSG(false, "Invalid input data type");
37 return DataType::Float32;
38 }
39}
40
41namespace
42{
43
//---------------------------------------------------------------
// android ndk does not support std::to_string function.
/// Formats any streamable value as a std::string via an output string stream.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
53
54//---------------------------------------------------------------
55void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
56{
57 if (!ptr)
58 {
59 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
60 paramName + " parameter must be set.");
61 }
62}
63
64//---------------------------------------------------------------
65void ValidateTensorShapesMatch(const TensorInfo& first,
66 const TensorInfo& second,
67 std::string const& descName,
68 std::string const& firstName,
69 std::string const& secondName)
70{
71 if (first.GetShape() != second.GetShape())
72 {
73 throw InvalidArgumentException(descName + ": "
74 + firstName + " & " + secondName + " must have identical shapes");
75 }
76}
77
78//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010079void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000080{
Sadik Armaganeff363d2019-04-05 15:25:46 +010081 if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000082 {
83 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010084 ": Requires exactly " + to_string(expectedSize) + "input(s). " +
telsoa014fcda012018-03-09 14:13:49 +000085 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
86 }
87}
88
89//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010090void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000091{
Sadik Armaganeff363d2019-04-05 15:25:46 +010092 if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000093 {
94 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010095 ": Requires exactly " + to_string(expectedSize) + " output(s). " +
telsoa014fcda012018-03-09 14:13:49 +000096 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
97 }
98}
99
100//---------------------------------------------------------------
101void ValidateTensorNumDimensions(const TensorInfo& tensor,
102 std::string const& descName,
103 unsigned int numDimensions,
104 std::string const& tensorName)
105{
106 if (tensor.GetNumDimensions() != numDimensions)
107 {
108 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
109 to_string(tensor.GetNumDimensions()) + " dimensions for " +
110 tensorName + " tensor.");
111 }
112}
113
114//---------------------------------------------------------------
115void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
116 const std::string& descName, std::string const& tensorName)
117{
118 if (tensor.GetDataType() != dataType)
119 {
120 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
121 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
122 }
123}
124
//---------------------------------------------------------------
// Validates the quantization parameters of a bias tensor: its offset must be zero and
// its scale must equal input_scale * weights_scale, which is what quantized kernels assume.
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
    const TensorInfo& weightsTensorInfo, const std::string& descName)
{
    // Quantized bias tensors are required to be symmetric around zero.
    if (biasTensor.GetQuantizationOffset() != 0)
    {
        throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
            to_string(biasTensor.GetQuantizationOffset()));
    }
    const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
    // Compare with a small absolute tolerance rather than == to allow for float rounding.
    if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
    {
        // Print the float values with extra precision to see very small differences
        std::stringstream msg;
        msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
            " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
            biasTensor.GetQuantizationScale();
        throw InvalidArgumentException(msg.str());
    }
}
145
146//---------------------------------------------------------------
147void ValidateTensors(const std::vector<ITensorHandle*>& vec,
148 unsigned int numExpected,
149 const std::string& descName,
150 const std::string& varName)
151{
152 if (vec.empty() && numExpected > 0)
153 {
154 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
155 }
156
157 for (unsigned int i = 0; i < numExpected; ++i)
158 {
159 if (!vec[i])
160 {
161 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
162 }
163 }
164}
165
//---------------------------------------------------------------
// Validates that 'first' and 'second' are broadcast-compatible (per-dimension: equal, or
// one of them is 1) and that the resulting broadcast shape equals the output tensor's shape.
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName + ": Tensors "
            + firstName + " & " + secondName
            + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        // Broadcasting rule per dimension: sizes must be equal, or at least one must be 1.
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        // The broadcast result takes the larger extent in each dimension.
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
            + firstName + " & " + secondName
            + " does not match the output shape");
    }
}
202
203//---------------------------------------------------------------
204/// Validates that the output tensor's quantization scale is greater than the product
205/// of the two input tensors' quantization scales. This is a requirement of the implementation of
206/// the quantized multiplication.
207void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
208 const TensorInfo& outputTensorInfo, std::string const& descName,
209 const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
210{
211 if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
212 {
213 if (outputTensorInfo.GetQuantizationScale() <=
214 inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
215 {
216 std::stringstream msg;
217 msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
218 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
219 throw InvalidArgumentException(msg.str());
220 }
221 }
222}
223
Sadik Armaganeff363d2019-04-05 15:25:46 +0100224//---------------------------------------------------------------
225void ValidateDataTypes(const TensorInfo& info,
226 const std::vector<armnn::DataType>& supportedTypes,
227 std::string const& descName)
228{
229 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
230 if (iterator == supportedTypes.end())
231 {
232 throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
233 }
234}
235
telsoa014fcda012018-03-09 14:13:49 +0000236} //namespace
237
// Checks that this descriptor's m_Inputs/m_Outputs hold the expected number of
// non-null tensor handles; throws InvalidArgumentException otherwise.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    // Inputs are validated first, so an input problem is reported before an output problem.
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
244
//---------------------------------------------------------------
// Validates a memory-copy workload: input/output tensor-info counts must match,
// each input/output pair must contain the same number of elements, and every
// input/output handle must be non-null.
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemCopyQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemCopyQueueDescriptor" , 1);

    // Each input tensor info must have a corresponding output tensor info.
    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
                % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    // A copy only requires equal element counts — shapes are not compared here.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                    % i ));
        }
    }

    // The handle arrays must pair up one-to-one as well.
    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
                % m_Inputs.size() % m_Outputs.size()));
    }

    // Every handle in both arrays must be non-null.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}
289
290//---------------------------------------------------------------
291void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
292{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100293 ValidateNumInputs(workloadInfo, "ActivationQueueDescriptor", 1);
294 ValidateNumOutputs(workloadInfo, "ActivationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000295 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
296 workloadInfo.m_OutputTensorInfos[0],
297 "ActivationQueueDescriptor",
298 "input",
299 "output");
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100300
301 std::vector<DataType> supportedTypes = {
302 DataType::Float32,
303 DataType::Float16,
Teresa Charlin18515e22019-04-24 10:17:46 +0100304 DataType::QuantisedAsymm8,
305 DataType::QuantisedSymm16
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100306 };
307
308 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
309 supportedTypes,
310 "ActivationQueueDescriptor");
311
312 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
313 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
314 "ActivationQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000315}
316
317//---------------------------------------------------------------
318void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
319{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100320 ValidateNumInputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
321 ValidateNumOutputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000322
323 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
324 workloadInfo.m_OutputTensorInfos[0],
325 "SoftmaxQueueDescriptor",
326 "input",
327 "output");
328}
329
//---------------------------------------------------------------
// Validates a splitter workload: one input, at least one output, one view origin per
// output, each view origin with the input's dimensionality, and every split window
// lying fully inside the input tensor.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "SplitterQueueDescriptor", 1);

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
    }

    // There must be exactly one split window (view origin) per output tensor.
    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "SplitterQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }
        // Each window (origin + extent of its output) must fit inside the input tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
377
//---------------------------------------------------------------
// Validates a merger (concatenation) workload: one output, at least one input,
// a valid concat axis, per-input merge windows inside the output, and supported
// data types with the output type matching the first input's type.
void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumOutputs(workloadInfo, "MergerQueueDescriptor", 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
    }

    // The concat axis must lie within the input's rank.
    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException("Invalid Concatenation Axis provided");
    }

    // When concatenating along the innermost dimension, the window checks below do not apply.
    // NOTE(review): this early return also skips the supported-data-type checks further down —
    // confirm that is intended.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    // There must be exactly one merge window (view origin) per input tensor.
    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "MergerQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::Boolean,
        DataType::Signed32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16
    };

    // Every input must be of a supported type.
    for (unsigned long i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateDataTypes(workloadInfo.m_InputTensorInfos[i],
                          supportedTypes,
                          "MergerQueueDescriptor");
    }
    // The output must use the same type as the first input.
    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      {workloadInfo.m_InputTensorInfos[0].GetDataType()},
                      "MergerQueueDescriptor");
}
470
//---------------------------------------------------------------
// Validates a fully-connected workload: one 2D/4D input, one 2D output, a 2D weight
// tensor, an optional 1D bias (with matching type and quantization), and compatible
// quantization scales on the quantized path.
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");

    // The input may be either already flattened (2D) or a 4D tensor.
    if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
          workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
    }

    if (m_Weight == nullptr)
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
    }

    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        if (m_Bias == nullptr)
        {
            throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
                                           "bias value tensor descriptor is missing.");
        }

        // Validates type and quantization values.
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "FullyConnectedQueueDescriptor", "bias");

        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
    }

    // Quantized path requirement: output scale must exceed input_scale * weight_scale.
    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
}
513
514//---------------------------------------------------------------
515void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
516{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100517 ValidateNumInputs(workloadInfo, "NormalizationQueueDescriptor", 1);
518 ValidateNumOutputs(workloadInfo, "NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000519 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
520 workloadInfo.m_OutputTensorInfos[0],
521 "NormalizationQueueDescriptor",
522 "input",
523 "output");
524}
525
526void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
527{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100528 ValidateNumInputs(workloadInfo, "AdditionQueueDescriptor", 2);
529 ValidateNumOutputs(workloadInfo, "AdditionQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000530
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100531 std::vector<DataType> supportedTypes = {
532 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100533 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100534 DataType::QuantisedSymm16,
535 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100536 };
537
538 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
539 supportedTypes,
540 "AdditionQueueDescriptor");
541
542 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
543 supportedTypes,
544 "AdditionQueueDescriptor");
545
546 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
547 supportedTypes,
548 "AdditionQueueDescriptor");
549
telsoa014fcda012018-03-09 14:13:49 +0000550 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
551 workloadInfo.m_InputTensorInfos[1],
552 workloadInfo.m_OutputTensorInfos[0],
553 "AdditionQueueDescriptor",
554 "first input",
555 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000556}
557
558//---------------------------------------------------------------
559void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
560{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100561 ValidateNumInputs(workloadInfo, "MultiplicationQueueDescriptor", 2);
562 ValidateNumOutputs(workloadInfo, "MultiplicationQueueDescriptor", 1);
surmeh01bceff2f2018-03-29 16:29:27 +0100563
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100564 std::vector<DataType> supportedTypes = {
565 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100566 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100567 DataType::QuantisedSymm16,
568 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100569 };
570
571 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
572 supportedTypes,
573 "MultiplicationQueueDescriptor");
574
575 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
576 supportedTypes,
577 "MultiplicationQueueDescriptor");
578
579 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
580 supportedTypes,
581 "MultiplicationQueueDescriptor");
582
surmeh01bceff2f2018-03-29 16:29:27 +0100583 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
584 workloadInfo.m_InputTensorInfos[1],
585 workloadInfo.m_OutputTensorInfos[0],
586 "MultiplicationQueueDescriptor",
587 "first input",
588 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000589}
590
591void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
592{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100593 ValidateNumInputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
594 ValidateNumOutputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000595 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
596 workloadInfo.m_OutputTensorInfos[0],
597 "BatchNormalizationQueueDescriptor",
598 "input",
599 "output");
600 ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
601 ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
602 ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
603 ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");
604
605
606 ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
607 ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
608 ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
609 ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");
610
611 ValidateTensorShapesMatch(
612 m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
613 ValidateTensorShapesMatch(
614 m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
615 ValidateTensorShapesMatch(
616 m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
617}
618
619void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
620{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100621 ValidateNumInputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
622 ValidateNumOutputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000623
624 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
625 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
626
627 ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
628 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
629 ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
630 "Convolution2dQueueDescriptor", "weight");
631 if (m_Parameters.m_BiasEnabled)
632 {
633 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
634 ValidateTensorDataType(m_Bias->GetTensorInfo(),
635 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
636 "Convolution2dQueueDescriptor", "bias");
637 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
638 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
639 }
640
641 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
642 workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
643}
644
645void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
646{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100647 ValidateNumInputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
648 ValidateNumOutputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000649
650 ValidateTensorNumDimensions(
651 workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
652 ValidateTensorNumDimensions(
653 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");
654
655 ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
656 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");
657
Bruno Goncalves22972f02019-04-26 21:03:24 -0300658 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
659 {
660 throw InvalidArgumentException(
661 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: dilationX (provided %1%) "
662 "and dilationY (provided %2%) cannot be smaller than 1.")
663 % m_Parameters.m_DilationX % m_Parameters.m_DilationX));
664 }
665
Nikhil Rajcec6b652018-10-12 13:51:57 +0100666 const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
667
Matteo Martincigh747ef822018-12-18 09:26:39 +0000668 // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
669 // inputChannels * channelMultiplier should be equal to outputChannels.
telsoa014fcda012018-03-09 14:13:49 +0000670 const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000671 const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
Nikhil Rajcec6b652018-10-12 13:51:57 +0100672 const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelIndex];
telsoa014fcda012018-03-09 14:13:49 +0000673 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
674 {
675 throw InvalidArgumentException(
676 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
677 "equal to input_channels (provided %2%) multiplied by channel_multiplier "
678 "(provided %3%).")
679 % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
680 }
681
682 if (m_Parameters.m_BiasEnabled)
683 {
684 ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
685 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
686 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
687 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");
688
689 ValidateTensorDataType(m_Bias->GetTensorInfo(),
690 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
691 "DepthwiseConvolution2dQueueDescriptor", "bias");
692 }
693
694 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
695 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
696}
697
698void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
699{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100700 ValidateNumInputs(workloadInfo, "PermuteQueueDescriptor", 1);
701 ValidateNumOutputs(workloadInfo, "PermuteQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000702
703 const PermutationVector& mapping = m_Parameters.m_DimMappings;
704
705 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
706 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
707
708 ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
709 ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
710
711 for (unsigned int i = 0; i < mapping.GetSize(); ++i)
712 {
713 if (input.GetShape()[i] != output.GetShape()[mapping[i]])
714 {
715 throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
716 " (=" + to_string(input.GetShape()[i]) + ") " +
717 "must match dst dimension " + to_string(mapping[i]) +
718 " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
719 }
720 }
721}
722
723void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
724{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100725 ValidateNumInputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
726 ValidateNumOutputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000727
728 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
729 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
730}
731
732void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
733{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100734 ValidateNumInputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
735 ValidateNumOutputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000736
737 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
738 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
739
telsoa01c577f2c2018-08-31 09:22:23 +0100740 // Resizes bilinear only changes width and height: batch and channel count must match.
telsoa014fcda012018-03-09 14:13:49 +0000741 {
742 const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
743 const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
744 if (inputBatchSize != outputBatchSize)
745 {
746 throw InvalidArgumentException(
747 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
748 "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
749 }
750 }
751
752 {
Matthew Bentham8800c002018-11-19 13:19:28 +0000753 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
James Conroy59540822018-10-11 12:39:05 +0100754 const unsigned int inputChannelCount =
Matthew Bentham8800c002018-11-19 13:19:28 +0000755 workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
James Conroy59540822018-10-11 12:39:05 +0100756 const unsigned int outputChannelCount =
Matthew Bentham8800c002018-11-19 13:19:28 +0000757 workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
telsoa014fcda012018-03-09 14:13:49 +0000758 if (inputChannelCount != outputChannelCount)
759 {
760 throw InvalidArgumentException(
761 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
762 "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
763 }
764 }
765}
766
767void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
768{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100769 ValidateNumInputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
770 ValidateNumOutputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000771
772 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
773 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
774 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
775 workloadInfo.m_OutputTensorInfos[0],
776 "FakeQuantizationQueueDescriptor",
777 "input",
778 "output");
779 if (m_Parameters.m_Min > m_Parameters.m_Max)
780 {
781 throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
782 }
783
784}
785
786void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
787{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100788 ValidateNumInputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
789 ValidateNumOutputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000790
791 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
792 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
793 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
794 workloadInfo.m_OutputTensorInfos[0],
795 "L2NormalizationQueueDescriptor",
796 "input",
797 "output");
798}
799
800void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
801{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100802 ValidateNumInputs(workloadInfo, "ConstantQueueDescriptor", 0);
803 ValidateNumOutputs(workloadInfo, "ConstantQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000804
805 if (!m_LayerOutput)
806 {
807 throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
808 }
809
810 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
811 workloadInfo.m_OutputTensorInfos[0],
812 "ConstantQueueDescriptor",
813 "constant",
814 "output");
Nina Drozd58ef2c62019-05-16 12:09:18 +0100815
816 // Check the supported data types
817 std::vector<DataType> supportedTypes =
818 {
819 DataType::Float32,
820 DataType::Float16,
821 DataType::Signed32,
822 DataType::QuantisedAsymm8,
823 DataType::QuantisedSymm16
824 };
825
826 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], supportedTypes, "ConstantQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000827}
828
829void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
830{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100831 ValidateNumInputs(workloadInfo, "ReshapeQueueDescriptor", 1);
832 ValidateNumOutputs(workloadInfo, "ReshapeQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000833
834 if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
835 {
836 throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
837 to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
838 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
839 }
840}
841
void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // SpaceToBatchNd moves spatial blocks into the batch dimension: one 4D input, one 4D output.
    ValidateNumInputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "output");

    // Only 2D block shapes (height, width) are supported.
    if (m_Parameters.m_BlockShape.size() != 2)
    {
        throw InvalidArgumentException("Block Shape must contain 2 spatial dimensions");
    }

    // One (before, after) padding pair is required per spatial dimension.
    if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
    {
        throw InvalidArgumentException("Pad List must contain the same number of dimensions as Block Shape.");
    }

    const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();

    // PadList[0] pads height, PadList[1] pads width.
    std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];

    // Spatial extents after padding, located through the data layout (NCHW/NHWC).
    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
                               + heightPad.first + heightPad.second;

    unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
                              + widthPad.first + widthPad.second;

    // The operation only rearranges data, so the padded element count must equal the output's.
    unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
                                    * inputShape[dimensionIndices.GetChannelsIndex()];

    if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
    {
        throw InvalidArgumentException("SpaceToBatchNdQueueDescriptor: Input tensor has " +
            to_string(numInputElements) + " after padding but output tensor has " +
            to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
    }

    // Each padded spatial dimension must tile exactly into blocks.
    if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
    {
        throw InvalidArgumentException(
            "Input shape after padding must be divisible by Block Shape in all spatial dimensions");
    }
}
888
telsoa014fcda012018-03-09 14:13:49 +0000889void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
890{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100891 ValidateNumInputs(workloadInfo, "FloorQueueDescriptor", 1);
892 ValidateNumOutputs(workloadInfo, "FlootQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000893
894 if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
895 {
896 throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
897 }
898}
899
telsoa01c577f2c2018-08-31 09:22:23 +0100900void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
901{
902 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "LstmQueueDescriptor", 2, "input");
903 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "LstmQueueDescriptor", 2, "output");
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100904
905 std::vector<DataType> supportedTypes = {
Conor Kennedyb9971c92019-05-07 07:14:23 +0100906 DataType::Float16,
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100907 DataType::Float32,
Conor Kennedyb9971c92019-05-07 07:14:23 +0100908 DataType::QuantisedSymm16
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100909 };
910
911 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
912 supportedTypes,
913 "LstmQueueDescriptor");
914
915 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
916 supportedTypes,
917 "LstmQueueDescriptor");
telsoa01c577f2c2018-08-31 09:22:23 +0100918}
919
920void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
921{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100922 ValidateNumInputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
923 ValidateNumOutputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100924
925 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
926 {
927 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Input tensor type must be Float32.");
928 }
929
930 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float16)
931 {
932 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Output tensor type must be Float16.");
933 }
934
935 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
936 workloadInfo.m_OutputTensorInfos[0],
937 "ConvertFp32ToFp16QueueDescriptor",
938 "input",
939 "output");
940}
941
942void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
943{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100944 ValidateNumInputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
945 ValidateNumOutputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100946
947 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float16)
948 {
949 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Input tensor type must be Float16.");
950 }
951 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
952 {
953 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Output tensor type must be Float32.");
954 }
955
956 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
957 workloadInfo.m_OutputTensorInfos[0],
958 "ConvertFp16ToFp32QueueDescriptor",
959 "input",
960 "output");
961}
962
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100963void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
964{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100965 ValidateNumInputs(workloadInfo, "DivisionQueueDescriptor", 2);
966 ValidateNumOutputs(workloadInfo, "DivisionQueueDescriptor", 1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100967
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100968 std::vector<DataType> supportedTypes = {
969 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100970 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100971 DataType::QuantisedSymm16,
972 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100973 };
974
975 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
976 supportedTypes,
977 "DivisionQueueDescriptor");
978
979 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
980 supportedTypes,
981 "DivisionQueueDescriptor");
982
983 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
984 supportedTypes,
985 "DivisionQueueDescriptor");
986
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100987 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
988 workloadInfo.m_InputTensorInfos[1],
989 workloadInfo.m_OutputTensorInfos[0],
990 "DivisionQueueDescriptor",
991 "first input",
992 "second input");
993}
994
David Beckc2044fe2018-09-05 15:00:38 +0100995void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
996{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100997 ValidateNumInputs(workloadInfo, "SubtractionQueueDescriptor", 2);
998 ValidateNumOutputs(workloadInfo, "SubtractionQueueDescriptor", 1);
David Beckc2044fe2018-09-05 15:00:38 +0100999
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001000 std::vector<DataType> supportedTypes = {
1001 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001002 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +01001003 DataType::QuantisedSymm16,
1004 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001005 };
1006
1007 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1008 supportedTypes,
1009 "SubtractionQueueDescriptor");
1010
1011 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1012 supportedTypes,
1013 "SubtractionQueueDescriptor");
1014
1015 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1016 supportedTypes,
1017 "SubtractionQueueDescriptor");
1018
David Beckc2044fe2018-09-05 15:00:38 +01001019 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1020 workloadInfo.m_InputTensorInfos[1],
1021 workloadInfo.m_OutputTensorInfos[0],
1022 "SubtractionQueueDescriptor",
1023 "first input",
1024 "second input");
1025}
1026
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001027void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1028{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001029 ValidateNumInputs(workloadInfo, "MaximumQueueDescriptor", 2);
1030 ValidateNumOutputs(workloadInfo, "MaximumQueueDescriptor", 1);
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001031
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001032 std::vector<DataType> supportedTypes = {
1033 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001034 DataType::QuantisedAsymm8,
1035 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001036 };
1037
1038 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1039 supportedTypes,
1040 "MaximumQueueDescriptor");
1041
1042 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1043 supportedTypes,
1044 "MaximumQueueDescriptor");
1045
1046 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1047 supportedTypes,
1048 "MaximumQueueDescriptor");
1049
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001050 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1051 workloadInfo.m_InputTensorInfos[1],
1052 workloadInfo.m_OutputTensorInfos[0],
1053 "MaximumQueueDescriptor",
1054 "first input",
1055 "second input");
1056}
1057
narpra01a6bf9122018-09-10 09:50:09 +01001058void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1059{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001060 ValidateNumInputs(workloadInfo, "MeanQueueDescriptor", 1);
1061 ValidateNumOutputs(workloadInfo, "MeanQueueDescriptor", 1);
narpra01eb061912018-09-10 17:35:27 +01001062
1063 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
1064 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1065
narpra0132b90462018-09-13 11:07:48 +01001066 if (m_Parameters.m_KeepDims)
narpra01eb061912018-09-10 17:35:27 +01001067 {
1068 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
1069 }
narpra0132b90462018-09-13 11:07:48 +01001070 else if (m_Parameters.m_Axis.empty())
narpra01eb061912018-09-10 17:35:27 +01001071 {
1072 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
1073 }
1074 else
1075 {
narpra0132b90462018-09-13 11:07:48 +01001076 auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
narpra01eb061912018-09-10 17:35:27 +01001077 ValidateTensorNumDimensions(output,
1078 "MeanQueueDescriptor",
1079 outputDim > 0 ? outputDim : 1,
1080 "output");
1081 }
narpra01a6bf9122018-09-10 09:50:09 +01001082}
1083
jimfly012c9322a2018-09-19 10:59:49 +01001084void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1085{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001086 ValidateNumInputs(workloadInfo, "PadQueueDescriptor", 1);
1087 ValidateNumOutputs(workloadInfo, "PadQueueDescriptor", 1);
jimfly012c9322a2018-09-19 10:59:49 +01001088
1089 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
Nina Drozd661dfa72018-10-02 11:14:17 +01001090 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1091
jimfly012c9322a2018-09-19 10:59:49 +01001092 // input and output should have the same number of dimensions
1093 ValidateTensorNumDimensions(output, "PadQueueDescriptor", input.GetNumDimensions(), "output");
1094 // there should be entry in the pad list for each dimension in the input tensor
1095 if (m_Parameters.m_PadList.size() != input.GetNumDimensions()) {
1096 throw InvalidArgumentException("Pad List should contain the same number of entries as there"
1097 " are dimensions in the input tensor that is " +
1098 to_string(input.GetNumDimensions()) + " entries " +
1099 " not " + to_string(m_Parameters.m_PadList.size()) + " entries.");
1100 }
1101}
1102
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001103void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1104{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001105 ValidateNumInputs(workloadInfo, "QuantizeQueueDescriptor", 1);
1106 ValidateNumOutputs(workloadInfo, "QuantizeQueueDescriptor", 1);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001107
1108
1109 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
1110 {
1111 throw InvalidArgumentException("Quantize only accepts Float32 inputs.");
1112 }
1113
1114 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1115 workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1116 {
1117 throw InvalidArgumentException("Output of quantized layer must be quantized type.");
1118 }
1119}
1120
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001121void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1122{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001123 ValidateNumInputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
1124 ValidateNumOutputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001125}
1126
Conor Kennedy430b5d82018-11-14 15:28:28 +00001127void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1128{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001129 ValidateNumInputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
1130 ValidateNumOutputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
Conor Kennedy430b5d82018-11-14 15:28:28 +00001131
1132 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
1133 const uint32_t rank = input.GetNumDimensions();
1134
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001135 if (rank > 4)
1136 {
1137 throw InvalidArgumentException(
1138 "StridedSliceLayer: Input tensors with rank greater than 4 are not supported");
1139 }
1140
Conor Kennedy430b5d82018-11-14 15:28:28 +00001141 // Begin, End & Stride length must be of rank(input0)
1142 if (m_Parameters.m_Begin.size() != rank)
1143 {
1144 throw InvalidArgumentException("StridedSliceLayer: Begin length must be of rank input0("
1145 + to_string(rank) + ")");
1146 }
1147
1148 if (m_Parameters.m_End.size() != rank)
1149 {
1150 throw InvalidArgumentException("StridedSliceLayer: End length must be of rank input0("
1151 + to_string(rank) + ")");
1152 }
1153
1154 if (m_Parameters.m_Stride.size() != rank)
1155 {
1156 throw InvalidArgumentException("StridedSliceLayer: Stride length must be of rank input0("
1157 + to_string(rank) + ")");
1158 }
1159
1160 // Stride entries must be non-zero
1161 for (auto& stride : m_Parameters.m_Stride)
1162 {
1163 if (stride == 0)
1164 {
1165 throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
1166 }
1167 }
1168}
1169
kevmay0190539692018-11-29 08:40:19 +00001170void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1171{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001172 ValidateNumInputs(workloadInfo, "MinimumQueueDescriptor", 2);
1173 ValidateNumOutputs(workloadInfo, "MinimumQueueDescriptor", 1);
kevmay0190539692018-11-29 08:40:19 +00001174
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001175 std::vector<DataType> supportedTypes = {
1176 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001177 DataType::QuantisedAsymm8,
1178 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001179 };
1180
1181 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1182 supportedTypes,
1183 "MinimumQueueDescriptor");
1184
1185 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1186 supportedTypes,
1187 "MinimumQueueDescriptor");
1188
1189 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1190 supportedTypes,
1191 "MinimumQueueDescriptor");
1192
kevmay0190539692018-11-29 08:40:19 +00001193 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1194 workloadInfo.m_InputTensorInfos[1],
1195 workloadInfo.m_OutputTensorInfos[0],
1196 "MinimumQueueDescriptor",
1197 "first input",
1198 "second input");
1199}
1200
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001201void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1202{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001203 ValidateNumInputs(workloadInfo, "DebugQueueDescriptor", 1);
1204 ValidateNumOutputs(workloadInfo, "DebugQueueDescriptor", 1);
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001205}
1206
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001207void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1208{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001209 ValidateNumInputs(workloadInfo, "EqualQueueDescriptor", 2);
1210 ValidateNumOutputs(workloadInfo, "EqualQueueDescriptor", 1);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001211
1212 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1213 workloadInfo.m_InputTensorInfos[1],
1214 workloadInfo.m_OutputTensorInfos[0],
1215 "EqualQueueDescriptor",
1216 "first input",
1217 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001218
1219 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1220 {
1221 throw InvalidArgumentException("EqualQueueDescriptor: Output tensor type must be Boolean.");
1222 }
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001223}
1224
FrancisMurtagh878f0232018-12-19 10:56:15 +00001225void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1226{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001227 ValidateNumInputs(workloadInfo, "GreaterQueueDescriptor", 2);
1228 ValidateNumOutputs(workloadInfo, "GreaterQueueDescriptor", 1);
FrancisMurtagh878f0232018-12-19 10:56:15 +00001229
1230 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1231 workloadInfo.m_InputTensorInfos[1],
1232 workloadInfo.m_OutputTensorInfos[0],
1233 "GreaterQueueDescriptor",
1234 "first input",
1235 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001236
1237 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1238 {
1239 throw InvalidArgumentException("GreaterQueueDescriptor: Output tensor type must be Boolean.");
1240 }
FrancisMurtagh878f0232018-12-19 10:56:15 +00001241}
1242
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001243void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1244{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001245 ValidateNumInputs(workloadInfo, "RsqrtQueueDescriptor", 1);
1246 ValidateNumOutputs(workloadInfo, "RsqrtQueueDescriptor", 1);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001247 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1248 workloadInfo.m_OutputTensorInfos[0],
1249 "RsqrtQueueDescriptor",
1250 "input",
1251 "output");
1252}
1253
narpra01b89b05f2019-01-16 09:53:09 +00001254void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1255{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001256 ValidateNumInputs(workloadInfo, "GatherQueueDescriptor", 2);
1257 ValidateNumOutputs(workloadInfo, "GatherQueueDescriptor", 1);
narpra014951d842019-01-18 16:53:53 +00001258
1259 const TensorInfo& indices = workloadInfo.m_InputTensorInfos[1];
1260
1261 if (indices.GetDataType() != DataType::Signed32)
1262 {
1263 throw InvalidArgumentException("GatherQueueDescriptor: Indices tensor type must be int32.");
1264 }
1265
1266 const TensorInfo& params = workloadInfo.m_InputTensorInfos[0];
1267 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1268 unsigned int paramsDim = params.GetNumDimensions();
1269 unsigned int indicesDim = indices.GetNumDimensions();
1270 unsigned int outputDim = paramsDim - 1 + indicesDim;
1271
1272 ValidateTensorNumDimensions(output, "GatherQueueDescriptor", outputDim, "output");
narpra01b89b05f2019-01-16 09:53:09 +00001273}
1274
void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Two inputs: box encodings and class scores. Anchors arrive separately through m_Anchors.
    ValidateNumInputs(workloadInfo, "DetectionPostProcessQueueDescriptor", 2);

    // Exactly four outputs are required: detection boxes, classes, scores, and the detection count.
    if (workloadInfo.m_OutputTensorInfos.size() != 4)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Requires exactly four outputs. " +
                                       to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
    }

    if (m_Anchors == nullptr)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Anchors tensor descriptor is missing.");
    }

    const TensorInfo& boxEncodingsInfo =  workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& scoresInfo       =  workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& anchorsInfo      = m_Anchors->GetTensorInfo();
    const TensorInfo& detectionBoxesInfo   = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
    const TensorInfo& detectionScoresInfo  = workloadInfo.m_OutputTensorInfos[2];
    const TensorInfo& numDetectionsInfo    = workloadInfo.m_OutputTensorInfos[3];

    // Expected ranks for the inputs and the anchors...
    ValidateTensorNumDimensions(boxEncodingsInfo, "DetectionPostProcessQueueDescriptor", 3, "box encodings");
    ValidateTensorNumDimensions(scoresInfo, "DetectionPostProcessQueueDescriptor", 3, "scores");
    ValidateTensorNumDimensions(anchorsInfo, "DetectionPostProcessQueueDescriptor", 2, "anchors");

    // ...and for each of the four outputs.
    ValidateTensorNumDimensions(detectionBoxesInfo, "DetectionPostProcessQueueDescriptor", 3, "detection boxes");
    ValidateTensorNumDimensions(detectionScoresInfo, "DetectionPostProcessQueueDescriptor", 2, "detection scores");
    ValidateTensorNumDimensions(detectionClassesInfo, "DetectionPostProcessQueueDescriptor", 2, "detection classes");
    ValidateTensorNumDimensions(numDetectionsInfo, "DetectionPostProcessQueueDescriptor", 1, "num detections");

    // All four outputs must be Float32.
    ValidateTensorDataType(detectionBoxesInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection boxes");
    ValidateTensorDataType(detectionScoresInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection scores");
    ValidateTensorDataType(detectionClassesInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "detection classes");
    ValidateTensorDataType(numDetectionsInfo, DataType::Float32,
                           "DetectionPostProcessQueueDescriptor", "num detections");

    // The non-max-suppression IoU threshold must lie in (0, 1].
    if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // The last scores dimension carries the classes plus one extra background class.
    if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
    {
        throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Number of classes with background "
                                       "should be equal to number of classes + 1.");
    }
}
1327
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001328void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1329{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001330 ValidateNumInputs(workloadInfo, "DequantizeQueueDescriptor", 1);
1331 ValidateNumOutputs(workloadInfo, "DequantizeQueueDescriptor", 1);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001332
1333 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1334 workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1335 {
1336 throw InvalidArgumentException("Input to dequantize layer must be quantized type.");
1337 }
1338
1339 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
1340 {
1341 throw InvalidArgumentException("Output of dequantize layer must be Float32 type.");
1342 }
1343}
1344
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001345void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1346{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001347 ValidateNumInputs(workloadInfo, "MergeQueueDescriptor", 2);
1348 ValidateNumOutputs(workloadInfo, "MergeQueueDescriptor", 1);
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001349
1350 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1351 workloadInfo.m_InputTensorInfos[1],
1352 "MergeQueueDescriptor",
1353 "input0",
1354 "input1");
1355
1356 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1357 workloadInfo.m_OutputTensorInfos[0],
1358 "MergeQueueDescriptor",
1359 "input0",
1360 "output");
1361
1362 const DataType dataType = workloadInfo.m_InputTensorInfos[0].GetDataType();
1363 ValidateTensorDataType(workloadInfo.m_InputTensorInfos[1], dataType, "MergeQueueDescriptor", "input1");
1364 ValidateTensorDataType(workloadInfo.m_OutputTensorInfos[0], dataType, "MergeQueueDescriptor", "output");
1365}
1366
Sadik Armaganeff363d2019-04-05 15:25:46 +01001367void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1368{
1369 ValidateNumInputs(workloadInfo, "SwitchQueueDescriptor", 2);
1370 ValidateNumOutputs(workloadInfo, "SwitchQueueDescriptor", 2);
1371
1372 std::vector<DataType> supportedTypes = {
1373 DataType::Float32,
1374 DataType::QuantisedAsymm8,
1375 DataType::QuantisedSymm16
1376 };
1377
1378 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1379 supportedTypes,
1380 "SwitchQueueDescriptor");
1381
1382 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1383 supportedTypes,
1384 "SwitchQueueDescriptor");
1385
1386 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1387 supportedTypes,
1388 "SwitchQueueDescriptor");
1389
1390 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1391 workloadInfo.m_OutputTensorInfos[0],
1392 "SwitchQueueDescriptor",
1393 "input0",
1394 "output0");
1395
1396 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1397 workloadInfo.m_OutputTensorInfos[1],
1398 "SwitchQueueDescriptor",
1399 "input0",
1400 "output1");
1401}
1402
void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // A pre-compiled workload is generated internally (by a backend's
    // optimization step), not built from user-supplied data, so there is
    // nothing to validate here; workloadInfo is intentionally unused.
}
1407
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001408} //namespace armnn