blob: d9779e4e37272bb0cc7108c279cc8add52ad75ba [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "WorkloadData.hpp"
6
7#include "CpuTensorHandle.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
Matteo Martincigh21350152018-11-28 16:22:22 +00009#include <DataLayoutIndexed.hpp>
Matthew Bentham8800c002018-11-19 13:19:28 +000010
telsoa014fcda012018-03-09 14:13:49 +000011#include <algorithm>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <iomanip>
telsoa014fcda012018-03-09 14:13:49 +000013#include <string>
14#include <sstream>
telsoa014fcda012018-03-09 14:13:49 +000015
16#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010017#include <boost/numeric/conversion/cast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Matteo Martincigh21350152018-11-28 16:22:22 +000019using namespace armnnUtils;
20
telsoa014fcda012018-03-09 14:13:49 +000021namespace armnn
22{
23
24//---------------------------------------------------------------
25DataType GetBiasDataType(DataType inputDataType)
26{
27 switch (inputDataType)
28 {
telsoa01c577f2c2018-08-31 09:22:23 +010029 case DataType::Float16:
30 return DataType::Float16;
telsoa014fcda012018-03-09 14:13:49 +000031 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QuantisedAsymm8:
34 return DataType::Signed32;
Ruomei Yan88d44b82019-05-23 14:29:06 +010035 case DataType::QuantisedSymm16:
36 return DataType::Signed32;
telsoa014fcda012018-03-09 14:13:49 +000037 default:
38 BOOST_ASSERT_MSG(false, "Invalid input data type");
39 return DataType::Float32;
40 }
41}
42
43namespace
44{
45
46//---------------------------------------------------------------
47//android ndk does not support std::to_string function.
// Local replacement for std::to_string: the Android NDK used by this project
// does not provide it. Works for any streamable type T.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
55
56//---------------------------------------------------------------
57void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
58{
59 if (!ptr)
60 {
61 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
62 paramName + " parameter must be set.");
63 }
64}
65
66//---------------------------------------------------------------
67void ValidateTensorShapesMatch(const TensorInfo& first,
68 const TensorInfo& second,
69 std::string const& descName,
70 std::string const& firstName,
71 std::string const& secondName)
72{
73 if (first.GetShape() != second.GetShape())
74 {
75 throw InvalidArgumentException(descName + ": "
76 + firstName + " & " + secondName + " must have identical shapes");
77 }
78}
79
80//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010081void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000082{
Sadik Armaganeff363d2019-04-05 15:25:46 +010083 if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000084 {
85 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010086 ": Requires exactly " + to_string(expectedSize) + "input(s). " +
telsoa014fcda012018-03-09 14:13:49 +000087 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
88 }
89}
90
91//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010092void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000093{
Sadik Armaganeff363d2019-04-05 15:25:46 +010094 if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000095 {
96 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010097 ": Requires exactly " + to_string(expectedSize) + " output(s). " +
telsoa014fcda012018-03-09 14:13:49 +000098 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
99 }
100}
101
102//---------------------------------------------------------------
103void ValidateTensorNumDimensions(const TensorInfo& tensor,
104 std::string const& descName,
105 unsigned int numDimensions,
106 std::string const& tensorName)
107{
108 if (tensor.GetNumDimensions() != numDimensions)
109 {
110 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
111 to_string(tensor.GetNumDimensions()) + " dimensions for " +
112 tensorName + " tensor.");
113 }
114}
115
116//---------------------------------------------------------------
117void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
118 const std::string& descName, std::string const& tensorName)
119{
120 if (tensor.GetDataType() != dataType)
121 {
122 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
123 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
124 }
125}
126
127//---------------------------------------------------------------
/// Validates the quantization parameters of a bias tensor:
///   - its quantization offset must be zero, and
///   - its quantization scale must equal inputScale * weightsScale
///     (within a small absolute tolerance).
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
    const TensorInfo& weightsTensorInfo, const std::string& descName)
{
    if (biasTensor.GetQuantizationOffset() != 0)
    {
        throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
            to_string(biasTensor.GetQuantizationOffset()));
    }
    const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
    // Absolute tolerance of 1e-8 to absorb float rounding in the scale product.
    if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
    {
        // Print the float values with extra precision to see very small differences
        std::stringstream msg;
        msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
            " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
            biasTensor.GetQuantizationScale();
        throw InvalidArgumentException(msg.str());
    }
}
147
148//---------------------------------------------------------------
149void ValidateTensors(const std::vector<ITensorHandle*>& vec,
150 unsigned int numExpected,
151 const std::string& descName,
152 const std::string& varName)
153{
154 if (vec.empty() && numExpected > 0)
155 {
156 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
157 }
158
159 for (unsigned int i = 0; i < numExpected; ++i)
160 {
161 if (!vec[i])
162 {
163 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
164 }
165 }
166}
167
168//---------------------------------------------------------------
/// Validates that @p first and @p second are broadcast-compatible (same rank,
/// and each dimension pair is either equal or one of them is 1), and that the
/// resulting broadcast shape equals @p output's shape.
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName  + ": Tensors "
            + firstName + " & " + secondName
            + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        // Dimensions are compatible when equal, or when either side is 1
        // (that side gets broadcast to the other).
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        // The broadcast size of each dimension is the larger of the two.
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
            + firstName + " & " + secondName
            + " does not match the output shape");
    }
}
204
205//---------------------------------------------------------------
206/// Validates that the output tensor's quantization scale is greater than the product
207/// of the two input tensors' quantization scales. This is a requirement of the implementation of
208/// the quantized multiplication.
209void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
210 const TensorInfo& outputTensorInfo, std::string const& descName,
211 const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
212{
213 if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
214 {
215 if (outputTensorInfo.GetQuantizationScale() <=
216 inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
217 {
218 std::stringstream msg;
219 msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
220 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
221 throw InvalidArgumentException(msg.str());
222 }
223 }
224}
225
Sadik Armaganeff363d2019-04-05 15:25:46 +0100226//---------------------------------------------------------------
227void ValidateDataTypes(const TensorInfo& info,
228 const std::vector<armnn::DataType>& supportedTypes,
229 std::string const& descName)
230{
231 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
232 if (iterator == supportedTypes.end())
233 {
234 throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
235 }
236}
237
telsoa014fcda012018-03-09 14:13:49 +0000238} //namespace
239
// Validates that this descriptor carries exactly numExpectedIn input and
// numExpectedOut output tensor handles, and that none of them are null.
// Inputs are checked first, so an input error is reported before any output error.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
246
247//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a memory-copy workload: one input and one output info, matching
// counts of infos and handles, element-count parity per tensor pair, and
// non-null handles throughout.
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemCopyQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemCopyQueueDescriptor" , 1);

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
                % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    // Each input/output pair must carry the same number of elements; shapes may
    // differ (a copy only moves data), so only GetNumElements() is compared.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                    % i ));
        }
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
                % m_Inputs.size() % m_Outputs.size()));
    }

    // All tensor handles must be non-null.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}
291
292//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates an activation workload: one input, one output, identical shapes,
// input type in the supported set, and output type equal to the input type.
void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "ActivationQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "ActivationQueueDescriptor", 1);
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "ActivationQueueDescriptor",
                              "input",
                              "output");

    // Data types accepted by the activation implementations.
    std::vector<DataType> supportedTypes = {
        DataType::Float32,
        DataType::Float16,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                      supportedTypes,
                      "ActivationQueueDescriptor");

    // Output must have the same data type as the input.
    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      {workloadInfo.m_InputTensorInfos[0].GetDataType()},
                      "ActivationQueueDescriptor");
}
318
319//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a softmax workload: one input, one output, identical shapes.
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "SoftmaxQueueDescriptor", 1);

    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "SoftmaxQueueDescriptor",
                              "input",
                              "output");
}
331
332//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a splitter workload: one input, at least one output, one view
// origin per output, each origin with the input's dimensionality, and each
// view (origin + output extent) lying inside the input tensor.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "SplitterQueueDescriptor", 1);

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "SplitterQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }
        // Each window (origin + the corresponding output's extent) must fit
        // within the input tensor in every dimension.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
379
380//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a merger (concatenation) workload: non-empty inputs/outputs, a
// valid concat axis, one view origin per input (unless the early-return case
// below applies), each view fitting inside the output, and supported data
// types with output type equal to the first input's type.
void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumOutputs(workloadInfo, "MergerQueueDescriptor", 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
    }

    // NOTE(review): this uses '>' so an axis equal to the number of dimensions
    // passes the check — confirm whether '>=' was intended.
    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException("Invalid Concatenation Axis provided");
    }

    // When concatenating along the innermost dimension, the window checks
    // below are skipped entirely.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "MergerQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::Boolean,
        DataType::Signed32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16
    };

    for (unsigned long i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateDataTypes(workloadInfo.m_InputTensorInfos[i],
                          supportedTypes,
                          "MergerQueueDescriptor");
    }
    // Output must have the same data type as the first input.
    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      {workloadInfo.m_InputTensorInfos[0].GetDataType()},
                      "MergerQueueDescriptor");
}
472
473//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a fully-connected workload: one input (2D or 4D), one 2D output,
// a 2D weight tensor, an optional 1D bias with matching type/quantization,
// a valid quantization multiplier, and supported data types.
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");

    if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
          workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
    }

    if (m_Weight == nullptr)
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
    }

    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        if (m_Bias == nullptr)
        {
            throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
                                           "bias value tensor descriptor is missing.");
        }

        // Validates type and quantization values.
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");

        // Bias data type must match what GetBiasDataType derives from the input type.
        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "FullyConnectedQueueDescriptor", "bias");

        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
    }

    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                      supportedTypes,
                      "FullyConnectedQueueDescriptor");

    // Output must have the same data type as the input.
    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      {workloadInfo.m_InputTensorInfos[0].GetDataType()},
                      "FullyConnectedQueueDescriptor");
}
532
533//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a normalization workload: one input, one output, identical shapes.
void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "NormalizationQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "NormalizationQueueDescriptor", 1);
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "NormalizationQueueDescriptor",
                              "input",
                              "output");
}
544
// Validates an addition workload: two inputs, one output, supported data
// types on all three tensors, and broadcast-compatible input shapes whose
// broadcast result matches the output shape.
void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "AdditionQueueDescriptor", 2);
    ValidateNumOutputs(workloadInfo, "AdditionQueueDescriptor", 1);

    // Data types accepted by the addition implementations.
    std::vector<DataType> supportedTypes = {
        DataType::Float32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16,
        DataType::Float16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                      supportedTypes,
                      "AdditionQueueDescriptor");

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
                      supportedTypes,
                      "AdditionQueueDescriptor");

    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      supportedTypes,
                      "AdditionQueueDescriptor");

    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "AdditionQueueDescriptor",
                                       "first input",
                                       "second input");
}
576
577//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a multiplication workload: two inputs, one output, supported data
// types on all three tensors, and broadcast-compatible input shapes whose
// broadcast result matches the output shape.
void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MultiplicationQueueDescriptor", 2);
    ValidateNumOutputs(workloadInfo, "MultiplicationQueueDescriptor", 1);

    // Data types accepted by the multiplication implementations.
    std::vector<DataType> supportedTypes = {
        DataType::Float32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16,
        DataType::Float16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                      supportedTypes,
                      "MultiplicationQueueDescriptor");

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
                      supportedTypes,
                      "MultiplicationQueueDescriptor");

    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      supportedTypes,
                      "MultiplicationQueueDescriptor");

    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "MultiplicationQueueDescriptor",
                                       "first input",
                                       "second input");
}
609
// Validates a batch-normalization workload: one input, one output with
// identical shape, and non-null 1D mean/variance/beta/gamma tensors that all
// share the mean tensor's shape.
void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "BatchNormalizationQueueDescriptor",
                              "input",
                              "output");
    // All four parameter tensors are mandatory.
    ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
    ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
    ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
    ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");


    ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
    ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
    ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
    ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");

    // All parameter tensors must have the same shape as the mean tensor.
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
}
637
638void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
639{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100640 ValidateNumInputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
641 ValidateNumOutputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000642
643 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
644 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
645
646 ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
647 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
648 ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
649 "Convolution2dQueueDescriptor", "weight");
650 if (m_Parameters.m_BiasEnabled)
651 {
652 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
653 ValidateTensorDataType(m_Bias->GetTensorInfo(),
654 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
655 "Convolution2dQueueDescriptor", "bias");
656 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
657 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
658 }
659
660 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
661 workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
662}
663
664void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
665{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100666 ValidateNumInputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
667 ValidateNumOutputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000668
669 ValidateTensorNumDimensions(
670 workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
671 ValidateTensorNumDimensions(
672 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");
673
674 ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
675 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");
676
Bruno Goncalves22972f02019-04-26 21:03:24 -0300677 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
678 {
679 throw InvalidArgumentException(
680 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: dilationX (provided %1%) "
681 "and dilationY (provided %2%) cannot be smaller than 1.")
682 % m_Parameters.m_DilationX % m_Parameters.m_DilationX));
683 }
684
Nikhil Rajcec6b652018-10-12 13:51:57 +0100685 const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
686
Matteo Martincigh747ef822018-12-18 09:26:39 +0000687 // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
688 // inputChannels * channelMultiplier should be equal to outputChannels.
telsoa014fcda012018-03-09 14:13:49 +0000689 const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000690 const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
Nikhil Rajcec6b652018-10-12 13:51:57 +0100691 const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelIndex];
telsoa014fcda012018-03-09 14:13:49 +0000692 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
693 {
694 throw InvalidArgumentException(
695 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
696 "equal to input_channels (provided %2%) multiplied by channel_multiplier "
697 "(provided %3%).")
698 % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
699 }
700
701 if (m_Parameters.m_BiasEnabled)
702 {
703 ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
704 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
705 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
706 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");
707
708 ValidateTensorDataType(m_Bias->GetTensorInfo(),
709 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
710 "DepthwiseConvolution2dQueueDescriptor", "bias");
711 }
712
713 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
714 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
Ruomei Yan88d44b82019-05-23 14:29:06 +0100715
716 // Check the supported data types
717 std::vector<DataType> supportedTypes = {
718 DataType::Float32,
719 DataType::QuantisedAsymm8,
720 DataType::QuantisedSymm16,
721 DataType::Float16
722 };
723
724 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
725 supportedTypes,
726 "DepthwiseConvolution2dQueueDescriptor");
727
728 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
729 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
730 "DepthwiseConvolution2dQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000731}
732
733void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
734{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100735 ValidateNumInputs(workloadInfo, "PermuteQueueDescriptor", 1);
736 ValidateNumOutputs(workloadInfo, "PermuteQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000737
738 const PermutationVector& mapping = m_Parameters.m_DimMappings;
739
740 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
741 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
742
743 ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
744 ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
745
746 for (unsigned int i = 0; i < mapping.GetSize(); ++i)
747 {
748 if (input.GetShape()[i] != output.GetShape()[mapping[i]])
749 {
750 throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
751 " (=" + to_string(input.GetShape()[i]) + ") " +
752 "must match dst dimension " + to_string(mapping[i]) +
753 " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
754 }
755 }
756}
757
758void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
759{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100760 ValidateNumInputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
761 ValidateNumOutputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000762
763 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
764 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
765}
766
767void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
768{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100769 ValidateNumInputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
770 ValidateNumOutputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000771
772 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
773 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
774
telsoa01c577f2c2018-08-31 09:22:23 +0100775 // Resizes bilinear only changes width and height: batch and channel count must match.
telsoa014fcda012018-03-09 14:13:49 +0000776 {
777 const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
778 const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
779 if (inputBatchSize != outputBatchSize)
780 {
781 throw InvalidArgumentException(
782 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
783 "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
784 }
785 }
786
787 {
Matthew Bentham8800c002018-11-19 13:19:28 +0000788 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
James Conroy59540822018-10-11 12:39:05 +0100789 const unsigned int inputChannelCount =
Matthew Bentham8800c002018-11-19 13:19:28 +0000790 workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
James Conroy59540822018-10-11 12:39:05 +0100791 const unsigned int outputChannelCount =
Matthew Bentham8800c002018-11-19 13:19:28 +0000792 workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
telsoa014fcda012018-03-09 14:13:49 +0000793 if (inputChannelCount != outputChannelCount)
794 {
795 throw InvalidArgumentException(
796 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
797 "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
798 }
799 }
800}
801
802void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
803{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100804 ValidateNumInputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
805 ValidateNumOutputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000806
807 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
808 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
809 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
810 workloadInfo.m_OutputTensorInfos[0],
811 "FakeQuantizationQueueDescriptor",
812 "input",
813 "output");
814 if (m_Parameters.m_Min > m_Parameters.m_Max)
815 {
816 throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
817 }
818
819}
820
821void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
822{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100823 ValidateNumInputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
824 ValidateNumOutputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000825
826 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
827 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
828 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
829 workloadInfo.m_OutputTensorInfos[0],
830 "L2NormalizationQueueDescriptor",
831 "input",
832 "output");
833}
834
835void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
836{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100837 ValidateNumInputs(workloadInfo, "ConstantQueueDescriptor", 0);
838 ValidateNumOutputs(workloadInfo, "ConstantQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000839
840 if (!m_LayerOutput)
841 {
842 throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
843 }
844
845 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
846 workloadInfo.m_OutputTensorInfos[0],
847 "ConstantQueueDescriptor",
848 "constant",
849 "output");
Nina Drozd58ef2c62019-05-16 12:09:18 +0100850
851 // Check the supported data types
852 std::vector<DataType> supportedTypes =
853 {
854 DataType::Float32,
855 DataType::Float16,
856 DataType::Signed32,
857 DataType::QuantisedAsymm8,
858 DataType::QuantisedSymm16
859 };
860
861 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], supportedTypes, "ConstantQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000862}
863
864void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
865{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100866 ValidateNumInputs(workloadInfo, "ReshapeQueueDescriptor", 1);
867 ValidateNumOutputs(workloadInfo, "ReshapeQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000868
869 if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
870 {
871 throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
872 to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
873 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
874 }
875}
876
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000877void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
878{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100879 ValidateNumInputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
880 ValidateNumOutputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000881
882 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "input");
883 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "output");
884
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000885 if (m_Parameters.m_BlockShape.size() != 2)
886 {
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000887 throw InvalidArgumentException("Block Shape must contain 2 spatial dimensions");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000888 }
889
890 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
891 {
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000892 throw InvalidArgumentException("Pad List must contain the same number of dimensions as Block Shape.");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000893 }
894
895 const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();
896
897 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
898 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
899
Matthew Bentham8800c002018-11-19 13:19:28 +0000900 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
901 unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000902 + heightPad.first + heightPad.second;
903
Matthew Bentham8800c002018-11-19 13:19:28 +0000904 unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000905 + widthPad.first + widthPad.second;
906
907 unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
Matthew Bentham8800c002018-11-19 13:19:28 +0000908 * inputShape[dimensionIndices.GetChannelsIndex()];
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000909
910 if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
911 {
912 throw InvalidArgumentException("SpaceToBatchNdQueueDescriptor: Input tensor has " +
913 to_string(numInputElements) + " after padding but output tensor has " +
914 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
915 }
916
917 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000918 {
919 throw InvalidArgumentException(
920 "Input shape after padding must be divisible by Block Shape in all spatial dimensions");
921 }
922}
923
telsoa014fcda012018-03-09 14:13:49 +0000924void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
925{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100926 ValidateNumInputs(workloadInfo, "FloorQueueDescriptor", 1);
927 ValidateNumOutputs(workloadInfo, "FlootQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000928
929 if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
930 {
931 throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
932 }
933}
934
telsoa01c577f2c2018-08-31 09:22:23 +0100935void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
936{
937 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "LstmQueueDescriptor", 2, "input");
938 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "LstmQueueDescriptor", 2, "output");
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100939
940 std::vector<DataType> supportedTypes = {
Conor Kennedyb9971c92019-05-07 07:14:23 +0100941 DataType::Float16,
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100942 DataType::Float32,
Conor Kennedyb9971c92019-05-07 07:14:23 +0100943 DataType::QuantisedSymm16
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100944 };
945
946 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
947 supportedTypes,
948 "LstmQueueDescriptor");
949
950 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
951 supportedTypes,
952 "LstmQueueDescriptor");
telsoa01c577f2c2018-08-31 09:22:23 +0100953}
954
955void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
956{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100957 ValidateNumInputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
958 ValidateNumOutputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100959
960 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
961 {
962 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Input tensor type must be Float32.");
963 }
964
965 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float16)
966 {
967 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Output tensor type must be Float16.");
968 }
969
970 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
971 workloadInfo.m_OutputTensorInfos[0],
972 "ConvertFp32ToFp16QueueDescriptor",
973 "input",
974 "output");
975}
976
977void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
978{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100979 ValidateNumInputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
980 ValidateNumOutputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100981
982 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float16)
983 {
984 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Input tensor type must be Float16.");
985 }
986 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
987 {
988 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Output tensor type must be Float32.");
989 }
990
991 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
992 workloadInfo.m_OutputTensorInfos[0],
993 "ConvertFp16ToFp32QueueDescriptor",
994 "input",
995 "output");
996}
997
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100998void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
999{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001000 ValidateNumInputs(workloadInfo, "DivisionQueueDescriptor", 2);
1001 ValidateNumOutputs(workloadInfo, "DivisionQueueDescriptor", 1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001002
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001003 std::vector<DataType> supportedTypes = {
1004 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001005 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +01001006 DataType::QuantisedSymm16,
1007 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001008 };
1009
1010 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1011 supportedTypes,
1012 "DivisionQueueDescriptor");
1013
1014 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1015 supportedTypes,
1016 "DivisionQueueDescriptor");
1017
1018 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1019 supportedTypes,
1020 "DivisionQueueDescriptor");
1021
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001022 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1023 workloadInfo.m_InputTensorInfos[1],
1024 workloadInfo.m_OutputTensorInfos[0],
1025 "DivisionQueueDescriptor",
1026 "first input",
1027 "second input");
1028}
1029
David Beckc2044fe2018-09-05 15:00:38 +01001030void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1031{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001032 ValidateNumInputs(workloadInfo, "SubtractionQueueDescriptor", 2);
1033 ValidateNumOutputs(workloadInfo, "SubtractionQueueDescriptor", 1);
David Beckc2044fe2018-09-05 15:00:38 +01001034
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001035 std::vector<DataType> supportedTypes = {
1036 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001037 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +01001038 DataType::QuantisedSymm16,
1039 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001040 };
1041
1042 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1043 supportedTypes,
1044 "SubtractionQueueDescriptor");
1045
1046 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1047 supportedTypes,
1048 "SubtractionQueueDescriptor");
1049
1050 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1051 supportedTypes,
1052 "SubtractionQueueDescriptor");
1053
David Beckc2044fe2018-09-05 15:00:38 +01001054 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1055 workloadInfo.m_InputTensorInfos[1],
1056 workloadInfo.m_OutputTensorInfos[0],
1057 "SubtractionQueueDescriptor",
1058 "first input",
1059 "second input");
1060}
1061
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001062void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1063{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001064 ValidateNumInputs(workloadInfo, "MaximumQueueDescriptor", 2);
1065 ValidateNumOutputs(workloadInfo, "MaximumQueueDescriptor", 1);
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001066
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001067 std::vector<DataType> supportedTypes = {
1068 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001069 DataType::QuantisedAsymm8,
1070 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001071 };
1072
1073 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1074 supportedTypes,
1075 "MaximumQueueDescriptor");
1076
1077 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1078 supportedTypes,
1079 "MaximumQueueDescriptor");
1080
1081 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1082 supportedTypes,
1083 "MaximumQueueDescriptor");
1084
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001085 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1086 workloadInfo.m_InputTensorInfos[1],
1087 workloadInfo.m_OutputTensorInfos[0],
1088 "MaximumQueueDescriptor",
1089 "first input",
1090 "second input");
1091}
1092
narpra01a6bf9122018-09-10 09:50:09 +01001093void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1094{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001095 ValidateNumInputs(workloadInfo, "MeanQueueDescriptor", 1);
1096 ValidateNumOutputs(workloadInfo, "MeanQueueDescriptor", 1);
narpra01eb061912018-09-10 17:35:27 +01001097
1098 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
1099 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1100
narpra0132b90462018-09-13 11:07:48 +01001101 if (m_Parameters.m_KeepDims)
narpra01eb061912018-09-10 17:35:27 +01001102 {
1103 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
1104 }
narpra0132b90462018-09-13 11:07:48 +01001105 else if (m_Parameters.m_Axis.empty())
narpra01eb061912018-09-10 17:35:27 +01001106 {
1107 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
1108 }
1109 else
1110 {
narpra0132b90462018-09-13 11:07:48 +01001111 auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
narpra01eb061912018-09-10 17:35:27 +01001112 ValidateTensorNumDimensions(output,
1113 "MeanQueueDescriptor",
1114 outputDim > 0 ? outputDim : 1,
1115 "output");
1116 }
narpra01a6bf9122018-09-10 09:50:09 +01001117}
1118
jimfly012c9322a2018-09-19 10:59:49 +01001119void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1120{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001121 ValidateNumInputs(workloadInfo, "PadQueueDescriptor", 1);
1122 ValidateNumOutputs(workloadInfo, "PadQueueDescriptor", 1);
jimfly012c9322a2018-09-19 10:59:49 +01001123
1124 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
Nina Drozd661dfa72018-10-02 11:14:17 +01001125 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1126
jimfly012c9322a2018-09-19 10:59:49 +01001127 // input and output should have the same number of dimensions
1128 ValidateTensorNumDimensions(output, "PadQueueDescriptor", input.GetNumDimensions(), "output");
1129 // there should be entry in the pad list for each dimension in the input tensor
1130 if (m_Parameters.m_PadList.size() != input.GetNumDimensions()) {
1131 throw InvalidArgumentException("Pad List should contain the same number of entries as there"
1132 " are dimensions in the input tensor that is " +
1133 to_string(input.GetNumDimensions()) + " entries " +
1134 " not " + to_string(m_Parameters.m_PadList.size()) + " entries.");
1135 }
1136}
1137
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001138void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1139{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001140 ValidateNumInputs(workloadInfo, "QuantizeQueueDescriptor", 1);
1141 ValidateNumOutputs(workloadInfo, "QuantizeQueueDescriptor", 1);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001142
1143
1144 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
1145 {
1146 throw InvalidArgumentException("Quantize only accepts Float32 inputs.");
1147 }
1148
1149 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1150 workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1151 {
1152 throw InvalidArgumentException("Output of quantized layer must be quantized type.");
1153 }
1154}
1155
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001156void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1157{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001158 ValidateNumInputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
1159 ValidateNumOutputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001160}
1161
Conor Kennedy430b5d82018-11-14 15:28:28 +00001162void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1163{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001164 ValidateNumInputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
1165 ValidateNumOutputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
Conor Kennedy430b5d82018-11-14 15:28:28 +00001166
1167 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
1168 const uint32_t rank = input.GetNumDimensions();
1169
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001170 if (rank > 4)
1171 {
1172 throw InvalidArgumentException(
1173 "StridedSliceLayer: Input tensors with rank greater than 4 are not supported");
1174 }
1175
Conor Kennedy430b5d82018-11-14 15:28:28 +00001176 // Begin, End & Stride length must be of rank(input0)
1177 if (m_Parameters.m_Begin.size() != rank)
1178 {
1179 throw InvalidArgumentException("StridedSliceLayer: Begin length must be of rank input0("
1180 + to_string(rank) + ")");
1181 }
1182
1183 if (m_Parameters.m_End.size() != rank)
1184 {
1185 throw InvalidArgumentException("StridedSliceLayer: End length must be of rank input0("
1186 + to_string(rank) + ")");
1187 }
1188
1189 if (m_Parameters.m_Stride.size() != rank)
1190 {
1191 throw InvalidArgumentException("StridedSliceLayer: Stride length must be of rank input0("
1192 + to_string(rank) + ")");
1193 }
1194
1195 // Stride entries must be non-zero
1196 for (auto& stride : m_Parameters.m_Stride)
1197 {
1198 if (stride == 0)
1199 {
1200 throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
1201 }
1202 }
1203}
1204
kevmay0190539692018-11-29 08:40:19 +00001205void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1206{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001207 ValidateNumInputs(workloadInfo, "MinimumQueueDescriptor", 2);
1208 ValidateNumOutputs(workloadInfo, "MinimumQueueDescriptor", 1);
kevmay0190539692018-11-29 08:40:19 +00001209
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001210 std::vector<DataType> supportedTypes = {
1211 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001212 DataType::QuantisedAsymm8,
1213 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001214 };
1215
1216 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1217 supportedTypes,
1218 "MinimumQueueDescriptor");
1219
1220 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1221 supportedTypes,
1222 "MinimumQueueDescriptor");
1223
1224 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1225 supportedTypes,
1226 "MinimumQueueDescriptor");
1227
kevmay0190539692018-11-29 08:40:19 +00001228 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1229 workloadInfo.m_InputTensorInfos[1],
1230 workloadInfo.m_OutputTensorInfos[0],
1231 "MinimumQueueDescriptor",
1232 "first input",
1233 "second input");
1234}
1235
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001236void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1237{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001238 ValidateNumInputs(workloadInfo, "DebugQueueDescriptor", 1);
1239 ValidateNumOutputs(workloadInfo, "DebugQueueDescriptor", 1);
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001240}
1241
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001242void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1243{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001244 ValidateNumInputs(workloadInfo, "EqualQueueDescriptor", 2);
1245 ValidateNumOutputs(workloadInfo, "EqualQueueDescriptor", 1);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001246
1247 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1248 workloadInfo.m_InputTensorInfos[1],
1249 workloadInfo.m_OutputTensorInfos[0],
1250 "EqualQueueDescriptor",
1251 "first input",
1252 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001253
1254 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1255 {
1256 throw InvalidArgumentException("EqualQueueDescriptor: Output tensor type must be Boolean.");
1257 }
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001258}
1259
FrancisMurtagh878f0232018-12-19 10:56:15 +00001260void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1261{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001262 ValidateNumInputs(workloadInfo, "GreaterQueueDescriptor", 2);
1263 ValidateNumOutputs(workloadInfo, "GreaterQueueDescriptor", 1);
FrancisMurtagh878f0232018-12-19 10:56:15 +00001264
1265 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1266 workloadInfo.m_InputTensorInfos[1],
1267 workloadInfo.m_OutputTensorInfos[0],
1268 "GreaterQueueDescriptor",
1269 "first input",
1270 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001271
1272 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1273 {
1274 throw InvalidArgumentException("GreaterQueueDescriptor: Output tensor type must be Boolean.");
1275 }
FrancisMurtagh878f0232018-12-19 10:56:15 +00001276}
1277
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001278void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1279{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001280 ValidateNumInputs(workloadInfo, "RsqrtQueueDescriptor", 1);
1281 ValidateNumOutputs(workloadInfo, "RsqrtQueueDescriptor", 1);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001282 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1283 workloadInfo.m_OutputTensorInfos[0],
1284 "RsqrtQueueDescriptor",
1285 "input",
1286 "output");
1287}
1288
narpra01b89b05f2019-01-16 09:53:09 +00001289void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1290{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001291 ValidateNumInputs(workloadInfo, "GatherQueueDescriptor", 2);
1292 ValidateNumOutputs(workloadInfo, "GatherQueueDescriptor", 1);
narpra014951d842019-01-18 16:53:53 +00001293
1294 const TensorInfo& indices = workloadInfo.m_InputTensorInfos[1];
1295
1296 if (indices.GetDataType() != DataType::Signed32)
1297 {
1298 throw InvalidArgumentException("GatherQueueDescriptor: Indices tensor type must be int32.");
1299 }
1300
1301 const TensorInfo& params = workloadInfo.m_InputTensorInfos[0];
1302 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1303 unsigned int paramsDim = params.GetNumDimensions();
1304 unsigned int indicesDim = indices.GetNumDimensions();
1305 unsigned int outputDim = paramsDim - 1 + indicesDim;
1306
1307 ValidateTensorNumDimensions(output, "GatherQueueDescriptor", outputDim, "output");
narpra01b89b05f2019-01-16 09:53:09 +00001308}
1309
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00001310void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1311{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001312 ValidateNumInputs(workloadInfo, "DetectionPostProcessQueueDescriptor", 2);
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00001313
1314 if (workloadInfo.m_OutputTensorInfos.size() != 4)
1315 {
1316 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Requires exactly four outputs. " +
1317 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
1318 }
1319
1320 if (m_Anchors == nullptr)
1321 {
1322 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Anchors tensor descriptor is missing.");
1323 }
1324
1325 const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
1326 const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
1327 const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
1328 const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00001329 const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
1330 const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00001331 const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
1332
1333 ValidateTensorNumDimensions(boxEncodingsInfo, "DetectionPostProcessQueueDescriptor", 3, "box encodings");
1334 ValidateTensorNumDimensions(scoresInfo, "DetectionPostProcessQueueDescriptor", 3, "scores");
1335 ValidateTensorNumDimensions(anchorsInfo, "DetectionPostProcessQueueDescriptor", 2, "anchors");
1336
1337 ValidateTensorNumDimensions(detectionBoxesInfo, "DetectionPostProcessQueueDescriptor", 3, "detection boxes");
1338 ValidateTensorNumDimensions(detectionScoresInfo, "DetectionPostProcessQueueDescriptor", 2, "detection scores");
1339 ValidateTensorNumDimensions(detectionClassesInfo, "DetectionPostProcessQueueDescriptor", 2, "detection classes");
1340 ValidateTensorNumDimensions(numDetectionsInfo, "DetectionPostProcessQueueDescriptor", 1, "num detections");
1341
1342 ValidateTensorDataType(detectionBoxesInfo, DataType::Float32,
1343 "DetectionPostProcessQueueDescriptor", "detection boxes");
1344 ValidateTensorDataType(detectionScoresInfo, DataType::Float32,
1345 "DetectionPostProcessQueueDescriptor", "detection scores");
1346 ValidateTensorDataType(detectionClassesInfo, DataType::Float32,
1347 "DetectionPostProcessQueueDescriptor", "detection classes");
1348 ValidateTensorDataType(numDetectionsInfo, DataType::Float32,
1349 "DetectionPostProcessQueueDescriptor", "num detections");
1350
1351 if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
1352 {
1353 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Intersection over union threshold "
1354 "must be positive and less than or equal to 1.");
1355 }
1356 if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
1357 {
1358 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Number of classes with background "
1359 "should be equal to number of classes + 1.");
1360 }
1361}
1362
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001363void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1364{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001365 ValidateNumInputs(workloadInfo, "DequantizeQueueDescriptor", 1);
1366 ValidateNumOutputs(workloadInfo, "DequantizeQueueDescriptor", 1);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001367
1368 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1369 workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1370 {
1371 throw InvalidArgumentException("Input to dequantize layer must be quantized type.");
1372 }
1373
1374 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
1375 {
1376 throw InvalidArgumentException("Output of dequantize layer must be Float32 type.");
1377 }
1378}
1379
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001380void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1381{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001382 ValidateNumInputs(workloadInfo, "MergeQueueDescriptor", 2);
1383 ValidateNumOutputs(workloadInfo, "MergeQueueDescriptor", 1);
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001384
1385 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1386 workloadInfo.m_InputTensorInfos[1],
1387 "MergeQueueDescriptor",
1388 "input0",
1389 "input1");
1390
1391 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1392 workloadInfo.m_OutputTensorInfos[0],
1393 "MergeQueueDescriptor",
1394 "input0",
1395 "output");
1396
1397 const DataType dataType = workloadInfo.m_InputTensorInfos[0].GetDataType();
1398 ValidateTensorDataType(workloadInfo.m_InputTensorInfos[1], dataType, "MergeQueueDescriptor", "input1");
1399 ValidateTensorDataType(workloadInfo.m_OutputTensorInfos[0], dataType, "MergeQueueDescriptor", "output");
1400}
1401
Sadik Armaganeff363d2019-04-05 15:25:46 +01001402void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1403{
1404 ValidateNumInputs(workloadInfo, "SwitchQueueDescriptor", 2);
1405 ValidateNumOutputs(workloadInfo, "SwitchQueueDescriptor", 2);
1406
1407 std::vector<DataType> supportedTypes = {
1408 DataType::Float32,
1409 DataType::QuantisedAsymm8,
1410 DataType::QuantisedSymm16
1411 };
1412
1413 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1414 supportedTypes,
1415 "SwitchQueueDescriptor");
1416
1417 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1418 supportedTypes,
1419 "SwitchQueueDescriptor");
1420
1421 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1422 supportedTypes,
1423 "SwitchQueueDescriptor");
1424
1425 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1426 workloadInfo.m_OutputTensorInfos[0],
1427 "SwitchQueueDescriptor",
1428 "input0",
1429 "output0");
1430
1431 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1432 workloadInfo.m_OutputTensorInfos[1],
1433 "SwitchQueueDescriptor",
1434 "input0",
1435 "output1");
1436}
1437
Matteo Martincigh49124022019-01-11 13:25:59 +00001438void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1439{
1440 // This is internally generated so it should not need validation.
1441}
1442
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001443} //namespace armnn