blob: 878602391c90fd15c3f36989e7ab59fed5f3d7ba [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "WorkloadData.hpp"
6
7#include "CpuTensorHandle.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
Matteo Martincigh21350152018-11-28 16:22:22 +00009#include <DataLayoutIndexed.hpp>
Matthew Bentham8800c002018-11-19 13:19:28 +000010
telsoa014fcda012018-03-09 14:13:49 +000011#include <algorithm>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <iomanip>
telsoa014fcda012018-03-09 14:13:49 +000013#include <string>
14#include <sstream>
telsoa014fcda012018-03-09 14:13:49 +000015
16#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010017#include <boost/numeric/conversion/cast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Matteo Martincigh21350152018-11-28 16:22:22 +000019using namespace armnnUtils;
20
telsoa014fcda012018-03-09 14:13:49 +000021namespace armnn
22{
23
24//---------------------------------------------------------------
25DataType GetBiasDataType(DataType inputDataType)
26{
27 switch (inputDataType)
28 {
telsoa01c577f2c2018-08-31 09:22:23 +010029 case DataType::Float16:
30 return DataType::Float16;
telsoa014fcda012018-03-09 14:13:49 +000031 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QuantisedAsymm8:
34 return DataType::Signed32;
Ruomei Yan88d44b82019-05-23 14:29:06 +010035 case DataType::QuantisedSymm16:
36 return DataType::Signed32;
telsoa014fcda012018-03-09 14:13:49 +000037 default:
38 BOOST_ASSERT_MSG(false, "Invalid input data type");
39 return DataType::Float32;
40 }
41}
42
43namespace
44{
45
46//---------------------------------------------------------------
/// Stringifies any streamable value via an ostringstream.
/// Exists because the Android NDK does not provide std::to_string.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    std::string text = stream.str();
    return text;
}
55
56//---------------------------------------------------------------
57void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
58{
59 if (!ptr)
60 {
61 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
62 paramName + " parameter must be set.");
63 }
64}
65
66//---------------------------------------------------------------
67void ValidateTensorShapesMatch(const TensorInfo& first,
68 const TensorInfo& second,
69 std::string const& descName,
70 std::string const& firstName,
71 std::string const& secondName)
72{
73 if (first.GetShape() != second.GetShape())
74 {
75 throw InvalidArgumentException(descName + ": "
76 + firstName + " & " + secondName + " must have identical shapes");
77 }
78}
79
80//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010081void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000082{
Sadik Armaganeff363d2019-04-05 15:25:46 +010083 if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000084 {
85 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010086 ": Requires exactly " + to_string(expectedSize) + "input(s). " +
telsoa014fcda012018-03-09 14:13:49 +000087 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
88 }
89}
90
91//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010092void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000093{
Sadik Armaganeff363d2019-04-05 15:25:46 +010094 if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000095 {
96 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010097 ": Requires exactly " + to_string(expectedSize) + " output(s). " +
telsoa014fcda012018-03-09 14:13:49 +000098 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
99 }
100}
101
102//---------------------------------------------------------------
103void ValidateTensorNumDimensions(const TensorInfo& tensor,
104 std::string const& descName,
105 unsigned int numDimensions,
106 std::string const& tensorName)
107{
108 if (tensor.GetNumDimensions() != numDimensions)
109 {
110 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
111 to_string(tensor.GetNumDimensions()) + " dimensions for " +
112 tensorName + " tensor.");
113 }
114}
115
116//---------------------------------------------------------------
Jan Eilers38e05bd2019-06-26 13:10:09 +0100117void ValidateTensorNumElements(const TensorInfo& tensor,
118 std::string const& descName,
119 unsigned int numElements,
120 std::string const& tensorName)
121{
122 if (tensor.GetNumElements() != numElements)
123 {
124 throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
125 to_string(tensor.GetNumDimensions()) + " elements for " +
126 tensorName + " tensor.");
127 }
128}
129
130//---------------------------------------------------------------
/// Convenience wrapper that checks both the rank and the total element count
/// of a tensor; the rank is checked first, so a rank mismatch is reported
/// before an element-count mismatch.
void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
                                 unsigned int numDimension,
                                 unsigned int numElements,
                                 std::string const& tensorName)
{
    ValidateTensorNumDimensions(tensorInfo, "ValidateTensorNumDimNumElem: NumDimensionCheck", numDimension, tensorName);
    ValidateTensorNumElements(tensorInfo, "ValidateTensorNumDimNumElem: NumElementsCheck", numElements, tensorName);
}
139
140//---------------------------------------------------------------
telsoa014fcda012018-03-09 14:13:49 +0000141void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
142 const std::string& descName, std::string const& tensorName)
143{
144 if (tensor.GetDataType() != dataType)
145 {
146 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
147 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
148 }
149}
150
151//---------------------------------------------------------------
Matteo Martincighe851b3d2019-05-28 14:31:20 +0100152void ValidateTensorQuantizationSpace(const TensorInfo& first,
153 const TensorInfo& second,
154 const std::string& descName,
155 std::string const& firstName,
156 std::string const& secondName)
157{
158 if (!first.IsQuantized() ||
159 !second.IsQuantized())
160 {
161 // Not a quantized type, ignore the validation
162 return;
163 }
164
165 DataType firstDataType = first.GetDataType();
166 DataType secondDataType = second.GetDataType();
167
168 if (firstDataType != secondDataType)
169 {
170 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
171 " must be of the same quantized type, " +
172 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
173 secondName + " is " + GetDataTypeName(secondDataType));
174 }
175
176 if (!first.IsTypeSpaceMatch(second))
177 {
178 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
179 " must have the same quantization space, " +
180 firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
181 " and scale " + to_string(first.GetQuantizationScale()) + ", " +
182 secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
183 " and scale " + to_string(second.GetQuantizationScale()));
184 }
185}
186
187//---------------------------------------------------------------
telsoa014fcda012018-03-09 14:13:49 +0000188void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
189 const TensorInfo& weightsTensorInfo, const std::string& descName)
190{
191 if (biasTensor.GetQuantizationOffset() != 0)
192 {
193 throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
194 to_string(biasTensor.GetQuantizationOffset()));
195 }
196 const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
kevmay016c46dd32018-12-17 15:32:45 +0000197 if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
telsoa014fcda012018-03-09 14:13:49 +0000198 {
199 // Print the float values with extra precision to see very small differences
200 std::stringstream msg;
201 msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
202 " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
203 biasTensor.GetQuantizationScale();
204 throw InvalidArgumentException(msg.str());
205 }
206}
207
208//---------------------------------------------------------------
209void ValidateTensors(const std::vector<ITensorHandle*>& vec,
210 unsigned int numExpected,
211 const std::string& descName,
212 const std::string& varName)
213{
214 if (vec.empty() && numExpected > 0)
215 {
216 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
217 }
218
219 for (unsigned int i = 0; i < numExpected; ++i)
220 {
221 if (!vec[i])
222 {
223 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
224 }
225 }
226}
227
228//---------------------------------------------------------------
229void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
230 const TensorInfo& second,
231 const TensorInfo& output,
232 std::string const& descName,
233 std::string const& firstName,
234 std::string const& secondName)
235{
236 // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
237 // broadcasted.
238 if (first.GetNumDimensions() != second.GetNumDimensions())
239 {
240 throw InvalidArgumentException(descName + ": Tensors "
241 + firstName + " & " + secondName
242 + " must have the same number of dimensions in order to be broadcasted");
243 }
244 uint32_t numDims = first.GetNumDimensions();
245 std::vector<uint32_t> outputDims(numDims, 0u);
246 for (uint32_t i = 0; i < numDims; i++)
247 {
248 const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
249 const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
250 if (dimsNotEqual && dimsNotOne)
251 {
252 throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
253 }
254 outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
255 }
256 TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
257 if (broadcastShape != output.GetShape())
258 {
259 throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
260 + firstName + " & " + secondName
261 + " does not match the output shape");
262 }
263}
264
265//---------------------------------------------------------------
266/// Validates that the output tensor's quantization scale is greater than the product
267/// of the two input tensors' quantization scales. This is a requirement of the implementation of
268/// the quantized multiplication.
269void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
270 const TensorInfo& outputTensorInfo, std::string const& descName,
271 const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
272{
273 if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
274 {
275 if (outputTensorInfo.GetQuantizationScale() <=
276 inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
277 {
278 std::stringstream msg;
279 msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
280 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
281 throw InvalidArgumentException(msg.str());
282 }
283 }
284}
285
Sadik Armaganeff363d2019-04-05 15:25:46 +0100286//---------------------------------------------------------------
287void ValidateDataTypes(const TensorInfo& info,
288 const std::vector<armnn::DataType>& supportedTypes,
289 std::string const& descName)
290{
291 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
292 if (iterator == supportedTypes.end())
293 {
294 throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
295 }
296}
297
James Conroy4d1ff582019-06-10 17:06:39 +0100298//---------------------------------------------------------------
299void ValidateTensorDataTypesMatch(const TensorInfo& first,
300 const TensorInfo& second,
301 std::string const& descName,
302 std::string const& firstName,
303 std::string const& secondName)
304{
305 if (first.GetDataType() != second.GetDataType())
306 {
307 throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
308 " must have identical data types.");
309 }
310}
311
telsoa014fcda012018-03-09 14:13:49 +0000312} //namespace
313
// Validates that this descriptor carries the expected number of non-null
// input and output tensor handles. Inputs are checked first, so an input
// problem is reported before any output problem.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
320
321//---------------------------------------------------------------
// Validates a MemCopy workload: exactly one input/output TensorInfo, matching
// info counts, pairwise-equal element counts, and matching non-null handle
// vectors. (The per-index loops are written generally even though the counts
// are pinned to 1 above.)
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemCopyQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemCopyQueueDescriptor" , 1);

    // The tensor-info lists must pair up one-to-one.
    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
                % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    // A mem-copy cannot change the element count; shapes may differ but
    // element totals must agree per pair.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                    % i ));
        }
    }

    // The handle vectors must pair up one-to-one as well.
    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
                % m_Inputs.size() % m_Outputs.size()));
    }

    // Every paired handle must be non-null.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}
365
366//---------------------------------------------------------------
367void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
368{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100369 ValidateNumInputs(workloadInfo, "ActivationQueueDescriptor", 1);
370 ValidateNumOutputs(workloadInfo, "ActivationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000371 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
372 workloadInfo.m_OutputTensorInfos[0],
373 "ActivationQueueDescriptor",
374 "input",
375 "output");
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100376
377 std::vector<DataType> supportedTypes = {
378 DataType::Float32,
379 DataType::Float16,
Teresa Charlin18515e22019-04-24 10:17:46 +0100380 DataType::QuantisedAsymm8,
381 DataType::QuantisedSymm16
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100382 };
383
384 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
385 supportedTypes,
386 "ActivationQueueDescriptor");
387
388 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
389 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
390 "ActivationQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000391}
392
393//---------------------------------------------------------------
394void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
395{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100396 ValidateNumInputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
397 ValidateNumOutputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000398
399 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
400 workloadInfo.m_OutputTensorInfos[0],
401 "SoftmaxQueueDescriptor",
402 "input",
403 "output");
nikraj01248683f2019-05-29 16:46:50 +0100404
405 std::vector<DataType> supportedTypes =
406 {
407 DataType::Float16,
408 DataType::Float32,
409 DataType::QuantisedAsymm8,
410 DataType::QuantisedSymm16
411 };
412
413 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
414 supportedTypes,
415 "SoftmaxQueueDescriptor");
416
417 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
418 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
419 "SoftmaxQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000420}
421
422//---------------------------------------------------------------
423void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
424{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100425 ValidateNumInputs(workloadInfo, "SplitterQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000426
Ruomei Yan25339c32019-05-28 16:48:20 +0100427 // Check the supported data types
428 std::vector<DataType> supportedTypes =
429 {
430 DataType::Float32,
431 DataType::Float16,
432 DataType::Boolean,
433 DataType::Signed32,
434 DataType::QuantisedAsymm8,
435 DataType::QuantisedSymm16
436 };
437
438 for (unsigned long i = 0; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
439 {
440 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[i],
441 supportedTypes,
442 "SplitterQueueDescriptor");
443 }
444 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
445 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
446 "SplitterQueueDescriptor");
447
telsoa014fcda012018-03-09 14:13:49 +0000448 if (workloadInfo.m_OutputTensorInfos.size() <= 0)
449 {
450 throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
451 }
452
453 if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
454 {
455 throw InvalidArgumentException(
456 "SplitterQueueDescriptor: Number of split windows "
457 "has to match number of workloadInfo.m_OutputTensorInfos. "
458 "Number of windows: " +
459 to_string(m_ViewOrigins.size()) +
460 ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
461 }
462
telsoa01c577f2c2018-08-31 09:22:23 +0100463 //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
telsoa014fcda012018-03-09 14:13:49 +0000464 std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
465 for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
466 {
telsoa01c577f2c2018-08-31 09:22:23 +0100467 //Checks that the dimensionality of input is same as the split windows.
telsoa014fcda012018-03-09 14:13:49 +0000468 ViewOrigin const& e = m_ViewOrigins[w];
469 if (e.m_Origin.size() != inputDims)
470 {
471 throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
472 "have the same dimensionality as the input tensor. "
473 "Window origin (index: " +
474 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
475 " dimensions, the input "
476 "tensor has " +
477 to_string(inputDims) + " dimensions.");
478 }
479 for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
480 {
481 if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
482 workloadInfo.m_InputTensorInfos[0].GetShape()[i])
483 {
484 throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
485 "be smaller or equal than the size of the input in that coord.");
486 }
487 }
488 }
489}
490
491//---------------------------------------------------------------
Jim Flynne242f2d2019-05-22 14:24:13 +0100492void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
telsoa014fcda012018-03-09 14:13:49 +0000493{
Jim Flynne242f2d2019-05-22 14:24:13 +0100494 ValidateNumOutputs(workloadInfo, "ConcatQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000495
496 if (m_Inputs.size() <= 0)
497 {
Jim Flynne242f2d2019-05-22 14:24:13 +0100498 throw InvalidArgumentException("ConcatQueueDescriptor: At least one input needs to be provided.");
telsoa014fcda012018-03-09 14:13:49 +0000499 }
500 if (m_Outputs.size() <= 0)
501 {
Jim Flynne242f2d2019-05-22 14:24:13 +0100502 throw InvalidArgumentException("ConcatQueueDescriptor: At least one output needs to be provided.");
telsoa014fcda012018-03-09 14:13:49 +0000503 }
504
505 if (workloadInfo.m_InputTensorInfos.size() <= 0)
506 {
Jim Flynne242f2d2019-05-22 14:24:13 +0100507 throw InvalidArgumentException("ConcatQueueDescriptor: At least one TensorInfo input needs to be provided.");
telsoa014fcda012018-03-09 14:13:49 +0000508 }
509 if (workloadInfo.m_OutputTensorInfos.size() <= 0)
510 {
Jim Flynne242f2d2019-05-22 14:24:13 +0100511 throw InvalidArgumentException("ConcatQueueDescriptor: At least one TensorInfo output needs to be provided.");
telsoa014fcda012018-03-09 14:13:49 +0000512 }
513
Nikhil Raj8599a412018-11-19 14:51:07 +0000514 if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
515 {
516 throw InvalidArgumentException("Invalid Concatenation Axis provided");
517 }
518
519 if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
520 {
521 return;
522 }
523
telsoa014fcda012018-03-09 14:13:49 +0000524 if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
525 {
526 throw InvalidArgumentException(
Jim Flynne242f2d2019-05-22 14:24:13 +0100527 "ConcatQueueDescriptor: Number of split windows "
telsoa014fcda012018-03-09 14:13:49 +0000528 "has to match number of workloadInfo.m_InputTensorInfos. "
529 "Number of windows: " +
530 to_string(m_ViewOrigins.size()) +
531 ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
532 }
533
telsoa01c577f2c2018-08-31 09:22:23 +0100534 //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
telsoa014fcda012018-03-09 14:13:49 +0000535 std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
536 for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
537 {
telsoa01c577f2c2018-08-31 09:22:23 +0100538 //Checks that the dimensionality of output is same as the split windows.
telsoa014fcda012018-03-09 14:13:49 +0000539 ViewOrigin const& e = m_ViewOrigins[w];
540 if (e.m_Origin.size() != outputDims)
541 {
Jim Flynne242f2d2019-05-22 14:24:13 +0100542 throw InvalidArgumentException("ConcatQueueDescriptor: Window origin have to "
telsoa014fcda012018-03-09 14:13:49 +0000543 "have the same dimensionality as the output tensor. "
544 "Window origin (index: " +
545 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
546 " dimensions, the output "
547 "tensor has " +
548 to_string(outputDims) + " dimensions.");
549 }
telsoa01c577f2c2018-08-31 09:22:23 +0100550 //Checks that the merge windows are within the output tensor.
telsoa014fcda012018-03-09 14:13:49 +0000551 for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
552 {
553 if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
554 > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
555 {
Jim Flynne242f2d2019-05-22 14:24:13 +0100556 throw InvalidArgumentException("ConcatQueueDescriptor: Window extent coordinates have to "
telsoa014fcda012018-03-09 14:13:49 +0000557 "be smaller or equal than the size of the output in that coord.");
558 }
559 }
560 }
Jim Flynncbb66aa2019-05-15 13:03:54 +0100561
562 // Check the supported data types
563 std::vector<DataType> supportedTypes =
564 {
565 DataType::Float32,
566 DataType::Float16,
567 DataType::Boolean,
568 DataType::Signed32,
569 DataType::QuantisedAsymm8,
570 DataType::QuantisedSymm16
571 };
572
573 for (unsigned long i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
574 {
575 ValidateDataTypes(workloadInfo.m_InputTensorInfos[i],
576 supportedTypes,
Jim Flynne242f2d2019-05-22 14:24:13 +0100577 "ConcatQueueDescriptor");
Jim Flynncbb66aa2019-05-15 13:03:54 +0100578 }
579 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
580 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
Jim Flynne242f2d2019-05-22 14:24:13 +0100581 "ConcatQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000582}
583
584//---------------------------------------------------------------
Matthew Jackson2b8c1da2019-07-04 14:59:16 +0100585void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
586{
587 ValidateNumOutputs(workloadInfo, "StackQueueDescriptor", 1);
588
589 if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
590 {
591 throw InvalidArgumentException("StackQueueDescriptor: Must have the defined number of input tensors.");
592 }
593
594 // All inputs must have the same shape, which is defined in parameters
595 const TensorShape& inputShape = m_Parameters.m_InputShape;
596 for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
597 {
598 if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
599 {
600 throw InvalidArgumentException("StackQueueDescriptor: All input tensor shapes "
601 "must match the defined shape.");
602 }
603 }
604
605 // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
606 // since the output tensor has an additional dimension.
607 if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
608 {
609 throw InvalidArgumentException("StackQueueDescriptor: Axis may not be greater "
610 "than the number of input dimensions.");
611 }
612
613 // Output shape must be as inferred from the input shape
614 const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
615 for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
616 {
617 if (outputShape[i] != inputShape[i])
618 {
619 throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
620 "match shape inferred from input tensor.");
621 }
622 }
623
624 if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
625 {
626 throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
627 "match shape inferred from input tensor.");
628 }
629
630 for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
631 {
632 if (outputShape[i] != inputShape[i-1])
633 {
634 throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
635 "match shape inferred from input tensor.");
636 }
637 }
638
639 // Check the supported data types
640 std::vector<DataType> supportedTypes =
641 {
642 DataType::Float32,
643 DataType::Float16,
644 DataType::Boolean,
645 DataType::Signed32,
646 DataType::QuantisedAsymm8,
647 DataType::QuantisedSymm16
648 };
649
650 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
651 supportedTypes,
652 "StackQueueDescriptor");
653
654 for (unsigned int i = 1; i < workloadInfo.m_InputTensorInfos.size(); ++i)
655 {
656 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
657 workloadInfo.m_InputTensorInfos[i],
658 "StackQueueDescriptor",
659 "InputTensor[0]",
660 "InputTensor[" + std::to_string(i) + "]");
661 }
662 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
663 workloadInfo.m_OutputTensorInfos[0],
664 "StackQueueDescriptor",
665 "InputTensor[0]",
666 "OutputTensor[0]");
667}
668
669//---------------------------------------------------------------
telsoa014fcda012018-03-09 14:13:49 +0000670void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
671{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100672 ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
673 ValidateNumOutputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000674 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");
675
676 if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
677 workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
678 {
679 throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
680 }
681
682 if (m_Weight == nullptr)
683 {
684 throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
685 }
686
687 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");
688
689 if (m_Parameters.m_BiasEnabled)
690 {
691 if (m_Bias == nullptr)
692 {
693 throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
694 "bias value tensor descriptor is missing.");
695 }
696
telsoa01c577f2c2018-08-31 09:22:23 +0100697 // Validates type and quantization values.
telsoa014fcda012018-03-09 14:13:49 +0000698 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
699 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");
700
701 ValidateTensorDataType(m_Bias->GetTensorInfo(),
702 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
703 "FullyConnectedQueueDescriptor", "bias");
704
705 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
706 }
707
708 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
709 workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
Francis Murtagh46c09d02019-05-28 08:15:28 +0100710
711 // Check the supported data types
712 std::vector<DataType> supportedTypes =
713 {
714 DataType::Float32,
715 DataType::Float16,
716 DataType::QuantisedAsymm8,
717 DataType::QuantisedSymm16
718 };
719
720 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
721 supportedTypes,
722 "FullyConnectedQueueDescriptor");
723
724 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
725 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
726 "FullyConnectedQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000727}
728
729//---------------------------------------------------------------
// Validates a Normalization workload: one input, one output, a supported
// input data type, matching input/output types, and identical shapes.
void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "NormalizationQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "NormalizationQueueDescriptor", 1);

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::Float16,
        DataType::Float32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                      supportedTypes,
                      "NormalizationQueueDescriptor");

    // The output must carry exactly the input's data type.
    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      { workloadInfo.m_InputTensorInfos[0].GetDataType() },
                      "NormalizationQueueDescriptor");

    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "NormalizationQueueDescriptor",
                              "input",
                              "output");
}
758
// Validates an Addition workload: two inputs, one output, supported data
// types on all three tensors, and input shapes that broadcast to the output
// shape.
void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "AdditionQueueDescriptor", 2);
    ValidateNumOutputs(workloadInfo, "AdditionQueueDescriptor", 1);

    std::vector<DataType> supportedTypes = {
        DataType::Float32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16,
        DataType::Float16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                      supportedTypes,
                      "AdditionQueueDescriptor");

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
                      supportedTypes,
                      "AdditionQueueDescriptor");

    // NOTE(review): the output is checked against the supported list only, not
    // forced to equal the input type as other validators do — confirm that a
    // differing (but supported) output type is intended here.
    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      supportedTypes,
                      "AdditionQueueDescriptor");

    // The two inputs must be broadcast-compatible and produce the output shape.
    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "AdditionQueueDescriptor",
                                       "first input",
                                       "second input");
}
790
791//---------------------------------------------------------------
792void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
793{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100794 ValidateNumInputs(workloadInfo, "MultiplicationQueueDescriptor", 2);
795 ValidateNumOutputs(workloadInfo, "MultiplicationQueueDescriptor", 1);
surmeh01bceff2f2018-03-29 16:29:27 +0100796
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100797 std::vector<DataType> supportedTypes = {
798 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100799 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100800 DataType::QuantisedSymm16,
801 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100802 };
803
804 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
805 supportedTypes,
806 "MultiplicationQueueDescriptor");
807
808 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
809 supportedTypes,
810 "MultiplicationQueueDescriptor");
811
812 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
813 supportedTypes,
814 "MultiplicationQueueDescriptor");
815
surmeh01bceff2f2018-03-29 16:29:27 +0100816 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
817 workloadInfo.m_InputTensorInfos[1],
818 workloadInfo.m_OutputTensorInfos[0],
819 "MultiplicationQueueDescriptor",
820 "first input",
821 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000822}
823
824void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
825{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100826 ValidateNumInputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
827 ValidateNumOutputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
Matteo Martincigh3122bd52019-06-03 16:54:25 +0100828
829 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
830 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
831
832 std::vector<DataType> supportedTypes =
833 {
834 DataType::Float16,
835 DataType::Float32,
Matteo Martincighf5507132019-06-04 10:59:47 +0100836 DataType::QuantisedAsymm8,
837 DataType::QuantisedSymm16
Matteo Martincigh3122bd52019-06-03 16:54:25 +0100838 };
839
840 ValidateDataTypes(input, supportedTypes, "BatchNormalizationQueueDescriptor");
841 ValidateDataTypes(output, supportedTypes, "BatchNormalizationQueueDescriptor");
842
843 ValidateDataTypes(output, { input.GetDataType() }, "BatchNormalizationQueueDescriptor");
844
845 ValidateTensorQuantizationSpace(input, output, "BatchNormalizationQueueDescriptor", "input", "output");
846
telsoa014fcda012018-03-09 14:13:49 +0000847 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
848 workloadInfo.m_OutputTensorInfos[0],
849 "BatchNormalizationQueueDescriptor",
850 "input",
851 "output");
Matteo Martincigh3122bd52019-06-03 16:54:25 +0100852
853 ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
telsoa014fcda012018-03-09 14:13:49 +0000854 ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
Matteo Martincigh3122bd52019-06-03 16:54:25 +0100855 ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
856 ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");
telsoa014fcda012018-03-09 14:13:49 +0000857
Matteo Martincigh3122bd52019-06-03 16:54:25 +0100858 const TensorInfo& mean = m_Mean->GetTensorInfo();
859 const TensorInfo& variance = m_Variance->GetTensorInfo();
860 const TensorInfo& beta = m_Beta->GetTensorInfo();
861 const TensorInfo& gamma = m_Gamma->GetTensorInfo();
telsoa014fcda012018-03-09 14:13:49 +0000862
Matteo Martincigh3122bd52019-06-03 16:54:25 +0100863 ValidateTensorNumDimensions(mean, "BatchNormalizationQueueDescriptor", 1, "mean");
864 ValidateTensorNumDimensions(variance, "BatchNormalizationQueueDescriptor", 1, "variance");
865 ValidateTensorNumDimensions(beta, "BatchNormalizationQueueDescriptor", 1, "beta");
866 ValidateTensorNumDimensions(gamma, "BatchNormalizationQueueDescriptor", 1, "gamma");
telsoa014fcda012018-03-09 14:13:49 +0000867
Matteo Martincigh3122bd52019-06-03 16:54:25 +0100868 ValidateTensorShapesMatch(mean, variance, "BatchNormalizationQueueDescriptor", "mean", "variance");
869 ValidateTensorShapesMatch(mean, beta, "BatchNormalizationQueueDescriptor", "mean", "beta");
870 ValidateTensorShapesMatch(mean, gamma, "BatchNormalizationQueueDescriptor", "mean", "gamma");
telsoa014fcda012018-03-09 14:13:49 +0000871}
872
873void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
874{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100875 ValidateNumInputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
876 ValidateNumOutputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000877
878 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
879 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
880
881 ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
882 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
883 ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
884 "Convolution2dQueueDescriptor", "weight");
885 if (m_Parameters.m_BiasEnabled)
886 {
887 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
888 ValidateTensorDataType(m_Bias->GetTensorInfo(),
889 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
890 "Convolution2dQueueDescriptor", "bias");
891 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
892 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
893 }
894
895 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
896 workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
897}
898
899void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
900{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100901 ValidateNumInputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
902 ValidateNumOutputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000903
904 ValidateTensorNumDimensions(
905 workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
906 ValidateTensorNumDimensions(
907 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");
908
909 ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
910 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");
911
Bruno Goncalves22972f02019-04-26 21:03:24 -0300912 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
913 {
914 throw InvalidArgumentException(
915 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: dilationX (provided %1%) "
916 "and dilationY (provided %2%) cannot be smaller than 1.")
917 % m_Parameters.m_DilationX % m_Parameters.m_DilationX));
918 }
919
Nikhil Rajcec6b652018-10-12 13:51:57 +0100920 const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
921
Matteo Martincigh747ef822018-12-18 09:26:39 +0000922 // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
923 // inputChannels * channelMultiplier should be equal to outputChannels.
telsoa014fcda012018-03-09 14:13:49 +0000924 const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000925 const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
Nikhil Rajcec6b652018-10-12 13:51:57 +0100926 const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelIndex];
telsoa014fcda012018-03-09 14:13:49 +0000927 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
928 {
929 throw InvalidArgumentException(
930 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
931 "equal to input_channels (provided %2%) multiplied by channel_multiplier "
932 "(provided %3%).")
933 % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
934 }
935
936 if (m_Parameters.m_BiasEnabled)
937 {
938 ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
939 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
940 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
941 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");
942
943 ValidateTensorDataType(m_Bias->GetTensorInfo(),
944 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
945 "DepthwiseConvolution2dQueueDescriptor", "bias");
946 }
947
948 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
949 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
Ruomei Yan88d44b82019-05-23 14:29:06 +0100950
951 // Check the supported data types
952 std::vector<DataType> supportedTypes = {
953 DataType::Float32,
954 DataType::QuantisedAsymm8,
955 DataType::QuantisedSymm16,
956 DataType::Float16
957 };
958
959 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
960 supportedTypes,
961 "DepthwiseConvolution2dQueueDescriptor");
962
963 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
964 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
965 "DepthwiseConvolution2dQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000966}
967
968void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
969{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100970 ValidateNumInputs(workloadInfo, "PermuteQueueDescriptor", 1);
971 ValidateNumOutputs(workloadInfo, "PermuteQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000972
973 const PermutationVector& mapping = m_Parameters.m_DimMappings;
974
975 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
976 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
977
978 ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
979 ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
980
981 for (unsigned int i = 0; i < mapping.GetSize(); ++i)
982 {
983 if (input.GetShape()[i] != output.GetShape()[mapping[i]])
984 {
985 throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
986 " (=" + to_string(input.GetShape()[i]) + ") " +
987 "must match dst dimension " + to_string(mapping[i]) +
988 " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
989 }
990 }
991}
992
993void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
994{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100995 ValidateNumInputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
996 ValidateNumOutputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000997
998 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
999 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
Teresa Charlina3b20472019-06-06 11:12:32 +01001000
1001 std::vector<DataType> supportedTypes =
1002 {
1003 DataType::Float32,
1004 DataType::Float16,
Teresa Charlin0434df62019-06-06 13:40:35 +01001005 DataType::QuantisedAsymm8,
1006 DataType::QuantisedSymm16
Teresa Charlina3b20472019-06-06 11:12:32 +01001007 };
1008
1009 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1010 supportedTypes,
1011 "Pooling2dQueueDescriptor");
1012
1013 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1014 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
1015 "Pooling2dQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +00001016}
1017
1018void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1019{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001020 ValidateNumInputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
1021 ValidateNumOutputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +00001022
1023 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
1024 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
1025
Ellen Norris-Thompson3cb85f32019-06-17 11:32:49 +01001026 std::vector<DataType> supportedTypes =
Teresa Charlin970f43b2019-07-01 13:51:07 +01001027 {
1028 DataType::Float16,
1029 DataType::Float32,
1030 DataType::QuantisedAsymm8,
1031 DataType::QuantisedSymm16
1032 };
Ellen Norris-Thompson3cb85f32019-06-17 11:32:49 +01001033
1034 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1035 supportedTypes,
1036 "ResizeBilinearQueueDescriptor");
1037
1038 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1039 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
1040 "ResizeBilinearQueueDescriptor");
1041
telsoa01c577f2c2018-08-31 09:22:23 +01001042 // Resizes bilinear only changes width and height: batch and channel count must match.
Teresa Charlin970f43b2019-07-01 13:51:07 +01001043 const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1044 const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
1045 if (inputBatchSize != outputBatchSize)
telsoa014fcda012018-03-09 14:13:49 +00001046 {
Teresa Charlin970f43b2019-07-01 13:51:07 +01001047 throw InvalidArgumentException(
1048 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
1049 "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
telsoa014fcda012018-03-09 14:13:49 +00001050 }
1051
Teresa Charlin970f43b2019-07-01 13:51:07 +01001052 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1053 const unsigned int inputChannelCount =
1054 workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
1055 const unsigned int outputChannelCount =
1056 workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
1057 if (inputChannelCount != outputChannelCount)
telsoa014fcda012018-03-09 14:13:49 +00001058 {
Teresa Charlin970f43b2019-07-01 13:51:07 +01001059 throw InvalidArgumentException(
1060 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
1061 "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
1062 }
1063}
1064
1065void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1066{
1067 ValidateNumInputs(workloadInfo, "ResizeQueueDescriptor", 1);
1068 ValidateNumOutputs(workloadInfo, "ResizeQueueDescriptor", 1);
1069
1070 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeQueueDescriptor", 4, "input");
1071 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeQueueDescriptor", 4, "output");
1072
1073 std::vector<DataType> supportedTypes =
1074 {
1075 DataType::Float16,
1076 DataType::Float32,
1077 DataType::QuantisedAsymm8,
1078 DataType::QuantisedSymm16
1079 };
1080
1081 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1082 supportedTypes,
1083 "ResizeQueueDescriptor");
1084
1085 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1086 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
1087 "ResizeQueueDescriptor");
1088
1089 // Resizes only changes width and height: batch and channel count must match.
1090 const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1091 const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
1092 if (inputBatchSize != outputBatchSize)
1093 {
1094 throw InvalidArgumentException(
1095 boost::str(boost::format("ResizeQueueDescriptor: Input batch size (%1%) "
1096 "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
1097 }
1098
1099 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1100 const unsigned int inputChannelCount =
Matthew Bentham8800c002018-11-19 13:19:28 +00001101 workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
Teresa Charlin970f43b2019-07-01 13:51:07 +01001102 const unsigned int outputChannelCount =
Matthew Bentham8800c002018-11-19 13:19:28 +00001103 workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
Teresa Charlin970f43b2019-07-01 13:51:07 +01001104 if (inputChannelCount != outputChannelCount)
1105 {
1106 throw InvalidArgumentException(
1107 boost::str(boost::format("ResizeQueueDescriptor: Input channel count (%1%) "
1108 "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
telsoa014fcda012018-03-09 14:13:49 +00001109 }
1110}
1111
1112void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1113{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001114 ValidateNumInputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
1115 ValidateNumOutputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +00001116
1117 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
1118 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
1119 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1120 workloadInfo.m_OutputTensorInfos[0],
1121 "FakeQuantizationQueueDescriptor",
1122 "input",
1123 "output");
1124 if (m_Parameters.m_Min > m_Parameters.m_Max)
1125 {
1126 throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
1127 }
1128
1129}
1130
1131void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1132{
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001133 const std::string& descriptorName = "L2NormalizationQueueDescriptor";
telsoa014fcda012018-03-09 14:13:49 +00001134
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001135 ValidateNumInputs(workloadInfo, descriptorName, 1);
1136 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1137
1138 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], descriptorName, 4, "input");
1139 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], descriptorName, 4, "output");
telsoa014fcda012018-03-09 14:13:49 +00001140 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1141 workloadInfo.m_OutputTensorInfos[0],
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001142 descriptorName,
telsoa014fcda012018-03-09 14:13:49 +00001143 "input",
1144 "output");
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001145
1146 // Check the supported data types
1147 std::vector<DataType> supportedTypes =
1148 {
1149 DataType::Float32,
1150 DataType::Float16,
1151 DataType::QuantisedAsymm8,
1152 DataType::QuantisedSymm16
1153 };
1154
1155 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1156 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], supportedTypes, descriptorName);
1157 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1158 {workloadInfo.m_InputTensorInfos[0].GetDataType()}, descriptorName);
telsoa014fcda012018-03-09 14:13:49 +00001159}
1160
1161void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1162{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001163 ValidateNumInputs(workloadInfo, "ConstantQueueDescriptor", 0);
1164 ValidateNumOutputs(workloadInfo, "ConstantQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +00001165
1166 if (!m_LayerOutput)
1167 {
1168 throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
1169 }
1170
1171 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
1172 workloadInfo.m_OutputTensorInfos[0],
1173 "ConstantQueueDescriptor",
1174 "constant",
1175 "output");
Nina Drozd58ef2c62019-05-16 12:09:18 +01001176
1177 // Check the supported data types
1178 std::vector<DataType> supportedTypes =
Nina Drozd2f2778f2019-05-27 10:37:05 +01001179 {
1180 DataType::Float32,
1181 DataType::Float16,
1182 DataType::Signed32,
1183 DataType::QuantisedAsymm8,
1184 DataType::QuantisedSymm16
1185 };
Nina Drozd58ef2c62019-05-16 12:09:18 +01001186
1187 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], supportedTypes, "ConstantQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +00001188}
1189
1190void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1191{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001192 ValidateNumInputs(workloadInfo, "ReshapeQueueDescriptor", 1);
1193 ValidateNumOutputs(workloadInfo, "ReshapeQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +00001194
1195 if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
1196 {
1197 throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
1198 to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
1199 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
1200 }
Nina Drozd2f2778f2019-05-27 10:37:05 +01001201
1202 // Check the supported data types
1203 std::vector<DataType> supportedTypes =
1204 {
1205 DataType::Float32,
1206 DataType::Float16,
Nina Drozd8ed4b8c2019-05-29 10:41:04 +01001207 DataType::QuantisedAsymm8,
1208 DataType::QuantisedSymm16
Nina Drozd2f2778f2019-05-27 10:37:05 +01001209 };
1210
1211 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, "ReshapeQueueDescriptor");
1212 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], supportedTypes, "ReshapeQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +00001213}
1214
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001215void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1216{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001217 ValidateNumInputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
1218 ValidateNumOutputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001219
1220 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "input");
1221 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "output");
1222
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001223 if (m_Parameters.m_BlockShape.size() != 2)
1224 {
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001225 throw InvalidArgumentException("Block Shape must contain 2 spatial dimensions");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001226 }
1227
1228 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1229 {
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001230 throw InvalidArgumentException("Pad List must contain the same number of dimensions as Block Shape.");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001231 }
1232
1233 const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();
1234
1235 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1236 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
1237
Matthew Bentham8800c002018-11-19 13:19:28 +00001238 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1239 unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001240 + heightPad.first + heightPad.second;
1241
Matthew Bentham8800c002018-11-19 13:19:28 +00001242 unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001243 + widthPad.first + widthPad.second;
1244
1245 unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
Matthew Bentham8800c002018-11-19 13:19:28 +00001246 * inputShape[dimensionIndices.GetChannelsIndex()];
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001247
1248 if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
1249 {
1250 throw InvalidArgumentException("SpaceToBatchNdQueueDescriptor: Input tensor has " +
1251 to_string(numInputElements) + " after padding but output tensor has " +
1252 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
1253 }
1254
1255 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001256 {
1257 throw InvalidArgumentException(
1258 "Input shape after padding must be divisible by Block Shape in all spatial dimensions");
1259 }
nikraj01120522a2019-05-31 11:33:07 +01001260
1261 std::vector<DataType> supportedTypes =
1262 {
1263 DataType::Float16,
1264 DataType::Float32,
1265 DataType::QuantisedAsymm8,
1266 DataType::QuantisedSymm16
1267 };
1268
1269 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1270 supportedTypes,
1271 "SpaceToBatchNdQueueDescriptor");
1272
1273 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1274 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
1275 "SpaceToBatchNdQueueDescriptor");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001276}
1277
Keith Davisa57eccb2019-06-14 17:33:22 +01001278void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1279{
1280 ValidateNumInputs(workloadInfo, "SpaceToDepthQueueDescriptor", 1);
1281 ValidateNumOutputs(workloadInfo, "SpaceToDepthQueueDescriptor", 1);
1282
1283 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0],
1284 "SpaceToDepthQueueDescriptor", 4, "input");
1285 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0],
1286 "SpaceToDepthQueueDescriptor", 4, "output");
1287
1288 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1289
1290 std::vector<DataType> supportedTypes =
1291 {
1292 DataType::Float32,
1293 DataType::Float16,
James Conroyd2aa85e2019-07-01 17:12:40 +01001294 DataType::QuantisedAsymm8,
1295 DataType::QuantisedSymm16
Keith Davisa57eccb2019-06-14 17:33:22 +01001296 };
1297
1298 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1299 supportedTypes,
1300 "SpaceToDepthQueueDescriptor");
1301 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1302 supportedTypes,
1303 "SpaceToDepthQueueDescriptor");
1304
1305 const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();
1306
1307 unsigned int numInputElements = inputShape[0]
1308 * inputShape[dimensionIndices.GetWidthIndex()]
1309 * inputShape[dimensionIndices.GetHeightIndex()]
1310 * inputShape[dimensionIndices.GetChannelsIndex()];
1311
1312 if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
1313 {
1314 throw InvalidArgumentException("SpaceToDepthQueueDescriptor: Input tensor has " +
1315 to_string(numInputElements) + " but output tensor has " +
1316 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
1317 }
1318
1319 if (inputShape[dimensionIndices.GetHeightIndex()] % m_Parameters.m_BlockSize != 0 ||
1320 inputShape[dimensionIndices.GetWidthIndex()] % m_Parameters.m_BlockSize != 0)
1321 {
1322 throw InvalidArgumentException(
1323 "Input shape must be divisible by block size in all spatial dimensions");
1324 }
1325}
1326
telsoa014fcda012018-03-09 14:13:49 +00001327void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1328{
James Conroy83735b12019-05-30 16:36:59 +01001329 const std::string floorQueueDescString = "FloorQueueDescriptor";
1330
1331 ValidateNumInputs(workloadInfo, floorQueueDescString, 1);
1332 ValidateNumOutputs(workloadInfo, floorQueueDescString, 1);
1333
1334 std::vector<DataType> supportedTypes =
1335 {
James Conroyb40d7102019-06-04 12:32:09 +01001336 DataType::Float32,
1337 DataType::QuantisedSymm16
James Conroy83735b12019-05-30 16:36:59 +01001338 };
1339
1340 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, floorQueueDescString);
1341 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], supportedTypes, floorQueueDescString);
telsoa014fcda012018-03-09 14:13:49 +00001342
1343 if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
1344 {
James Conroy83735b12019-05-30 16:36:59 +01001345 throw InvalidArgumentException(floorQueueDescString + ": Input and output tensor infos do not match.");
telsoa014fcda012018-03-09 14:13:49 +00001346 }
1347}
1348
telsoa01c577f2c2018-08-31 09:22:23 +01001349void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1350{
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001351 std::vector<DataType> supportedTypes = {
Conor Kennedyb9971c92019-05-07 07:14:23 +01001352 DataType::Float16,
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001353 DataType::Float32,
Conor Kennedyb9971c92019-05-07 07:14:23 +01001354 DataType::QuantisedSymm16
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001355 };
Jan Eilers38e05bd2019-06-26 13:10:09 +01001356 // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001357
Jan Eilers38e05bd2019-06-26 13:10:09 +01001358 // check for supported type of one input and match them with all the other input and output
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001359 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1360 supportedTypes,
1361 "LstmQueueDescriptor");
Jan Eilers38e05bd2019-06-26 13:10:09 +01001362 // type matches all other inputs
1363 for (uint32_t i = 1; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1364 {
1365 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1366 workloadInfo.m_InputTensorInfos[i],
1367 "LstmQueueDescriptor",
1368 "InputTensor[0]",
1369 "InputTensor[" + std::to_string(i) + "]");
1370 }
1371 // type matches all other outputs
1372 for (uint32_t i = 0; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
1373 {
1374 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1375 workloadInfo.m_OutputTensorInfos[i],
1376 "LstmQueueDescriptor",
1377 "InputTensor[0]",
1378 "OutputTensor[" + std::to_string(i) + "]");
1379 }
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001380
Jan Eilers38e05bd2019-06-26 13:10:09 +01001381 // TODO: check clipping parameter is valid
1382
1383 // Inferring batch size, number of outputs and number of cells from the inputs.
1384 // TODO: figure out if there is a way to make sure the specific inputs are at that index of workloadInfo
1385 const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
1386 const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1387 ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
1388 const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1389 ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
1390 const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1391
1392 // check dimensions of all inputs and outputs
1393 if (workloadInfo.m_InputTensorInfos.size() != 3)
1394 {
1395 throw InvalidArgumentException("Invalid number of inputs.");
1396 }
1397 if (workloadInfo.m_OutputTensorInfos.size() != 4)
1398 {
1399 throw InvalidArgumentException("Invalid number of outputs.");
1400 }
1401 // input tensor
1402 ValidateTensorNumDimNumElem( workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
1403 "LstmQueueDescriptor input[0]");
1404 // outputStateInTensor
1405 ValidateTensorNumDimNumElem( workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
1406 "LstmQueueDescriptor input[1]");
1407 // outputStateInTensor
1408 ValidateTensorNumDimNumElem( workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
1409 "LstmQueueDescriptor input[2]");
1410 // scratchBufferTensor
1411 unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1412 ValidateTensorNumDimNumElem( workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1413 "LstmQueueDescriptor output[0]");
1414 // outputStateOutTensor
1415 ValidateTensorNumDimNumElem( workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
1416 "LstmQueueDescriptor output[1]");
1417 // cellStateOutTensor
1418 ValidateTensorNumDimNumElem( workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
1419 "LstmQueueDescriptor output[2]");
1420 // outputTensor
1421 ValidateTensorNumDimNumElem( workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
1422 "LstmQueueDescriptor output[3]");
1423
1424
1425 // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
1426 if ( m_InputToInputWeights )
1427 {
1428 ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1429 (n_cell * n_input), "InputLayerNormWeights");
1430 }
1431
1432 ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
1433 ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1434 (n_cell * n_input), "InputToForgetWeights");
1435
1436 ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
1437 ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1438 (n_cell * n_input), "InputToCellWeights");
1439
1440 if ( m_RecurrentToInputWeights )
1441 {
1442 ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1443 (n_cell * n_output), "RecurrentToInputWeights");
1444 }
1445
1446 ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
1447 ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1448 (n_cell * n_output), "RecurrentToForgetWeights");
1449
1450 ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
1451 ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1452 (n_cell * n_output), "RecurrentToCellWeights");
1453
1454 // Make sure the input-gate's parameters are either both present (regular
1455 // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
1456 bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1457 !m_Parameters.m_CifgEnabled) ||
1458 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1459 m_Parameters.m_CifgEnabled));
1460 if (!cifg_weights_all_or_none)
1461 {
1462 throw InvalidArgumentException("Input-Gate's parameters InputToInputWeights and RecurrentToInputWeights must "
1463 "either both be present (regular LSTM) or both not present (CIFG-LSTM). In "
1464 "addition CifgEnable must be set accordingly");
1465 }
1466
1467 if ( m_CellToInputWeights )
1468 {
1469 ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1470 n_cell, "CellToInputWeights");
1471 }
1472 if ( m_CellToForgetWeights )
1473 {
1474 ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1475 n_cell, "CellToForgetWeights");
1476 }
1477 if ( m_CellToOutputWeights )
1478 {
1479 ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1480 n_cell, "CellToOutputWeights");
1481 }
1482
1483 // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
1484 bool peephole_weights_all_or_none =
1485 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
1486 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1487 || ( !m_CellToInputWeights && !m_CellToForgetWeights
1488 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1489 if (!peephole_weights_all_or_none)
1490 {
1491 throw InvalidArgumentException("Invalid combination of peephole parameters");
1492 }
1493
1494 // Make sure the input gate bias is present only when not a CIFG-LSTM.
1495 if (m_Parameters.m_CifgEnabled)
1496 {
1497 if (m_InputGateBias)
1498 {
1499 throw InvalidArgumentException("InputGateBias is present and CIFG-LSTM is enabled");
1500 }
1501 }
1502 else
1503 {
1504 if (!m_InputGateBias)
1505 {
1506 throw InvalidArgumentException("If CIFG-LSTM is disabled InputGateBias must be present.");
1507 }
1508 ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
1509 n_cell, "InputGateBias");
1510 }
1511
1512 ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
1513 ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
1514
1515 ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
1516 ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
1517
1518 ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
1519 ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
1520
1521 if (m_ProjectionWeights)
1522 {
1523 ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
1524 (n_cell * n_output), "ProjectionWeights");
1525 }
1526 if (m_ProjectionBias)
1527 {
1528 ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
1529 }
1530
1531 // Making sure the projection tensors are consistent:
1532 // 1) If projection weight is not present, then projection bias should not be
1533 // present.
1534 // 2) If projection weight is present, then projection bias is optional.
1535 bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
1536 !m_Parameters.m_ProjectionEnabled)
1537 || (m_ProjectionWeights && !m_ProjectionBias &&
1538 m_Parameters.m_ProjectionEnabled)
1539 || (m_ProjectionWeights && m_ProjectionBias &&
1540 m_Parameters.m_ProjectionEnabled));
1541 if (!projecton_tensors_consistent)
1542 {
1543 throw InvalidArgumentException("Projection tensors are inconsistent.");
1544 }
1545
1546 // The four layer normalization weights either all have values or none of them have values. Additionally, if
1547 // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
1548 // either all have values or none of them have values. Layer normalization is used when the values of all the
1549 // layer normalization weights are present
1550 if (m_InputLayerNormWeights)
1551 {
1552 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
1553 }
1554 if (m_ForgetLayerNormWeights)
1555 {
1556 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1557 }
1558 if (m_CellLayerNormWeights)
1559 {
1560 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1561 }
1562 if (m_OutputLayerNormWeights)
1563 {
1564 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1565 }
1566
1567
1568 if (m_Parameters.m_LayerNormEnabled)
1569 {
1570 if (!m_Parameters.m_CifgEnabled)
1571 {
1572 if (!m_InputLayerNormWeights)
1573 {
1574 throw InvalidArgumentException("Layer normalisation is enabled and CIFG-LSTM is disabled but "
1575 "InputLayerNormWeights are not present");
1576 }
1577 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
1578 1, n_cell, "InputLayerNormWeights");
1579 }
1580 else if (m_InputLayerNormWeights)
1581 {
1582 throw InvalidArgumentException("InputLayerNormWeights are present while CIFG is enabled");
1583 }
1584
1585 ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
1586 "ForgetLayerNormWeights");
1587 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1588
1589 ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
1590 "OutputLayerNormWeights");
1591 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1592
1593 ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
1594 "CellLayerNormWeights");
1595 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1596 }
1597 else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
1598 {
1599 throw InvalidArgumentException("Layer normalisation is disabled but one or more layer normalisation weights "
1600 "are present.");
1601 }
telsoa01c577f2c2018-08-31 09:22:23 +01001602}
1603
1604void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1605{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001606 ValidateNumInputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
1607 ValidateNumOutputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001608
1609 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
1610 {
1611 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Input tensor type must be Float32.");
1612 }
1613
1614 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float16)
1615 {
1616 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Output tensor type must be Float16.");
1617 }
1618
1619 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1620 workloadInfo.m_OutputTensorInfos[0],
1621 "ConvertFp32ToFp16QueueDescriptor",
1622 "input",
1623 "output");
1624}
1625
1626void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1627{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001628 ValidateNumInputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
1629 ValidateNumOutputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001630
1631 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float16)
1632 {
1633 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Input tensor type must be Float16.");
1634 }
1635 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
1636 {
1637 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Output tensor type must be Float32.");
1638 }
1639
1640 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1641 workloadInfo.m_OutputTensorInfos[0],
1642 "ConvertFp16ToFp32QueueDescriptor",
1643 "input",
1644 "output");
1645}
1646
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001647void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1648{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001649 ValidateNumInputs(workloadInfo, "DivisionQueueDescriptor", 2);
1650 ValidateNumOutputs(workloadInfo, "DivisionQueueDescriptor", 1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001651
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001652 std::vector<DataType> supportedTypes = {
1653 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001654 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +01001655 DataType::QuantisedSymm16,
1656 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001657 };
1658
1659 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1660 supportedTypes,
1661 "DivisionQueueDescriptor");
1662
1663 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1664 supportedTypes,
1665 "DivisionQueueDescriptor");
1666
1667 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1668 supportedTypes,
1669 "DivisionQueueDescriptor");
1670
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001671 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1672 workloadInfo.m_InputTensorInfos[1],
1673 workloadInfo.m_OutputTensorInfos[0],
1674 "DivisionQueueDescriptor",
1675 "first input",
1676 "second input");
1677}
1678
David Beckc2044fe2018-09-05 15:00:38 +01001679void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1680{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001681 ValidateNumInputs(workloadInfo, "SubtractionQueueDescriptor", 2);
1682 ValidateNumOutputs(workloadInfo, "SubtractionQueueDescriptor", 1);
David Beckc2044fe2018-09-05 15:00:38 +01001683
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001684 std::vector<DataType> supportedTypes = {
1685 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001686 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +01001687 DataType::QuantisedSymm16,
1688 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001689 };
1690
1691 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1692 supportedTypes,
1693 "SubtractionQueueDescriptor");
1694
1695 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1696 supportedTypes,
1697 "SubtractionQueueDescriptor");
1698
1699 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1700 supportedTypes,
1701 "SubtractionQueueDescriptor");
1702
David Beckc2044fe2018-09-05 15:00:38 +01001703 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1704 workloadInfo.m_InputTensorInfos[1],
1705 workloadInfo.m_OutputTensorInfos[0],
1706 "SubtractionQueueDescriptor",
1707 "first input",
1708 "second input");
1709}
1710
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001711void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1712{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001713 ValidateNumInputs(workloadInfo, "MaximumQueueDescriptor", 2);
1714 ValidateNumOutputs(workloadInfo, "MaximumQueueDescriptor", 1);
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001715
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001716 std::vector<DataType> supportedTypes = {
1717 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001718 DataType::QuantisedAsymm8,
1719 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001720 };
1721
1722 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1723 supportedTypes,
1724 "MaximumQueueDescriptor");
1725
1726 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1727 supportedTypes,
1728 "MaximumQueueDescriptor");
1729
1730 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1731 supportedTypes,
1732 "MaximumQueueDescriptor");
1733
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001734 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1735 workloadInfo.m_InputTensorInfos[1],
1736 workloadInfo.m_OutputTensorInfos[0],
1737 "MaximumQueueDescriptor",
1738 "first input",
1739 "second input");
1740}
1741
narpra01a6bf9122018-09-10 09:50:09 +01001742void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1743{
James Conroy4d1ff582019-06-10 17:06:39 +01001744 const std::string meanQueueDescString = "MeanQueueDescriptor";
1745
1746 ValidateNumInputs(workloadInfo, meanQueueDescString, 1);
1747 ValidateNumOutputs(workloadInfo, meanQueueDescString, 1);
1748
1749 std::vector<DataType> supportedTypes =
1750 {
1751 DataType::Float32,
1752 DataType::Float16,
1753 DataType::QuantisedAsymm8,
1754 DataType::QuantisedSymm16
1755 };
narpra01eb061912018-09-10 17:35:27 +01001756
1757 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
1758 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1759
James Conroy4d1ff582019-06-10 17:06:39 +01001760 // First check if input tensor data type is supported, then
1761 // check if this data type matches the output tensor data type
1762 ValidateDataTypes(input, supportedTypes, meanQueueDescString);
1763 ValidateTensorDataTypesMatch(input, output, meanQueueDescString, "input", "output");
1764
narpra0132b90462018-09-13 11:07:48 +01001765 if (m_Parameters.m_KeepDims)
narpra01eb061912018-09-10 17:35:27 +01001766 {
James Conroy4d1ff582019-06-10 17:06:39 +01001767 ValidateTensorNumDimensions(output, meanQueueDescString, input.GetNumDimensions(), "output");
narpra01eb061912018-09-10 17:35:27 +01001768 }
narpra0132b90462018-09-13 11:07:48 +01001769 else if (m_Parameters.m_Axis.empty())
narpra01eb061912018-09-10 17:35:27 +01001770 {
James Conroy4d1ff582019-06-10 17:06:39 +01001771 ValidateTensorNumDimensions(output, meanQueueDescString, 1, "output");
narpra01eb061912018-09-10 17:35:27 +01001772 }
1773 else
1774 {
narpra0132b90462018-09-13 11:07:48 +01001775 auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
narpra01eb061912018-09-10 17:35:27 +01001776 ValidateTensorNumDimensions(output,
James Conroy4d1ff582019-06-10 17:06:39 +01001777 meanQueueDescString,
narpra01eb061912018-09-10 17:35:27 +01001778 outputDim > 0 ? outputDim : 1,
1779 "output");
1780 }
narpra01a6bf9122018-09-10 09:50:09 +01001781}
1782
jimfly012c9322a2018-09-19 10:59:49 +01001783void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1784{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001785 ValidateNumInputs(workloadInfo, "PadQueueDescriptor", 1);
1786 ValidateNumOutputs(workloadInfo, "PadQueueDescriptor", 1);
jimfly012c9322a2018-09-19 10:59:49 +01001787
1788 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
Nina Drozd661dfa72018-10-02 11:14:17 +01001789 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1790
jimfly012c9322a2018-09-19 10:59:49 +01001791 // input and output should have the same number of dimensions
1792 ValidateTensorNumDimensions(output, "PadQueueDescriptor", input.GetNumDimensions(), "output");
1793 // there should be entry in the pad list for each dimension in the input tensor
1794 if (m_Parameters.m_PadList.size() != input.GetNumDimensions()) {
1795 throw InvalidArgumentException("Pad List should contain the same number of entries as there"
1796 " are dimensions in the input tensor that is " +
1797 to_string(input.GetNumDimensions()) + " entries " +
1798 " not " + to_string(m_Parameters.m_PadList.size()) + " entries.");
1799 }
1800}
1801
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001802void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1803{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001804 ValidateNumInputs(workloadInfo, "QuantizeQueueDescriptor", 1);
1805 ValidateNumOutputs(workloadInfo, "QuantizeQueueDescriptor", 1);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001806
1807
1808 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
1809 {
1810 throw InvalidArgumentException("Quantize only accepts Float32 inputs.");
1811 }
1812
1813 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1814 workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1815 {
1816 throw InvalidArgumentException("Output of quantized layer must be quantized type.");
1817 }
1818}
1819
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001820void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1821{
Francis Murtaghd0dfe172019-06-25 10:57:10 +01001822 const std::string batchToSpaceNdQueueDescriptorStr = "BatchToSpaceNdQueueDescriptor";
1823
1824 ValidateNumInputs(workloadInfo, batchToSpaceNdQueueDescriptorStr, 1);
1825 ValidateNumOutputs(workloadInfo, batchToSpaceNdQueueDescriptorStr, 1);
1826
1827 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
1828 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1829
1830 std::vector<DataType> supportedTypes =
1831 {
1832 DataType::Float32,
1833 DataType::QuantisedAsymm8,
1834 DataType::QuantisedSymm16
1835 };
1836
1837 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1838 supportedTypes,
1839 batchToSpaceNdQueueDescriptorStr);
1840
1841 ValidateTensorDataTypesMatch(input, output, batchToSpaceNdQueueDescriptorStr, "input", "output");
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001842}
1843
Conor Kennedy430b5d82018-11-14 15:28:28 +00001844void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1845{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001846 ValidateNumInputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
1847 ValidateNumOutputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
Conor Kennedy430b5d82018-11-14 15:28:28 +00001848
1849 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
Matteo Martincighe851b3d2019-05-28 14:31:20 +01001850 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1851
1852 std::vector<DataType> supportedTypes =
1853 {
1854 DataType::Float16,
1855 DataType::Float32,
Matteo Martincigh42666a12019-05-29 08:53:41 +01001856 DataType::QuantisedAsymm8,
1857 DataType::QuantisedSymm16
Matteo Martincighe851b3d2019-05-28 14:31:20 +01001858 };
1859
1860 ValidateDataTypes(input, supportedTypes, "StridedSliceQueueDescriptor");
1861 ValidateDataTypes(output, supportedTypes, "StridedSliceQueueDescriptor");
1862
1863 ValidateDataTypes(output, { input.GetDataType() }, "StridedSliceQueueDescriptor");
1864
1865 ValidateTensorQuantizationSpace(input, output, "StridedSliceQueueDescriptor", "input", "output");
1866
Conor Kennedy430b5d82018-11-14 15:28:28 +00001867 const uint32_t rank = input.GetNumDimensions();
1868
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001869 if (rank > 4)
1870 {
1871 throw InvalidArgumentException(
1872 "StridedSliceLayer: Input tensors with rank greater than 4 are not supported");
1873 }
1874
Conor Kennedy430b5d82018-11-14 15:28:28 +00001875 // Begin, End & Stride length must be of rank(input0)
1876 if (m_Parameters.m_Begin.size() != rank)
1877 {
1878 throw InvalidArgumentException("StridedSliceLayer: Begin length must be of rank input0("
1879 + to_string(rank) + ")");
1880 }
1881
1882 if (m_Parameters.m_End.size() != rank)
1883 {
1884 throw InvalidArgumentException("StridedSliceLayer: End length must be of rank input0("
1885 + to_string(rank) + ")");
1886 }
1887
1888 if (m_Parameters.m_Stride.size() != rank)
1889 {
1890 throw InvalidArgumentException("StridedSliceLayer: Stride length must be of rank input0("
1891 + to_string(rank) + ")");
1892 }
1893
1894 // Stride entries must be non-zero
1895 for (auto& stride : m_Parameters.m_Stride)
1896 {
1897 if (stride == 0)
1898 {
1899 throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
1900 }
1901 }
1902}
1903
kevmay0190539692018-11-29 08:40:19 +00001904void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1905{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001906 ValidateNumInputs(workloadInfo, "MinimumQueueDescriptor", 2);
1907 ValidateNumOutputs(workloadInfo, "MinimumQueueDescriptor", 1);
kevmay0190539692018-11-29 08:40:19 +00001908
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001909 std::vector<DataType> supportedTypes = {
1910 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001911 DataType::QuantisedAsymm8,
1912 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001913 };
1914
1915 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1916 supportedTypes,
1917 "MinimumQueueDescriptor");
1918
1919 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1920 supportedTypes,
1921 "MinimumQueueDescriptor");
1922
1923 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1924 supportedTypes,
1925 "MinimumQueueDescriptor");
1926
kevmay0190539692018-11-29 08:40:19 +00001927 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1928 workloadInfo.m_InputTensorInfos[1],
1929 workloadInfo.m_OutputTensorInfos[0],
1930 "MinimumQueueDescriptor",
1931 "first input",
1932 "second input");
1933}
1934
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001935void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1936{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001937 ValidateNumInputs(workloadInfo, "DebugQueueDescriptor", 1);
1938 ValidateNumOutputs(workloadInfo, "DebugQueueDescriptor", 1);
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001939}
1940
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001941void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1942{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001943 ValidateNumInputs(workloadInfo, "EqualQueueDescriptor", 2);
1944 ValidateNumOutputs(workloadInfo, "EqualQueueDescriptor", 1);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001945
1946 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1947 workloadInfo.m_InputTensorInfos[1],
1948 workloadInfo.m_OutputTensorInfos[0],
1949 "EqualQueueDescriptor",
1950 "first input",
1951 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001952
1953 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1954 {
1955 throw InvalidArgumentException("EqualQueueDescriptor: Output tensor type must be Boolean.");
1956 }
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001957}
1958
FrancisMurtagh878f0232018-12-19 10:56:15 +00001959void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1960{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001961 ValidateNumInputs(workloadInfo, "GreaterQueueDescriptor", 2);
1962 ValidateNumOutputs(workloadInfo, "GreaterQueueDescriptor", 1);
FrancisMurtagh878f0232018-12-19 10:56:15 +00001963
1964 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1965 workloadInfo.m_InputTensorInfos[1],
1966 workloadInfo.m_OutputTensorInfos[0],
1967 "GreaterQueueDescriptor",
1968 "first input",
1969 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001970
1971 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1972 {
1973 throw InvalidArgumentException("GreaterQueueDescriptor: Output tensor type must be Boolean.");
1974 }
FrancisMurtagh878f0232018-12-19 10:56:15 +00001975}
1976
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001977void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1978{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001979 ValidateNumInputs(workloadInfo, "RsqrtQueueDescriptor", 1);
1980 ValidateNumOutputs(workloadInfo, "RsqrtQueueDescriptor", 1);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001981 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1982 workloadInfo.m_OutputTensorInfos[0],
1983 "RsqrtQueueDescriptor",
1984 "input",
1985 "output");
nikraj010421e7f2019-06-14 09:40:34 +01001986
1987 std::vector<DataType> supportedTypes =
1988 {
1989 DataType::Float16,
1990 DataType::Float32,
nikraj0124d73212019-06-14 14:20:40 +01001991 DataType::QuantisedAsymm8,
1992 DataType::QuantisedSymm16
nikraj010421e7f2019-06-14 09:40:34 +01001993 };
1994
1995 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1996 supportedTypes,
1997 "RsqrtQueueDescriptor");
1998
1999 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
2000 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
2001 "RsqrtQueueDescriptor");
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00002002}
2003
narpra01b89b05f2019-01-16 09:53:09 +00002004void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2005{
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002006 const std::string GatherQueueDescriptorStr = "GatherQueueDescriptor";
2007
2008 ValidateNumInputs(workloadInfo, GatherQueueDescriptorStr, 2);
2009 ValidateNumOutputs(workloadInfo, GatherQueueDescriptorStr, 1);
narpra014951d842019-01-18 16:53:53 +00002010
2011 const TensorInfo& indices = workloadInfo.m_InputTensorInfos[1];
2012
2013 if (indices.GetDataType() != DataType::Signed32)
2014 {
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002015 throw InvalidArgumentException(GatherQueueDescriptorStr + ": Indices tensor type must be int32.");
narpra014951d842019-01-18 16:53:53 +00002016 }
2017
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002018 std::vector<DataType> supportedTypes =
2019 {
2020 DataType::Float16,
2021 DataType::Float32,
2022 DataType::QuantisedAsymm8,
2023 DataType::QuantisedSymm16
2024 };
2025
2026 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
2027 supportedTypes,
2028 GatherQueueDescriptorStr);
2029
2030 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
2031 workloadInfo.m_OutputTensorInfos[0],
2032 GatherQueueDescriptorStr, "Input", "Output");
2033
narpra014951d842019-01-18 16:53:53 +00002034 const TensorInfo& params = workloadInfo.m_InputTensorInfos[0];
2035 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
2036 unsigned int paramsDim = params.GetNumDimensions();
2037 unsigned int indicesDim = indices.GetNumDimensions();
2038 unsigned int outputDim = paramsDim - 1 + indicesDim;
2039
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002040 ValidateTensorNumDimensions(output, GatherQueueDescriptorStr, outputDim, "output");
narpra01b89b05f2019-01-16 09:53:09 +00002041}
2042
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002043void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2044{
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002045 const std::string& descriptorName = " DetectionPostProcessQueueDescriptor";
2046 ValidateNumInputs(workloadInfo, descriptorName, 2);
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002047
2048 if (workloadInfo.m_OutputTensorInfos.size() != 4)
2049 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002050 throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002051 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
2052 }
2053
2054 if (m_Anchors == nullptr)
2055 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002056 throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002057 }
2058
2059 const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002060 const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
2061 const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
2062
2063 const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00002064 const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002065 const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
2066 const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002067
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002068 ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
2069 ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
2070 ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002071
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002072 const std::vector<DataType> supportedInputTypes =
2073 {
2074 DataType::Float32,
2075 DataType::QuantisedAsymm8,
2076 DataType::QuantisedSymm16
2077 };
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002078
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002079 ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2080 ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2081 ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2082
2083 ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
2084 ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
2085 ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
2086 ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");
2087
2088 // NOTE: Output is always Float32 regardless of input type
2089 ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
2090 ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
2091 ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
2092 ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002093
2094 if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2095 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002096 throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002097 "must be positive and less than or equal to 1.");
2098 }
2099 if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
2100 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002101 throw InvalidArgumentException(descriptorName + ": Number of classes with background "
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002102 "should be equal to number of classes + 1.");
2103 }
2104}
2105
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002106void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2107{
Sadik Armaganeff363d2019-04-05 15:25:46 +01002108 ValidateNumInputs(workloadInfo, "DequantizeQueueDescriptor", 1);
2109 ValidateNumOutputs(workloadInfo, "DequantizeQueueDescriptor", 1);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002110
2111 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
2112 workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
2113 {
2114 throw InvalidArgumentException("Input to dequantize layer must be quantized type.");
2115 }
2116
2117 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
2118 {
2119 throw InvalidArgumentException("Output of dequantize layer must be Float32 type.");
2120 }
2121}
2122
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002123void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2124{
Sadik Armaganeff363d2019-04-05 15:25:46 +01002125 ValidateNumInputs(workloadInfo, "MergeQueueDescriptor", 2);
2126 ValidateNumOutputs(workloadInfo, "MergeQueueDescriptor", 1);
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002127
2128 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
2129 workloadInfo.m_InputTensorInfos[1],
2130 "MergeQueueDescriptor",
2131 "input0",
2132 "input1");
2133
2134 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
2135 workloadInfo.m_OutputTensorInfos[0],
2136 "MergeQueueDescriptor",
2137 "input0",
2138 "output");
2139
2140 const DataType dataType = workloadInfo.m_InputTensorInfos[0].GetDataType();
2141 ValidateTensorDataType(workloadInfo.m_InputTensorInfos[1], dataType, "MergeQueueDescriptor", "input1");
2142 ValidateTensorDataType(workloadInfo.m_OutputTensorInfos[0], dataType, "MergeQueueDescriptor", "output");
2143}
2144
Sadik Armaganeff363d2019-04-05 15:25:46 +01002145void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2146{
2147 ValidateNumInputs(workloadInfo, "SwitchQueueDescriptor", 2);
2148 ValidateNumOutputs(workloadInfo, "SwitchQueueDescriptor", 2);
2149
2150 std::vector<DataType> supportedTypes = {
2151 DataType::Float32,
2152 DataType::QuantisedAsymm8,
2153 DataType::QuantisedSymm16
2154 };
2155
2156 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
2157 supportedTypes,
2158 "SwitchQueueDescriptor");
2159
2160 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
2161 supportedTypes,
2162 "SwitchQueueDescriptor");
2163
2164 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
2165 supportedTypes,
2166 "SwitchQueueDescriptor");
2167
2168 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
2169 workloadInfo.m_OutputTensorInfos[0],
2170 "SwitchQueueDescriptor",
2171 "input0",
2172 "output0");
2173
2174 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
2175 workloadInfo.m_OutputTensorInfos[1],
2176 "SwitchQueueDescriptor",
2177 "input0",
2178 "output1");
2179}
2180
Matteo Martincigh49124022019-01-11 13:25:59 +00002181void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2182{
2183 // This is internally generated so it should not need validation.
2184}
2185
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002186void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2187{
2188 ValidateNumInputs(workloadInfo, "PreluQueueDescriptor", 2);
2189 ValidateNumOutputs(workloadInfo, "PreluQueueDescriptor", 1);
2190
2191 std::vector<DataType> supportedTypes
2192 {
2193 DataType::Float16,
2194 DataType::Float32,
Matteo Martincighab9e5252019-06-13 17:27:46 +01002195 DataType::QuantisedAsymm8,
2196 DataType::QuantisedSymm16
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002197 };
2198
2199 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
2200 supportedTypes,
2201 "PreluQueueDescriptor");
2202
2203 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
2204 supportedTypes,
2205 "PreluQueueDescriptor");
2206
2207 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
2208 supportedTypes,
2209 "PreluQueueDescriptor");
2210
2211 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
2212 { workloadInfo.m_InputTensorInfos[1].GetDataType() },
2213 "PreluQueueDescriptor");
2214
2215 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
2216 { workloadInfo.m_OutputTensorInfos[0].GetDataType() },
2217 "PreluQueueDescriptor");
2218
2219 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
2220 workloadInfo.m_InputTensorInfos[1],
2221 workloadInfo.m_OutputTensorInfos[0],
2222 "PreluQueueDescriptor",
2223 "input",
2224 "alpha");
2225}
2226
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002227void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2228{
2229 const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
2230
2231 ValidateNumInputs(workloadInfo, descriptorName, 1);
2232 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2233
2234 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], descriptorName, 4, "input");
2235 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], descriptorName, 4, "output");
2236
2237 ValidatePointer(m_Weight, descriptorName, "weight");
2238 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), descriptorName, 4, "weight");
2239
2240 ValidateTensorDataType(m_Weight->GetTensorInfo(),
2241 workloadInfo.m_InputTensorInfos[0].GetDataType(),
2242 descriptorName,
2243 "weight");
2244
2245 if (m_Parameters.m_BiasEnabled)
2246 {
2247 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), descriptorName, 1, "bias");
2248
2249 ValidateTensorDataType(m_Bias->GetTensorInfo(),
2250 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
2251 descriptorName, "bias");
2252
2253 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
2254 workloadInfo.m_InputTensorInfos[0],
2255 m_Weight->GetTensorInfo(),
2256 descriptorName);
2257 }
2258
2259 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0],
2260 m_Weight->GetTensorInfo(),
2261 workloadInfo.m_OutputTensorInfos[0],
2262 descriptorName,
2263 "input",
2264 "weights",
2265 "output");
2266}
2267
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00002268} //namespace armnn