//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadInfo.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/TensorUtils.hpp>
#include <armnnUtils/Permute.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/Logging.hpp>

#include <algorithm>
#include <iomanip>
#include <string>
#include <sstream>

#include <fmt/format.h>

using namespace armnnUtils;
23
telsoa014fcda012018-03-09 14:13:49 +000024namespace armnn
25{
26
27//---------------------------------------------------------------
28DataType GetBiasDataType(DataType inputDataType)
29{
30 switch (inputDataType)
31 {
telsoa01c577f2c2018-08-31 09:22:23 +010032 case DataType::Float16:
33 return DataType::Float16;
Narumol Prangnawarat57ef0082020-03-26 09:20:43 +000034 case DataType::BFloat16:
telsoa014fcda012018-03-09 14:13:49 +000035 case DataType::Float32:
36 return DataType::Float32;
Keith Davis0c2eeac2020-02-11 16:51:50 +000037 case DataType::QAsymmS8:
Derek Lambertif90c56d2020-01-10 17:14:08 +000038 case DataType::QAsymmU8:
Keith Davis5204aa82020-01-27 15:24:59 +000039 case DataType::QSymmS8:
Derek Lambertif90c56d2020-01-10 17:14:08 +000040 case DataType::QSymmS16:
Ruomei Yan88d44b82019-05-23 14:29:06 +010041 return DataType::Signed32;
telsoa014fcda012018-03-09 14:13:49 +000042 default:
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010043 ARMNN_ASSERT_MSG(false, "Invalid input data type");
telsoa014fcda012018-03-09 14:13:49 +000044 return DataType::Float32;
45 }
46}
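
// For example, GetBiasDataType(DataType::QAsymmU8) returns DataType::Signed32, which is the bias
// data type the validation helpers below expect for all quantized 8-bit and 16-bit inputs.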

namespace
{

//---------------------------------------------------------------
// The Android NDK does not support the std::to_string function.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream os;
    os << value;
    return os.str();
}

//---------------------------------------------------------------
void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
{
    if (!ptr)
    {
        throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
                                       paramName + " parameter must be set.");
    }
}

//---------------------------------------------------------------
void ValidateTensorShapesMatch(const TensorInfo& first,
                               const TensorInfo& second,
                               std::string const& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (first.GetShape() != second.GetShape())
    {
        throw InvalidArgumentException(descName + ": "
                                       + firstName + " & " + secondName + " must have identical shapes");
    }
}

//---------------------------------------------------------------
void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
{
    if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
    {
        throw InvalidArgumentException(descName +
                                       ": Requires exactly " + to_string(expectedSize) + " input(s). " +
                                       to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
    }
}

//---------------------------------------------------------------
void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
{
    if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
    {
        throw InvalidArgumentException(descName +
                                       ": Requires exactly " + to_string(expectedSize) + " output(s). " +
                                       to_string(workloadInfo.m_OutputTensorInfos.size()) + " have been provided.");
    }
}
106
107//---------------------------------------------------------------
telsoa014fcda012018-03-09 14:13:49 +0000108
109//---------------------------------------------------------------
Aron Virginas-Tar84062b72019-07-19 11:37:10 +0100110void ValidateTensorNumElements(const TensorInfo& tensor,
111 std::string const& descName,
112 unsigned int numElements,
113 std::string const& tensorName)
Jan Eilers38e05bd2019-06-26 13:10:09 +0100114{
115 if (tensor.GetNumElements() != numElements)
116 {
117 throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
James Conroyceda7852019-08-22 11:41:07 +0100118 to_string(tensor.GetNumElements()) + " elements for " +
Jan Eilers38e05bd2019-06-26 13:10:09 +0100119 tensorName + " tensor.");
120 }
121}
122
123//---------------------------------------------------------------
telsoa014fcda012018-03-09 14:13:49 +0000124void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
125 const std::string& descName, std::string const& tensorName)
126{
127 if (tensor.GetDataType() != dataType)
128 {
129 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
130 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
131 }
132}
133
Derek Lambertid466a542020-01-22 15:37:29 +0000134void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
135{
Jan Eilers1b2654f2021-09-24 15:45:46 +0100136 if (tensor.GetDataType() != DataType::QSymmS8)
Derek Lambertid466a542020-01-22 15:37:29 +0000137 {
138 throw InvalidArgumentException(descName +
139 ": Expected data type which supports per-axis quantization scheme but got " +
140 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
141 }
Derek Lambertid466a542020-01-22 15:37:29 +0000142}
143
telsoa014fcda012018-03-09 14:13:49 +0000144//---------------------------------------------------------------
Matteo Martincighe851b3d2019-05-28 14:31:20 +0100145void ValidateTensorQuantizationSpace(const TensorInfo& first,
146 const TensorInfo& second,
147 const std::string& descName,
148 std::string const& firstName,
149 std::string const& secondName)
150{
151 if (!first.IsQuantized() ||
152 !second.IsQuantized())
153 {
154 // Not a quantized type, ignore the validation
155 return;
156 }
157
158 DataType firstDataType = first.GetDataType();
159 DataType secondDataType = second.GetDataType();
160
161 if (firstDataType != secondDataType)
162 {
163 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
164 " must be of the same quantized type, " +
165 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
166 secondName + " is " + GetDataTypeName(secondDataType));
167 }
168
169 if (!first.IsTypeSpaceMatch(second))
170 {
171 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
172 " must have the same quantization space, " +
173 firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
174 " and scale " + to_string(first.GetQuantizationScale()) + ", " +
175 secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
176 " and scale " + to_string(second.GetQuantizationScale()));
177 }
178}
179
180//---------------------------------------------------------------
Aron Virginas-Tar84062b72019-07-19 11:37:10 +0100181void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
182 const TensorInfo& inputTensorInfo,
183 const TensorInfo& weightsTensorInfo,
184 const std::string& descName)
telsoa014fcda012018-03-09 14:13:49 +0000185{
Aron Virginas-Tard9053072019-10-30 16:03:19 +0000186 // Helper lambda function to validate a single bias quantization scale value
187 auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void
188 {
mathad01df9a3222021-04-28 11:42:57 +0100189 constexpr float tolerance = 0.0001f;
Aron Virginas-Tard9053072019-10-30 16:03:19 +0000190 if (std::abs(biasScale - expectedScale) > tolerance)
191 {
192 // Print the float values with extra precision to see very small differences
mathad01df9a3222021-04-28 11:42:57 +0100193 ARMNN_LOG(warning) << std::setprecision(6) << descName << ": Expected " << expectedScale <<
194 " for bias quantization scale (product of input and weight scales), but got " <<
195 biasScale << ". Using scale provided.";
Aron Virginas-Tard9053072019-10-30 16:03:19 +0000196 }
197 };
198
telsoa014fcda012018-03-09 14:13:49 +0000199 if (biasTensor.GetQuantizationOffset() != 0)
200 {
201 throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
202 to_string(biasTensor.GetQuantizationOffset()));
203 }
Aron Virginas-Tard9053072019-10-30 16:03:19 +0000204
James Conroy8502ade2020-11-12 19:26:29 +0000205 if (biasTensor.HasMultipleQuantizationScales() || weightsTensorInfo.HasMultipleQuantizationScales())
telsoa014fcda012018-03-09 14:13:49 +0000206 {
Aron Virginas-Tard9053072019-10-30 16:03:19 +0000207 // Validate per-axis quantization scales
208 const std::vector<float>& weightScales = weightsTensorInfo.GetQuantizationScales();
209 const std::vector<float>& biasScales = biasTensor.GetQuantizationScales();
210
211 if (weightScales.size() != biasScales.size())
212 {
213 std::stringstream msg;
James Conroy8502ade2020-11-12 19:26:29 +0000214 msg << descName << ": Expected matching number of per-axis quantization scales for weights and bias, "
215 << "but got different values. This is currently unsupported: weights=" << weightScales.size()
216 << ", biases=" << biasScales.size();
Aron Virginas-Tard9053072019-10-30 16:03:19 +0000217 throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
218 }
219
220 for (size_t i = 0ul; i < biasScales.size(); ++i)
221 {
222 const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i];
223 VerifyBiasQuantizationScale(biasScales[i], expectedScale);
224 }
225 }
226 else
227 {
228 // Validate per-tensor quantization scale
229 const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
230 VerifyBiasQuantizationScale(biasTensor.GetQuantizationScale(), expectedScale);
telsoa014fcda012018-03-09 14:13:49 +0000231 }
232}
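
// Example: for an input quantized with scale 0.5f and a weight tensor with scale 0.02f, the expected
// bias scale is 0.5f * 0.02f = 0.01f. A mismatching bias scale only logs a warning (the provided scale
// is used), whereas a non-zero bias quantization offset throws.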

//---------------------------------------------------------------
void ValidateTensors(const std::vector<ITensorHandle*>& vec,
                     unsigned int numExpected,
                     const std::string& descName,
                     const std::string& varName)
{
    if (vec.empty() && numExpected > 0)
    {
        throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
    }

    for (unsigned int i = 0; i < numExpected; ++i)
    {
        if (!vec[i])
        {
            throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
        }
    }
}

//---------------------------------------------------------------
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName + ": Tensors "
                                       + firstName + " & " + secondName
                                       + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(armnn::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
                                       + firstName + " & " + secondName
                                       + " does not match the output shape");
    }
}
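
// Example: shapes [1, 2, 3, 1] and [1, 1, 3, 4] broadcast to [1, 2, 3, 4]; shapes [1, 2] and [1, 3]
// are incompatible because the mismatching dimension is not 1 in either tensor.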

//---------------------------------------------------------------
void ValidateDataTypes(const TensorInfo& info,
                       const std::vector<armnn::DataType>& supportedTypes,
                       std::string const& descName)
{
    auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
    if (iterator == supportedTypes.end())
    {
        throw InvalidArgumentException(descName + ": Tensor type is not supported.");
    }
}

//---------------------------------------------------------------
void ValidateTensorDataTypesMatch(const TensorInfo& first,
                                  const TensorInfo& second,
                                  std::string const& descName,
                                  std::string const& firstName,
                                  std::string const& secondName)
{
    if (first.GetDataType() != second.GetDataType())
    {
        throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
                                       " must have identical data types.");
    }
}

//---------------------------------------------------------------
void ValidateTensorNumElementsMatch(const TensorInfo& first,
                                    const TensorInfo& second,
                                    std::string const& descName,
                                    std::string const& firstName,
                                    std::string const& secondName)
{
    if (first.GetNumElements() != second.GetNumElements())
    {
        throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
                                       " must have the same number of elements.");
    }
}

void ValidateWeightDataType(const TensorInfo& inputInfo,
                            const TensorInfo& weightInfo,
                            const std::string& descName)
{
    const DataType inputType = inputInfo.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        const std::vector<DataType> validTypes =
        {
            DataType::QAsymmS8,
            DataType::QAsymmU8,
            DataType::QSymmS8
        };

        ValidateDataTypes(weightInfo, validTypes, descName);
    }
    else
    {
        ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
    }
}

void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
                                          const std::string& descName,
                                          const std::string& tensorName)
{
    const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
    if (!quantizationDim.has_value())
    {
        throw InvalidArgumentException(fmt::format("{0}: Quantization dimension for per-axis quantization "
                                                   "not set on tensor {1}.", descName, tensorName));
    }
}

void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
                                       const std::string& descName,
                                       const std::string& tensorName)
{
    int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
    if (quantizationOffset != 0)
    {
        throw InvalidArgumentException(fmt::format(
            "{0}: Quantization offset for per-axis quantization expected to be 0 on tensor {1}, but got: {2}",
            descName, tensorName, quantizationOffset));
    }
}

void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
                                 const TensorInfo& outputInfo,
                                 const TensorInfo& weightInfo,
                                 const Optional<TensorInfo>& optionalBiasInfo,
                                 const std::string& descName)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        const DataType inputDataType = inputInfo.GetDataType();
        const DataType outputDataType = outputInfo.GetDataType();

        const bool canHavePerAxisQuantization = IsQuantized8BitType(inputDataType) && inputDataType == outputDataType;

        if (!canHavePerAxisQuantization)
        {
            throw InvalidArgumentException(fmt::format(
                "{0}: Per-axis quantization parameters set on tensor {1}, but data type does not support "
                "per-axis quantization.", descName, "weight"));
        }

        ValidPerAxisQuantizedDataType(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");

        if (optionalBiasInfo.has_value())
        {
            const TensorInfo& biasInfo = optionalBiasInfo.value();
            if (!biasInfo.HasPerAxisQuantization())
            {
                throw InvalidArgumentException(fmt::format(
                    "{}: Per-axis quantization parameters not set on bias tensor, "
                    "despite being set on weight tensor.", descName));
            }

            ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
            ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
            ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
        }
    }
}
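
// Example: a per-axis quantized (QSymmS8) weight tensor carries one scale per output channel, a set
// quantization dimension and a zero offset; the corresponding Signed32 bias is then expected to be
// per-axis quantized as well, with biasScale[i] == inputScale * weightScale[i]
// (see ValidateBiasTensorQuantization above).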

} // anonymous namespace

//---------------------------------------------------------------
void QueueDescriptor::ValidateTensorNumDimensions(const TensorInfo& tensor,
                                                  std::string const& descName,
                                                  unsigned int numDimensions,
                                                  std::string const& tensorName) const
{
    // If we're allowing expanded dimensions then numDimensions becomes the minimum number of dimensions we can allow.
    // Throw an exception if the tensor has fewer than numDimensions or if the squeezed dimensions exceed
    // numDimensions.
    if (m_AllowExpandedDims)
    {
        unsigned int squeezedDims = 0;

        for (unsigned int i = 0; i < tensor.GetNumDimensions(); ++i)
        {
            if (tensor.GetShape()[i] != 1)
            {
                ++squeezedDims;
            }
        }
        if (tensor.GetNumDimensions() < numDimensions || squeezedDims > numDimensions)
        {
            throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " or less but got " +
                                           to_string(tensor.GetNumDimensions()) + " dimensions for " +
                                           tensorName + " tensor.");
        }
    }
    else
    {
        if (tensor.GetNumDimensions() != numDimensions)
        {
            throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
                                           to_string(tensor.GetNumDimensions()) + " dimensions for " +
                                           tensorName + " tensor.");
        }
    }
}
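
// Example: with m_AllowExpandedDims set, a [1, 1, 2, 3] tensor passes a check for numDimensions == 2,
// since only 2 dimensions remain after squeezing the size-1 dimensions; without the flag, exactly 2
// dimensions would be required.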

//---------------------------------------------------------------
void QueueDescriptor::ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
                                                  unsigned int numDimension,
                                                  unsigned int numElements,
                                                  std::string const& tensorName) const
{
    const std::string functionName{"ValidateTensorNumDimNumElem"};
    ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
    ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
}

//---------------------------------------------------------------
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
                                            unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}

//---------------------------------------------------------------
void MapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MapQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 0);

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(
                fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
        }
    }
}

//---------------------------------------------------------------
void UnmapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"UnmapQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 0);

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(
                fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
        }
    }
}

//---------------------------------------------------------------
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MemCopyQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(fmt::format(
            "{0}: Number of inputs ({1}) does not match the number of outputs ({2}).",
            descriptorName, m_Inputs.size(), m_Outputs.size()));
    }

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(fmt::format(
                "{0}: Invalid NULL input {1}.", descriptorName, i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(fmt::format("{0}: Invalid NULL output {1}", descriptorName, i));
        }
    }
}

//---------------------------------------------------------------
void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor", 1);

    if (workloadInfo.m_InputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of input infos ({}) is not 1.",
                                                   workloadInfo.m_InputTensorInfos.size()));
    }

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(fmt::format(
            "Number of input infos ({0}) does not match the number of output infos ({1})",
            workloadInfo.m_InputTensorInfos.size(), workloadInfo.m_OutputTensorInfos.size()));
    }

    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(fmt::format(
                "Number of elements for tensor input and output {} does not match", i));
        }
    }

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(fmt::format(
            "Number of inputs ({0}) does not match the number of outputs ({1})",
            m_Inputs.size(), m_Outputs.size()));
    }

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(fmt::format("Invalid null input {}", i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(fmt::format("Invalid null output {}", i));
        }
    }
}

//---------------------------------------------------------------
void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
    }

    if (m_Outputs.size() != 0)
    {
        throw InvalidArgumentException(fmt::format("Number of outputs ({}) is not 0.", m_Outputs.size()));
    }

    if (!m_Inputs[0])
    {
        throw InvalidArgumentException(fmt::format("Invalid null input 0"));
    }
}

//---------------------------------------------------------------
void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ActivationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}

void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ArgMinMaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    if (outputTensorInfo.GetDataType() != DataType::Signed32 &&
        outputTensorInfo.GetDataType() != DataType::Signed64)
    {
        throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32 or Int64.");
    }

    std::vector<DataType> supportedInputTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32,
        DataType::Signed64
    };

    ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);

    auto inputShape = inputTensorInfo.GetShape();
    auto outputShape = outputTensorInfo.GetShape();

    auto inputNumDimensions = inputShape.GetNumDimensions();
    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);

    const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};

    // 1D input shape results in scalar output shape
    if (inputShape.GetNumDimensions() == 1)
    {
        if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
        {
            throw InvalidArgumentException(descriptorName + outputShapeError);
        }
    }
    else
    {
        for (unsigned int i = 0; i < unsignedAxis; ++i)
        {
            if (outputShape[i] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }

        for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
        {
            if (outputShape[i - 1] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }
    }
}

void CastQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"CastQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16,
        DataType::Signed32,
        DataType::Signed64
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}

void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}

void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SplitterQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::Boolean,
        DataType::Signed32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
        ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

        const std::string outputName = "output_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
    }

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            descriptorName + ": Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    // The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for (unsigned int w = 0; w < m_ViewOrigins.size(); ++w)
    {
        // Checks that the dimensionality of the input is the same as that of the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin has to "
                "have the same dimensionality as the input tensor. "
                "Window origin (index: " +
                to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                " dimensions, the input "
                "tensor has " +
                to_string(inputDims) + " dimensions.");
        }
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                    "be smaller than or equal to the size of the input in that coordinate.");
            }
        }
    }
}
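
// Example: splitting a [4, 2] input into two [2, 2] outputs uses the view origins {0, 0} and {2, 0};
// each origin has the input's dimensionality and each window lies entirely within the input.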

void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConcatQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
    }

    if (m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
    }

    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            descriptorName + ": Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    // The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for (unsigned int w = 0; w < m_ViewOrigins.size(); ++w)
    {
        // Checks that the dimensionality of the output is the same as that of the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin has to "
                "have the same dimensionality as the output tensor. "
                "Window origin (index: " +
                to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                " dimensions, the output "
                "tensor has " +
                to_string(outputDims) + " dimensions.");
        }
        // Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                    "be smaller than or equal to the size of the output in that coordinate.");
            }
        }
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::Boolean,
        DataType::Signed32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

        const std::string inputName = "input_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
    }
}

void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"StackQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
    {
        throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
    }

    // All inputs must have the same shape, which is defined in parameters
    const TensorShape& inputShape = m_Parameters.m_InputShape;
    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
        {
            throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
        }
    }

    if (inputShape.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
    }

    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
    // since the output tensor has an additional dimension.
    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
                                       "than the number of input dimensions.");
    }

    // Output shape must be as inferred from the input shape
    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
    {
        if (outputShape[i] != inputShape[i])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                       "match shape inferred from input tensor.");
    }

    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
    {
        if (outputShape[i] != inputShape[i - 1])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    if (outputShape.GetNumDimensions() > 5)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::Boolean,
        DataType::Signed32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName,
                                     "input_0",
                                     "input_" + std::to_string(i));
    }

    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                 workloadInfo.m_OutputTensorInfos[0],
                                 descriptorName,
                                 "input_0",
                                 "output");
}
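
// Example: stacking two [3, 4] inputs along m_Axis == 1 requires an output of shape [3, 2, 4]:
// dimensions before the axis are copied from the input, the axis dimension equals the number of
// inputs, and the remaining input dimensions follow.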

void FillQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"FillQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 1, "input");

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::Signed32
    };

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
}

void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"FullyConnectedQueueDescriptor"};

    uint32_t numInputs = 2;
    if (m_Parameters.m_BiasEnabled)
    {
        numInputs = 3;
    }

    ValidateNumInputs(workloadInfo, descriptorName, numInputs);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
    }

    TensorInfo weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        TensorInfo biasTensorInfo = workloadInfo.m_InputTensorInfos[2];
        // Validates type and quantization values.
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // For FullyConnected, a BFloat16 input with a Float32 output is allowed as an optimization.
    if (inputTensorInfo.GetDataType() == DataType::BFloat16)
    {
        if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16 or Float32 "
                                           "for BFloat16 input.");
        }
    }
    else
    {
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    }
}
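
// Example: with m_BiasEnabled set, m_InputTensorInfos holds { input, weights, bias }; for a QAsymmU8
// input the bias must be a 1-D Signed32 tensor whose quantization satisfies
// ValidateBiasTensorQuantization above.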

void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"NormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}

void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"AdditionQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}

void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MultiplicationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}

void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"BatchNormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Matteo Martincigh3122bd52019-06-03 16:54:25 +01001246
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001247 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001248 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Matteo Martincigh3122bd52019-06-03 16:54:25 +01001249
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001250 ValidatePointer(m_Mean, descriptorName, "mean");
1251 ValidatePointer(m_Variance, descriptorName, "variance");
1252 ValidatePointer(m_Beta, descriptorName, "beta");
1253 ValidatePointer(m_Gamma, descriptorName, "gamma");
telsoa014fcda012018-03-09 14:13:49 +00001254
Matteo Martincigh3122bd52019-06-03 16:54:25 +01001255 const TensorInfo& mean = m_Mean->GetTensorInfo();
1256 const TensorInfo& variance = m_Variance->GetTensorInfo();
1257 const TensorInfo& beta = m_Beta->GetTensorInfo();
1258 const TensorInfo& gamma = m_Gamma->GetTensorInfo();
telsoa014fcda012018-03-09 14:13:49 +00001259
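    // Mean, variance, beta and gamma are per-channel parameters, so each must be a 1-D tensor and
    // all four must share the same shape.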
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001260 ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
1261 ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
1262 ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
1263 ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");
telsoa014fcda012018-03-09 14:13:49 +00001264
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001265 ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
1266 ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
1267 ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
telsoa014fcda012018-03-09 14:13:49 +00001268}
1269
1270void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1271{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001272 const std::string descriptorName{"Convolution2dQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001273
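    // The weights are supplied as a second input tensor and, when bias is enabled, the bias as a
    // third: inputs are [input, weights] or [input, weights, bias].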
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001274 uint32_t numInputs = 2;
1275 if (m_Parameters.m_BiasEnabled)
1276 {
1277 numInputs = 3;
1278 }
1279
1280 ValidateNumInputs(workloadInfo, descriptorName, numInputs);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001281 ValidateNumOutputs(workloadInfo, descriptorName, 1);
telsoa014fcda012018-03-09 14:13:49 +00001282
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001283 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1284 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
telsoa014fcda012018-03-09 14:13:49 +00001285
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001286 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1287 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
telsoa014fcda012018-03-09 14:13:49 +00001288
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001289 const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
telsoa014fcda012018-03-09 14:13:49 +00001290
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001291 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
telsoa014fcda012018-03-09 14:13:49 +00001292
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00001293 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
telsoa014fcda012018-03-09 14:13:49 +00001294
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00001295 Optional<TensorInfo> optionalBiasTensorInfo;
telsoa014fcda012018-03-09 14:13:49 +00001296 if (m_Parameters.m_BiasEnabled)
1297 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +01001298 optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00001299 const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001300
1301 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1302 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
telsoa014fcda012018-03-09 14:13:49 +00001303 }
1304
Teresa Charlinf2ed1b82020-11-24 15:11:54 +00001305 if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1306 {
1307 throw InvalidArgumentException(
1308 fmt::format("{}: strideX (provided {}) and strideY (provided {}) "
1309 "cannot be either negative or 0.",
1310 descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1311 }
1312
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00001313 ValidatePerAxisQuantization(inputTensorInfo,
1314 outputTensorInfo,
1315 weightTensorInfo,
1316 optionalBiasTensorInfo,
1317 descriptorName);
1318
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001319 std::vector<DataType> supportedTypes =
1320 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001321 DataType::BFloat16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001322 DataType::Float16,
Ruomei Yan88d44b82019-05-23 14:29:06 +01001323 DataType::Float32,
Keith Davis0c2eeac2020-02-11 16:51:50 +00001324 DataType::QAsymmS8,
Francis Murtaghddb1d062020-03-10 13:51:45 +00001325 DataType::QAsymmU8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001326 DataType::QSymmS16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001327 DataType::QSymmS8
Ruomei Yan88d44b82019-05-23 14:29:06 +01001328 };
1329
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001330 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Narumol Prangnawarat57ef0082020-03-26 09:20:43 +00001331
1332    // For Convolution2d, we allow BFloat16 input with Float32 output as an optimization.
1333 if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1334 {
1335 if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1336 {
1337            throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16 or Float32 "
1338 "for BFloat16 input.");
1339 }
1340 }
1341 else
1342 {
1343 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1344 }
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001345}
Ruomei Yan88d44b82019-05-23 14:29:06 +01001346
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001347void Convolution3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1348{
1349 const std::string descriptorName{"Convolution3dQueueDescriptor"};
1350
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001351 uint32_t numInputs = 2;
1352 if (m_Parameters.m_BiasEnabled)
1353 {
1354 numInputs = 3;
1355 }
1356 ValidateNumInputs(workloadInfo, descriptorName, numInputs);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001357 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1358
1359 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1360 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1361
1362 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 5, "input");
1363 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 5, "output");
1364
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001365 const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001366 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 5, "weight");
1367
1368 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1369
1370 Optional<TensorInfo> optionalBiasTensorInfo;
1371 if (m_Parameters.m_BiasEnabled)
1372 {
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001373 optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001374 const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1375
1376 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1377 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1378 }
1379
1380 if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 || m_Parameters.m_StrideZ <= 0 )
1381 {
1382 throw InvalidArgumentException(
1383 fmt::format("{}: strideX (provided {}), strideY (provided {}) or strideZ (provided {})"
1384 "cannot be either negative or 0.",
1385 descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY, m_Parameters.m_StrideZ));
1386 }
1387
1388 ValidatePerAxisQuantization(inputTensorInfo,
1389 outputTensorInfo,
1390 weightTensorInfo,
1391 optionalBiasTensorInfo,
1392 descriptorName);
1393
1394 std::vector<DataType> supportedTypes =
1395 {
1396 DataType::BFloat16,
1397 DataType::Float16,
1398 DataType::Float32,
1399 DataType::QAsymmS8,
1400 DataType::QAsymmU8,
1401 DataType::QSymmS16,
1402 DataType::QSymmS8
1403 };
1404
1405 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1406 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1407}
1408
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001409void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1410{
1411 const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
1412
Cathal Corbett06902652022-04-14 17:55:11 +01001413 uint32_t numInputs = 2;
1414 if (m_Parameters.m_BiasEnabled)
1415 {
1416 numInputs = 3;
1417 }
1418
1419 ValidateNumInputs(workloadInfo, descriptorName, numInputs);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001420 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1421
1422 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1423 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1424
1425 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1426 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1427
Cathal Corbett06902652022-04-14 17:55:11 +01001428 const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001429 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1430
1431 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1432 {
1433 throw InvalidArgumentException(
James Ward47fce872020-09-10 11:57:28 +01001434 fmt::format("{}: dilationX (provided {}) and dilationY (provided {}) "
1435 "cannot be smaller than 1.",
1436                        descriptorName, m_Parameters.m_DilationX, m_Parameters.m_DilationY));
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001437 }
1438
Teresa Charlinf2ed1b82020-11-24 15:11:54 +00001439 if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1440 {
1441 throw InvalidArgumentException(
1442 fmt::format("{}: strideX (provided {}) and strideY (provided {}) "
1443 "cannot be either negative or 0.",
1444 descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1445 }
1446
Jan Eilers53ef7952021-06-02 12:01:25 +01001447 if (weightTensorInfo.GetShape()[0] != 1)
1448 {
1449 throw InvalidArgumentException(fmt::format(
1450 "{0}: The weight format in armnn is expected to be [1, H, W, Cout]."
1451 "But first dimension is not equal to 1. Provided weight shape: [{1}, {2}, {3}, {4}]",
1452 descriptorName,
1453 weightTensorInfo.GetShape()[0],
1454 weightTensorInfo.GetShape()[1],
1455 weightTensorInfo.GetShape()[2],
1456 weightTensorInfo.GetShape()[3]));
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001457 }
1458
Cathal Corbett4b19d222022-05-11 20:12:17 +01001459 const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1460 const unsigned int numWeightOutputChannelsRefFormat = weightTensorInfo.GetShape()[3];
1461 const unsigned int numWeightOutputChannelsAclFormat = weightTensorInfo.GetShape()[1];
1462 const unsigned int numOutputChannels = outputTensorInfo.GetShape()[channelIndex];
1463
1464 // Weights format has two valid options: [1, H, W, Cout] (CpuRef) or [1, Cout, H, W] (CpuAcc/GpuAcc).
1465 bool validRefFormat = (numWeightOutputChannelsRefFormat == numOutputChannels);
1466 bool validAclFormat = (numWeightOutputChannelsAclFormat == numOutputChannels);
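    // For example, a layer producing Cout = 32 output channels is expected to have weights of shape
    // [1, H, W, 32] on CpuRef or [1, 32, H, W] on CpuAcc/GpuAcc; either layout passes this check.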
1467
1468 if (!(validRefFormat || validAclFormat))
1469 {
1470 throw InvalidArgumentException(fmt::format(
1471 "{0}: The weight format in armnn is expected to be [1, H, W, Cout] (CpuRef) or [1, Cout, H, W] "
1472 "(CpuAcc/GpuAcc). But neither the 4th (CpuRef) or 2nd (CpuAcc/GpuAcc) dimension is equal to Cout."
1473 "Cout = {1} Provided weight shape: [{2}, {3}, {4}, {5}]",
1474 descriptorName,
1475 numOutputChannels,
1476 weightTensorInfo.GetShape()[0],
1477 weightTensorInfo.GetShape()[1],
1478 weightTensorInfo.GetShape()[2],
1479 weightTensorInfo.GetShape()[3]));
1480 }
1481
Teresa Charlind8df0262019-11-11 12:28:15 +00001482 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001483
Teresa Charlind8df0262019-11-11 12:28:15 +00001484 Optional<TensorInfo> optionalBiasTensorInfo;
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001485 if (m_Parameters.m_BiasEnabled)
1486 {
Cathal Corbett06902652022-04-14 17:55:11 +01001487 optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
Teresa Charlind8df0262019-11-11 12:28:15 +00001488 const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001489
1490 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1491 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1492 }
Teresa Charlind8df0262019-11-11 12:28:15 +00001493 ValidatePerAxisQuantization(inputTensorInfo,
1494 outputTensorInfo,
1495 weightTensorInfo,
1496 optionalBiasTensorInfo,
1497 descriptorName);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001498
1499 std::vector<DataType> supportedTypes =
1500 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001501 DataType::BFloat16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001502 DataType::Float16,
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001503 DataType::Float32,
Keith Davis0c2eeac2020-02-11 16:51:50 +00001504 DataType::QAsymmS8,
Sadik Armagan303980c2020-04-17 12:45:14 +01001505 DataType::QAsymmU8,
1506 DataType::QSymmS16
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001507 };
1508
1509 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1510 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001511}
1512
1513void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1514{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001515 const std::string descriptorName{"PermuteQueueDescriptor"};
1516
1517 ValidateNumInputs(workloadInfo, descriptorName, 1);
1518 ValidateNumOutputs(workloadInfo, descriptorName, 1);
telsoa014fcda012018-03-09 14:13:49 +00001519
1520 const PermutationVector& mapping = m_Parameters.m_DimMappings;
1521
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001522 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1523 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
telsoa014fcda012018-03-09 14:13:49 +00001524
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001525 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1526 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
telsoa014fcda012018-03-09 14:13:49 +00001527
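    // A permutation maps source dimension i to destination dimension mapping[i], so the
    // corresponding extents of the input and output shapes must agree.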
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001528 for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
telsoa014fcda012018-03-09 14:13:49 +00001529 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001530 if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
telsoa014fcda012018-03-09 14:13:49 +00001531 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001532 throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1533 " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1534 "must match dst dimension " + to_string(mapping[i]) +
1535 " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
telsoa014fcda012018-03-09 14:13:49 +00001536 }
1537 }
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001538
1539 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001540}
1541
1542void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1543{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001544 const std::string descriptorName{"Pooling2dQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001545
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001546 ValidateNumInputs(workloadInfo, descriptorName, 1);
1547 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1548
1549 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1550 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1551
1552 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1553 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
Teresa Charlina3b20472019-06-06 11:12:32 +01001554
1555 std::vector<DataType> supportedTypes =
1556 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001557 DataType::BFloat16,
Teresa Charlina3b20472019-06-06 11:12:32 +01001558 DataType::Float32,
1559 DataType::Float16,
Keith Davis0c2eeac2020-02-11 16:51:50 +00001560 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001561 DataType::QAsymmU8,
1562 DataType::QSymmS16
Teresa Charlina3b20472019-06-06 11:12:32 +01001563 };
1564
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001565 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1566 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001567}
1568
Tamás Nyíri7b885b32021-10-26 14:47:57 +01001569void Pooling3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1570{
1571 const std::string descriptorName{"Pooling3dQueueDescriptor"};
1572
1573 ValidateNumInputs(workloadInfo, descriptorName, 1);
1574 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1575
1576 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1577 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1578
1579 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 5, "input");
1580 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 5, "output");
1581
1582 std::vector<DataType> supportedTypes =
1583 {
1584 DataType::BFloat16,
1585 DataType::Float32,
1586 DataType::Float16,
1587 DataType::QAsymmS8,
1588 DataType::QAsymmU8,
1589 DataType::QSymmS16
1590 };
1591
1592 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1593 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1594}
1595
Teresa Charlin970f43b2019-07-01 13:51:07 +01001596void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1597{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001598 const std::string descriptorName{"ResizeQueueDescriptor"};
Teresa Charlin970f43b2019-07-01 13:51:07 +01001599
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001600 ValidateNumInputs(workloadInfo, descriptorName, 1);
1601 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1602
1603 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1604 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1605
1606 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1607 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
Teresa Charlin970f43b2019-07-01 13:51:07 +01001608
1609 std::vector<DataType> supportedTypes =
1610 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001611 DataType::BFloat16,
Teresa Charlin970f43b2019-07-01 13:51:07 +01001612 DataType::Float16,
1613 DataType::Float32,
Keith Davis67e6c542020-02-19 10:08:33 +00001614 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001615 DataType::QAsymmU8,
1616 DataType::QSymmS16
Teresa Charlin970f43b2019-07-01 13:51:07 +01001617 };
1618
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001619 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1620 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Teresa Charlin970f43b2019-07-01 13:51:07 +01001621
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001622 // Resize only changes width and height: batch and channel count must match.
1623 const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1624 const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
Teresa Charlin970f43b2019-07-01 13:51:07 +01001625 if (inputBatchSize != outputBatchSize)
1626 {
1627 throw InvalidArgumentException(
James Ward47fce872020-09-10 11:57:28 +01001628 fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1629 descriptorName, inputBatchSize, outputBatchSize));
Teresa Charlin970f43b2019-07-01 13:51:07 +01001630 }
1631
1632 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001633 const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1634 const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
Teresa Charlin970f43b2019-07-01 13:51:07 +01001635 if (inputChannelCount != outputChannelCount)
1636 {
1637 throw InvalidArgumentException(
James Ward47fce872020-09-10 11:57:28 +01001638 fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1639 descriptorName, inputChannelCount, outputChannelCount));
telsoa014fcda012018-03-09 14:13:49 +00001640 }
1641}
1642
1643void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1644{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001645 const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001646
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001647 ValidateNumInputs(workloadInfo, descriptorName, 1);
1648 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1649
1650 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1651 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1652
1653 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
1654 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1655
1656 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1657
telsoa014fcda012018-03-09 14:13:49 +00001658 if (m_Parameters.m_Min > m_Parameters.m_Max)
1659 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001660 throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
telsoa014fcda012018-03-09 14:13:49 +00001661 }
telsoa014fcda012018-03-09 14:13:49 +00001662}
1663
Kevin Mayce5045a2019-10-02 14:07:47 +01001664void InstanceNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1665{
1666 const std::string descriptorName{"InstanceNormalizationQueueDescriptor"};
1667
1668 ValidateNumInputs(workloadInfo, descriptorName, 1);
1669 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1670
1671 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1672 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1673
1674 if (inputTensorInfo.GetNumDimensions() > 4)
1675 {
1676 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1677 }
1678
1679 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1680
1681 // Check the supported data types
1682 std::vector<DataType> supportedTypes =
1683 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001684 DataType::BFloat16,
Kevin Mayce5045a2019-10-02 14:07:47 +01001685 DataType::Float32,
1686 DataType::Float16
1687 };
1688
1689 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Kevin Mayce5045a2019-10-02 14:07:47 +01001690 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Kevin Mayce5045a2019-10-02 14:07:47 +01001691}
1692
telsoa014fcda012018-03-09 14:13:49 +00001693void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1694{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001695 const std::string descriptorName{"L2NormalizationQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001696
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001697 ValidateNumInputs(workloadInfo, descriptorName, 1);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001698 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1699
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001700 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1701 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1702
Matthew Jackson82b15ed2019-07-25 16:14:30 +01001703 if (inputTensorInfo.GetNumDimensions() > 4)
1704 {
1705 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1706 }
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001707
1708 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001709
1710 // Check the supported data types
1711 std::vector<DataType> supportedTypes =
1712 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001713 DataType::BFloat16,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001714 DataType::Float32,
1715 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001716 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001717 DataType::QAsymmU8,
1718 DataType::QSymmS16
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001719 };
1720
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001721 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Aron Virginas-Tarf982dea2019-10-11 14:07:53 +01001722 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1723}
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001724
Aron Virginas-Tarf982dea2019-10-11 14:07:53 +01001725void LogSoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1726{
1727 const std::string descriptorName{"LogSoftmaxQueueDescriptor"};
1728
1729 ValidateNumInputs(workloadInfo, descriptorName, 1);
1730 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1731
1732 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1733 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1734
1735 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1736
1737 std::vector<DataType> supportedTypes =
1738 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001739 DataType::BFloat16,
Aron Virginas-Tarf982dea2019-10-11 14:07:53 +01001740 DataType::Float32,
1741 DataType::Float16,
1742 };
1743
1744 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001745 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001746}
1747
1748void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1749{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001750 const std::string descriptorName{"ConstantQueueDescriptor"};
1751
1752 ValidateNumInputs(workloadInfo, descriptorName, 0);
1753 ValidateNumOutputs(workloadInfo, descriptorName, 1);
telsoa014fcda012018-03-09 14:13:49 +00001754
1755 if (!m_LayerOutput)
1756 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001757 throw InvalidArgumentException(descriptorName + ": No const input specified.");
telsoa014fcda012018-03-09 14:13:49 +00001758 }
1759
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001760 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1761 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
Nina Drozd58ef2c62019-05-16 12:09:18 +01001762
1763 // Check the supported data types
1764 std::vector<DataType> supportedTypes =
Nina Drozd2f2778f2019-05-27 10:37:05 +01001765 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001766 DataType::BFloat16,
Nina Drozd2f2778f2019-05-27 10:37:05 +01001767 DataType::Float32,
1768 DataType::Float16,
Keith Davis67e6c542020-02-19 10:08:33 +00001769 DataType::QAsymmS8,
Sadik Armagan303980c2020-04-17 12:45:14 +01001770 DataType::QAsymmU8,
Keith Davis5204aa82020-01-27 15:24:59 +00001771 DataType::QSymmS8,
Sadik Armagan303980c2020-04-17 12:45:14 +01001772 DataType::QSymmS16,
1773 DataType::Signed32
Nina Drozd2f2778f2019-05-27 10:37:05 +01001774 };
Nina Drozd58ef2c62019-05-16 12:09:18 +01001775
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001776 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
telsoa014fcda012018-03-09 14:13:49 +00001777}
1778
1779void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1780{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001781 const std::string descriptorName{"ReshapeQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001782
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001783 ValidateNumInputs(workloadInfo, descriptorName, 1);
1784 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1785
1786 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1787 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1788
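    // Reshape only reinterprets the shape of the data, so the input and output must contain the
    // same number of elements.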
1789 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Nina Drozd2f2778f2019-05-27 10:37:05 +01001790
1791 // Check the supported data types
1792 std::vector<DataType> supportedTypes =
1793 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001794 DataType::BFloat16,
Nina Drozd2f2778f2019-05-27 10:37:05 +01001795 DataType::Float32,
1796 DataType::Float16,
Keith Davis0c2eeac2020-02-11 16:51:50 +00001797 DataType::QAsymmS8,
Sadik Armagan303980c2020-04-17 12:45:14 +01001798 DataType::QAsymmU8,
1799 DataType::QSymmS16,
Narumol Prangnawarat0c95f4c2020-11-18 16:52:07 +00001800 DataType::Signed32,
1801 DataType::Boolean
Nina Drozd2f2778f2019-05-27 10:37:05 +01001802 };
1803
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001804 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1805 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001806}
1807
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001808void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1809{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001810 const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001811
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001812 ValidateNumInputs(workloadInfo, descriptorName, 1);
1813 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1814
1815 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1816 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1817
1818 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1819 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001820
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001821 if (m_Parameters.m_BlockShape.size() != 2)
1822 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001823 throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001824 }
1825
1826 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1827 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001828 throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
1829 "dimensions as Block Shape.");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001830 }
1831
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001832 const TensorShape& inputShape = inputTensorInfo.GetShape();
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001833
1834 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001835 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001836
Matthew Bentham8800c002018-11-19 13:19:28 +00001837 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001838
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001839 const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
1840 widthPad.first + widthPad.second;
1841 const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
1842 heightPad.first + heightPad.second;
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001843
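    // SpaceToBatchNd only rearranges data (padded spatial blocks are moved into the batch
    // dimension), so the padded input must contain exactly as many elements as the output.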
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001844 const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1845 inputShape[dimensionIndices.GetChannelsIndex()];
1846 const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001847
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001848 if (numOutputElements != numInputElements)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001849 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001850 throw InvalidArgumentException(descriptorName + ": Input tensor has " +
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001851 to_string(numInputElements) + " elements after padding but output tensor has " +
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001852 to_string(numOutputElements) + " elements.");
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001853 }
1854
1855 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001856 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001857 throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
1858 "divisible by Block Shape in all spatial dimensions");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001859 }
nikraj01120522a2019-05-31 11:33:07 +01001860
1861 std::vector<DataType> supportedTypes =
1862 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001863 DataType::BFloat16,
1864 DataType::Float16,
1865 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01001866 DataType::QAsymmS8,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001867 DataType::QAsymmU8,
1868 DataType::QSymmS16
nikraj01120522a2019-05-31 11:33:07 +01001869 };
1870
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001871 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1872 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001873}
1874
Keith Davisa57eccb2019-06-14 17:33:22 +01001875void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1876{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001877 const std::string descriptorName{"SpaceToDepthQueueDescriptor"};
Keith Davisa57eccb2019-06-14 17:33:22 +01001878
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001879 ValidateNumInputs(workloadInfo, descriptorName, 1);
1880 ValidateNumOutputs(workloadInfo, descriptorName, 1);
Keith Davisa57eccb2019-06-14 17:33:22 +01001881
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001882 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1883 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1884
1885 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1886 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
Keith Davisa57eccb2019-06-14 17:33:22 +01001887
1888 std::vector<DataType> supportedTypes =
1889 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001890 DataType::BFloat16,
Keith Davisa57eccb2019-06-14 17:33:22 +01001891 DataType::Float32,
1892 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001893 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001894 DataType::QAsymmU8,
1895 DataType::QSymmS16
Keith Davisa57eccb2019-06-14 17:33:22 +01001896 };
1897
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001898 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1899 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Keith Davisa57eccb2019-06-14 17:33:22 +01001900
Aron Virginas-Tar8a1b2182019-09-19 14:39:37 +01001901 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1902
1903 if (m_Parameters.m_BlockSize == 0)
1904 {
1905 throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
1906 }
1907
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001908 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1909 const unsigned int wIndex = dimensionIndices.GetWidthIndex();
1910 const unsigned int hIndex = dimensionIndices.GetHeightIndex();
1911 const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
Keith Davisa57eccb2019-06-14 17:33:22 +01001912
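    // SpaceToDepth moves each blockSize x blockSize spatial block into the channel dimension, so the
    // spatial dimensions must divide evenly by the block size and the output depth by its square.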
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001913 const TensorShape& inputShape = inputTensorInfo.GetShape();
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001914 if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
Keith Davisa57eccb2019-06-14 17:33:22 +01001915 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001916 throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
1917 "by block size in all spatial dimensions");
Keith Davisa57eccb2019-06-14 17:33:22 +01001918 }
Aron Virginas-Tar8a1b2182019-09-19 14:39:37 +01001919
1920 const TensorShape& outputShape = outputTensorInfo.GetShape();
1921 if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
1922 {
1923        throw InvalidArgumentException(descriptorName + ": The depth of the output tensor "
1924 "must be divisible by the square of block size." );
1925 }
Keith Davisa57eccb2019-06-14 17:33:22 +01001926}
1927
telsoa014fcda012018-03-09 14:13:49 +00001928void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1929{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001930 const std::string descriptorName{"FloorQueueDescriptor"};
James Conroy83735b12019-05-30 16:36:59 +01001931
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001932 ValidateNumInputs(workloadInfo, descriptorName, 1);
1933 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1934
1935 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1936 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
James Conroy83735b12019-05-30 16:36:59 +01001937
1938 std::vector<DataType> supportedTypes =
1939 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001940 DataType::BFloat16,
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001941 DataType::Float32,
Matthew Jackson9bff1442019-09-12 09:08:23 +01001942 DataType::Float16,
Teresa Charlin3a3a6bf2022-05-05 15:26:27 +01001943 DataType::QSymmS16
James Conroy83735b12019-05-30 16:36:59 +01001944 };
1945
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001946 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Matthew Sloyan81beae32021-07-13 19:46:11 +01001947 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1948 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1949 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001950}
1951
telsoa01c577f2c2018-08-31 09:22:23 +01001952void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1953{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001954 // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1955
1956 const std::string descriptorName{"LstmQueueDescriptor"};
1957
1958 // check dimensions of all inputs and outputs
1959 if (workloadInfo.m_InputTensorInfos.size() != 3)
1960 {
1961 throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1962 }
1963 if (workloadInfo.m_OutputTensorInfos.size() != 4)
1964 {
1965 throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1966 }
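    // The three inputs are [input, outputStateIn, cellStateIn] and the four outputs are
    // [scratchBuffer, outputStateOut, cellStateOut, output], as validated below.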
1967
1968 std::vector<DataType> supportedTypes =
1969 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001970 DataType::BFloat16,
Conor Kennedyb9971c92019-05-07 07:14:23 +01001971 DataType::Float16,
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001972 DataType::Float32,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001973 DataType::QSymmS16
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001974 };
1975
Jan Eilers38e05bd2019-06-26 13:10:09 +01001976    // Check that the first input has a supported type and that all other inputs and outputs match it.
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001977 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1978
Jan Eilers38e05bd2019-06-26 13:10:09 +01001979 // type matches all other inputs
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001980 for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
Jan Eilers38e05bd2019-06-26 13:10:09 +01001981 {
1982 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1983 workloadInfo.m_InputTensorInfos[i],
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001984 descriptorName,
1985 "input_0",
1986 "input_" + std::to_string(i));
Jan Eilers38e05bd2019-06-26 13:10:09 +01001987 }
1988 // type matches all other outputs
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001989 for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
Jan Eilers38e05bd2019-06-26 13:10:09 +01001990 {
1991 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1992 workloadInfo.m_OutputTensorInfos[i],
1993 "LstmQueueDescriptor",
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001994 "input_0",
1995 "output_" + std::to_string(i));
Jan Eilers38e05bd2019-06-26 13:10:09 +01001996 }
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001997
janeil0117d8d852019-11-15 15:00:16 +00001998 // Making sure clipping parameters have valid values.
1999 // == 0 means no clipping
2000 // > 0 means clipping
2001 if (m_Parameters.m_ClippingThresCell < 0.0f)
2002 {
2003 throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
2004 }
2005 if (m_Parameters.m_ClippingThresProj < 0.0f)
2006 {
2007 throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
2008 }
2009
Jan Eilers38e05bd2019-06-26 13:10:09 +01002010 // Inferring batch size, number of outputs and number of cells from the inputs.
Jan Eilers38e05bd2019-06-26 13:10:09 +01002011 const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
2012 const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
2013 ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
2014 const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
2015 ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
2016 const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
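    // These sizes are inferred from the expected weight layouts: the input weights are
    // [n_cell, n_input] and the recurrent weights are [n_cell, n_output].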
2017
Jan Eilers38e05bd2019-06-26 13:10:09 +01002018 // input tensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002019 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
2020 descriptorName + " input_0");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002021 // outputStateInTensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002022 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
2023 descriptorName + " input_1");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002024    // cellStateInTensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002025 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
2026 descriptorName + " input_2");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002027 // scratchBufferTensor
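    // The scratch buffer holds the intermediate gate activations: four gates for a regular LSTM,
    // or three when CIFG is enabled and the input gate is coupled to the forget gate.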
2028 unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002029 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
2030 descriptorName + " output_0");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002031 // outputStateOutTensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002032 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
2033 descriptorName + " output_1");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002034 // cellStateOutTensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002035 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
2036 descriptorName + " output_2");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002037 // outputTensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002038 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
2039 descriptorName + " output_3");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002040
Jan Eilers38e05bd2019-06-26 13:10:09 +01002041 // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
2042 if ( m_InputToInputWeights )
2043 {
2044 ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
2045                                          (n_cell * n_input), "InputToInputWeights");
2046 }
2047
2048 ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
2049 ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
2050 (n_cell * n_input), "InputToForgetWeights");
2051
2052 ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
2053 ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
2054 (n_cell * n_input), "InputToCellWeights");
2055
2056 if ( m_RecurrentToInputWeights )
2057 {
2058 ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
2059 (n_cell * n_output), "RecurrentToInputWeights");
2060 }
2061
2062 ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
2063 ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
2064 (n_cell * n_output), "RecurrentToForgetWeights");
2065
2066 ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
2067 ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
2068 (n_cell * n_output), "RecurrentToCellWeights");
2069
2070    // Make sure the input-gate's parameters are either both present (regular
2071    // LSTM) or both absent (CIFG-LSTM), and that CifgEnabled is set accordingly.
2072 bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
2073 !m_Parameters.m_CifgEnabled) ||
2074 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
2075 m_Parameters.m_CifgEnabled));
2076 if (!cifg_weights_all_or_none)
2077 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002078 throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
2079 "RecurrentToInputWeights must either both be present (regular LSTM) "
2080 "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
2081 "accordingly.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002082 }
2083
2084 if ( m_CellToInputWeights )
2085 {
2086 ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
2087 n_cell, "CellToInputWeights");
2088 }
2089 if ( m_CellToForgetWeights )
2090 {
2091 ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
2092 n_cell, "CellToForgetWeights");
2093 }
2094 if ( m_CellToOutputWeights )
2095 {
2096 ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
2097 n_cell, "CellToOutputWeights");
2098 }
2099
2100    // Make sure the peephole weights are either all present or all absent, and that PeepholeEnabled is set accordingly.
2101 bool peephole_weights_all_or_none =
2102 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
2103 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
2104 || ( !m_CellToInputWeights && !m_CellToForgetWeights
2105 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
2106 if (!peephole_weights_all_or_none)
2107 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002108 throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002109 }
2110
2111 // Make sure the input gate bias is present only when not a CIFG-LSTM.
2112 if (m_Parameters.m_CifgEnabled)
2113 {
2114 if (m_InputGateBias)
2115 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002116 throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002117 }
2118 }
2119 else
2120 {
2121 if (!m_InputGateBias)
2122 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002123 throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
2124 "must be present.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002125 }
2126 ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
2127 n_cell, "InputGateBias");
2128 }
2129
2130 ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
2131 ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
2132
2133 ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
2134 ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
2135
2136 ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
2137 ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
2138
2139 if (m_ProjectionWeights)
2140 {
2141 ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
2142 (n_cell * n_output), "ProjectionWeights");
2143 }
2144 if (m_ProjectionBias)
2145 {
2146 ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
2147 }
2148
2149 // Making sure the projection tensors are consistent:
2150 // 1) If projection weight is not present, then projection bias should not be
2151 // present.
2152 // 2) If projection weight is present, then projection bias is optional.
2153    bool projection_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
2154 !m_Parameters.m_ProjectionEnabled)
2155 || (m_ProjectionWeights && !m_ProjectionBias &&
2156 m_Parameters.m_ProjectionEnabled)
2157 || (m_ProjectionWeights && m_ProjectionBias &&
2158 m_Parameters.m_ProjectionEnabled));
2159    if (!projection_tensors_consistent)
2160 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002161 throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002162 }
2163
2164    // The four layer normalization weights must either all be present or all be absent. Additionally, if
2165    // CIFG is used, the input layer normalization weights tensor is omitted and the remaining three must
2166    // either all be present or all be absent. Layer normalization is used when all of the required layer
2167    // normalization weights are present.
2168 if (m_InputLayerNormWeights)
2169 {
2170 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
2171 }
2172 if (m_ForgetLayerNormWeights)
2173 {
2174 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2175 }
2176 if (m_CellLayerNormWeights)
2177 {
2178 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2179 }
2180 if (m_OutputLayerNormWeights)
2181 {
2182 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2183 }
2184
Jan Eilers38e05bd2019-06-26 13:10:09 +01002185 if (m_Parameters.m_LayerNormEnabled)
2186 {
2187 if (!m_Parameters.m_CifgEnabled)
2188 {
2189 if (!m_InputLayerNormWeights)
2190 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002191 throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
2192 "disabled but InputLayerNormWeights are not present");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002193 }
2194 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
2195 1, n_cell, "InputLayerNormWeights");
2196 }
2197 else if (m_InputLayerNormWeights)
2198 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002199 throw InvalidArgumentException(descriptorName + ": InputLayerNormWeights are present while CIFG is "
2200 "enabled");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002201 }
2202
2203 ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
2204 "ForgetLayerNormWeights");
2205 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2206
2207 ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
2208 "OutputLayerNormWeights");
2209 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2210
2211 ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
2212 "CellLayerNormWeights");
2213 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2214 }
2215 else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
2216 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002217 throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
2218 "normalisation weights are present.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002219 }
telsoa01c577f2c2018-08-31 09:22:23 +01002220}
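// Illustrative sketch (hypothetical sizes, not part of the upstream checks): with
// n_cell = 4 and n_output = 3, the projection rules above accept, for example:
//
//     m_Parameters.m_ProjectionEnabled = true;
//     m_ProjectionWeights -> rank-2 tensor with 4 * 3 = 12 elements
//                            (only rank and element count are validated here)
//     m_ProjectionBias    -> rank-1 tensor with 3 elements, or left unset
//
// With projection disabled, both tensors must be absent. Similarly, when layer
// normalisation is enabled each present *LayerNormWeights tensor must be a rank-1
// tensor of n_cell = 4 elements, and InputLayerNormWeights must be omitted when
// CIFG is enabled.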
2221
2222void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2223{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002224 const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
telsoa01c577f2c2018-08-31 09:22:23 +01002225
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002226 ValidateNumInputs(workloadInfo, descriptorName, 1);
2227 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2228
2229 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2230 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2231
2232 if (inputTensorInfo.GetDataType() != DataType::Float32)
telsoa01c577f2c2018-08-31 09:22:23 +01002233 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002234 throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
telsoa01c577f2c2018-08-31 09:22:23 +01002235 }
2236
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002237 if (outputTensorInfo.GetDataType() != DataType::Float16)
telsoa01c577f2c2018-08-31 09:22:23 +01002238 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002239 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
telsoa01c577f2c2018-08-31 09:22:23 +01002240 }
2241
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002242 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa01c577f2c2018-08-31 09:22:23 +01002243}
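// Illustrative usage sketch (assumption: the shapes and the initializer-list style
// TensorInfo construction are used purely for illustration, this is not code from
// this file):
//
//     WorkloadInfo info;
//     info.m_InputTensorInfos.push_back(TensorInfo({1, 2, 2, 3}, DataType::Float32));
//     info.m_OutputTensorInfos.push_back(TensorInfo({1, 2, 2, 3}, DataType::Float16));
//
//     ConvertFp32ToFp16QueueDescriptor descriptor;
//     descriptor.Validate(info); // passes; a type or shape mismatch would throw
//                                // InvalidArgumentException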
2244
2245void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2246{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002247 const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
telsoa01c577f2c2018-08-31 09:22:23 +01002248
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002249 ValidateNumInputs(workloadInfo, descriptorName, 1);
2250 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2251
2252 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2253 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2254
2255 if (inputTensorInfo.GetDataType() != DataType::Float16)
telsoa01c577f2c2018-08-31 09:22:23 +01002256 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002257 throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
telsoa01c577f2c2018-08-31 09:22:23 +01002258 }
2259
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002260 if (outputTensorInfo.GetDataType() != DataType::Float32)
2261 {
2262 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2263 }
2264
2265 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa01c577f2c2018-08-31 09:22:23 +01002266}
2267
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002268void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2269{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002270 const std::string descriptorName{"DivisionQueueDescriptor"};
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002271
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002272 ValidateNumInputs(workloadInfo, descriptorName, 2);
2273 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2274
2275 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2276 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2277 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2278
2279 std::vector<DataType> supportedTypes =
2280 {
Sadik Armagan303980c2020-04-17 12:45:14 +01002281 DataType::BFloat16,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002282 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01002283 DataType::Float32,
2284 DataType::QAsymmS8,
2285 DataType::QAsymmU8,
Teresa Charlinecb6b8e2020-05-22 18:08:23 +01002286 DataType::QSymmS16,
2287 DataType::Signed32
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002288 };
2289
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002290 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2291 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2292 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002293
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002294 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2295 inputTensorInfo1,
2296 outputTensorInfo,
2297 descriptorName,
2298 "input_0",
2299 "input_1");
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002300}
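// Illustrative note (hypothetical shapes): ValidateBroadcastTensorShapesMatch allows
// each dimension of the two inputs to either match or be 1, with the output taking
// the larger extent. For example:
//
//     input_0: [4, 1, 3]
//     input_1: [1, 5, 3]
//     output : [4, 5, 3]   // accepted
//
// whereas input_0 = [4, 2, 3] against input_1 = [4, 5, 3] would be rejected, since
// 2 and 5 neither match nor contain a broadcastable extent of 1.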
2301
David Beckc2044fe2018-09-05 15:00:38 +01002302void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2303{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002304 const std::string descriptorName{"SubtractionQueueDescriptor"};
David Beckc2044fe2018-09-05 15:00:38 +01002305
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002306 ValidateNumInputs(workloadInfo, descriptorName, 2);
2307 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2308
2309 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2310 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2311 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2312
2313 std::vector<DataType> supportedTypes =
2314 {
Sadik Armagan303980c2020-04-17 12:45:14 +01002315 DataType::BFloat16,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002316 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01002317 DataType::Float32,
2318 DataType::QAsymmS8,
2319 DataType::QAsymmU8,
Teresa Charlinecb6b8e2020-05-22 18:08:23 +01002320 DataType::QSymmS16,
2321 DataType::Signed32,
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002322 };
2323
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002324 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2325 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2326 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002327
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002328 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2329 inputTensorInfo1,
2330 outputTensorInfo,
2331 descriptorName,
2332 "input_0",
2333 "input_1");
David Beckc2044fe2018-09-05 15:00:38 +01002334}
2335
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00002336void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2337{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002338 const std::string descriptorName{"MaximumQueueDescriptor"};
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00002339
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002340 ValidateNumInputs(workloadInfo, descriptorName, 2);
2341 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2342
2343 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2344 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2345 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2346
2347 std::vector<DataType> supportedTypes =
2348 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002349 DataType::BFloat16,
Mike Kelly1da02362019-08-01 08:43:57 +01002350 DataType::Float16,
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002351 DataType::Float32,
Keith Davis67e6c542020-02-19 10:08:33 +00002352 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002353 DataType::QAsymmU8,
Sadik Armagan303980c2020-04-17 12:45:14 +01002354 DataType::QSymmS16,
2355 DataType::Signed32
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002356 };
2357
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002358 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2359 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2360 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002361
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002362 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2363 inputTensorInfo1,
2364 outputTensorInfo,
2365 descriptorName,
2366 "input_0",
2367 "input_1");
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00002368}
2369
narpra01a6bf9122018-09-10 09:50:09 +01002370void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2371{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002372 const std::string descriptorName{"MeanQueueDescriptor"};
James Conroy4d1ff582019-06-10 17:06:39 +01002373
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002374 ValidateNumInputs(workloadInfo, descriptorName, 1);
2375 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2376
2377 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2378 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
James Conroy4d1ff582019-06-10 17:06:39 +01002379
2380 std::vector<DataType> supportedTypes =
2381 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002382 DataType::BFloat16,
James Conroy4d1ff582019-06-10 17:06:39 +01002383 DataType::Float32,
2384 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01002385 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002386 DataType::QAsymmU8,
2387 DataType::QSymmS16
James Conroy4d1ff582019-06-10 17:06:39 +01002388 };
narpra01eb061912018-09-10 17:35:27 +01002389
James Conroy4d1ff582019-06-10 17:06:39 +01002390 // First check if input tensor data type is supported, then
2391 // check if this data type matches the output tensor data type
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002392 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2393 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
James Conroy4d1ff582019-06-10 17:06:39 +01002394
narpra0132b90462018-09-13 11:07:48 +01002395 if (m_Parameters.m_KeepDims)
narpra01eb061912018-09-10 17:35:27 +01002396 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002397 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
narpra01eb061912018-09-10 17:35:27 +01002398 }
narpra0132b90462018-09-13 11:07:48 +01002399 else if (m_Parameters.m_Axis.empty())
narpra01eb061912018-09-10 17:35:27 +01002400 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002401 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
narpra01eb061912018-09-10 17:35:27 +01002402 }
2403 else
2404 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002405 unsigned int outputDim =
Matthew Sloyan171214c2020-09-09 09:07:37 +01002406 inputTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002407 ValidateTensorNumDimensions(outputTensorInfo,
2408 descriptorName,
narpra01eb061912018-09-10 17:35:27 +01002409 outputDim > 0 ? outputDim : 1,
2410 "output");
2411 }
narpra01a6bf9122018-09-10 09:50:09 +01002412}
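// Illustrative sketch (hypothetical shapes) of the output-rank rules above:
//
//     input [2, 3, 4], m_Axis = {1}, m_KeepDims = false -> output must be rank 2 (e.g. [2, 4])
//     input [2, 3, 4], m_Axis = {1}, m_KeepDims = true  -> output must be rank 3 (e.g. [2, 1, 4])
//     input [2, 3, 4], m_Axis = {},  m_KeepDims = false -> output must be rank 1 (reduce over all dims)
//
// Only the number of dimensions is validated here, not the individual extents.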
2413
jimfly012c9322a2018-09-19 10:59:49 +01002414void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2415{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002416 const std::string descriptorName{"PadQueueDescriptor"};
jimfly012c9322a2018-09-19 10:59:49 +01002417
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002418 ValidateNumInputs(workloadInfo, descriptorName, 1);
2419 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2420
2421 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2422 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Nina Drozd661dfa72018-10-02 11:14:17 +01002423
jimfly012c9322a2018-09-19 10:59:49 +01002424 // input and output should have the same number of dimensions
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002425 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2426
jimfly012c9322a2018-09-19 10:59:49 +01002427 // there should be entry in the pad list for each dimension in the input tensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002428 if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
2429 throw InvalidArgumentException(descriptorName + ": Pad list must contain one entry for each "
2430 "dimension of the input tensor, that is " +
2431 std::to_string(inputTensorInfo.GetNumDimensions()) + " entries, not " +
2432 std::to_string(m_Parameters.m_PadList.size()) + " entries.");
jimfly012c9322a2018-09-19 10:59:49 +01002433 }
2434}
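// Illustrative sketch (hypothetical values): for a rank-3 input of shape [2, 3, 4]
// the pad list must hold exactly three (front, back) pairs, for example
//
//     m_Parameters.m_PadList = { {0, 1}, {2, 2}, {0, 0} };
//
// which pads dimension 0 by one element at the end and dimension 1 by two elements
// on each side, giving an output of shape [3, 7, 4]. A list of two or four pairs is
// rejected by the check above.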
2435
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002436void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2437{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002438 const std::string descriptorName{"QuantizeQueueDescriptor"};
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002439
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002440 ValidateNumInputs(workloadInfo, descriptorName, 1);
2441 ValidateNumOutputs(workloadInfo, descriptorName, 1);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002442
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002443 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2444 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2445
Sadik Armagan2208b602019-07-31 16:36:27 +01002446 std::vector<DataType> supportedTypes =
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002447 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002448 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01002449 DataType::Float32,
Keith Davis5e51cd82020-01-29 16:52:59 +00002450 DataType::Float16,
2451 DataType::QSymmS8,
Ryan OShea9add1202020-02-07 10:06:33 +00002452 DataType::QAsymmS8,
Keith Davis5e51cd82020-01-29 16:52:59 +00002453 DataType::QAsymmU8,
2454 DataType::QSymmS16
Sadik Armagan2208b602019-07-31 16:36:27 +01002455 };
2456
2457 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002458
Keith Davis0c2eeac2020-02-11 16:51:50 +00002459 if (!IsQuantizedType(outputTensorInfo.GetDataType()))
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002460 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002461 throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002462 }
2463}
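// Illustrative arithmetic sketch (hypothetical quantization parameters): with an
// output of type QAsymmU8, scale 0.1f and offset 128, the float input value 1.0f
// would be stored as round(1.0f / 0.1f) + 128 = 138. The check above only requires
// that the output type is one of the quantized types; the scale and offset live in
// the output TensorInfo.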
2464
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00002465void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2466{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002467 const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};
Francis Murtaghd0dfe172019-06-25 10:57:10 +01002468
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002469 ValidateNumInputs(workloadInfo, descriptorName, 1);
2470 ValidateNumOutputs(workloadInfo, descriptorName, 1);
Francis Murtaghd0dfe172019-06-25 10:57:10 +01002471
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002472 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2473 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Francis Murtaghd0dfe172019-06-25 10:57:10 +01002474
2475 std::vector<DataType> supportedTypes =
2476 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002477 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01002478 DataType::Float32,
2479 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01002480 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002481 DataType::QAsymmU8,
2482 DataType::QSymmS16
Francis Murtaghd0dfe172019-06-25 10:57:10 +01002483 };
2484
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002485 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2486 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00002487}
2488
Conor Kennedy430b5d82018-11-14 15:28:28 +00002489void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2490{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002491 const std::string descriptorName{"StridedSliceQueueDescriptor"};
Conor Kennedy430b5d82018-11-14 15:28:28 +00002492
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002493 ValidateNumInputs(workloadInfo, descriptorName, 1);
2494 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2495
2496 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2497 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Matteo Martincighe851b3d2019-05-28 14:31:20 +01002498
2499 std::vector<DataType> supportedTypes =
2500 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002501 DataType::BFloat16,
Matteo Martincighe851b3d2019-05-28 14:31:20 +01002502 DataType::Float16,
2503 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002504 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002505 DataType::QAsymmU8,
2506 DataType::QSymmS16
Matteo Martincighe851b3d2019-05-28 14:31:20 +01002507 };
2508
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002509 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2510 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Matteo Martincighe851b3d2019-05-28 14:31:20 +01002511
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002512 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Matteo Martincighe851b3d2019-05-28 14:31:20 +01002513
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002514 const uint32_t rank = inputTensorInfo.GetNumDimensions();
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00002515 if (rank > 4)
2516 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002517 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00002518 }
2519
Conor Kennedy430b5d82018-11-14 15:28:28 +00002520 // Begin, End & Stride length must be of rank(input0)
2521 if (m_Parameters.m_Begin.size() != rank)
2522 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002523 throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
Conor Kennedy430b5d82018-11-14 15:28:28 +00002524 }
2525
2526 if (m_Parameters.m_End.size() != rank)
2527 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002528 throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
Conor Kennedy430b5d82018-11-14 15:28:28 +00002529 }
2530
2531 if (m_Parameters.m_Stride.size() != rank)
2532 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002533 throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
Conor Kennedy430b5d82018-11-14 15:28:28 +00002534 }
2535
2536 // Stride entries must be non-zero
2537 for (auto& stride : m_Parameters.m_Stride)
2538 {
2539 if (stride == 0)
2540 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002541 throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
Conor Kennedy430b5d82018-11-14 15:28:28 +00002542 }
2543 }
2544}
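// Illustrative sketch (hypothetical values) of the parameter-length rules above for
// a rank-2 input:
//
//     m_Parameters.m_Begin  = { 0, 1 };
//     m_Parameters.m_End    = { 2, 3 };
//     m_Parameters.m_Stride = { 1, 1 };   // a 0 in either position would throw
//
// Each of the three vectors must contain exactly rank(input) entries, and negative
// strides are permitted (only zero is rejected here).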
2545
kevmay0190539692018-11-29 08:40:19 +00002546void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2547{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002548 const std::string descriptorName{"MinimumQueueDescriptor"};
kevmay0190539692018-11-29 08:40:19 +00002549
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002550 ValidateNumInputs(workloadInfo, descriptorName, 2);
2551 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2552
2553 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2554 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2555 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2556
2557 std::vector<DataType> supportedTypes =
2558 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002559 DataType::BFloat16,
Mike Kelly1da02362019-08-01 08:43:57 +01002560 DataType::Float16,
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002561 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002562 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002563 DataType::QAsymmU8,
Sadik Armagan303980c2020-04-17 12:45:14 +01002564 DataType::QSymmS16,
2565 DataType::Signed32
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002566 };
2567
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002568 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2569 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2570 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002571
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002572 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2573 inputTensorInfo1,
2574 outputTensorInfo,
2575 descriptorName,
2576 "input_0",
2577 "input_1");
kevmay0190539692018-11-29 08:40:19 +00002578}
2579
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00002580void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2581{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002582 const std::string descriptorName{"DebugQueueDescriptor"};
2583
2584 ValidateNumInputs(workloadInfo, descriptorName, 1);
2585 ValidateNumOutputs(workloadInfo, descriptorName, 1);
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00002586}
2587
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002588void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2589{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002590 const std::string descriptorName{"EqualQueueDescriptor"};
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002591
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002592 ValidateNumInputs(workloadInfo, descriptorName, 2);
2593 ValidateNumOutputs(workloadInfo, descriptorName, 1);
kevmay012b4d88e2019-01-24 14:05:09 +00002594
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002595 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2596 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2597 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2598
2599 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2600 inputTensorInfo1,
2601 outputTensorInfo,
2602 descriptorName,
2603 "input_0",
2604 "input_1");
2605
2606 if (outputTensorInfo.GetDataType() != DataType::Boolean)
kevmay012b4d88e2019-01-24 14:05:09 +00002607 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002608 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
kevmay012b4d88e2019-01-24 14:05:09 +00002609 }
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002610}
2611
FrancisMurtagh878f0232018-12-19 10:56:15 +00002612void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2613{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002614 const std::string descriptorName{"GreaterQueueDescriptor"};
FrancisMurtagh878f0232018-12-19 10:56:15 +00002615
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002616 ValidateNumInputs(workloadInfo, descriptorName, 2);
2617 ValidateNumOutputs(workloadInfo, descriptorName, 1);
kevmay012b4d88e2019-01-24 14:05:09 +00002618
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002619 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2620 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2621 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2622
2623 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2624 inputTensorInfo1,
2625 outputTensorInfo,
2626 descriptorName,
2627 "input_0",
2628 "input_1");
2629
2630 if (outputTensorInfo.GetDataType() != DataType::Boolean)
kevmay012b4d88e2019-01-24 14:05:09 +00002631 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002632 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
kevmay012b4d88e2019-01-24 14:05:09 +00002633 }
FrancisMurtagh878f0232018-12-19 10:56:15 +00002634}
2635
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00002636void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2637{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002638 const std::string descriptorName{"RsqrtQueueDescriptor"};
2639
2640 ValidateNumInputs(workloadInfo, descriptorName, 1);
2641 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2642
2643 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2644 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2645
2646 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
nikraj010421e7f2019-06-14 09:40:34 +01002647
2648 std::vector<DataType> supportedTypes =
2649 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002650 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01002651 DataType::Float16,
2652 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002653 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002654 DataType::QAsymmU8,
2655 DataType::QSymmS16
nikraj010421e7f2019-06-14 09:40:34 +01002656 };
2657
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002658 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2659 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00002660}
2661
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01002662void GatherNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2663{
2664 const std::string descriptorName{"GatherNdQueueDescriptor"};
2665
2666 ValidateNumInputs(workloadInfo, descriptorName, 2);
2667 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2668
2669 const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
2670 if (indicesTensorInfo.GetDataType() != DataType::Signed32)
2671 {
2672 throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
2673 }
2674
2675 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2676 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2677
2678 std::vector<DataType> supportedTypes =
2679 {
2680 DataType::BFloat16,
2681 DataType::Float16,
2682 DataType::Float32,
2683 DataType::QAsymmS8,
2684 DataType::QAsymmU8,
2685 DataType::QSymmS16,
2686 DataType::Signed32,
2687 };
2688
2689 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2690
2691 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2692
2693 unsigned int outputDim = outputTensorInfo.GetNumDimensions();
2694 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
2695}
2696
narpra01b89b05f2019-01-16 09:53:09 +00002697void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2698{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002699 const std::string descriptorName{"GatherQueueDescriptor"};
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002700
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002701 ValidateNumInputs(workloadInfo, descriptorName, 2);
2702 ValidateNumOutputs(workloadInfo, descriptorName, 1);
narpra014951d842019-01-18 16:53:53 +00002703
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002704 const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
2705 if (indicesTensorInfo.GetDataType() != DataType::Signed32)
narpra014951d842019-01-18 16:53:53 +00002706 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002707 throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
narpra014951d842019-01-18 16:53:53 +00002708 }
2709
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002710 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2711 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2712
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002713 std::vector<DataType> supportedTypes =
2714 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002715 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01002716 DataType::Float16,
2717 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002718 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002719 DataType::QAsymmU8,
Teresa Charlin93492462020-05-29 13:08:59 +01002720 DataType::QSymmS16,
2721 DataType::Signed32,
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002722 };
2723
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002724 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002725
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002726 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002727
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002728 unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
2729 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
narpra01b89b05f2019-01-16 09:53:09 +00002730}
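// Illustrative sketch (hypothetical shapes): the output-rank rule above is
// rank(output) = rank(input) + rank(indices) - 1, e.g.
//
//     input  : [5, 4, 3]   (rank 3)
//     indices: [2, 6]      (rank 2, Signed32)
//     output : rank 3 + 2 - 1 = 4, e.g. [2, 6, 4, 3] when gathering along axis 0
//
// Only the rank is checked here; the individual output extents are not validated in
// this function.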
2731
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002732void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2733{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002734 const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};
2735
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002736 ValidateNumInputs(workloadInfo, descriptorName, 2);
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002737
2738 if (workloadInfo.m_OutputTensorInfos.size() != 4)
2739 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002740 throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002741 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
2742 }
2743
2744 if (m_Anchors == nullptr)
2745 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002746 throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002747 }
2748
2749 const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002750 const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
2751 const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
2752
2753 const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00002754 const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002755 const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
2756 const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002757
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002758 ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
2759 ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
2760 ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002761
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002762 const std::vector<DataType> supportedInputTypes =
2763 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002764 DataType::BFloat16,
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002765 DataType::Float32,
Matthew Jackson9bff1442019-09-12 09:08:23 +01002766 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01002767 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002768 DataType::QAsymmU8,
2769 DataType::QSymmS16
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002770 };
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002771
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002772 ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2773 ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2774 ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2775
2776 ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
2777 ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
2778 ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
2779 ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");
2780
2781 // NOTE: Output is always Float32 regardless of input type
2782 ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
2783 ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
2784 ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
2785 ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002786
2787 if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2788 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002789 throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002790 "must be positive and less than or equal to 1.");
2791 }
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002792
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002793 if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
2794 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002795 throw InvalidArgumentException(descriptorName + ": Number of classes with background "
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002796 "should be equal to number of classes + 1.");
2797 }
2798}
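// Illustrative sketch (hypothetical sizes): an SSD-style setup with 10 box proposals
// and m_NumClasses = 3 that satisfies the rank and class-count checks above:
//
//     boxEncodings: [1, 10, 4]    scores : [1, 10, 4]   (3 classes + 1 background)
//     anchors     : [10, 4]
//     detectionBoxes  : rank 3    detectionScores/Classes: rank 2    numDetections: rank 1
//
// All four outputs must be Float32 and m_NmsIouThreshold must lie in (0, 1].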
2799
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002800void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2801{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002802 const std::string& descriptorName{"DequantizeQueueDescriptor"};
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002803
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002804 ValidateNumInputs(workloadInfo, descriptorName, 1);
2805 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2806
2807 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2808 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2809
Teresa Charlin07307f32022-05-15 14:07:05 +01002810 std::vector<DataType> inputSupportedTypes =
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002811 {
Teresa Charlin07307f32022-05-15 14:07:05 +01002812 DataType::QAsymmS8,
2813 DataType::QAsymmU8,
2814 DataType::QSymmS8,
2815 DataType::QSymmS16,
2816 DataType::Float16
2817 };
2818 ValidateDataTypes(inputTensorInfo, inputSupportedTypes, descriptorName);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002819
Teresa Charlin07307f32022-05-15 14:07:05 +01002820 std::vector<DataType> outputSupportedTypes =
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002821 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002822 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01002823 DataType::Float32,
2824 DataType::Float16
Sadik Armagan2208b602019-07-31 16:36:27 +01002825 };
2826
Teresa Charlin07307f32022-05-15 14:07:05 +01002827 ValidateDataTypes(outputTensorInfo, outputSupportedTypes, descriptorName);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002828}
2829
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002830void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2831{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002832 const std::string& descriptorName{"MergeQueueDescriptor"};
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002833
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002834 ValidateNumInputs(workloadInfo, descriptorName, 2);
2835 ValidateNumOutputs(workloadInfo, descriptorName, 1);
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002836
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002837 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2838 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2839 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002840
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002841 ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2842 ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2843
2844 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2845 ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002846}
2847
Keith Davis3ae3f972021-05-21 16:33:48 +01002848void ShapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2849{
2850 const std::string& descriptorName{"ShapeQueueDescriptor"};
2851
2852 ValidateNumInputs(workloadInfo, descriptorName, 1);
2853 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2854
2855 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2856 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2857
2858 std::vector<DataType> supportedTypes =
2859 {
2860 DataType::BFloat16,
2861 DataType::Float16,
2862 DataType::Float32,
2863 DataType::QAsymmS8,
2864 DataType::QAsymmU8,
2866 DataType::QSymmS8,
2867 DataType::QSymmS16,
2868 DataType::Signed32
2869 };
2870
2871 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2872 ValidateDataTypes(outputTensorInfo, {DataType::Signed32}, descriptorName);
2873}
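// Illustrative sketch (hypothetical shapes): the Shape workload accepts most input
// data types but must always produce a Signed32 output, e.g. an input of shape
// [2, 3, 4] (any supported type) would typically yield a rank-1 Signed32 output of
// shape [3] holding { 2, 3, 4 }. Only the data types are validated here.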
2874
Sadik Armaganeff363d2019-04-05 15:25:46 +01002875void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2876{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002877 const std::string& descriptorName{"SwitchQueueDescriptor"};
Sadik Armaganeff363d2019-04-05 15:25:46 +01002878
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002879 ValidateNumInputs(workloadInfo, descriptorName, 2);
2880 ValidateNumOutputs(workloadInfo, descriptorName, 2);
2881
2882 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2883 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2884
2885 const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
2886 const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];
2887
2888 std::vector<DataType> supportedTypes =
2889 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002890 DataType::BFloat16,
Sadik Armaganeff363d2019-04-05 15:25:46 +01002891 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002892 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002893 DataType::QAsymmU8,
2894 DataType::QSymmS16
Sadik Armaganeff363d2019-04-05 15:25:46 +01002895 };
2896
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002897 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2898 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
Sadik Armaganeff363d2019-04-05 15:25:46 +01002899
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002900 ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2901 ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
Sadik Armaganeff363d2019-04-05 15:25:46 +01002902
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002903 ValidateTensorShapesMatch(inputTensorInfo0,
2904 outputTensorInfo0,
2905 descriptorName,
2906 "input_0",
2907 "output_0");
Sadik Armaganeff363d2019-04-05 15:25:46 +01002908
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002909 ValidateTensorShapesMatch(inputTensorInfo0,
2910 outputTensorInfo1,
2911 descriptorName,
2912 "input_0",
2913 "output_1");
Sadik Armaganeff363d2019-04-05 15:25:46 +01002914}
2915
Derek Lamberti901ea112019-12-10 22:07:09 +00002916void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
Matteo Martincigh49124022019-01-11 13:25:59 +00002917{
2918 // This is internally generated so it should not need validation.
2919}
2920
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002921void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2922{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002923 const std::string& descriptorName{"PreluQueueDescriptor"};
2924
2925 ValidateNumInputs(workloadInfo, descriptorName, 2);
2926 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2927
2928 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2929 const TensorInfo& alphaTensorInfo = workloadInfo.m_InputTensorInfos[1];
2930 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002931
2932 std::vector<DataType> supportedTypes
2933 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002934 DataType::BFloat16,
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002935 DataType::Float16,
2936 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002937 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002938 DataType::QAsymmU8,
2939 DataType::QSymmS16
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002940 };
2941
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002942 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2943 ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002944
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002945 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002946
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002947 ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
2948 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002949
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002950 ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2951 alphaTensorInfo,
2952 outputTensorInfo,
2953 descriptorName,
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002954 "input",
2955 "alpha");
2956}
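// Illustrative sketch (hypothetical shapes): alpha is broadcast against the input,
// so a per-channel slope can be supplied as
//
//     input : [1, 16, 16, 8]
//     alpha : [1, 1, 1, 8]     (one slope per channel)
//     output: [1, 16, 16, 8]
//
// which satisfies the broadcast check above; all three tensors must also share the
// same data type.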
2957
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002958void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2959{
2960 const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
2961
2962 ValidateNumInputs(workloadInfo, descriptorName, 1);
2963 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2964
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002965 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2966 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2967
2968 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
2969 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002970
2971 ValidatePointer(m_Weight, descriptorName, "weight");
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002972
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002973 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2974 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002975
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00002976 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
2977
2978 Optional<TensorInfo> optionalBiasTensorInfo;
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002979 if (m_Parameters.m_BiasEnabled)
2980 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002981 ValidatePointer(m_Bias, descriptorName, "bias");
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002982
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00002983 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
2984 const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002985
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00002986 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002987 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01002988 }
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00002989
2990 ValidatePerAxisQuantization(inputTensorInfo,
2991 outputTensorInfo,
2992 weightTensorInfo,
2993 optionalBiasTensorInfo,
2994 descriptorName);
2995
2996 std::vector<DataType> supportedTypes =
2997 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002998 DataType::BFloat16,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00002999 DataType::Float32,
3000 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01003001 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00003002 DataType::QAsymmU8,
3003 DataType::QSymmS16
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00003004 };
3005
3006 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3007 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01003008}
3009
Mike Kellyc9ea45a2020-02-28 18:11:58 +00003010void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3011{
3012 const std::string descriptorName{"TransposeQueueDescriptor"};
3013
3014 ValidateNumInputs(workloadInfo, descriptorName, 1);
3015 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3016
3017 const PermutationVector& mapping = m_Parameters.m_DimMappings;
3018
3019 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3020 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3021
3022 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
3023 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
3024
3025 for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
3026 {
3027 if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i])
3028 {
3029 throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) +
3030 " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " +
3031 "must match dst dimension " + to_string(i) +
3032 " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")");
3033 }
3034 }
3035
3036 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3037}
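// Illustrative sketch (hypothetical values): with m_DimMappings = { 0, 3, 1, 2 } the
// loop above requires output.shape[i] == input.shape[mapping[i]] for every i, e.g.
//
//     input : [1, 224, 224, 3]
//     output: [1, 3, 224, 224]
//
// since output[1] = input[3], output[2] = input[1] and output[3] = input[2].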
3038
Simon Obute51f67772021-09-03 15:50:13 +01003039void ChannelShuffleQueueDescriptor::Validate(const WorkloadInfo &workloadInfo) const
3040{
3041 const std::string descriptorName{"ChannelShuffleQueueDescriptor"};
3042
3043 ValidateNumInputs(workloadInfo, descriptorName, 1);
3044 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3045
3046 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3047 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3048
3049 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3050}
3051
James Conroy4f1f8992020-04-29 20:01:10 +01003052void QLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3053{
3054 const std::string descriptorName{"QLstmQueueDescriptor"};
3055
3056 // Validate number of inputs/outputs
3057 ValidateNumInputs(workloadInfo, descriptorName, 3);
3058 ValidateNumOutputs(workloadInfo, descriptorName, 3);
3059
3060 // Input/output tensor info
3061 auto inputInfo = workloadInfo.m_InputTensorInfos[0];
3062 auto outputStateInInfo = workloadInfo.m_InputTensorInfos[1];
3063 auto cellStateInInfo = workloadInfo.m_InputTensorInfos[2];
3064
3065 auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
3066 auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
3067 auto outputInfo = workloadInfo.m_OutputTensorInfos[2];
3068
3069 // Supported types for various tensors in QLSTM
3070 std::vector<DataType> inputOutputSupportedTypes =
3071 {
3072 DataType::QAsymmS8
3073 };
3074
3075 std::vector<DataType> cellStateSupportedTypes =
3076 {
3077 DataType::QSymmS16
3078 };
3079
3080 std::vector<DataType> weightsSupportedTypes =
3081 {
3082 DataType::QSymmS8
3083 };
3084
3085 std::vector<DataType> layerNormPeepholeWeightsSupportedTypes =
3086 {
3087 DataType::QSymmS16
3088 };
3089
3090 std::vector<DataType> biasSupportedTypes =
3091 {
3092 DataType::Signed32
3093 };
3094
3095 // Validate types of input/output tensors
3096 ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3097 ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3098 ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3099
3100 ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3101 ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3102 ValidateDataTypes(outputInfo, inputOutputSupportedTypes, descriptorName);
3103
3104 // Validate matching types of input/output tensors
3105 ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3106 ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3107 "outputStateIn", "outputStateOut");
3108 ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3109
3110 // Infer number of batches, number of units, input size and output size from tensor dimensions
3111 const uint32_t numBatches = inputInfo.GetShape()[0];
3112 const uint32_t inputSize = inputInfo.GetShape()[1];
3113 const uint32_t outputSize = outputStateInInfo.GetShape()[1];
3114 const uint32_t numUnits = cellStateInInfo.GetShape()[1];
3115
3116 // Validate number of dimensions and number of elements for input/output tensors
3117 ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
3118 ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
3119 ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * numUnits), descriptorName + " cellStateIn");
3120
3121 ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
3122 ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * numUnits), descriptorName + " cellStateOut");
3123 ValidateTensorNumDimNumElem(outputInfo, 2, (numBatches * outputSize), descriptorName + " output");
3124
3125 // Validate number of dimensions and number of elements for MANDATORY weight tensors
3126 ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
3127 auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3128 ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (numUnits * inputSize), " InputToForgetWeights");
3129
3130 ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
3131 auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3132 ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (numUnits * inputSize), " InputToCellWeights");
3133
3134 ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
3135 auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3136 ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (numUnits * inputSize), " InputToOutputWeights");
3137
3138 ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
3139 auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3140 ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (numUnits * outputSize),
3141 " RecurrentToForgetWeights");
3142
3143 ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
3144 auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3145 ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");
3146
3147 ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
3148 auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3149 ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (numUnits * outputSize), " RecurrentToOutputWeights");
3150
3151 // Validate data types for MANDATORY weights tensors (all should match each other)
3152 ValidateDataTypes(inputToForgetWeightsInfo, weightsSupportedTypes, descriptorName);
3153
3154 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToCellWeightsInfo, descriptorName,
3155 "inputToForgetWeights", "inputToCellWeights");
3156 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3157 "inputToForgetWeights", "inputToOutputWeights");
3158
3159 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3160 "inputToForgetWeights", "recurrentToForgeteights");
3161 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3162 "inputToForgetWeights", "recurrentToCellWeights");
3163 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3164 "inputToForgetWeights", "recurrentToOutputWeights");
3165
3166 // Validate number of dimensions and number of elements for MANDATORY bias tensors
3167 ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
3168 auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3169 ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, numUnits, " ForgetGateBias");
3170
3171 ValidatePointer(m_CellBias, descriptorName, "CellBias");
3172 auto cellBiasInfo = m_CellBias->GetTensorInfo();
3173 ValidateTensorNumDimNumElem(cellBiasInfo, 1, numUnits, " CellBias");
3174
3175 ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
3176 auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3177 ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, numUnits, " OutputGateBias");
3178
3179 // Validate data types for MANDATORY bias tensors
3180 ValidateDataTypes(forgetGateBiasInfo, biasSupportedTypes, descriptorName);
3181
3182 ValidateTensorDataTypesMatch(forgetGateBiasInfo, cellBiasInfo, descriptorName,
3183 "forgetGateBias", "cellBias");
3184 ValidateTensorDataTypesMatch(forgetGateBiasInfo, outputGateBiasInfo, descriptorName,
3185 "forgetGateBias", "outputGateBias");
3186
3187 // Validate OPTIONAL params: CIFG (inputToInputWeights, recurrentToInputWeights, inputGateBias)
3188 const bool allCifgParamsPresentOrNot = ((m_InputToInputWeights && m_RecurrentToInputWeights && m_InputGateBias &&
3189 !m_Parameters.m_CifgEnabled) ||
3190 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3191 !m_InputGateBias && m_Parameters.m_CifgEnabled));
3192
3193 if (!allCifgParamsPresentOrNot)
3194 {
3195 throw InvalidArgumentException(descriptorName +
3196 ": InputToInputWeights, RecurrentToInputWeights and InputGateBias must either all be present "
3197 "(CIFG disabled) or not be present at all (CIFG enabled). m_Parameters.m_CifgEnabled should be "
3198 "set appropriately.");
3199 }
3200
3201 if (!m_Parameters.m_CifgEnabled)
3202 {
3203 // Validate number of dimensions and number of elements
3204 auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3205 ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (numUnits * inputSize), " InputToInputWeights");
3206
3207 auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3208 ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (numUnits * outputSize),
3209 " RecurrentToInputWeights");
3210
3211 auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3212 ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, numUnits, " InputGateBias");
3213
3214 // Validate data types
3215 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToInputWeightsInfo, descriptorName,
3216 "inputToForgetWeights", "inputToInputWeights");
3217 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3218 "inputToForgetWeights", "recurrentToInputWeights");
3219 ValidateTensorDataTypesMatch(forgetGateBiasInfo, inputGateBiasInfo, descriptorName,
3220 "forgetGateBias", "inputGateBias");
3221 }
3222
3223 // Validate OPTIONAL params: Peephole (cellToInputWeights, cellToForgetWeights, cellToOutputWeights)
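    // Peephole connections feed the cell state into the gate calculations; CellToInputWeights
    // is only used when CIFG is disabled, since CIFG derives the input gate from the forget gate.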
3224 bool allPeepholeWeightsPresentOrNot =
3225 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
3226 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
3227 || (!m_CellToInputWeights && !m_CellToForgetWeights
3228 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
3229
3230 if (!allPeepholeWeightsPresentOrNot)
3231 {
3232 throw InvalidArgumentException(descriptorName +
3233 ": CellToInputWeights, CellToForgetWeights and CellToOutputWeights should all be present (Peephole "
3234 "enabled) or not be present at all (Peephole disabled). CellToInputWeights should only be present "
3235 "when Peephole is enabled and CIFG is disabled. m_Parameters.m_PeepholeEnabled should be set "
3236 "appropriately.");
3237 }
3238
3239 if (m_Parameters.m_PeepholeEnabled)
3240 {
3241 auto cellToForgetWeightsInfo = m_CellToForgetWeights->GetTensorInfo();
3242 ValidateTensorNumDimNumElem(cellToForgetWeightsInfo, 1, numUnits, " cellToForgetWeights");
3243 ValidateDataTypes(cellToForgetWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3244
3245 auto cellToOutputWeightsInfo = m_CellToOutputWeights->GetTensorInfo();
3246 ValidateTensorNumDimNumElem(cellToOutputWeightsInfo, 1, numUnits, " cellToOutputWeights");
3247 ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToOutputWeightsInfo, descriptorName,
3248                                      "cellToForgetWeights", "cellToOutputWeights");
3249
3250 if (!m_Parameters.m_CifgEnabled)
3251 {
3252 auto cellToInputWeightsInfo = m_CellToInputWeights->GetTensorInfo();
3253 ValidateTensorNumDimNumElem(cellToInputWeightsInfo, 1, numUnits, " cellToInputWeights");
3254 ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToInputWeightsInfo, descriptorName,
3255 "cellToForgetWeights", "cellToInputWeights");
3256 }
3257 }
3258
3259 // Validate OPTIONAL params: Layer Norm Weights
3260 bool allLayerNormWeightsPresentOrNot =
3261 (((m_InputLayerNormWeights || m_Parameters.m_CifgEnabled) && m_ForgetLayerNormWeights
3262 && m_CellLayerNormWeights && m_OutputLayerNormWeights && m_Parameters.m_LayerNormEnabled)
3263 || (!m_InputLayerNormWeights && !m_ForgetLayerNormWeights && !m_CellLayerNormWeights
3264 && !m_OutputLayerNormWeights && !m_Parameters.m_LayerNormEnabled));
3265
3266 if (!allLayerNormWeightsPresentOrNot)
3267 {
3268 throw InvalidArgumentException(descriptorName +
3269         ": InputLayerNormWeights, ForgetLayerNormWeights, OutputLayerNormWeights "
3270 "and CellLayerNormWeights should all be present (Layer Norm enabled) or not "
3271 "be present at all (Layer Norm disabled). InputLayerNormWeights should "
3272 "only be present when Layer Norm is enabled and CIFG is disabled. "
3273 "m_Parameters.m_LayerNormEnabled should be set appropriately.");
3274 }
3275
3276 if (m_Parameters.m_LayerNormEnabled)
3277 {
3278 auto forgetLayerNormWeightsInfo = m_ForgetLayerNormWeights->GetTensorInfo();
3279 ValidateTensorNumDimNumElem(forgetLayerNormWeightsInfo, 1, numUnits, " forgetLayerNormWeights");
3280 ValidateDataTypes(forgetLayerNormWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3281
3282 auto cellLayerNormWeightsInfo = m_CellLayerNormWeights->GetTensorInfo();
3283 ValidateTensorNumDimNumElem(cellLayerNormWeightsInfo, 1, numUnits, " cellLayerNormWeights");
3284 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, cellLayerNormWeightsInfo, descriptorName,
3285 "forgetLayerNormWeights", "cellLayerNormWeights");
3286
3287 auto outputLayerNormWeightsInfo = m_OutputLayerNormWeights->GetTensorInfo();
3288 ValidateTensorNumDimNumElem(outputLayerNormWeightsInfo, 1, numUnits, " outputLayerNormWeights");
3289 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, outputLayerNormWeightsInfo, descriptorName,
3290 "forgetLayerNormWeights", "outputLayerNormWeights");
3291
3292 if (!m_Parameters.m_CifgEnabled)
3293 {
3294 auto inputLayerNormWeightsInfo = m_InputLayerNormWeights->GetTensorInfo();
3295 ValidateTensorNumDimNumElem(inputLayerNormWeightsInfo, 1, numUnits, " inputLayerNormWeights");
3296 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, inputLayerNormWeightsInfo, descriptorName,
3297 "forgetLayerNormWeights", "inputLayerNormWeights");
3298 }
3299 }
3300
3301 // Validate OPTIONAL params: Projection (projectionWeights, projectionBias)
3302 bool correctProjectionTensorsPresent =
3303 ((!m_ProjectionWeights && !m_ProjectionBias && !m_Parameters.m_ProjectionEnabled) ||
3304 (m_ProjectionWeights && !m_ProjectionBias && m_Parameters.m_ProjectionEnabled) ||
3305 (m_ProjectionWeights && m_ProjectionBias && m_Parameters.m_ProjectionEnabled));
3306
3307 if (!correctProjectionTensorsPresent)
3308 {
3309 throw InvalidArgumentException(descriptorName +
3310 ": If projection is enabled, ProjectionWeights should be present and "
3311 "ProjectionBias is optional. If projection is disabled, neither "
3312 "ProjectionWeights nor ProjectionBias should be present.");
3313 }
3314
3315 if (m_Parameters.m_ProjectionEnabled)
3316 {
3317 auto projectionWeightsInfo = m_ProjectionWeights->GetTensorInfo();
3318 ValidateTensorNumDimNumElem(projectionWeightsInfo, 2, (numUnits * outputSize), "ProjectionWeights");
3319 ValidateDataTypes(projectionWeightsInfo, weightsSupportedTypes, descriptorName);
3320
3321 if (m_ProjectionBias)
3322 {
3323 auto projectionBiasInfo = m_ProjectionBias->GetTensorInfo();
Sadik Armagand6f06492020-05-22 08:36:33 +01003324 ValidateTensorNumDimNumElem(projectionBiasInfo, 1, outputSize, "ProjectionBias");
James Conroy4f1f8992020-04-29 20:01:10 +01003325 ValidateDataTypes(projectionBiasInfo, biasSupportedTypes, descriptorName);
3326 }
3327
3328 }
3329     else if ((outputInfo.GetQuantizationScale() != m_Parameters.m_HiddenStateScale) ||
3330              (outputInfo.GetQuantizationOffset() != m_Parameters.m_HiddenStateZeroPoint))
    {
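        // With projection disabled the output tensor is the hidden state itself, so its quantization
        // parameters are expected to match HiddenStateScale and HiddenStateZeroPoint.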
3331 throw InvalidArgumentException(descriptorName +
3332 ": If projection is disabled, output quantization info (scale, offset) "
3333 "should match HiddenStateScale and HiddenStateZeroPoint.");
3334 }
3335
3336}
3337
James Conroy9c3cae82019-08-01 16:01:48 +01003338void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3339{
3340 const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
3341
3342 // Validate number of inputs/outputs
3343 ValidateNumInputs(workloadInfo, descriptorName, 3);
3344 ValidateNumOutputs(workloadInfo, descriptorName, 2);
3345
3346 // Input/output tensor infos
3347 auto inputInfo = workloadInfo.m_InputTensorInfos[0];
3348 auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
3349 auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
3350
3351 auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
3352 auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
3353
3354 std::vector<DataType> inputOutputSupportedTypes =
3355 {
Derek Lambertif90c56d2020-01-10 17:14:08 +00003356 DataType::QAsymmU8
James Conroy9c3cae82019-08-01 16:01:48 +01003357 };
3358
3359 std::vector<DataType> cellStateSupportedTypes =
3360 {
Derek Lambertif90c56d2020-01-10 17:14:08 +00003361 DataType::QSymmS16
James Conroy9c3cae82019-08-01 16:01:48 +01003362 };
3363
3364 std::vector<DataType> weightsSupportedTypes =
3365 {
Derek Lambertif90c56d2020-01-10 17:14:08 +00003366 DataType::QAsymmU8
James Conroy9c3cae82019-08-01 16:01:48 +01003367 };
3368
3369 std::vector<DataType> biasSupportedTypes =
3370 {
3371 DataType::Signed32
3372 };
3373
3374 // Validate types of input/output tensors
3375 ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3376 ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3377 ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3378
3379 ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3380 ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3381
3382 // Validate matching types of input/output tensors
3383 ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3384 ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3385 "outputStateIn", "outputStateOut");
3386 ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3387
3388 // Validate matching quantization info for input/output tensors
3389 ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3390 ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
3391 ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
Aron Virginas-Tar636ab402019-09-16 14:27:45 +01003392
James Conroy9c3cae82019-08-01 16:01:48 +01003393 // Infer number of batches, input size and output size from tensor dimensions
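    // QuantizedLstm has no projection layer, so the output size equals the cell state size.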
3394 const uint32_t numBatches = inputInfo.GetShape()[0];
3395 const uint32_t inputSize = inputInfo.GetShape()[1];
3396 const uint32_t outputSize = cellStateInInfo.GetShape()[1];
3397
3398 // Validate number of dimensions and number of elements for input/output tensors
3399 ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
3400 ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
3401 ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
3402 ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
3403 ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
3404
3405 // Validate number of dimensions and number of elements for weights tensors
3406 ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
3407 auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3408 ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
3409
3410 ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
3411 auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3412 ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
3413
3414 ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
3415 auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3416 ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
3417
3418 ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
3419 auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3420 ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
3421
3422 ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
3423 auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3424 ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
3425
3426 ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
3427 auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3428 ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
3429 " RecurrentToForgetWeights");
3430
3431 ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
3432 auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3433 ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
3434
3435 ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
3436 auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3437 ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToOutputWeights");
3438
3439 // Validate data types for weights tensors (all should match each other)
3440 ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
3441
3442 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
3443 "inputToInputWeights", "inputToForgetWeights");
3444 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
3445 "inputToInputWeights", "inputToCellWeights");
3446 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3447 "inputToInputWeights", "inputToOutputWeights");
3448
3449 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3450 "inputToInputWeights", "recurrentToInputWeights");
3451 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3452                                  "inputToInputWeights", "recurrentToForgetWeights");
3453 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3454 "inputToInputWeights", "recurrentToCellWeights");
3455 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3456 "inputToInputWeights", "recurrentToOutputWeights");
3457
3458 // Validate matching quantization info for weight tensors (all should match each other)
3459 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
3460 descriptorName, "inputToInputWeights", "inputToForgetWeights");
3461 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
3462 descriptorName, "inputToInputWeights", "inputToCellWeights");
3463 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
3464 descriptorName, "inputToInputWeights", "inputToOutputWeights");
3465
3466 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
3467 descriptorName, "inputToInputWeights", "recurrentToInputWeights");
3468 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
3469 descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
3470 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
3471 descriptorName, "inputToInputWeights", "recurrentToCellWeights");
3472 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
3473 descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
3474
3475 // Validate number of dimensions and number of elements in bias tensors
3476 ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
3477 auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3478 ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
3479
3480 ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
3481 auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3482 ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
3483
3484 ValidatePointer(m_CellBias, descriptorName, "CellBias");
3485 auto cellBiasInfo = m_CellBias->GetTensorInfo();
3486 ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
3487
3488 ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
3489 auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3490 ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
3491
3492 // Validate data types for bias tensors (all should match each other)
3493 ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
3494
3495 ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
3496 "inputGateBias", "forgetGateBias");
3497 ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
3498 "inputGateBias", "cellBias");
3499 ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
3500 "inputGateBias", "outputGateBias");
3501
3502 // Validate bias tensor quantization info
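    // Each bias is expected to use a zero quantization offset and a scale equal to
    // inputScale * weightsScale.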
3503 ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3504 ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3505 ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3506 ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3507}
3508
Kevin May868eb142019-09-04 17:29:31 +01003509void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3510{
3511 const std::string descriptorName{"AbsQueueDescriptor"};
3512
3513 ValidateNumInputs(workloadInfo, descriptorName, 1);
3514 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3515
3516 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3517 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3518
3519 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3520
3521 std::vector<DataType> supportedTypes =
James Conroyd47a0642019-09-17 14:22:06 +01003522 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003523 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01003524 DataType::Float16,
3525 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01003526 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00003527 DataType::QAsymmU8,
Kevin Mayec52c3a2020-04-24 09:42:31 +01003528 DataType::QSymmS16,
3529 DataType::Signed32
James Conroyd47a0642019-09-17 14:22:06 +01003530 };
Kevin May868eb142019-09-04 17:29:31 +01003531
3532 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3533 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3534}
3535
Aron Virginas-Tar636ab402019-09-16 14:27:45 +01003536void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3537{
3538 const std::string descriptorName{"SliceQueueDescriptor"};
3539
3540 ValidateNumInputs(workloadInfo, descriptorName, 1);
3541 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3542
3543 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3544 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3545
3546 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3547
3548 const unsigned int rank = inputTensorInfo.GetNumDimensions();
3549 if (rank > 4)
3550 {
3551 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
3552 }
3553
3554 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
3555
3556 // Check if m_Begin and m_Size have the expected length
3557 if (m_Parameters.m_Begin.size() != rank)
3558 {
3559 throw InvalidArgumentException(descriptorName +
3560 ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
3561 }
3562 if (m_Parameters.m_Size.size() != rank)
3563 {
3564 throw InvalidArgumentException(descriptorName +
3565 ": Length of size descriptor must equal rank " + std::to_string(rank));
3566 }
3567
3568 // Check if the shape of the output tensor matches m_Size
3569 const TensorShape& outputShape = outputTensorInfo.GetShape();
3570 for (unsigned int i = 0u; i < rank; ++i)
3571 {
3572 if (m_Parameters.m_Size[i] != outputShape[i])
3573 {
3574 throw InvalidArgumentException(descriptorName + ": Size descriptor does not match output tensor.");
3575 }
3576 }
3577
3578 // Check if the sum of begin offset and size in a given dimension
3579 // does not exceed the size of corresponding input
3580 const TensorShape& inputShape = inputTensorInfo.GetShape();
3581 for(unsigned int i = 0u; i < rank; ++i)
3582 {
Aron Virginas-Tar92b9f872019-09-17 17:27:04 +01003583 if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
Aron Virginas-Tar636ab402019-09-16 14:27:45 +01003584 {
3585 throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
3586 std::to_string(i) + " exceeds input size.");
3587 }
3588 }
3589}
3590
Aron Virginas-Tardd6247f2019-09-19 14:31:17 +01003591void DepthToSpaceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3592{
3593 const std::string descriptorName{"DepthToSpaceQueueDescriptor"};
3594
3595 ValidateNumInputs(workloadInfo, descriptorName, 1);
3596 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3597
3598 const TensorInfo& inputInfo = workloadInfo.m_InputTensorInfos[0];
3599 const TensorInfo& outputInfo = workloadInfo.m_OutputTensorInfos[0];
3600
3601 ValidateTensorNumDimensions(inputInfo, descriptorName, 4, "input");
3602 ValidateTensorNumDimensions(outputInfo, descriptorName, 4, "output");
3603
3604 std::vector<DataType> supportedTypes =
3605 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003606 DataType::BFloat16,
Aron Virginas-Tardd6247f2019-09-19 14:31:17 +01003607 DataType::Float32,
3608 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01003609 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00003610 DataType::QAsymmU8,
3611 DataType::QSymmS16
Aron Virginas-Tardd6247f2019-09-19 14:31:17 +01003612 };
3613
3614 ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
3615 ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
3616
3617 ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName, "input", "output");
3618
3619 if (m_Parameters.m_BlockSize == 0)
3620 {
3621 throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
3622 }
3623
3624 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
3625 const unsigned int wIndex = dimensionIndices.GetWidthIndex();
3626 const unsigned int hIndex = dimensionIndices.GetHeightIndex();
3627 const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
3628
3629 const TensorShape& outputShape = outputInfo.GetShape();
3630 if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
3631 {
3632         throw InvalidArgumentException(descriptorName + ": Output width and height "
3633                                        "must be divisible by block size.");
3634 }
3635
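    // DepthToSpace moves blockSize * blockSize depth values into each spatial position,
    // e.g. with a block size of 2, an input depth of 4*C yields an output depth of C.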
3636 const TensorShape& inputShape = inputInfo.GetShape();
3637 if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
3638 {
3639         throw InvalidArgumentException(descriptorName + ": The depth of the input tensor "
3640                                        "must be divisible by the square of the block size.");
3641 }
3642}
3643
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01003644void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3645{
3646 const std::string descriptorName{"ComparisonQueueDescriptor"};
3647
3648 ValidateNumInputs(workloadInfo, descriptorName, 2);
3649 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3650
3651 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3652 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3653 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3654
3655 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3656 inputTensorInfo1,
3657 outputTensorInfo,
3658 descriptorName,
3659 "input_0",
3660 "input_1");
3661
3662 if (outputTensorInfo.GetDataType() != DataType::Boolean)
3663 {
3664 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3665 }
3666}
3667
Mike Kelly3ec30772023-03-08 13:47:17 +00003668void ElementwiseBinaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3669{
3670 const std::string descriptorName{"ElementwiseBinaryQueueDescriptor"};
3671
3672 ValidateNumInputs(workloadInfo, descriptorName, 2);
3673 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3674
3675 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3676 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3677 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3678
3679 std::vector<DataType> supportedTypes =
3680 {
3681 DataType::BFloat16,
3682 DataType::Float16,
3683 DataType::Float32,
3684 DataType::QAsymmS8,
3685 DataType::QAsymmU8,
3686 DataType::QSymmS16,
3687 DataType::Signed32
3688 };
3689
3690 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
3691 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
3692
3693 ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
3694 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
3695}
3696
josh minor4a3c6102020-01-06 16:40:46 -06003697void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3698{
3699 const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};
3700
3701 ValidateNumInputs(workloadInfo, descriptorName, 1);
3702 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3703
3704 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3705 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3706
3707 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3708
3709 std::vector<DataType> supportedTypes =
3710 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003711 DataType::BFloat16,
josh minor4a3c6102020-01-06 16:40:46 -06003712 DataType::Float16,
3713 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01003714 DataType::QAsymmS8,
josh minor4a3c6102020-01-06 16:40:46 -06003715 DataType::QAsymmU8,
Sadik Armaganac472102020-03-24 09:54:36 +00003716 DataType::QSymmS16,
3717 DataType::Signed32
josh minor4a3c6102020-01-06 16:40:46 -06003718 };
3719
James Conroyaba90cd2020-11-06 16:28:18 +00003720 std::vector<DataType> logicalSupportedTypes =
3721 {
3722 DataType::Boolean
3723 };
3724
3725 if (m_Parameters.m_Operation == UnaryOperation::LogicalNot)
3726 {
3727 ValidateDataTypes(inputTensorInfo, logicalSupportedTypes, descriptorName);
3728 }
3729 else
3730 {
3731 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3732 }
3733
josh minor4a3c6102020-01-06 16:40:46 -06003735 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3736}
3737
Finn Williams2605b232020-06-10 15:53:46 +01003738void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3739{
3740 const std::string descriptorName{"RankQueueDescriptor"};
3741
3742 ValidateNumInputs(workloadInfo, descriptorName, 1);
3743 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3744
3745 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3746 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3747
3748 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
3749 ValidateTensorNumElements(outputTensorInfo, descriptorName, 1, "output");
3750
3751 std::vector<DataType> supportedTypes =
3752 {
3753 DataType::BFloat16,
3754 DataType::Float16,
3755 DataType::Float32,
3756 DataType::QAsymmS8,
3757 DataType::QAsymmU8,
3758 DataType::QSymmS8,
3759 DataType::QSymmS16,
3760 DataType::Signed32
3761 };
3762
3763 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3764 ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
3765}
3766
James Conroyaba90cd2020-11-06 16:28:18 +00003767void LogicalBinaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3768{
3769 const std::string descriptorName{"LogicalBinaryQueueDescriptor"};
3770
3771 ValidateNumInputs(workloadInfo, descriptorName, 2);
3772 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3773
3774 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3775 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3776 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3777
3778 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3779 inputTensorInfo1,
3780 outputTensorInfo,
3781 descriptorName,
3782 "input_0",
3783 "input_1");
3784
3785 if (inputTensorInfo0.GetDataType() != DataType::Boolean)
3786 {
3787 throw InvalidArgumentException(descriptorName + ": Input tensor 0 type must be Boolean.");
3788 }
3789
3790 if (inputTensorInfo1.GetDataType() != DataType::Boolean)
3791 {
3792 throw InvalidArgumentException(descriptorName + ": Input tensor 1 type must be Boolean.");
3793 }
3794
3795 if (outputTensorInfo.GetDataType() != DataType::Boolean)
3796 {
3797 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3798 }
3799}
3800
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003801void ReduceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3802{
3803 const std::string descriptorName{"ReduceQueueDescriptor"};
3804
3805 ValidateNumInputs(workloadInfo, descriptorName, 1);
3806 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3807
3808 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3809 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3810
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003811 std::vector<DataType> supportedTypes =
3812 {
3813 DataType::BFloat16,
3814 DataType::Float16,
3815 DataType::Float32,
3816 DataType::QAsymmS8,
3817 DataType::QAsymmU8,
3818 DataType::QSymmS16,
3819 DataType::Signed32
3820 };
3821
3822 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3823 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3824}
3825
Narumol Prangnawarat8ed39ae2021-07-15 16:16:25 +01003826void UnidirectionalSequenceLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3827{
3828 // Modified from LstmQueueDescriptor::Validate to support UnidirectionalSequenceLstm
3829
3830 const std::string descriptorName{"UnidirectionalSequenceLstmQueueDescriptor"};
3831
3832 // check dimensions of all inputs and outputs
3833 if (workloadInfo.m_InputTensorInfos.size() != 3)
3834 {
3835 throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
3836 }
Mike Kelly12994962022-04-21 11:57:09 +01003837 if (workloadInfo.m_OutputTensorInfos.size() != 3)
Narumol Prangnawarat8ed39ae2021-07-15 16:16:25 +01003838 {
3839 throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
3840 }
3841
3842 std::vector<DataType> supportedTypes =
3843 {
Mike Kelly12994962022-04-21 11:57:09 +01003844 DataType::Float32,
3845 DataType::QAsymmS8
Narumol Prangnawarat8ed39ae2021-07-15 16:16:25 +01003846 };
3847
3848 // check for supported type of one input and match them with all the other input and output
3849 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
3850
Narumol Prangnawarat8ed39ae2021-07-15 16:16:25 +01003851 // Making sure clipping parameters have valid values.
3852 // == 0 means no clipping
3853 // > 0 means clipping
3854 if (m_Parameters.m_ClippingThresCell < 0.0f)
3855 {
3856 throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
3857 }
3858 if (m_Parameters.m_ClippingThresProj < 0.0f)
3859 {
3860 throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
3861 }
3862
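    // The input tensor is [batchSize, timeSteps, inputSize] when batch-major (default)
    // and [timeSteps, batchSize, inputSize] when m_TimeMajor is set.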
3863 unsigned int batchIndx = 0;
3864 unsigned int inputIndx = 2;
3865 uint32_t timeStep = 1;
3866 unsigned int timeIndx = 1;
3868 if (m_Parameters.m_TimeMajor)
3869 {
3870 batchIndx = 1;
3871 timeIndx = 0;
3872
3873 }
3874 timeStep = workloadInfo.m_InputTensorInfos[0].GetShape()[timeIndx];
3875
3876 // Inferring batch size, number of outputs and number of cells from the inputs.
3877 const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[inputIndx];
3878 const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[batchIndx];
3879 ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
3880 const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
3881 ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
3882 const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
3883
3884 // input tensor
3885 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 3, (timeStep * n_batch * n_input),
3886 descriptorName + " input_0");
3887 // outputStateInTensor
3888 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
3889 descriptorName + " input_1");
3890 // cellStateInTensor
3891 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
3892 descriptorName + " input_2");
3893
3894 // outputTensor
Mike Kelly12994962022-04-21 11:57:09 +01003895 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 3, (timeStep * n_batch * n_output),
Narumol Prangnawarat8ed39ae2021-07-15 16:16:25 +01003896 descriptorName + " output_0");
3897
3898 // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
3899 if ( m_InputToInputWeights )
3900 {
3901 ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
3902                                 (n_cell * n_input), "InputToInputWeights");
3903 }
3904
3905 ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
3906 ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
3907 (n_cell * n_input), "InputToForgetWeights");
3908
3909 ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
3910 ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
3911 (n_cell * n_input), "InputToCellWeights");
3912
3913 if ( m_RecurrentToInputWeights )
3914 {
3915 ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
3916 (n_cell * n_output), "RecurrentToInputWeights");
3917 }
3918
3919 ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
3920 ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
3921 (n_cell * n_output), "RecurrentToForgetWeights");
3922
3923 ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
3924 ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
3925 (n_cell * n_output), "RecurrentToCellWeights");
3926
3927 // Make sure the input-gate's parameters are either both present (regular
3928 // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
3929 bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
3930 !m_Parameters.m_CifgEnabled) ||
3931 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3932 m_Parameters.m_CifgEnabled));
3933 if (!cifg_weights_all_or_none)
3934 {
3935 throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
3936 "RecurrentToInputWeights must either both be present (regular LSTM) "
3937                                        "or both not present (CIFG-LSTM). In addition CifgEnabled must be set "
3938 "accordingly.");
3939 }
3940
3941 if ( m_CellToInputWeights )
3942 {
3943 ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
3944 n_cell, "CellToInputWeights");
3945 }
3946 if ( m_CellToForgetWeights )
3947 {
3948 ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
3949 n_cell, "CellToForgetWeights");
3950 }
3951 if ( m_CellToOutputWeights )
3952 {
3953 ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
3954 n_cell, "CellToOutputWeights");
3955 }
3956
3957 // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
3958 bool peephole_weights_all_or_none =
3959 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
3960 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
3961 || ( !m_CellToInputWeights && !m_CellToForgetWeights
3962 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
3963 if (!peephole_weights_all_or_none)
3964 {
3965 throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
3966 }
3967
3968 // Make sure the input gate bias is present only when not a CIFG-LSTM.
3969 if (m_Parameters.m_CifgEnabled)
3970 {
3971 if (m_InputGateBias)
3972 {
3973 throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
3974 }
3975 }
3976 else
3977 {
3978 if (!m_InputGateBias)
3979 {
3980 throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
3981 "must be present.");
3982 }
3983 ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
3984 n_cell, "InputGateBias");
3985 }
3986
3987 ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
3988 ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
3989
3990 ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
3991 ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
3992
3993 ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
3994 ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
3995
3996 if (m_ProjectionWeights)
3997 {
3998 ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
3999 (n_cell * n_output), "ProjectionWeights");
4000 }
4001 if (m_ProjectionBias)
4002 {
4003 ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
4004 }
4005
4006 // Making sure the projection tensors are consistent:
4007 // 1) If projection weight is not present, then projection bias should not be
4008 // present.
4009 // 2) If projection weight is present, then projection bias is optional.
4010 bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
4011 !m_Parameters.m_ProjectionEnabled)
4012 || (m_ProjectionWeights && !m_ProjectionBias &&
4013 m_Parameters.m_ProjectionEnabled)
4014 || (m_ProjectionWeights && m_ProjectionBias &&
4015 m_Parameters.m_ProjectionEnabled));
4016 if (!projecton_tensors_consistent)
4017 {
4018 throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
4019 }
4020
4021 // The four layer normalization weights either all have values or none of them have values. Additionally, if
4022 // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
4023 // either all have values or none of them have values. Layer normalization is used when the values of all the
4024 // layer normalization weights are present
4025 if (m_InputLayerNormWeights)
4026 {
4027 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
4028 }
4029 if (m_ForgetLayerNormWeights)
4030 {
4031 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
4032 }
4033 if (m_CellLayerNormWeights)
4034 {
4035 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
4036 }
4037 if (m_OutputLayerNormWeights)
4038 {
4039 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
4040 }
4041
4042 if (m_Parameters.m_LayerNormEnabled)
4043 {
4044 if (!m_Parameters.m_CifgEnabled)
4045 {
4046 if (!m_InputLayerNormWeights)
4047 {
4048 throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
4049 "disabled but InputLayerNormWeights are not present");
4050 }
4051 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
4052 1, n_cell, "InputLayerNormWeights");
4053 }
4054 else if (m_InputLayerNormWeights)
4055 {
4056 throw InvalidArgumentException(descriptorName + ": InputLayerNormWeights are present while CIFG is "
4057 "enabled");
4058 }
4059
4060 ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
4061 "ForgetLayerNormWeights");
4062 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
4063
4064 ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
4065 "OutputLayerNormWeights");
4066 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
4067
4068 ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
4069 "CellLayerNormWeights");
4070 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
4071 }
4072 else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
4073 {
4074 throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
4075 "normalisation weights are present.");
4076 }
4077}
4078
Samuel Yap6b478092022-07-06 15:36:03 +01004079void BatchMatMulQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
4080{
4081 const std::string descriptorName{"BatchMatMulQueueDescriptor"};
4082
4083 ValidateNumInputs(workloadInfo, descriptorName, 2);
4084 ValidateNumOutputs(workloadInfo, descriptorName, 1);
4085
4086 // Inputs must be: both 2D+
4087 // For inputs X and Y whose dimensions to be multiplied are (M,N) and (I,J) respectively,
4088 // axes N and I must be the same size
4089
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004090 const auto& inputXInfoBeforeParams = workloadInfo.m_InputTensorInfos[0];
4091 const auto& inputYInfoBeforeParams = workloadInfo.m_InputTensorInfos[1];
4092 const auto& outputInfo = workloadInfo.m_OutputTensorInfos[0];
4093 // Output info has already been inferred
Samuel Yap6b478092022-07-06 15:36:03 +01004094
4095 std::vector<DataType> supportedTypes =
4096 {
4097 DataType::BFloat16,
4098 DataType::Float16,
4099 DataType::Float32,
4100 DataType::QAsymmS8,
4101 DataType::QAsymmU8,
4102 DataType::QSymmS16
4103 };
4104
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004105 ValidateDataTypes(inputXInfoBeforeParams, supportedTypes, descriptorName);
4106 ValidateDataTypes(inputYInfoBeforeParams, supportedTypes, descriptorName);
4107 ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
Samuel Yap6b478092022-07-06 15:36:03 +01004108
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004109 if ((inputXInfoBeforeParams.GetNumDimensions() < 2) ||
4110 (inputYInfoBeforeParams.GetNumDimensions() < 2))
Samuel Yap6b478092022-07-06 15:36:03 +01004111 {
4112 throw InvalidArgumentException(descriptorName + ": Input tensors are not 2D or greater.");
4113 }
4114
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004115 TensorInfo inputXInfoAfterParams;
4116 TensorInfo inputYInfoAfterParams;
4117
4118 if((m_Parameters.m_TransposeX && m_Parameters.m_AdjointX) ||
4119 (m_Parameters.m_TransposeY && m_Parameters.m_AdjointY))
Samuel Yap6b478092022-07-06 15:36:03 +01004120 {
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004121 throw InvalidArgumentException(descriptorName +
4122 ": Invalid descriptor parameters - Transpose and Adjoint "
4123 "cannot both be true for a given input tensor.");
4124 }
4125 if(m_Parameters.m_TransposeX)
4126 {
4127 inputXInfoAfterParams = armnnUtils::Permuted(inputXInfoBeforeParams,
4128 BatchMatMulDescriptor::GetPermuteVec(
4129 m_Parameters.m_DataLayoutX,
4130 inputXInfoBeforeParams.GetShape()));
4131 }
4132 else if(m_Parameters.m_AdjointX)
4133 {
4134 auto axesToMul = BatchMatMulDescriptor::GetAxesToMul(m_Parameters.m_DataLayoutX,
4135 inputXInfoBeforeParams.GetShape());
4136 if(inputXInfoBeforeParams.GetShape()[axesToMul.first] !=
4137 inputXInfoBeforeParams.GetShape()[axesToMul.second])
Samuel Yap6b478092022-07-06 15:36:03 +01004138 {
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004139 throw InvalidArgumentException(descriptorName +
4140 ": Adjoint is set to true for input tensor X, but the axes to be adjointed are not square." );
Samuel Yap6b478092022-07-06 15:36:03 +01004141 }
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004142 // Shape remains the same as it's square
4143 inputXInfoAfterParams = inputXInfoBeforeParams;
4144 }
4145 else
4146 {
4147 inputXInfoAfterParams = inputXInfoBeforeParams;
Samuel Yap6b478092022-07-06 15:36:03 +01004148 }
4149
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004150 if(m_Parameters.m_TransposeY)
Samuel Yap6b478092022-07-06 15:36:03 +01004151 {
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004152 inputYInfoAfterParams = armnnUtils::Permuted(inputYInfoBeforeParams,
4153 BatchMatMulDescriptor::GetPermuteVec(
4154 m_Parameters.m_DataLayoutY,
4155 inputYInfoBeforeParams.GetShape()));
4156 }
4157 else if(m_Parameters.m_AdjointY)
4158 {
4159 auto axesToMul = BatchMatMulDescriptor::GetAxesToMul(m_Parameters.m_DataLayoutY,
4160 inputYInfoBeforeParams.GetShape());
4161 if(inputYInfoBeforeParams.GetShape()[axesToMul.first] !=
4162 inputYInfoBeforeParams.GetShape()[axesToMul.second])
Samuel Yap6b478092022-07-06 15:36:03 +01004163 {
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004164 throw InvalidArgumentException(descriptorName +
4165 ": Adjoint is set to true for input tensor Y, but the axes to be adjointed are not square." );
Samuel Yap6b478092022-07-06 15:36:03 +01004166 }
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004167 // Shape remains the same as it's square
4168 inputYInfoAfterParams = inputYInfoBeforeParams;
4169 }
4170 else
4171 {
4172 inputYInfoAfterParams = inputYInfoBeforeParams;
Samuel Yap6b478092022-07-06 15:36:03 +01004173 }
4174
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004175 switch(m_Parameters.m_DataLayoutX)
4176 {
4177 case DataLayout::NCDHW:
4178 case DataLayout::NDHWC:
4179 if(inputXInfoAfterParams.GetNumDimensions() < 3)
4180 {
4181 throw InvalidArgumentException(descriptorName +
4182 ": Input tensor X does not have the correct "
4183 "number of dimensions for the Data Layout that it has been assigned.");
4184 }
4185 break;
4186 case DataLayout::NCHW:
4187 case DataLayout::NHWC:
4188 default:
4189 break;
4190 }
Samuel Yap6b478092022-07-06 15:36:03 +01004191
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004192 switch(m_Parameters.m_DataLayoutY)
4193 {
4194 case DataLayout::NCDHW:
4195 case DataLayout::NDHWC:
4196 if(inputYInfoAfterParams.GetNumDimensions() < 3)
4197 {
4198 throw InvalidArgumentException(descriptorName +
4199 ": Input tensor Y does not have the correct "
4200 "number of dimensions for the Data Layout that it has been assigned.");
4201 }
4202 break;
4203 case DataLayout::NCHW:
4204 case DataLayout::NHWC:
4205 default:
4206 break;
4207 }
4208
4209 auto axesXToMul = BatchMatMulDescriptor::GetAxesToMul(m_Parameters.m_DataLayoutX,
4210 inputXInfoAfterParams.GetShape());
4211 auto axesYToMul = BatchMatMulDescriptor::GetAxesToMul(m_Parameters.m_DataLayoutY,
4212                                                           inputYInfoAfterParams.GetShape());
4213
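    // The contracted dimensions must agree, e.g. X of shape [2, 3, 5] and Y of shape [2, 5, 4]
    // multiply to give an output of shape [2, 3, 4].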
4214 if(inputXInfoAfterParams.GetShape()[axesXToMul.second]
4215 != inputYInfoAfterParams.GetShape()[axesYToMul.first])
Samuel Yap6b478092022-07-06 15:36:03 +01004216 {
4217 throw InvalidArgumentException(descriptorName +
4218 ": The final axis of input tensor X must be the same size as "
4219 "the second last axis of input tensor Y.");
4220 }
4221
Samuel Yap6b478092022-07-06 15:36:03 +01004222 { // Separate scope so we don't pollute the rest of the scope with our temp variables
4223 // e.g. NHWC isn't compatible with NCHW as of now
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004224 DataLayout xLayout = m_Parameters.m_DataLayoutX;
4225 DataLayout yLayout = m_Parameters.m_DataLayoutY;
Samuel Yap6b478092022-07-06 15:36:03 +01004226
4227 if(xLayout == DataLayout::NCHW || xLayout == DataLayout::NCDHW)
4228 {
4229 if(yLayout == DataLayout::NHWC || yLayout == DataLayout::NDHWC)
4230 {
4231 throw InvalidArgumentException(descriptorName +
4232 ": Invalid input tensor data layout combination.");
4233 }
4234 }
4235 if(yLayout == DataLayout::NCHW || yLayout == DataLayout::NCDHW)
4236 {
4237 if(xLayout == DataLayout::NHWC || xLayout == DataLayout::NDHWC)
4238 {
4239 throw InvalidArgumentException(descriptorName +
4240 ": Invalid input tensor data layout combination.");
4241 }
4242 }
4243 }
4244
4245 // Simulate aligning the ends of the matrix dims and prepending 1's to the beginning of the shorter one
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004246 unsigned int outputTensorDimSize = std::max(inputXInfoAfterParams.GetNumDimensions(),
4247 inputYInfoAfterParams.GetNumDimensions());
Samuel Yap6b478092022-07-06 15:36:03 +01004248 if(outputTensorDimSize-2 > 0)
4249 {
4250 TensorInfo tiXNotMul = TensorInfo(TensorShape(outputTensorDimSize-2),
4251 DataType::Float32);
4252 TensorInfo tiYNotMul = TensorInfo(TensorShape(outputTensorDimSize-2),
4253 DataType::Float32);
4254 TensorInfo tiOutNotMul = TensorInfo(TensorShape(outputTensorDimSize-2),
4255 DataType::Float32);
4256
4257 auto doAxisExtension = [&](std::vector<unsigned int> axisIndices, TensorInfo& ti)
4258 {
4259 auto sizeDiff = (outputTensorDimSize-2) - axisIndices.size();
4260
4261 for(unsigned int i = 0; i < sizeDiff; i++)
4262 {
4263 axisIndices.insert(axisIndices.begin(), 1);
4264 }
4265
4266 for(unsigned int i = 0; i < ti.GetNumDimensions(); i++)
4267 {
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004268 ti.GetShape()[i] = inputXInfoAfterParams.GetShape()[i];
Samuel Yap6b478092022-07-06 15:36:03 +01004269 }
4270 };
4271
Samuel Yapdc8ed9d2022-08-08 14:07:42 +01004272 auto axesXNotMul = BatchMatMulDescriptor::GetAxesNotMul(m_Parameters.m_DataLayoutX,
4273 inputXInfoAfterParams.GetShape());
4274 auto axesYNotMul = BatchMatMulDescriptor::GetAxesNotMul(m_Parameters.m_DataLayoutY,
4275 inputYInfoAfterParams.GetShape());
4276
4277 doAxisExtension(axesXNotMul, tiXNotMul);
4278 doAxisExtension(axesYNotMul, tiYNotMul);
Samuel Yap6b478092022-07-06 15:36:03 +01004279
4280 for(unsigned int i = 0; i < tiOutNotMul.GetNumDimensions(); i++)
4281 {
4282 tiOutNotMul.GetShape()[i] = std::max(tiXNotMul.GetShape()[i],
4283 tiYNotMul.GetShape()[i]);
4284 }
4285
4286 ValidateBroadcastTensorShapesMatch(tiXNotMul,
4287 tiYNotMul,
4288 tiOutNotMul,
4289 descriptorName,
4290 "input_X",
4291 "input_Y");
4292 }
Samuel Yap6b478092022-07-06 15:36:03 +01004293}
4294
Narumol Prangnawarat8ed39ae2021-07-15 16:16:25 +01004295
mathad01df9a3222021-04-28 11:42:57 +01004296} // namespace armnn