//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadInfo.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/TensorUtils.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/Logging.hpp>

#include <algorithm>
#include <iomanip>
#include <string>
#include <sstream>

#include <fmt/format.h>

using namespace armnnUtils;

namespace armnn
{

//---------------------------------------------------------------
DataType GetBiasDataType(DataType inputDataType)
{
    switch (inputDataType)
    {
        case DataType::Float16:
            return DataType::Float16;
        case DataType::BFloat16:
        case DataType::Float32:
            return DataType::Float32;
        case DataType::QAsymmS8:
            return DataType::Signed32;
        case DataType::QAsymmU8:
            return DataType::Signed32;
        case DataType::QSymmS8:
            return DataType::Signed32;
        case DataType::QSymmS16:
            return DataType::Signed32;
        default:
            ARMNN_ASSERT_MSG(false, "Invalid input data type");
            return DataType::Float32;
    }
}
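
// Illustrative note: for the quantized input types above the bias tensor is expected to be
// Signed32, with (per-tensor or per-axis) scale equal to inputScale * weightScale and a zero
// quantization offset; ValidateBiasTensorQuantization below checks exactly this relationship.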

namespace
{

//---------------------------------------------------------------
// The Android NDK does not support the std::to_string function.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream os;
    os << value;
    return os.str();
}

//---------------------------------------------------------------
void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
{
    if (!ptr)
    {
        throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
                                       paramName + " parameter must be set.");
    }
}

//---------------------------------------------------------------
void ValidateTensorShapesMatch(const TensorInfo& first,
                               const TensorInfo& second,
                               std::string const& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (first.GetShape() != second.GetShape())
    {
        throw InvalidArgumentException(descName + ": "
                                       + firstName + " & " + secondName + " must have identical shapes");
    }
}

//---------------------------------------------------------------
void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
{
    if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
    {
        throw InvalidArgumentException(descName +
                                       ": Requires exactly " + to_string(expectedSize) + " input(s). " +
                                       to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
    }
}

//---------------------------------------------------------------
void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
{
    if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
    {
        throw InvalidArgumentException(descName +
                                       ": Requires exactly " + to_string(expectedSize) + " output(s). " +
                                       to_string(workloadInfo.m_OutputTensorInfos.size()) + " have been provided.");
    }
}

//---------------------------------------------------------------
void ValidateTensorNumDimensions(const TensorInfo& tensor,
                                 std::string const& descName,
                                 unsigned int numDimensions,
                                 std::string const& tensorName)
{
    if (tensor.GetNumDimensions() != numDimensions)
    {
        throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
                                       to_string(tensor.GetNumDimensions()) + " dimensions for " +
                                       tensorName + " tensor.");
    }
}

//---------------------------------------------------------------
void ValidateTensorNumElements(const TensorInfo& tensor,
                               std::string const& descName,
                               unsigned int numElements,
                               std::string const& tensorName)
{
    if (tensor.GetNumElements() != numElements)
    {
        throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
                                       to_string(tensor.GetNumElements()) + " elements for " +
                                       tensorName + " tensor.");
    }
}

//---------------------------------------------------------------
void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
                                 unsigned int numDimension,
                                 unsigned int numElements,
                                 std::string const& tensorName)
{
    const std::string functionName{"ValidateTensorNumDimNumElem"};
    ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
    ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
}

//---------------------------------------------------------------
void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
                            const std::string& descName, std::string const& tensorName)
{
    if (tensor.GetDataType() != dataType)
    {
        throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
                                       GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
    }
}

void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
{
    if (tensor.GetDataType() != DataType::QSymmS8)
    {
        throw InvalidArgumentException(descName +
            ": Expected data type which supports per-axis quantization scheme but got " +
            GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
    }
}

//---------------------------------------------------------------
void ValidateTensorQuantizationSpace(const TensorInfo& first,
                                     const TensorInfo& second,
                                     const std::string& descName,
                                     std::string const& firstName,
                                     std::string const& secondName)
{
    if (!first.IsQuantized() ||
        !second.IsQuantized())
    {
        // Not a quantized type, ignore the validation
        return;
    }

    DataType firstDataType  = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
                                       " and scale " + to_string(first.GetQuantizationScale()) + ", " +
                                       secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
                                       " and scale " + to_string(second.GetQuantizationScale()));
    }
}

//---------------------------------------------------------------
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
                                    const TensorInfo& inputTensorInfo,
                                    const TensorInfo& weightsTensorInfo,
                                    const std::string& descName)
{
    // Helper lambda function to validate a single bias quantization scale value
    auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void
    {
        constexpr float tolerance = 0.0001f;
        if (std::abs(biasScale - expectedScale) > tolerance)
        {
            // Print the float values with extra precision to see very small differences
            ARMNN_LOG(warning) << std::setprecision(6) << descName << ": Expected " << expectedScale <<
                " for bias quantization scale (product of input and weight scales), but got " <<
                biasScale << ". Using scale provided.";
        }
    };

    if (biasTensor.GetQuantizationOffset() != 0)
    {
        throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
                                       to_string(biasTensor.GetQuantizationOffset()));
    }

    if (biasTensor.HasMultipleQuantizationScales() || weightsTensorInfo.HasMultipleQuantizationScales())
    {
        // Validate per-axis quantization scales
        const std::vector<float>& weightScales = weightsTensorInfo.GetQuantizationScales();
        const std::vector<float>& biasScales   = biasTensor.GetQuantizationScales();

        if (weightScales.size() != biasScales.size())
        {
            std::stringstream msg;
            msg << descName << ": Expected matching number of per-axis quantization scales for weights and bias, "
                << "but got different values. This is currently unsupported: weights=" << weightScales.size()
                << ", biases=" << biasScales.size();
            throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
        }

        for (size_t i = 0ul; i < biasScales.size(); ++i)
        {
            const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i];
            VerifyBiasQuantizationScale(biasScales[i], expectedScale);
        }
    }
    else
    {
        // Validate per-tensor quantization scale
        const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
        VerifyBiasQuantizationScale(biasTensor.GetQuantizationScale(), expectedScale);
    }
}
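
// Example (illustrative): with inputScale = 0.5f and weightScale = 0.02f the expected bias scale
// is 0.01f; a bias scale of 0.0102f differs by more than the 0.0001f tolerance and only triggers
// a warning, whereas a non-zero bias quantization offset is rejected outright.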

//---------------------------------------------------------------
void ValidateTensors(const std::vector<ITensorHandle*>& vec,
                     unsigned int numExpected,
                     const std::string& descName,
                     const std::string& varName)
{
    if (vec.empty() && numExpected > 0)
    {
        throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
    }

    for (unsigned int i = 0; i < numExpected; ++i)
    {
        if (!vec[i])
        {
            throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
        }
    }
}

//---------------------------------------------------------------
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName + ": Tensors "
                                       + firstName + " & " + secondName
                                       + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(armnn::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
                                       + firstName + " & " + secondName
                                       + " does not match the output shape");
    }
}
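
// Illustrative example: shapes [1,2,3] and [4,2,1] broadcast to [4,2,3]; shapes [1,2,3] and
// [4,2,2] are rejected because the mismatching trailing dimensions are both greater than one.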

//---------------------------------------------------------------
void ValidateDataTypes(const TensorInfo& info,
                       const std::vector<armnn::DataType>& supportedTypes,
                       std::string const& descName)
{
    auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
    if (iterator == supportedTypes.end())
    {
        throw InvalidArgumentException(descName + ": Tensor type is not supported.");
    }
}

//---------------------------------------------------------------
void ValidateTensorDataTypesMatch(const TensorInfo& first,
                                  const TensorInfo& second,
                                  std::string const& descName,
                                  std::string const& firstName,
                                  std::string const& secondName)
{
    if (first.GetDataType() != second.GetDataType())
    {
        throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
                                       " must have identical data types.");
    }
}

//---------------------------------------------------------------
void ValidateTensorNumElementsMatch(const TensorInfo& first,
                                    const TensorInfo& second,
                                    std::string const& descName,
                                    std::string const& firstName,
                                    std::string const& secondName)
{
    if (first.GetNumElements() != second.GetNumElements())
    {
        throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
                                       " must have the same number of elements.");
    }
}

void ValidateWeightDataType(const TensorInfo& inputInfo,
                            const TensorInfo& weightInfo,
                            const std::string& descName)
{
    const DataType inputType = inputInfo.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        const std::vector<DataType> validTypes =
        {
            DataType::QAsymmS8,
            DataType::QAsymmU8,
            DataType::QSymmS8
        };

        ValidateDataTypes(weightInfo, validTypes, descName);
    }
    else
    {
        ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
    }
}

void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
                                          const std::string& descName,
                                          const std::string& tensorName)
{
    const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
    if (!quantizationDim.has_value())
    {
        throw InvalidArgumentException(fmt::format("{0}: Quantization dimension for per-axis quantization "
                                                   "not set on tensor {1}.", descName, tensorName));
    }
}

void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
                                       const std::string& descName,
                                       const std::string& tensorName)
{
    int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
    if (quantizationOffset != 0)
    {
        throw InvalidArgumentException(fmt::format(
            "{0}: Quantization offset for per-axis quantization expected to be 0 on tensor {1}, but got: {2}",
            descName, tensorName, quantizationOffset));
    }
}

void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
                                 const TensorInfo& outputInfo,
                                 const TensorInfo& weightInfo,
                                 const Optional<TensorInfo>& optionalBiasInfo,
                                 const std::string& descName)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        const DataType inputDataType  = inputInfo.GetDataType();
        const DataType outputDataType = outputInfo.GetDataType();

        const bool canHavePerAxisQuantization = (IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;

        if (!canHavePerAxisQuantization)
        {
            throw InvalidArgumentException(fmt::format(
                "{0}: Per-axis quantization parameters set on tensor {1}, but data type does not support "
                "per-axis quantization.", descName, "weight"));
        }

        ValidPerAxisQuantizedDataType(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");

        if (optionalBiasInfo.has_value())
        {
            const TensorInfo& biasInfo = optionalBiasInfo.value();
            if (!biasInfo.HasPerAxisQuantization())
            {
                throw InvalidArgumentException(fmt::format(
                    "{}: Per-axis quantization parameters not set on bias tensor, "
                    "despite being set on weight tensor.", descName));
            }

            ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
            ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
            ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
        }
    }
}

} // anonymous namespace

void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
                                            unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}

//---------------------------------------------------------------
void MapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MapQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 0);

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(
                fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
        }
    }
}

//---------------------------------------------------------------
void UnmapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"UnmapQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 0);

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(
                fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
        }
    }
}

//---------------------------------------------------------------
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MemCopyQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(fmt::format(
            "{0}: Number of inputs ({1}) does not match the number of outputs ({2}).",
            descriptorName, m_Inputs.size(), m_Outputs.size()));
    }

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(fmt::format(
                "{0}: Invalid NULL input {1}.", descriptorName, i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(fmt::format("{0}: Invalid NULL output {1}", descriptorName, i));
        }
    }
}

//---------------------------------------------------------------
void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor", 1);

    if (workloadInfo.m_InputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of input infos ({}) is not 1.",
                                                   workloadInfo.m_InputTensorInfos.size()));
    }

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(fmt::format(
            "Number of input infos ({0}) does not match the number of output infos ({1})",
            workloadInfo.m_InputTensorInfos.size(), workloadInfo.m_OutputTensorInfos.size()));
    }

    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(fmt::format(
                "Number of elements for tensor input and output {} does not match", i));
        }
    }

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(fmt::format(
            "Number of inputs ({0}) does not match the number of outputs ({1})",
            m_Inputs.size(), m_Outputs.size()));
    }

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(fmt::format("Invalid null input {}", i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(fmt::format("Invalid null output {}", i));
        }
    }
}

//---------------------------------------------------------------
void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
    }

    if (m_Outputs.size() != 0)
    {
        throw InvalidArgumentException(fmt::format("Number of outputs ({}) is not 0.", m_Outputs.size()));
    }

    if (!m_Inputs[0])
    {
        throw InvalidArgumentException(fmt::format("Invalid null input 0"));
    }
}

//---------------------------------------------------------------
void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ActivationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}

void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ArgMinMaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    if (outputTensorInfo.GetDataType() != DataType::Signed32 &&
        outputTensorInfo.GetDataType() != DataType::Signed64)
    {
        throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32 or Int64.");
    }

    std::vector<DataType> supportedInputTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16,
        DataType::Signed32,
        DataType::Signed64
    };

    ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);

    auto inputShape = inputTensorInfo.GetShape();
    auto outputShape = outputTensorInfo.GetShape();

    auto inputNumDimensions = inputShape.GetNumDimensions();
    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);

    const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};

    // 1D input shape results in scalar output shape
    if (inputShape.GetNumDimensions() == 1)
    {
        if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
        {
            throw InvalidArgumentException(descriptorName + outputShapeError);
        }
    }
    else
    {
        for (unsigned int i = 0; i < unsignedAxis; ++i)
        {
            if (outputShape[i] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }

        for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
        {
            if (outputShape[i - 1] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }
    }
}
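
// Illustrative note: the reduced axis is removed from the output shape, so an input of shape
// [2,3,4] with axis 1 is expected to produce an output of shape [2,4] (Signed32 or Signed64).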

void CastQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"CastQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16,
        DataType::Signed32,
        DataType::Signed64
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}

void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}

void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SplitterQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::Boolean,
        DataType::Signed32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
        ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

        const std::string outputName = "output_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
    }

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            descriptorName + ": Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    // The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for (unsigned int w = 0; w < m_ViewOrigins.size(); ++w)
    {
        // Checks that the dimensionality of the input is the same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin has to "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                                               "be smaller than or equal to the size of the input in that coord.");
            }
        }
    }
}

void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConcatQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
    }

    if (m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
    }

    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            descriptorName + ": Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    // The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for (unsigned int w = 0; w < m_ViewOrigins.size(); ++w)
    {
        // Checks that the dimensionality of the output is the same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin has to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        // Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                                               "be smaller than or equal to the size of the output in that coord.");
            }
        }
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::Boolean,
        DataType::Signed32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

        const std::string inputName = "input_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
    }
}

void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"StackQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
    {
        throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
    }

    // All inputs must have the same shape, which is defined in parameters
    const TensorShape& inputShape = m_Parameters.m_InputShape;
    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
        {
            throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
        }
    }

    if (inputShape.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
    }

    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
    // since the output tensor has an additional dimension.
    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
                                       "than the number of input dimensions.");
    }

    // Output shape must be as inferred from the input shape
    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
    {
        if (outputShape[i] != inputShape[i])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                       "match shape inferred from input tensor.");
    }

    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
    {
        if (outputShape[i] != inputShape[i-1])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    if (outputShape.GetNumDimensions() > 5)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::Boolean,
        DataType::Signed32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName,
                                     "input_0",
                                     "input_" + std::to_string(i));
    }

    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                 workloadInfo.m_OutputTensorInfos[0],
                                 descriptorName,
                                 "input_0",
                                 "output");
}
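
// Illustrative note: Stack inserts a new dimension of size m_NumInputs at m_Axis, so stacking
// three [4,5] inputs along axis 1 is expected to yield an output of shape [4,3,5].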
1010
Ryan OSheaec6c6802020-06-05 17:17:06 +01001011void FillQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1012{
1013 const std::string descriptorName{"FillQueueDescriptor"};
1014
1015 ValidateNumInputs(workloadInfo, descriptorName, 1);
1016 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1017
1018 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1019 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1020
1021 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 1, "input");
1022
1023 std::vector<DataType> supportedTypes =
1024 {
1025 DataType::BFloat16,
1026 DataType::Float32,
1027 DataType::Float16,
1028 DataType::Signed32
1029 };
1030
1031 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1032}
1033
telsoa014fcda012018-03-09 14:13:49 +00001034void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1035{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001036 const std::string descriptorName{"FullyConnectedQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001037
Matthew Sloyan81beae32021-07-13 19:46:11 +01001038 uint32_t numInputs = 2;
1039 if (m_Parameters.m_BiasEnabled)
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001040 {
Matthew Sloyan81beae32021-07-13 19:46:11 +01001041 numInputs = 3;
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001042 }
Matthew Sloyan81beae32021-07-13 19:46:11 +01001043
Sadik Armaganf0a6dec2021-03-25 07:46:55 +00001044 ValidateNumInputs(workloadInfo, descriptorName, numInputs);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001045 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1046
1047 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1048 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1049
1050 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1051
1052 if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
telsoa014fcda012018-03-09 14:13:49 +00001053 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001054 throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
telsoa014fcda012018-03-09 14:13:49 +00001055 }
1056
Matthew Sloyan81beae32021-07-13 19:46:11 +01001057 TensorInfo weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001058 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");
telsoa014fcda012018-03-09 14:13:49 +00001059
1060 if (m_Parameters.m_BiasEnabled)
1061 {
Matthew Sloyan81beae32021-07-13 19:46:11 +01001062 TensorInfo biasTensorInfo = workloadInfo.m_InputTensorInfos[2];
telsoa01c577f2c2018-08-31 09:22:23 +01001063 // Validates type and quantization values.
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001064 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001065 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1066 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
telsoa014fcda012018-03-09 14:13:49 +00001067 }
1068
Francis Murtagh46c09d02019-05-28 08:15:28 +01001069 // Check the supported data types
1070 std::vector<DataType> supportedTypes =
1071 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001072 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01001073 DataType::Float32,
1074 DataType::Float16,
Francis Murtaghddb1d062020-03-10 13:51:45 +00001075 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001076 DataType::QAsymmU8,
1077 DataType::QSymmS16
Francis Murtagh46c09d02019-05-28 08:15:28 +01001078 };
1079
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001080 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Narumol Prangnawarat57ef0082020-03-26 09:20:43 +00001081
1082 // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
1083 if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1084 {
1085 if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1086 {
1087 throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
1088 "for BFloat16 input.");
1089 }
1090 }
1091 else
1092 {
1093 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1094 }
telsoa014fcda012018-03-09 14:13:49 +00001095}
1096
telsoa014fcda012018-03-09 14:13:49 +00001097void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1098{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001099 const std::string descriptorName{"NormalizationQueueDescriptor"};
1100
1101 ValidateNumInputs(workloadInfo, descriptorName, 1);
1102 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1103
1104 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1105 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Matteo Martincigh2fc70c52019-06-05 14:12:48 +01001106
1107 // Check the supported data types
1108 std::vector<DataType> supportedTypes =
1109 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001110 DataType::BFloat16,
Matteo Martincigh2fc70c52019-06-05 14:12:48 +01001111 DataType::Float16,
1112 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01001113 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001114 DataType::QAsymmU8,
1115 DataType::QSymmS16
Matteo Martincigh2fc70c52019-06-05 14:12:48 +01001116 };
1117
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001118 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Matteo Martincigh2fc70c52019-06-05 14:12:48 +01001119
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001120 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Matteo Martincigh2fc70c52019-06-05 14:12:48 +01001121
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001122 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001123}
1124
1125void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1126{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001127 const std::string descriptorName{"AdditionQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001128
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001129 ValidateNumInputs(workloadInfo, descriptorName, 2);
1130 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1131
1132 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1133 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1134 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1135
1136 std::vector<DataType> supportedTypes =
1137 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001138 DataType::BFloat16,
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001139 DataType::Float32,
Keith Davis0c2eeac2020-02-11 16:51:50 +00001140 DataType::Float16,
1141 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001142 DataType::QAsymmU8,
Teresa Charlinecb6b8e2020-05-22 18:08:23 +01001143 DataType::QSymmS16,
1144 DataType::Signed32
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001145 };
1146
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001147 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1148 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1149 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001150
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001151 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1152 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001153
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001154 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1155 inputTensorInfo1,
1156 outputTensorInfo,
1157 descriptorName,
1158 "input_0",
1159 "input_1");
telsoa014fcda012018-03-09 14:13:49 +00001160}
1161
telsoa014fcda012018-03-09 14:13:49 +00001162void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1163{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001164 const std::string descriptorName{"MultiplicationQueueDescriptor"};
surmeh01bceff2f2018-03-29 16:29:27 +01001165
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001166 ValidateNumInputs(workloadInfo, descriptorName, 2);
1167 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1168
1169 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1170 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1171 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1172
1173 std::vector<DataType> supportedTypes =
1174 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001175 DataType::BFloat16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001176 DataType::Float16,
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001177 DataType::Float32,
Keith Davis67e6c542020-02-19 10:08:33 +00001178 DataType::QAsymmS8,
Sadik Armagan303980c2020-04-17 12:45:14 +01001179 DataType::QAsymmU8,
Teresa Charlinecb6b8e2020-05-22 18:08:23 +01001180 DataType::QSymmS16,
1181 DataType::Signed32
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001182 };
1183
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001184 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1185 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1186 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001187
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001188 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1189 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001190
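    // As for Addition, the inputs only need to be broadcast-compatible with the output shape.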
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001191 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1192 inputTensorInfo1,
1193 outputTensorInfo,
1194 descriptorName,
1195 "input_0",
1196 "input_1");
telsoa014fcda012018-03-09 14:13:49 +00001197}
1198
1199void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1200{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001201 const std::string descriptorName{"BatchNormalizationQueueDescriptor"};
Matteo Martincigh3122bd52019-06-03 16:54:25 +01001202
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001203 ValidateNumInputs(workloadInfo, descriptorName, 1);
1204 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1205
1206 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1207 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Matteo Martincigh3122bd52019-06-03 16:54:25 +01001208
1209 std::vector<DataType> supportedTypes =
1210 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001211 DataType::BFloat16,
Matteo Martincigh3122bd52019-06-03 16:54:25 +01001212 DataType::Float16,
1213 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01001214 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001215 DataType::QAsymmU8,
1216 DataType::QSymmS16
Matteo Martincigh3122bd52019-06-03 16:54:25 +01001217 };
1218
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001219 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1220 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Matteo Martincigh3122bd52019-06-03 16:54:25 +01001221
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001222 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001223 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Matteo Martincigh3122bd52019-06-03 16:54:25 +01001224
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001225 ValidatePointer(m_Mean, descriptorName, "mean");
1226 ValidatePointer(m_Variance, descriptorName, "variance");
1227 ValidatePointer(m_Beta, descriptorName, "beta");
1228 ValidatePointer(m_Gamma, descriptorName, "gamma");
telsoa014fcda012018-03-09 14:13:49 +00001229
Matteo Martincigh3122bd52019-06-03 16:54:25 +01001230 const TensorInfo& mean = m_Mean->GetTensorInfo();
1231 const TensorInfo& variance = m_Variance->GetTensorInfo();
1232 const TensorInfo& beta = m_Beta->GetTensorInfo();
1233 const TensorInfo& gamma = m_Gamma->GetTensorInfo();
telsoa014fcda012018-03-09 14:13:49 +00001234
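    // Mean, variance, beta and gamma must all be 1-D tensors with identical shapes (one value per channel).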
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001235 ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
1236 ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
1237 ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
1238 ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");
telsoa014fcda012018-03-09 14:13:49 +00001239
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001240 ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
1241 ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
1242 ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
telsoa014fcda012018-03-09 14:13:49 +00001243}
1244
1245void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1246{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001247 const std::string descriptorName{"Convolution2dQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001248
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001249 ValidateNumInputs(workloadInfo, descriptorName, 1);
1250 ValidateNumOutputs(workloadInfo, descriptorName, 1);
telsoa014fcda012018-03-09 14:13:49 +00001251
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001252 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1253 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
telsoa014fcda012018-03-09 14:13:49 +00001254
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001255 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1256 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
telsoa014fcda012018-03-09 14:13:49 +00001257
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001258 ValidatePointer(m_Weight, descriptorName, "weight");
telsoa014fcda012018-03-09 14:13:49 +00001259
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001260 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1261 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
telsoa014fcda012018-03-09 14:13:49 +00001262
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00001263 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
telsoa014fcda012018-03-09 14:13:49 +00001264
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00001265 Optional<TensorInfo> optionalBiasTensorInfo;
telsoa014fcda012018-03-09 14:13:49 +00001266 if (m_Parameters.m_BiasEnabled)
1267 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001268 ValidatePointer(m_Bias, descriptorName, "bias");
telsoa014fcda012018-03-09 14:13:49 +00001269
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00001270 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1271 const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001272
1273 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1274 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
telsoa014fcda012018-03-09 14:13:49 +00001275 }
1276
Teresa Charlinf2ed1b82020-11-24 15:11:54 +00001277 if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1278 {
1279 throw InvalidArgumentException(
1280 fmt::format("{}: strideX (provided {}) and strideY (provided {}) "
1281 "cannot be either negative or 0.",
1282 descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1283 }
1284
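    // The weights (and bias, if present) may be per-axis quantized; check the quantization scheme against input and output.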
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00001285 ValidatePerAxisQuantization(inputTensorInfo,
1286 outputTensorInfo,
1287 weightTensorInfo,
1288 optionalBiasTensorInfo,
1289 descriptorName);
1290
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001291 std::vector<DataType> supportedTypes =
1292 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001293 DataType::BFloat16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001294 DataType::Float16,
Ruomei Yan88d44b82019-05-23 14:29:06 +01001295 DataType::Float32,
Keith Davis0c2eeac2020-02-11 16:51:50 +00001296 DataType::QAsymmS8,
Francis Murtaghddb1d062020-03-10 13:51:45 +00001297 DataType::QAsymmU8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001298 DataType::QSymmS16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001299 DataType::QSymmS8
Ruomei Yan88d44b82019-05-23 14:29:06 +01001300 };
1301
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001302 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Narumol Prangnawarat57ef0082020-03-26 09:20:43 +00001303
1304 // For Convolution2d, we allow BFloat16 input with Float32 output as an optimization.
1305 if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1306 {
1307 if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1308 {
1309 throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16 or Float32 "
1310 "for BFloat16 input.");
1311 }
1312 }
1313 else
1314 {
1315 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1316 }
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001317}
Ruomei Yan88d44b82019-05-23 14:29:06 +01001318
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001319void Convolution3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1320{
1321 const std::string descriptorName{"Convolution3dQueueDescriptor"};
1322
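    // Unlike Convolution2d, the weights and the optional bias arrive as extra workload inputs (indices 1 and 2) instead of constant tensor handles.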
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001323 uint32_t numInputs = 2;
1324 if (m_Parameters.m_BiasEnabled)
1325 {
1326 numInputs = 3;
1327 }
1328 ValidateNumInputs(workloadInfo, descriptorName, numInputs);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001329 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1330
1331 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1332 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1333
1334 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 5, "input");
1335 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 5, "output");
1336
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001337 const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001338 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 5, "weight");
1339
1340 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1341
1342 Optional<TensorInfo> optionalBiasTensorInfo;
1343 if (m_Parameters.m_BiasEnabled)
1344 {
Matthew Sloyan5d7b0a32021-10-18 13:07:49 +01001345 optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
Matthew Sloyanb63a3112021-09-08 13:05:51 +01001346 const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1347
1348 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1349 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1350 }
1351
1352 if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 || m_Parameters.m_StrideZ <= 0 )
1353 {
1354 throw InvalidArgumentException(
1355 fmt::format("{}: strideX (provided {}), strideY (provided {}) or strideZ (provided {}) "
1356 "cannot be either negative or 0.",
1357 descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY, m_Parameters.m_StrideZ));
1358 }
1359
1360 ValidatePerAxisQuantization(inputTensorInfo,
1361 outputTensorInfo,
1362 weightTensorInfo,
1363 optionalBiasTensorInfo,
1364 descriptorName);
1365
1366 std::vector<DataType> supportedTypes =
1367 {
1368 DataType::BFloat16,
1369 DataType::Float16,
1370 DataType::Float32,
1371 DataType::QAsymmS8,
1372 DataType::QAsymmU8,
1373 DataType::QSymmS16,
1374 DataType::QSymmS8
1375 };
1376
1377 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1378 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1379}
1380
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001381void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1382{
1383 const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
1384
1385 ValidateNumInputs(workloadInfo, descriptorName, 1);
1386 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1387
1388 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1389 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1390
1391 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1392 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1393
1394 ValidatePointer(m_Weight, descriptorName, "weight");
1395
1396 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1397 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1398
1399 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1400 {
1401 throw InvalidArgumentException(
James Ward47fce872020-09-10 11:57:28 +01001402 fmt::format("{}: dilationX (provided {}) and dilationY (provided {}) "
1403 "cannot be smaller than 1.",
1404 descriptorName, m_Parameters.m_DilationX, m_Parameters.m_DilationY));
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001405 }
1406
Teresa Charlinf2ed1b82020-11-24 15:11:54 +00001407 if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1408 {
1409 throw InvalidArgumentException(
1410 fmt::format("{}: strideX (provided {}) and strideY (provided {}) "
1411 "cannot be either negative or 0.",
1412 descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1413 }
1414
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001415 const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1416
Jan Eilers53ef7952021-06-02 12:01:25 +01001417 // Expected weight shape: [ 1, H, W, I*M ] - This shape does NOT depend on the data layout
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001418 // inputChannels * channelMultiplier should be equal to outputChannels.
Jan Eilers53ef7952021-06-02 12:01:25 +01001419 const unsigned int numWeightOutputChannels = weightTensorInfo.GetShape()[3]; // I*M=Cout
1420 const unsigned int numOutputChannels = outputTensorInfo.GetShape()[channelIndex];
1421 if (numWeightOutputChannels != numOutputChannels)
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001422 {
James Ward47fce872020-09-10 11:57:28 +01001423 throw InvalidArgumentException(fmt::format(
Jan Eilers53ef7952021-06-02 12:01:25 +01001424 "{0}: The weight format in armnn is expected to be [1, H, W, Cout]. "
1425 "But the 4th dimension is not equal to Cout. Cout = {1}. Provided weight shape: [{2}, {3}, {4}, {5}]",
1426 descriptorName,
1427 numOutputChannels,
1428 weightTensorInfo.GetShape()[0],
1429 weightTensorInfo.GetShape()[1],
1430 weightTensorInfo.GetShape()[2],
1431 weightTensorInfo.GetShape()[3]));
1432 }
1433 if (weightTensorInfo.GetShape()[0] != 1)
1434 {
1435 throw InvalidArgumentException(fmt::format(
1436 "{0}: The weight format in armnn is expected to be [1, H, W, Cout]."
1437 "But first dimension is not equal to 1. Provided weight shape: [{1}, {2}, {3}, {4}]",
1438 descriptorName,
1439 weightTensorInfo.GetShape()[0],
1440 weightTensorInfo.GetShape()[1],
1441 weightTensorInfo.GetShape()[2],
1442 weightTensorInfo.GetShape()[3]));
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001443 }
1444
Teresa Charlind8df0262019-11-11 12:28:15 +00001445 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001446
Teresa Charlind8df0262019-11-11 12:28:15 +00001447 Optional<TensorInfo> optionalBiasTensorInfo;
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001448 if (m_Parameters.m_BiasEnabled)
1449 {
1450 ValidatePointer(m_Bias, descriptorName, "bias");
1451
Teresa Charlind8df0262019-11-11 12:28:15 +00001452 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1453 const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001454
1455 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1456 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1457 }
Teresa Charlind8df0262019-11-11 12:28:15 +00001458 ValidatePerAxisQuantization(inputTensorInfo,
1459 outputTensorInfo,
1460 weightTensorInfo,
1461 optionalBiasTensorInfo,
1462 descriptorName);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001463
1464 std::vector<DataType> supportedTypes =
1465 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001466 DataType::BFloat16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001467 DataType::Float16,
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001468 DataType::Float32,
Keith Davis0c2eeac2020-02-11 16:51:50 +00001469 DataType::QAsymmS8,
Sadik Armagan303980c2020-04-17 12:45:14 +01001470 DataType::QAsymmU8,
1471 DataType::QSymmS16
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001472 };
1473
1474 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1475 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001476}
1477
1478void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1479{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001480 const std::string descriptorName{"PermuteQueueDescriptor"};
1481
1482 ValidateNumInputs(workloadInfo, descriptorName, 1);
1483 ValidateNumOutputs(workloadInfo, descriptorName, 1);
telsoa014fcda012018-03-09 14:13:49 +00001484
1485 const PermutationVector& mapping = m_Parameters.m_DimMappings;
1486
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001487 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1488 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
telsoa014fcda012018-03-09 14:13:49 +00001489
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001490 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1491 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
telsoa014fcda012018-03-09 14:13:49 +00001492
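    // Every source dimension i must map onto destination dimension mapping[i] with the same size.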
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001493 for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
telsoa014fcda012018-03-09 14:13:49 +00001494 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001495 if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
telsoa014fcda012018-03-09 14:13:49 +00001496 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001497 throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1498 " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1499 "must match dst dimension " + to_string(mapping[i]) +
1500 " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
telsoa014fcda012018-03-09 14:13:49 +00001501 }
1502 }
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001503
1504 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001505}
1506
1507void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1508{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001509 const std::string descriptorName{"Pooling2dQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001510
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001511 ValidateNumInputs(workloadInfo, descriptorName, 1);
1512 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1513
1514 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1515 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1516
1517 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1518 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
Teresa Charlina3b20472019-06-06 11:12:32 +01001519
1520 std::vector<DataType> supportedTypes =
1521 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001522 DataType::BFloat16,
Teresa Charlina3b20472019-06-06 11:12:32 +01001523 DataType::Float32,
1524 DataType::Float16,
Keith Davis0c2eeac2020-02-11 16:51:50 +00001525 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001526 DataType::QAsymmU8,
1527 DataType::QSymmS16
Teresa Charlina3b20472019-06-06 11:12:32 +01001528 };
1529
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001530 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1531 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001532}
1533
Tamás Nyíri7b885b32021-10-26 14:47:57 +01001534void Pooling3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1535{
1536 const std::string descriptorName{"Pooling3dQueueDescriptor"};
1537
1538 ValidateNumInputs(workloadInfo, descriptorName, 1);
1539 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1540
1541 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1542 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1543
1544 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 5, "input");
1545 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 5, "output");
1546
1547 std::vector<DataType> supportedTypes =
1548 {
1549 DataType::BFloat16,
1550 DataType::Float32,
1551 DataType::Float16,
1552 DataType::QAsymmS8,
1553 DataType::QAsymmU8,
1554 DataType::QSymmS16
1555 };
1556
1557 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1558 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1559}
1560
1561
telsoa014fcda012018-03-09 14:13:49 +00001562void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1563{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001564 const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001565
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001566 ValidateNumInputs(workloadInfo, descriptorName, 1);
1567 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1568
1569 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1570 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1571
1572 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1573 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
telsoa014fcda012018-03-09 14:13:49 +00001574
Ellen Norris-Thompson3cb85f32019-06-17 11:32:49 +01001575 std::vector<DataType> supportedTypes =
Teresa Charlin970f43b2019-07-01 13:51:07 +01001576 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001577 DataType::BFloat16,
Teresa Charlin970f43b2019-07-01 13:51:07 +01001578 DataType::Float16,
1579 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01001580 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001581 DataType::QAsymmU8,
1582 DataType::QSymmS16
Teresa Charlin970f43b2019-07-01 13:51:07 +01001583 };
Ellen Norris-Thompson3cb85f32019-06-17 11:32:49 +01001584
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001585 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1586 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Ellen Norris-Thompson3cb85f32019-06-17 11:32:49 +01001587
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001588 // ResizeBilinear only changes width and height: batch and channel count must match.
1589 const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1590 const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
Teresa Charlin970f43b2019-07-01 13:51:07 +01001591 if (inputBatchSize != outputBatchSize)
telsoa014fcda012018-03-09 14:13:49 +00001592 {
Teresa Charlin970f43b2019-07-01 13:51:07 +01001593 throw InvalidArgumentException(
James Ward47fce872020-09-10 11:57:28 +01001594 fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1595 descriptorName, inputBatchSize, outputBatchSize));
telsoa014fcda012018-03-09 14:13:49 +00001596 }
1597
Teresa Charlin970f43b2019-07-01 13:51:07 +01001598 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001599 const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1600 const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
Teresa Charlin970f43b2019-07-01 13:51:07 +01001601 if (inputChannelCount != outputChannelCount)
telsoa014fcda012018-03-09 14:13:49 +00001602 {
Teresa Charlin970f43b2019-07-01 13:51:07 +01001603 throw InvalidArgumentException(
James Ward47fce872020-09-10 11:57:28 +01001604 fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1605 descriptorName, inputChannelCount, outputChannelCount));
Teresa Charlin970f43b2019-07-01 13:51:07 +01001606 }
1607}
1608
1609void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1610{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001611 const std::string descriptorName{"ResizeQueueDescriptor"};
Teresa Charlin970f43b2019-07-01 13:51:07 +01001612
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001613 ValidateNumInputs(workloadInfo, descriptorName, 1);
1614 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1615
1616 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1617 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1618
1619 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1620 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
Teresa Charlin970f43b2019-07-01 13:51:07 +01001621
1622 std::vector<DataType> supportedTypes =
1623 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001624 DataType::BFloat16,
Teresa Charlin970f43b2019-07-01 13:51:07 +01001625 DataType::Float16,
1626 DataType::Float32,
Keith Davis67e6c542020-02-19 10:08:33 +00001627 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001628 DataType::QAsymmU8,
1629 DataType::QSymmS16
Teresa Charlin970f43b2019-07-01 13:51:07 +01001630 };
1631
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001632 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1633 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Teresa Charlin970f43b2019-07-01 13:51:07 +01001634
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001635 // Resize only changes width and height: batch and channel count must match.
1636 const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1637 const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
Teresa Charlin970f43b2019-07-01 13:51:07 +01001638 if (inputBatchSize != outputBatchSize)
1639 {
1640 throw InvalidArgumentException(
James Ward47fce872020-09-10 11:57:28 +01001641 fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1642 descriptorName, inputBatchSize, outputBatchSize));
Teresa Charlin970f43b2019-07-01 13:51:07 +01001643 }
1644
1645 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001646 const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1647 const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
Teresa Charlin970f43b2019-07-01 13:51:07 +01001648 if (inputChannelCount != outputChannelCount)
1649 {
1650 throw InvalidArgumentException(
James Ward47fce872020-09-10 11:57:28 +01001651 fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1652 descriptorName, inputChannelCount, outputChannelCount));
telsoa014fcda012018-03-09 14:13:49 +00001653 }
1654}
1655
1656void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1657{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001658 const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001659
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001660 ValidateNumInputs(workloadInfo, descriptorName, 1);
1661 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1662
1663 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1664 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1665
1666 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
1667 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1668
1669 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1670
telsoa014fcda012018-03-09 14:13:49 +00001671 if (m_Parameters.m_Min > m_Parameters.m_Max)
1672 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001673 throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
telsoa014fcda012018-03-09 14:13:49 +00001674 }
telsoa014fcda012018-03-09 14:13:49 +00001675}
1676
Kevin Mayce5045a2019-10-02 14:07:47 +01001677void InstanceNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1678{
1679 const std::string descriptorName{"InstanceNormalizationQueueDescriptor"};
1680
1681 ValidateNumInputs(workloadInfo, descriptorName, 1);
1682 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1683
1684 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1685 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1686
1687 if (inputTensorInfo.GetNumDimensions() > 4)
1688 {
1689 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1690 }
1691
1692 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1693
1694 // Check the supported data types
1695 std::vector<DataType> supportedTypes =
1696 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001697 DataType::BFloat16,
Kevin Mayce5045a2019-10-02 14:07:47 +01001698 DataType::Float32,
1699 DataType::Float16
1700 };
1701
1702 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Kevin Mayce5045a2019-10-02 14:07:47 +01001703 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Kevin Mayce5045a2019-10-02 14:07:47 +01001704}
1705
telsoa014fcda012018-03-09 14:13:49 +00001706void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1707{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001708 const std::string descriptorName{"L2NormalizationQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001709
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001710 ValidateNumInputs(workloadInfo, descriptorName, 1);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001711 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1712
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001713 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1714 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1715
Matthew Jackson82b15ed2019-07-25 16:14:30 +01001716 if (inputTensorInfo.GetNumDimensions() > 4)
1717 {
1718 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1719 }
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001720
1721 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001722
1723 // Check the supported data types
1724 std::vector<DataType> supportedTypes =
1725 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001726 DataType::BFloat16,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001727 DataType::Float32,
1728 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001729 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001730 DataType::QAsymmU8,
1731 DataType::QSymmS16
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01001732 };
1733
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001734 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Aron Virginas-Tarf982dea2019-10-11 14:07:53 +01001735 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1736}
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001737
Aron Virginas-Tarf982dea2019-10-11 14:07:53 +01001738void LogSoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1739{
1740 const std::string descriptorName{"LogSoftmaxQueueDescriptor"};
1741
1742 ValidateNumInputs(workloadInfo, descriptorName, 1);
1743 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1744
1745 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1746 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1747
1748 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1749
1750 std::vector<DataType> supportedTypes =
1751 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001752 DataType::BFloat16,
Aron Virginas-Tarf982dea2019-10-11 14:07:53 +01001753 DataType::Float32,
1754 DataType::Float16,
1755 };
1756
1757 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001758 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001759}
1760
1761void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1762{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001763 const std::string descriptorName{"ConstantQueueDescriptor"};
1764
1765 ValidateNumInputs(workloadInfo, descriptorName, 0);
1766 ValidateNumOutputs(workloadInfo, descriptorName, 1);
telsoa014fcda012018-03-09 14:13:49 +00001767
1768 if (!m_LayerOutput)
1769 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001770 throw InvalidArgumentException(descriptorName + ": No const input specified.");
telsoa014fcda012018-03-09 14:13:49 +00001771 }
1772
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001773 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1774 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
Nina Drozd58ef2c62019-05-16 12:09:18 +01001775
1776 // Check the supported data types
1777 std::vector<DataType> supportedTypes =
Nina Drozd2f2778f2019-05-27 10:37:05 +01001778 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001779 DataType::BFloat16,
Nina Drozd2f2778f2019-05-27 10:37:05 +01001780 DataType::Float32,
1781 DataType::Float16,
Keith Davis67e6c542020-02-19 10:08:33 +00001782 DataType::QAsymmS8,
Sadik Armagan303980c2020-04-17 12:45:14 +01001783 DataType::QAsymmU8,
Keith Davis5204aa82020-01-27 15:24:59 +00001784 DataType::QSymmS8,
Sadik Armagan303980c2020-04-17 12:45:14 +01001785 DataType::QSymmS16,
1786 DataType::Signed32
Nina Drozd2f2778f2019-05-27 10:37:05 +01001787 };
Nina Drozd58ef2c62019-05-16 12:09:18 +01001788
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001789 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
telsoa014fcda012018-03-09 14:13:49 +00001790}
1791
1792void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1793{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001794 const std::string descriptorName{"ReshapeQueueDescriptor"};
telsoa014fcda012018-03-09 14:13:49 +00001795
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001796 ValidateNumInputs(workloadInfo, descriptorName, 1);
1797 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1798
1799 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1800 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1801
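    // Reshape only changes the shape metadata, so input and output must hold the same number of elements.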
1802 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Nina Drozd2f2778f2019-05-27 10:37:05 +01001803
1804 // Check the supported data types
1805 std::vector<DataType> supportedTypes =
1806 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001807 DataType::BFloat16,
Nina Drozd2f2778f2019-05-27 10:37:05 +01001808 DataType::Float32,
1809 DataType::Float16,
Keith Davis0c2eeac2020-02-11 16:51:50 +00001810 DataType::QAsymmS8,
Sadik Armagan303980c2020-04-17 12:45:14 +01001811 DataType::QAsymmU8,
1812 DataType::QSymmS16,
Narumol Prangnawarat0c95f4c2020-11-18 16:52:07 +00001813 DataType::Signed32,
1814 DataType::Boolean
Nina Drozd2f2778f2019-05-27 10:37:05 +01001815 };
1816
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001817 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1818 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001819}
1820
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001821void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1822{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001823 const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001824
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001825 ValidateNumInputs(workloadInfo, descriptorName, 1);
1826 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1827
1828 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1829 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1830
1831 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1832 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001833
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001834 if (m_Parameters.m_BlockShape.size() != 2)
1835 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001836 throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001837 }
1838
1839 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1840 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001841 throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
1842 "dimensions as Block Shape.");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001843 }
1844
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001845 const TensorShape& inputShape = inputTensorInfo.GetShape();
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001846
1847 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001848 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001849
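    // After the padding is applied, the input must contain exactly as many elements as the output.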
Matthew Bentham8800c002018-11-19 13:19:28 +00001850 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001851
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001852 const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
1853 widthPad.first + widthPad.second;
1854 const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
1855 heightPad.first + heightPad.second;
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001856
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001857 const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1858 inputShape[dimensionIndices.GetChannelsIndex()];
1859 const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001860
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001861 if (numOutputElements != numInputElements)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001862 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001863 throw InvalidArgumentException(descriptorName + ": Input tensor has " +
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001864 to_string(numInputElements) + " elements after padding but output tensor has " +
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001865 to_string(numOutputElements) + " elements.");
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00001866 }
1867
1868 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001869 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001870 throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
1871 "divisible by Block Shape in all spatial dimensions");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001872 }
nikraj01120522a2019-05-31 11:33:07 +01001873
1874 std::vector<DataType> supportedTypes =
1875 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001876 DataType::BFloat16,
1877 DataType::Float16,
1878 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01001879 DataType::QAsymmS8,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001880 DataType::QAsymmU8,
1881 DataType::QSymmS16
nikraj01120522a2019-05-31 11:33:07 +01001882 };
1883
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001884 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1885 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +00001886}
1887
Keith Davisa57eccb2019-06-14 17:33:22 +01001888void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1889{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001890 const std::string descriptorName{"SpaceToDepthQueueDescriptor"};
Keith Davisa57eccb2019-06-14 17:33:22 +01001891
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001892 ValidateNumInputs(workloadInfo, descriptorName, 1);
1893 ValidateNumOutputs(workloadInfo, descriptorName, 1);
Keith Davisa57eccb2019-06-14 17:33:22 +01001894
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001895 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1896 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1897
1898 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1899 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
Keith Davisa57eccb2019-06-14 17:33:22 +01001900
1901 std::vector<DataType> supportedTypes =
1902 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001903 DataType::BFloat16,
Keith Davisa57eccb2019-06-14 17:33:22 +01001904 DataType::Float32,
1905 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01001906 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001907 DataType::QAsymmU8,
1908 DataType::QSymmS16
Keith Davisa57eccb2019-06-14 17:33:22 +01001909 };
1910
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001911 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1912 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Keith Davisa57eccb2019-06-14 17:33:22 +01001913
Aron Virginas-Tar8a1b2182019-09-19 14:39:37 +01001914 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1915
1916 if (m_Parameters.m_BlockSize == 0)
1917 {
1918 throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
1919 }
1920
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001921 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1922 const unsigned int wIndex = dimensionIndices.GetWidthIndex();
1923 const unsigned int hIndex = dimensionIndices.GetHeightIndex();
1924 const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
Keith Davisa57eccb2019-06-14 17:33:22 +01001925
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001926 const TensorShape& inputShape = inputTensorInfo.GetShape();
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001927 if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
Keith Davisa57eccb2019-06-14 17:33:22 +01001928 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001929 throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
1930 "by block size in all spatial dimensions");
Keith Davisa57eccb2019-06-14 17:33:22 +01001931 }
Aron Virginas-Tar8a1b2182019-09-19 14:39:37 +01001932
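    // SpaceToDepth multiplies the channel count by blockSize * blockSize, so the output depth must be divisible by it.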
1933 const TensorShape& outputShape = outputTensorInfo.GetShape();
1934 if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
1935 {
1936 throw InvalidArgumentException(descriptorName + ": The depth of the output tensor "
1937 "must be divisible by the square of the block size.");
1938 }
Keith Davisa57eccb2019-06-14 17:33:22 +01001939}
1940
telsoa014fcda012018-03-09 14:13:49 +00001941void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1942{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001943 const std::string descriptorName{"FloorQueueDescriptor"};
James Conroy83735b12019-05-30 16:36:59 +01001944
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001945 ValidateNumInputs(workloadInfo, descriptorName, 1);
1946 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1947
1948 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1949 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
James Conroy83735b12019-05-30 16:36:59 +01001950
1951 std::vector<DataType> supportedTypes =
1952 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001953 DataType::BFloat16,
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001954 DataType::Float32,
Matthew Jackson9bff1442019-09-12 09:08:23 +01001955 DataType::Float16,
Teresa Charlin38b72e82022-05-04 17:54:19 +01001956 DataType::QSymmS16,
1957 DataType::Signed32
James Conroy83735b12019-05-30 16:36:59 +01001958 };
1959
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001960 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Matthew Sloyan81beae32021-07-13 19:46:11 +01001961 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1962 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1963 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa014fcda012018-03-09 14:13:49 +00001964}
1965
telsoa01c577f2c2018-08-31 09:22:23 +01001966void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1967{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001968 // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1969
1970 const std::string descriptorName{"LstmQueueDescriptor"};
1971
1972 // check dimensions of all inputs and outputs
1973 if (workloadInfo.m_InputTensorInfos.size() != 3)
1974 {
1975 throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1976 }
1977 if (workloadInfo.m_OutputTensorInfos.size() != 4)
1978 {
1979 throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1980 }
1981
1982 std::vector<DataType> supportedTypes =
1983 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00001984 DataType::BFloat16,
Conor Kennedyb9971c92019-05-07 07:14:23 +01001985 DataType::Float16,
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001986 DataType::Float32,
Derek Lambertif90c56d2020-01-10 17:14:08 +00001987 DataType::QSymmS16
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01001988 };
1989
Jan Eilers38e05bd2019-06-26 13:10:09 +01001990 // check that the first input has a supported data type and that every other input and output matches it
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001991 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1992
Jan Eilers38e05bd2019-06-26 13:10:09 +01001993 // type matches all other inputs
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001994 for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
Jan Eilers38e05bd2019-06-26 13:10:09 +01001995 {
1996 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1997 workloadInfo.m_InputTensorInfos[i],
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01001998 descriptorName,
1999 "input_0",
2000 "input_" + std::to_string(i));
Jan Eilers38e05bd2019-06-26 13:10:09 +01002001 }
2002 // type matches all other outputs
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002003 for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
Jan Eilers38e05bd2019-06-26 13:10:09 +01002004 {
2005 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
2006 workloadInfo.m_OutputTensorInfos[i],
2007 "LstmQueueDescriptor",
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002008 "input_0",
2009 "output_" + std::to_string(i));
Jan Eilers38e05bd2019-06-26 13:10:09 +01002010 }
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +01002011
janeil0117d8d852019-11-15 15:00:16 +00002012 // Making sure clipping parameters have valid values.
2013 // == 0 means no clipping
2014 // > 0 means clipping
2015 if (m_Parameters.m_ClippingThresCell < 0.0f)
2016 {
2017 throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
2018 }
2019 if (m_Parameters.m_ClippingThresProj < 0.0f)
2020 {
2021 throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
2022 }
2023
Jan Eilers38e05bd2019-06-26 13:10:09 +01002024 // Inferring batch size, number of outputs and number of cells from the inputs.
Jan Eilers38e05bd2019-06-26 13:10:09 +01002025 const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
2026 const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
2027 ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
2028 const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
2029 ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
2030 const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
2031
Jan Eilers38e05bd2019-06-26 13:10:09 +01002032 // input tensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002033 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
2034 descriptorName + " input_0");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002035 // outputStateInTensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002036 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
2037 descriptorName + " input_1");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002038 // cellStateInTensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002039 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
2040 descriptorName + " input_2");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002041 // scratchBufferTensor
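    // With CIFG the input gate is coupled to the forget gate, so only 3 gates per cell need scratch space instead of 4.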
2042 unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002043 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
2044 descriptorName + " output_0");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002045 // outputStateOutTensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002046 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
2047 descriptorName + " output_1");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002048 // cellStateOutTensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002049 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
2050 descriptorName + " output_2");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002051 // outputTensor
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002052 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
2053 descriptorName + " output_3");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002054
Jan Eilers38e05bd2019-06-26 13:10:09 +01002055 // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
2056 if ( m_InputToInputWeights )
2057 {
2058 ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
2059 (n_cell * n_input), "InputToInputWeights");
2060 }
2061
2062 ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
2063 ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
2064 (n_cell * n_input), "InputToForgetWeights");
2065
2066 ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
2067 ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
2068 (n_cell * n_input), "InputToCellWeights");
2069
2070 if ( m_RecurrentToInputWeights )
2071 {
2072 ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
2073 (n_cell * n_output), "RecurrentToInputWeights");
2074 }
2075
2076 ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
2077 ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
2078 (n_cell * n_output), "RecurrentToForgetWeights");
2079
2080 ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
2081 ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
2082 (n_cell * n_output), "RecurrentToCellWeights");
2083
2084 // Make sure the input-gate's parameters are either both present (regular
2085 // LSTM) or not at all (CIFG-LSTM), and that CifgEnabled is set accordingly.
2086 bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
2087 !m_Parameters.m_CifgEnabled) ||
2088 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
2089 m_Parameters.m_CifgEnabled));
2090 if (!cifg_weights_all_or_none)
2091 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002092 throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
2093 "RecurrentToInputWeights must either both be present (regular LSTM) "
2094 "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
2095 "accordingly.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002096 }
2097
2098 if ( m_CellToInputWeights )
2099 {
2100 ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
2101 n_cell, "CellToInputWeights");
2102 }
2103 if ( m_CellToForgetWeights )
2104 {
2105 ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
2106 n_cell, "CellToForgetWeights");
2107 }
2108 if ( m_CellToOutputWeights )
2109 {
2110 ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
2111 n_cell, "CellToOutputWeights");
2112 }
2113
2114 // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
2115 bool peephole_weights_all_or_none =
2116 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
2117 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
2118 || ( !m_CellToInputWeights && !m_CellToForgetWeights
2119 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
2120 if (!peephole_weights_all_or_none)
2121 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002122 throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002123 }
2124
2125 // Make sure the input gate bias is present only when not a CIFG-LSTM.
2126 if (m_Parameters.m_CifgEnabled)
2127 {
2128 if (m_InputGateBias)
2129 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002130 throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002131 }
2132 }
2133 else
2134 {
2135 if (!m_InputGateBias)
2136 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002137 throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
2138 "must be present.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002139 }
2140 ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
2141 n_cell, "InputGateBias");
2142 }
2143
2144 ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
2145 ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
2146
2147 ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
2148 ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
2149
2150 ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
2151 ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
2152
2153 if (m_ProjectionWeights)
2154 {
2155 ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
2156 (n_cell * n_output), "ProjectionWeights");
2157 }
2158 if (m_ProjectionBias)
2159 {
2160 ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
2161 }
2162
2163 // Making sure the projection tensors are consistent:
2164 // 1) If projection weight is not present, then projection bias should not be
2165 // present.
2166 // 2) If projection weight is present, then projection bias is optional.
2167    bool projection_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
2168 !m_Parameters.m_ProjectionEnabled)
2169 || (m_ProjectionWeights && !m_ProjectionBias &&
2170 m_Parameters.m_ProjectionEnabled)
2171 || (m_ProjectionWeights && m_ProjectionBias &&
2172 m_Parameters.m_ProjectionEnabled));
2173    if (!projection_tensors_consistent)
2174 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002175 throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002176 }
2177
2178    // The four layer normalization weights must either all be present or all be absent. Additionally, if
2179    // CIFG is used, the input layer normalization weights tensor is omitted and the remaining three weights
2180    // must either all be present or all be absent. Layer normalization is used only when all of the required
2181    // layer normalization weights are present.
2182 if (m_InputLayerNormWeights)
2183 {
2184 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
2185 }
2186 if (m_ForgetLayerNormWeights)
2187 {
2188 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2189 }
2190 if (m_CellLayerNormWeights)
2191 {
2192 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2193 }
2194 if (m_OutputLayerNormWeights)
2195 {
2196 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2197 }
2198
Jan Eilers38e05bd2019-06-26 13:10:09 +01002199 if (m_Parameters.m_LayerNormEnabled)
2200 {
2201 if (!m_Parameters.m_CifgEnabled)
2202 {
2203 if (!m_InputLayerNormWeights)
2204 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002205 throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
2206                                            "disabled but InputLayerNormWeights are not present.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002207 }
2208 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
2209 1, n_cell, "InputLayerNormWeights");
2210 }
2211 else if (m_InputLayerNormWeights)
2212 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002213            throw InvalidArgumentException(descriptorName + ": InputLayerNormWeights are present while CIFG is "
2214                                            "enabled.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002215 }
2216
2217 ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
2218 "ForgetLayerNormWeights");
2219 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2220
2221 ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
2222 "OutputLayerNormWeights");
2223 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2224
2225 ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
2226 "CellLayerNormWeights");
2227 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2228 }
2229 else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
2230 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002231 throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
2232 "normalisation weights are present.");
Jan Eilers38e05bd2019-06-26 13:10:09 +01002233 }
telsoa01c577f2c2018-08-31 09:22:23 +01002234}
2235
Narumol Prangnawarat7ddbbae2020-03-13 10:26:05 +00002236void ConvertBf16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2237{
2238 const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
2239
2240 ValidateNumInputs(workloadInfo, descriptorName, 1);
2241 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2242
2243 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2244 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2245
2246 if (inputTensorInfo.GetDataType() != DataType::BFloat16)
2247 {
2248 throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
2249 }
2250
2251 if (outputTensorInfo.GetDataType() != DataType::Float32)
2252 {
2253 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2254 }
2255
2256 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2257}
2258
Narumol Prangnawaratea54a012020-03-16 16:36:10 +00002259void ConvertFp32ToBf16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2260{
2261 const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
2262
2263 ValidateNumInputs(workloadInfo, descriptorName, 1);
2264 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2265
2266 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2267 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2268
2269 if (inputTensorInfo.GetDataType() != DataType::Float32)
2270 {
2271 throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2272 }
2273
2274 if (outputTensorInfo.GetDataType() != DataType::BFloat16)
2275 {
2276 throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
2277 }
2278
2279 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2280}
2281
telsoa01c577f2c2018-08-31 09:22:23 +01002282void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2283{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002284 const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
telsoa01c577f2c2018-08-31 09:22:23 +01002285
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002286 ValidateNumInputs(workloadInfo, descriptorName, 1);
2287 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2288
2289 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2290 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2291
2292 if (inputTensorInfo.GetDataType() != DataType::Float32)
telsoa01c577f2c2018-08-31 09:22:23 +01002293 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002294 throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
telsoa01c577f2c2018-08-31 09:22:23 +01002295 }
2296
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002297 if (outputTensorInfo.GetDataType() != DataType::Float16)
telsoa01c577f2c2018-08-31 09:22:23 +01002298 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002299 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
telsoa01c577f2c2018-08-31 09:22:23 +01002300 }
2301
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002302 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa01c577f2c2018-08-31 09:22:23 +01002303}
2304
2305void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2306{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002307 const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
telsoa01c577f2c2018-08-31 09:22:23 +01002308
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002309 ValidateNumInputs(workloadInfo, descriptorName, 1);
2310 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2311
2312 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2313 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2314
2315 if (inputTensorInfo.GetDataType() != DataType::Float16)
telsoa01c577f2c2018-08-31 09:22:23 +01002316 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002317 throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
telsoa01c577f2c2018-08-31 09:22:23 +01002318 }
2319
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002320 if (outputTensorInfo.GetDataType() != DataType::Float32)
2321 {
2322 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2323 }
2324
2325 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
telsoa01c577f2c2018-08-31 09:22:23 +01002326}
2327
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002328void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2329{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002330 const std::string descriptorName{"DivisionQueueDescriptor"};
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002331
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002332 ValidateNumInputs(workloadInfo, descriptorName, 2);
2333 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2334
2335 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2336 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2337 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2338
2339 std::vector<DataType> supportedTypes =
2340 {
Sadik Armagan303980c2020-04-17 12:45:14 +01002341 DataType::BFloat16,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002342 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01002343 DataType::Float32,
2344 DataType::QAsymmS8,
2345 DataType::QAsymmU8,
Teresa Charlinecb6b8e2020-05-22 18:08:23 +01002346 DataType::QSymmS16,
2347 DataType::Signed32
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002348 };
2349
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002350 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2351 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2352 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002353
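    // Broadcasting between the two inputs is permitted: for example, a [3, 4] tensor divided by a [1, 4]
    // tensor gives a [3, 4] output, with the size-1 dimension stretched to match.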
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002354 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2355 inputTensorInfo1,
2356 outputTensorInfo,
2357 descriptorName,
2358 "input_0",
2359 "input_1");
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002360}
2361
David Beckc2044fe2018-09-05 15:00:38 +01002362void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2363{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002364 const std::string descriptorName{"SubtractionQueueDescriptor"};
David Beckc2044fe2018-09-05 15:00:38 +01002365
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002366 ValidateNumInputs(workloadInfo, descriptorName, 2);
2367 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2368
2369 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2370 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2371 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2372
2373 std::vector<DataType> supportedTypes =
2374 {
Sadik Armagan303980c2020-04-17 12:45:14 +01002375 DataType::BFloat16,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002376 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01002377 DataType::Float32,
2378 DataType::QAsymmS8,
2379 DataType::QAsymmU8,
Teresa Charlinecb6b8e2020-05-22 18:08:23 +01002380 DataType::QSymmS16,
2381 DataType::Signed32,
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002382 };
2383
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002384 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2385 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2386 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002387
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002388 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2389 inputTensorInfo1,
2390 outputTensorInfo,
2391 descriptorName,
2392 "input_0",
2393 "input_1");
David Beckc2044fe2018-09-05 15:00:38 +01002394}
2395
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00002396void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2397{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002398 const std::string descriptorName{"MaximumQueueDescriptor"};
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00002399
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002400 ValidateNumInputs(workloadInfo, descriptorName, 2);
2401 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2402
2403 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2404 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2405 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2406
2407 std::vector<DataType> supportedTypes =
2408 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002409 DataType::BFloat16,
Mike Kelly1da02362019-08-01 08:43:57 +01002410 DataType::Float16,
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002411 DataType::Float32,
Keith Davis67e6c542020-02-19 10:08:33 +00002412 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002413 DataType::QAsymmU8,
Sadik Armagan303980c2020-04-17 12:45:14 +01002414 DataType::QSymmS16,
2415 DataType::Signed32
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002416 };
2417
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002418 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2419 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2420 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002421
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002422 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2423 inputTensorInfo1,
2424 outputTensorInfo,
2425 descriptorName,
2426 "input_0",
2427 "input_1");
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00002428}
2429
narpra01a6bf9122018-09-10 09:50:09 +01002430void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2431{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002432 const std::string descriptorName{"MeanQueueDescriptor"};
James Conroy4d1ff582019-06-10 17:06:39 +01002433
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002434 ValidateNumInputs(workloadInfo, descriptorName, 1);
2435 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2436
2437 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2438 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
James Conroy4d1ff582019-06-10 17:06:39 +01002439
2440 std::vector<DataType> supportedTypes =
2441 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002442 DataType::BFloat16,
James Conroy4d1ff582019-06-10 17:06:39 +01002443 DataType::Float32,
2444 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01002445 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002446 DataType::QAsymmU8,
2447 DataType::QSymmS16
James Conroy4d1ff582019-06-10 17:06:39 +01002448 };
narpra01eb061912018-09-10 17:35:27 +01002449
James Conroy4d1ff582019-06-10 17:06:39 +01002450 // First check if input tensor data type is supported, then
2451 // check if this data type matches the output tensor data type
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002452 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2453 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
James Conroy4d1ff582019-06-10 17:06:39 +01002454
narpra0132b90462018-09-13 11:07:48 +01002455 if (m_Parameters.m_KeepDims)
narpra01eb061912018-09-10 17:35:27 +01002456 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002457 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
narpra01eb061912018-09-10 17:35:27 +01002458 }
narpra0132b90462018-09-13 11:07:48 +01002459 else if (m_Parameters.m_Axis.empty())
narpra01eb061912018-09-10 17:35:27 +01002460 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002461 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
narpra01eb061912018-09-10 17:35:27 +01002462 }
2463 else
2464 {
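        // For example, reducing a 4D input over two axes with KeepDims == false leaves 4 - 2 = 2 output
        // dimensions; reducing over every axis would give 0, so the expression below falls back to a rank of 1.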
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002465 unsigned int outputDim =
Matthew Sloyan171214c2020-09-09 09:07:37 +01002466 inputTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002467 ValidateTensorNumDimensions(outputTensorInfo,
2468 descriptorName,
narpra01eb061912018-09-10 17:35:27 +01002469 outputDim > 0 ? outputDim : 1,
2470 "output");
2471 }
narpra01a6bf9122018-09-10 09:50:09 +01002472}
2473
jimfly012c9322a2018-09-19 10:59:49 +01002474void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2475{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002476 const std::string descriptorName{"PadQueueDescriptor"};
jimfly012c9322a2018-09-19 10:59:49 +01002477
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002478 ValidateNumInputs(workloadInfo, descriptorName, 1);
2479 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2480
2481 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2482 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Nina Drozd661dfa72018-10-02 11:14:17 +01002483
jimfly012c9322a2018-09-19 10:59:49 +01002484 // input and output should have the same number of dimensions
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002485 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2486
jimfly012c9322a2018-09-19 10:59:49 +01002487    // there should be an entry in the pad list for each dimension of the input tensor
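    // For example, a 4D input requires a PadList with four entries, one (padding before, padding after)
    // pair per dimension.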
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002488 if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
2489        throw InvalidArgumentException(descriptorName + ": PadList should contain one entry for each dimension "
2490                                       "of the input tensor, that is " +
2491                                       std::to_string(inputTensorInfo.GetNumDimensions()) + " entries, " +
2492                                       "not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
jimfly012c9322a2018-09-19 10:59:49 +01002493 }
2494}
2495
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002496void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2497{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002498 const std::string descriptorName{"QuantizeQueueDescriptor"};
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002499
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002500 ValidateNumInputs(workloadInfo, descriptorName, 1);
2501 ValidateNumOutputs(workloadInfo, descriptorName, 1);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002502
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002503 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2504 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2505
Sadik Armagan2208b602019-07-31 16:36:27 +01002506 std::vector<DataType> supportedTypes =
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002507 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002508 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01002509 DataType::Float32,
Keith Davis5e51cd82020-01-29 16:52:59 +00002510 DataType::Float16,
2511 DataType::QSymmS8,
Ryan OShea9add1202020-02-07 10:06:33 +00002512 DataType::QAsymmS8,
Keith Davis5e51cd82020-01-29 16:52:59 +00002513 DataType::QAsymmU8,
2514 DataType::QSymmS16
Sadik Armagan2208b602019-07-31 16:36:27 +01002515 };
2516
2517 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002518
Keith Davis0c2eeac2020-02-11 16:51:50 +00002519 if (!IsQuantizedType(outputTensorInfo.GetDataType()))
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002520 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002521 throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
Derek Lambertia9cca6a2019-03-25 15:41:58 +00002522 }
2523}
2524
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00002525void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2526{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002527 const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};
Francis Murtaghd0dfe172019-06-25 10:57:10 +01002528
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002529 ValidateNumInputs(workloadInfo, descriptorName, 1);
2530 ValidateNumOutputs(workloadInfo, descriptorName, 1);
Francis Murtaghd0dfe172019-06-25 10:57:10 +01002531
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002532 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2533 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Francis Murtaghd0dfe172019-06-25 10:57:10 +01002534
2535 std::vector<DataType> supportedTypes =
2536 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002537 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01002538 DataType::Float32,
2539 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01002540 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002541 DataType::QAsymmU8,
2542 DataType::QSymmS16
Francis Murtaghd0dfe172019-06-25 10:57:10 +01002543 };
2544
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002545 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2546 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00002547}
2548
Conor Kennedy430b5d82018-11-14 15:28:28 +00002549void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2550{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002551 const std::string descriptorName{"StridedSliceQueueDescriptor"};
Conor Kennedy430b5d82018-11-14 15:28:28 +00002552
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002553 ValidateNumInputs(workloadInfo, descriptorName, 1);
2554 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2555
2556 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2557 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Matteo Martincighe851b3d2019-05-28 14:31:20 +01002558
2559 std::vector<DataType> supportedTypes =
2560 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002561 DataType::BFloat16,
Matteo Martincighe851b3d2019-05-28 14:31:20 +01002562 DataType::Float16,
2563 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002564 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002565 DataType::QAsymmU8,
2566 DataType::QSymmS16
Matteo Martincighe851b3d2019-05-28 14:31:20 +01002567 };
2568
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002569 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2570 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Matteo Martincighe851b3d2019-05-28 14:31:20 +01002571
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002572 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Matteo Martincighe851b3d2019-05-28 14:31:20 +01002573
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002574 const uint32_t rank = inputTensorInfo.GetNumDimensions();
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00002575 if (rank > 4)
2576 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002577 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00002578 }
2579
Conor Kennedy430b5d82018-11-14 15:28:28 +00002580 // Begin, End & Stride length must be of rank(input0)
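    // For example, a rank-3 input requires exactly three Begin values, three End values and three Stride values.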
2581 if (m_Parameters.m_Begin.size() != rank)
2582 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002583 throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
Conor Kennedy430b5d82018-11-14 15:28:28 +00002584 }
2585
2586 if (m_Parameters.m_End.size() != rank)
2587 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002588 throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
Conor Kennedy430b5d82018-11-14 15:28:28 +00002589 }
2590
2591 if (m_Parameters.m_Stride.size() != rank)
2592 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002593 throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
Conor Kennedy430b5d82018-11-14 15:28:28 +00002594 }
2595
2596 // Stride entries must be non-zero
2597 for (auto& stride : m_Parameters.m_Stride)
2598 {
2599 if (stride == 0)
2600 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002601 throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
Conor Kennedy430b5d82018-11-14 15:28:28 +00002602 }
2603 }
2604}
2605
kevmay0190539692018-11-29 08:40:19 +00002606void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2607{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002608 const std::string descriptorName{"MinimumQueueDescriptor"};
kevmay0190539692018-11-29 08:40:19 +00002609
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002610 ValidateNumInputs(workloadInfo, descriptorName, 2);
2611 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2612
2613 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2614 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2615 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2616
2617 std::vector<DataType> supportedTypes =
2618 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002619 DataType::BFloat16,
Mike Kelly1da02362019-08-01 08:43:57 +01002620 DataType::Float16,
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002621 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002622 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002623 DataType::QAsymmU8,
Sadik Armagan303980c2020-04-17 12:45:14 +01002624 DataType::QSymmS16,
2625 DataType::Signed32
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002626 };
2627
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002628 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2629 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2630 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01002631
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002632 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2633 inputTensorInfo1,
2634 outputTensorInfo,
2635 descriptorName,
2636 "input_0",
2637 "input_1");
kevmay0190539692018-11-29 08:40:19 +00002638}
2639
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00002640void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2641{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002642 const std::string descriptorName{"DebugQueueDescriptor"};
2643
2644 ValidateNumInputs(workloadInfo, descriptorName, 1);
2645 ValidateNumOutputs(workloadInfo, descriptorName, 1);
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00002646}
2647
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002648void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2649{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002650 const std::string descriptorName{"EqualQueueDescriptor"};
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002651
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002652 ValidateNumInputs(workloadInfo, descriptorName, 2);
2653 ValidateNumOutputs(workloadInfo, descriptorName, 1);
kevmay012b4d88e2019-01-24 14:05:09 +00002654
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002655 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2656 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2657 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2658
2659 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2660 inputTensorInfo1,
2661 outputTensorInfo,
2662 descriptorName,
2663 "input_0",
2664 "input_1");
2665
2666 if (outputTensorInfo.GetDataType() != DataType::Boolean)
kevmay012b4d88e2019-01-24 14:05:09 +00002667 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002668 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
kevmay012b4d88e2019-01-24 14:05:09 +00002669 }
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002670}
2671
FrancisMurtagh878f0232018-12-19 10:56:15 +00002672void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2673{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002674 const std::string descriptorName{"GreaterQueueDescriptor"};
FrancisMurtagh878f0232018-12-19 10:56:15 +00002675
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002676 ValidateNumInputs(workloadInfo, descriptorName, 2);
2677 ValidateNumOutputs(workloadInfo, descriptorName, 1);
kevmay012b4d88e2019-01-24 14:05:09 +00002678
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002679 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2680 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2681 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2682
2683 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2684 inputTensorInfo1,
2685 outputTensorInfo,
2686 descriptorName,
2687 "input_0",
2688 "input_1");
2689
2690 if (outputTensorInfo.GetDataType() != DataType::Boolean)
kevmay012b4d88e2019-01-24 14:05:09 +00002691 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002692 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
kevmay012b4d88e2019-01-24 14:05:09 +00002693 }
FrancisMurtagh878f0232018-12-19 10:56:15 +00002694}
2695
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00002696void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2697{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002698 const std::string descriptorName{"RsqrtQueueDescriptor"};
2699
2700 ValidateNumInputs(workloadInfo, descriptorName, 1);
2701 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2702
2703 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2704 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2705
2706 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
nikraj010421e7f2019-06-14 09:40:34 +01002707
2708 std::vector<DataType> supportedTypes =
2709 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002710 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01002711 DataType::Float16,
2712 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002713 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002714 DataType::QAsymmU8,
2715 DataType::QSymmS16
nikraj010421e7f2019-06-14 09:40:34 +01002716 };
2717
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002718 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2719 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00002720}
2721
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01002722void GatherNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2723{
2724 const std::string descriptorName{"GatherNdQueueDescriptor"};
2725
2726 ValidateNumInputs(workloadInfo, descriptorName, 2);
2727 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2728
2729 const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
2730 if (indicesTensorInfo.GetDataType() != DataType::Signed32)
2731 {
2732 throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
2733 }
2734
2735 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2736 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2737
2738 std::vector<DataType> supportedTypes =
2739 {
2740 DataType::BFloat16,
2741 DataType::Float16,
2742 DataType::Float32,
2743 DataType::QAsymmS8,
2744 DataType::QAsymmU8,
2745 DataType::QSymmS16,
2746 DataType::Signed32,
2747 };
2748
2749 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2750
2751 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2752
2753 unsigned int outputDim = outputTensorInfo.GetNumDimensions();
2754 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
2755}
2756
narpra01b89b05f2019-01-16 09:53:09 +00002757void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2758{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002759 const std::string descriptorName{"GatherQueueDescriptor"};
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002760
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002761 ValidateNumInputs(workloadInfo, descriptorName, 2);
2762 ValidateNumOutputs(workloadInfo, descriptorName, 1);
narpra014951d842019-01-18 16:53:53 +00002763
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002764 const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
2765 if (indicesTensorInfo.GetDataType() != DataType::Signed32)
narpra014951d842019-01-18 16:53:53 +00002766 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002767 throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
narpra014951d842019-01-18 16:53:53 +00002768 }
2769
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002770 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2771 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2772
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002773 std::vector<DataType> supportedTypes =
2774 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002775 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01002776 DataType::Float16,
2777 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002778 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002779 DataType::QAsymmU8,
Teresa Charlin93492462020-05-29 13:08:59 +01002780 DataType::QSymmS16,
2781 DataType::Signed32,
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002782 };
2783
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002784 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002785
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002786 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01002787
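    // For example, gathering a [5, 4] input with [2, 3] indices gives an output of rank 2 + 2 - 1 = 3
    // (shape [2, 3, 4] when gathering along axis 0): the indices dimensions replace the gathered dimension.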
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002788 unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
2789 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
narpra01b89b05f2019-01-16 09:53:09 +00002790}
2791
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002792void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2793{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002794 const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};
2795
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002796 ValidateNumInputs(workloadInfo, descriptorName, 2);
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002797
2798 if (workloadInfo.m_OutputTensorInfos.size() != 4)
2799 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002800 throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002801 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
2802 }
2803
2804 if (m_Anchors == nullptr)
2805 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002806 throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002807 }
2808
2809 const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002810 const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
2811 const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
2812
2813 const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00002814 const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002815 const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
2816 const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002817
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002818 ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
2819 ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
2820 ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002821
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002822 const std::vector<DataType> supportedInputTypes =
2823 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002824 DataType::BFloat16,
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002825 DataType::Float32,
Matthew Jackson9bff1442019-09-12 09:08:23 +01002826 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01002827 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002828 DataType::QAsymmU8,
2829 DataType::QSymmS16
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002830 };
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002831
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002832 ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2833 ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2834 ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2835
2836 ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
2837 ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
2838 ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
2839 ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");
2840
2841 // NOTE: Output is always Float32 regardless of input type
2842 ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
2843 ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
2844 ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
2845 ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002846
2847 if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2848 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002849 throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002850 "must be positive and less than or equal to 1.");
2851 }
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002852
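    // For example, with m_NumClasses == 90 the last dimension of the scores tensor must be 91,
    // the extra slot holding the background class.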
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002853 if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
2854 {
Aron Virginas-Tar6331f912019-06-03 17:10:02 +01002855        throw InvalidArgumentException(descriptorName + ": Number of classes in the scores tensor (including "
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00002856                                       "the background class) should be equal to the number of classes + 1.");
2857 }
2858}
2859
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002860void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2861{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002862 const std::string& descriptorName{"DequantizeQueueDescriptor"};
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002863
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002864 ValidateNumInputs(workloadInfo, descriptorName, 1);
2865 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2866
2867 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2868 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2869
Aron Virginas-Tare9323ec2019-11-26 12:50:34 +00002870 if (!IsQuantizedType(inputTensorInfo.GetDataType()))
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002871 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002872 throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002873 }
2874
Sadik Armagan2208b602019-07-31 16:36:27 +01002875 std::vector<DataType> supportedTypes =
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002876 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002877 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01002878 DataType::Float32,
2879 DataType::Float16
Sadik Armagan2208b602019-07-31 16:36:27 +01002880 };
2881
2882 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00002883}
2884
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002885void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2886{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002887 const std::string& descriptorName{"MergeQueueDescriptor"};
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002888
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002889 ValidateNumInputs(workloadInfo, descriptorName, 2);
2890 ValidateNumOutputs(workloadInfo, descriptorName, 1);
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002891
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002892 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2893 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2894 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002895
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002896 ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2897 ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2898
2899 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2900 ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01002901}
2902
Keith Davis3ae3f972021-05-21 16:33:48 +01002903void ShapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2904{
2905 const std::string& descriptorName{"ShapeQueueDescriptor"};
2906
2907 ValidateNumInputs(workloadInfo, descriptorName, 1);
2908 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2909
2910 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2911 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2912
2913 std::vector<DataType> supportedTypes =
2914 {
2915 DataType::BFloat16,
2916 DataType::Float16,
2917 DataType::Float32,
2918 DataType::QAsymmS8,
2919 DataType::QAsymmU8,
2921 DataType::QSymmS8,
2922 DataType::QSymmS16,
2923 DataType::Signed32
2924 };
2925
2926 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2927 ValidateDataTypes(outputTensorInfo, {DataType::Signed32}, descriptorName);
2928}
2929
Sadik Armaganeff363d2019-04-05 15:25:46 +01002930void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2931{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002932 const std::string& descriptorName{"SwitchQueueDescriptor"};
Sadik Armaganeff363d2019-04-05 15:25:46 +01002933
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002934 ValidateNumInputs(workloadInfo, descriptorName, 2);
2935 ValidateNumOutputs(workloadInfo, descriptorName, 2);
2936
2937 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2938 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2939
2940 const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
2941 const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];
2942
2943 std::vector<DataType> supportedTypes =
2944 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002945 DataType::BFloat16,
Sadik Armaganeff363d2019-04-05 15:25:46 +01002946 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002947 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002948 DataType::QAsymmU8,
2949 DataType::QSymmS16
Sadik Armaganeff363d2019-04-05 15:25:46 +01002950 };
2951
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002952 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2953 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
Sadik Armaganeff363d2019-04-05 15:25:46 +01002954
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002955 ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2956 ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
Sadik Armaganeff363d2019-04-05 15:25:46 +01002957
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002958 ValidateTensorShapesMatch(inputTensorInfo0,
2959 outputTensorInfo0,
2960 descriptorName,
2961 "input_0",
2962 "output_0");
Sadik Armaganeff363d2019-04-05 15:25:46 +01002963
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002964 ValidateTensorShapesMatch(inputTensorInfo0,
2965 outputTensorInfo1,
2966 descriptorName,
2967 "input_0",
2968 "output_1");
Sadik Armaganeff363d2019-04-05 15:25:46 +01002969}
2970
Derek Lamberti901ea112019-12-10 22:07:09 +00002971void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
Matteo Martincigh49124022019-01-11 13:25:59 +00002972{
2973 // This is internally generated so it should not need validation.
2974}
2975
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002976void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2977{
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002978 const std::string& descriptorName{"PreluQueueDescriptor"};
2979
2980 ValidateNumInputs(workloadInfo, descriptorName, 2);
2981 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2982
2983 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2984 const TensorInfo& alphaTensorInfo = workloadInfo.m_InputTensorInfos[1];
2985 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002986
2987 std::vector<DataType> supportedTypes
2988 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002989 DataType::BFloat16,
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002990 DataType::Float16,
2991 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01002992 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00002993 DataType::QAsymmU8,
2994 DataType::QSymmS16
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002995 };
2996
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01002997 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2998 ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01002999
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01003000 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01003001
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01003002 ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
3003    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01003004
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01003005 ValidateBroadcastTensorShapesMatch(inputTensorInfo,
3006 alphaTensorInfo,
3007 outputTensorInfo,
3008 descriptorName,
Matteo Martincigh0e406ee2019-06-12 15:42:18 +01003009 "input",
3010 "alpha");
3011}
3012
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01003013void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3014{
3015 const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
3016
3017 ValidateNumInputs(workloadInfo, descriptorName, 1);
3018 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3019
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01003020 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3021 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3022
3023 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
3024 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01003025
3026 ValidatePointer(m_Weight, descriptorName, "weight");
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01003027
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01003028 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
3029 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01003030
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00003031 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
3032
3033 Optional<TensorInfo> optionalBiasTensorInfo;
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01003034 if (m_Parameters.m_BiasEnabled)
3035 {
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01003036 ValidatePointer(m_Bias, descriptorName, "bias");
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01003037
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00003038 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
3039 const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01003040
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00003041 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
Aron Virginas-Tar84062b72019-07-19 11:37:10 +01003042 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01003043 }
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00003044
3045 ValidatePerAxisQuantization(inputTensorInfo,
3046 outputTensorInfo,
3047 weightTensorInfo,
3048 optionalBiasTensorInfo,
3049 descriptorName);
3050
3051 std::vector<DataType> supportedTypes =
3052 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003053 DataType::BFloat16,
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00003054 DataType::Float32,
3055 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01003056 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00003057 DataType::QAsymmU8,
3058 DataType::QSymmS16
Aron Virginas-Tar94d3b932019-11-11 12:54:47 +00003059 };
3060
3061 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3062 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
Aron Virginas-Tar639fb042019-06-20 14:28:19 +01003063}
3064
Mike Kellyc9ea45a2020-02-28 18:11:58 +00003065void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3066{
3067 const std::string descriptorName{"TransposeQueueDescriptor"};
3068
3069 ValidateNumInputs(workloadInfo, descriptorName, 1);
3070 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3071
3072 const PermutationVector& mapping = m_Parameters.m_DimMappings;
3073
3074 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3075 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3076
3077 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
3078 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
3079
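    // For example, with DimMappings == { 0, 2, 1 } output dimension i must match input dimension mapping[i],
    // so an input of shape [N, H, W] is only valid with an output of shape [N, W, H].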
3080 for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
3081 {
3082 if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i])
3083 {
3084 throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) +
3085 " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " +
3086 "must match dst dimension " + to_string(i) +
3087 " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")");
3088 }
3089 }
3090
3091 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3092}
3093
Simon Obute51f67772021-09-03 15:50:13 +01003094void ChannelShuffleQueueDescriptor::Validate(const WorkloadInfo &workloadInfo) const
3095{
3096    const std::string descriptorName{"ChannelShuffleQueueDescriptor"};
3097
3098 ValidateNumInputs(workloadInfo, descriptorName, 1);
3099 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3100
3101 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3102 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3103
3104 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3105}
3106
James Conroy4f1f8992020-04-29 20:01:10 +01003107void QLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3108{
3109 const std::string descriptorName{"QLstmQueueDescriptor"};
3110
3111 // Validate number of inputs/outputs
3112 ValidateNumInputs(workloadInfo, descriptorName, 3);
3113 ValidateNumOutputs(workloadInfo, descriptorName, 3);
3114
3115 // Input/output tensor info
3116 auto inputInfo = workloadInfo.m_InputTensorInfos[0];
3117 auto outputStateInInfo = workloadInfo.m_InputTensorInfos[1];
3118 auto cellStateInInfo = workloadInfo.m_InputTensorInfos[2];
3119
3120 auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
3121 auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
3122 auto outputInfo = workloadInfo.m_OutputTensorInfos[2];
3123
3124 // Supported types for various tensors in QLSTM
3125 std::vector<DataType> inputOutputSupportedTypes =
3126 {
3127 DataType::QAsymmS8
3128 };
3129
3130 std::vector<DataType> cellStateSupportedTypes =
3131 {
3132 DataType::QSymmS16
3133 };
3134
3135 std::vector<DataType> weightsSupportedTypes =
3136 {
3137 DataType::QSymmS8
3138 };
3139
3140 std::vector<DataType> layerNormPeepholeWeightsSupportedTypes =
3141 {
3142 DataType::QSymmS16
3143 };
3144
3145 std::vector<DataType> biasSupportedTypes =
3146 {
3147 DataType::Signed32
3148 };
3149
3150 // Validate types of input/output tensors
3151 ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3152 ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3153 ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3154
3155 ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3156 ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3157 ValidateDataTypes(outputInfo, inputOutputSupportedTypes, descriptorName);
3158
3159 // Validate matching types of input/output tensors
3160 ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3161 ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3162 "outputStateIn", "outputStateOut");
3163 ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3164
3165 // Infer number of batches, number of units, input size and output size from tensor dimensions
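    // For example, an input of shape [2, 160], an outputStateIn of shape [2, 64] and a cellStateIn of
    // shape [2, 64] give numBatches = 2, inputSize = 160, outputSize = 64 and numUnits = 64.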
3166 const uint32_t numBatches = inputInfo.GetShape()[0];
3167 const uint32_t inputSize = inputInfo.GetShape()[1];
3168 const uint32_t outputSize = outputStateInInfo.GetShape()[1];
3169 const uint32_t numUnits = cellStateInInfo.GetShape()[1];
3170
3171 // Validate number of dimensions and number of elements for input/output tensors
3172 ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
3173 ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
3174 ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * numUnits), descriptorName + " cellStateIn");
3175
3176 ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
3177 ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * numUnits), descriptorName + " cellStateOut");
3178 ValidateTensorNumDimNumElem(outputInfo, 2, (numBatches * outputSize), descriptorName + " output");
3179
3180 // Validate number of dimensions and number of elements for MANDATORY weight tensors
3181 ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
3182 auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3183 ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (numUnits * inputSize), " InputToForgetWeights");
3184
3185 ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
3186 auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3187 ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (numUnits * inputSize), " InputToCellWeights");
3188
3189 ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
3190 auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3191 ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (numUnits * inputSize), " InputToOutputWeights");
3192
3193 ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
3194 auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3195 ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (numUnits * outputSize),
3196 " RecurrentToForgetWeights");
3197
3198 ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
3199 auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3200 ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");
3201
3202 ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
3203 auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3204 ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (numUnits * outputSize), " RecurrentToOutputWeights");
3205
3206 // Validate data types for MANDATORY weights tensors (all should match each other)
3207 ValidateDataTypes(inputToForgetWeightsInfo, weightsSupportedTypes, descriptorName);
3208
3209 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToCellWeightsInfo, descriptorName,
3210 "inputToForgetWeights", "inputToCellWeights");
3211 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3212 "inputToForgetWeights", "inputToOutputWeights");
3213
3214 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3215 "inputToForgetWeights", "recurrentToForgetWeights");
3216 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3217 "inputToForgetWeights", "recurrentToCellWeights");
3218 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3219 "inputToForgetWeights", "recurrentToOutputWeights");
3220
3221 // Validate number of dimensions and number of elements for MANDATORY bias tensors
3222 ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
3223 auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3224 ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, numUnits, " ForgetGateBias");
3225
3226 ValidatePointer(m_CellBias, descriptorName, "CellBias");
3227 auto cellBiasInfo = m_CellBias->GetTensorInfo();
3228 ValidateTensorNumDimNumElem(cellBiasInfo, 1, numUnits, " CellBias");
3229
3230 ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
3231 auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3232 ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, numUnits, " OutputGateBias");
3233
3234 // Validate data types for MANDATORY bias tensors
3235 ValidateDataTypes(forgetGateBiasInfo, biasSupportedTypes, descriptorName);
3236
3237 ValidateTensorDataTypesMatch(forgetGateBiasInfo, cellBiasInfo, descriptorName,
3238 "forgetGateBias", "cellBias");
3239 ValidateTensorDataTypesMatch(forgetGateBiasInfo, outputGateBiasInfo, descriptorName,
3240 "forgetGateBias", "outputGateBias");
3241
3242 // Validate OPTIONAL params: CIFG (inputToInputWeights, recurrentToInputWeights, inputGateBias)
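// With CIFG the input gate is coupled to the forget gate, so the three input-gate tensors must be
// omitted; with CIFG disabled all three must be supplied.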
3243 const bool allCifgParamsPresentOrNot = ((m_InputToInputWeights && m_RecurrentToInputWeights && m_InputGateBias &&
3244 !m_Parameters.m_CifgEnabled) ||
3245 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3246 !m_InputGateBias && m_Parameters.m_CifgEnabled));
3247
3248 if (!allCifgParamsPresentOrNot)
3249 {
3250 throw InvalidArgumentException(descriptorName +
3251 ": InputToInputWeights, RecurrentToInputWeights and InputGateBias must either all be present "
3252 "(CIFG disabled) or not be present at all (CIFG enabled). m_Parameters.m_CifgEnabled should be "
3253 "set appropriately.");
3254 }
3255
3256 if (!m_Parameters.m_CifgEnabled)
3257 {
3258 // Validate number of dimensions and number of elements
3259 auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3260 ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (numUnits * inputSize), " InputToInputWeights");
3261
3262 auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3263 ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (numUnits * outputSize),
3264 " RecurrentToInputWeights");
3265
3266 auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3267 ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, numUnits, " InputGateBias");
3268
3269 // Validate data types
3270 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToInputWeightsInfo, descriptorName,
3271 "inputToForgetWeights", "inputToInputWeights");
3272 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3273 "inputToForgetWeights", "recurrentToInputWeights");
3274 ValidateTensorDataTypesMatch(forgetGateBiasInfo, inputGateBiasInfo, descriptorName,
3275 "forgetGateBias", "inputGateBias");
3276 }
3277
3278 // Validate OPTIONAL params: Peephole (cellToInputWeights, cellToForgetWeights, cellToOutputWeights)
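// Peephole connections feed the cell state into the gates through 1-D weight vectors of numUnits
// elements; CellToInputWeights is only expected when CIFG is disabled.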
3279 bool allPeepholeWeightsPresentOrNot =
3280 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
3281 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
3282 || (!m_CellToInputWeights && !m_CellToForgetWeights
3283 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
3284
3285 if (!allPeepholeWeightsPresentOrNot)
3286 {
3287 throw InvalidArgumentException(descriptorName +
3288 ": CellToInputWeights, CellToForgetWeights and CellToOutputWeights should all be present (Peephole "
3289 "enabled) or not be present at all (Peephole disabled). CellToInputWeights should only be present "
3290 "when Peephole is enabled and CIFG is disabled. m_Parameters.m_PeepholeEnabled should be set "
3291 "appropriately.");
3292 }
3293
3294 if (m_Parameters.m_PeepholeEnabled)
3295 {
3296 auto cellToForgetWeightsInfo = m_CellToForgetWeights->GetTensorInfo();
3297 ValidateTensorNumDimNumElem(cellToForgetWeightsInfo, 1, numUnits, " cellToForgetWeights");
3298 ValidateDataTypes(cellToForgetWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3299
3300 auto cellToOutputWeightsInfo = m_CellToOutputWeights->GetTensorInfo();
3301 ValidateTensorNumDimNumElem(cellToOutputWeightsInfo, 1, numUnits, " cellToOutputWeights");
3302 ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToOutputWeightsInfo, descriptorName,
3303 "cellToForgetWeights", "cellToOutputWeights");
3304
3305 if (!m_Parameters.m_CifgEnabled)
3306 {
3307 auto cellToInputWeightsInfo = m_CellToInputWeights->GetTensorInfo();
3308 ValidateTensorNumDimNumElem(cellToInputWeightsInfo, 1, numUnits, " cellToInputWeights");
3309 ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToInputWeightsInfo, descriptorName,
3310 "cellToForgetWeights", "cellToInputWeights");
3311 }
3312 }
3313
3314 // Validate OPTIONAL params: Layer Norm Weights
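// Layer normalisation uses one 1-D weight vector of numUnits elements per gate;
// InputLayerNormWeights is omitted when CIFG is enabled.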
3315 bool allLayerNormWeightsPresentOrNot =
3316 (((m_InputLayerNormWeights || m_Parameters.m_CifgEnabled) && m_ForgetLayerNormWeights
3317 && m_CellLayerNormWeights && m_OutputLayerNormWeights && m_Parameters.m_LayerNormEnabled)
3318 || (!m_InputLayerNormWeights && !m_ForgetLayerNormWeights && !m_CellLayerNormWeights
3319 && !m_OutputLayerNormWeights && !m_Parameters.m_LayerNormEnabled));
3320
3321 if (!allLayerNormWeightsPresentOrNot)
3322 {
3323 throw InvalidArgumentException(descriptorName +
3324 ": InputLayerNormWeights, ForgetLayerNormWeights, OutputLayerNormWeights "
3325 "and CellLayerNormWeights should all be present (Layer Norm enabled) or not "
3326 "be present at all (Layer Norm disabled). InputLayerNormWeights should "
3327 "only be present when Layer Norm is enabled and CIFG is disabled. "
3328 "m_Parameters.m_LayerNormEnabled should be set appropriately.");
3329 }
3330
3331 if (m_Parameters.m_LayerNormEnabled)
3332 {
3333 auto forgetLayerNormWeightsInfo = m_ForgetLayerNormWeights->GetTensorInfo();
3334 ValidateTensorNumDimNumElem(forgetLayerNormWeightsInfo, 1, numUnits, " forgetLayerNormWeights");
3335 ValidateDataTypes(forgetLayerNormWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3336
3337 auto cellLayerNormWeightsInfo = m_CellLayerNormWeights->GetTensorInfo();
3338 ValidateTensorNumDimNumElem(cellLayerNormWeightsInfo, 1, numUnits, " cellLayerNormWeights");
3339 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, cellLayerNormWeightsInfo, descriptorName,
3340 "forgetLayerNormWeights", "cellLayerNormWeights");
3341
3342 auto outputLayerNormWeightsInfo = m_OutputLayerNormWeights->GetTensorInfo();
3343 ValidateTensorNumDimNumElem(outputLayerNormWeightsInfo, 1, numUnits, " outputLayerNormWeights");
3344 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, outputLayerNormWeightsInfo, descriptorName,
3345 "forgetLayerNormWeights", "outputLayerNormWeights");
3346
3347 if (!m_Parameters.m_CifgEnabled)
3348 {
3349 auto inputLayerNormWeightsInfo = m_InputLayerNormWeights->GetTensorInfo();
3350 ValidateTensorNumDimNumElem(inputLayerNormWeightsInfo, 1, numUnits, " inputLayerNormWeights");
3351 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, inputLayerNormWeightsInfo, descriptorName,
3352 "forgetLayerNormWeights", "inputLayerNormWeights");
3353 }
3354 }
3355
3356 // Validate OPTIONAL params: Projection (projectionWeights, projectionBias)
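// The projection layer maps the numUnits-wide LSTM output down to outputSize, so ProjectionWeights
// is 2-D with numUnits * outputSize elements and the optional ProjectionBias is 1-D with
// outputSize elements.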
3357 bool correctProjectionTensorsPresent =
3358 ((!m_ProjectionWeights && !m_ProjectionBias && !m_Parameters.m_ProjectionEnabled) ||
3359 (m_ProjectionWeights && !m_ProjectionBias && m_Parameters.m_ProjectionEnabled) ||
3360 (m_ProjectionWeights && m_ProjectionBias && m_Parameters.m_ProjectionEnabled));
3361
3362 if (!correctProjectionTensorsPresent)
3363 {
3364 throw InvalidArgumentException(descriptorName +
3365 ": If projection is enabled, ProjectionWeights should be present and "
3366 "ProjectionBias is optional. If projection is disabled, neither "
3367 "ProjectionWeights nor ProjectionBias should be present.");
3368 }
3369
3370 if (m_Parameters.m_ProjectionEnabled)
3371 {
3372 auto projectionWeightsInfo = m_ProjectionWeights->GetTensorInfo();
3373 ValidateTensorNumDimNumElem(projectionWeightsInfo, 2, (numUnits * outputSize), "ProjectionWeights");
3374 ValidateDataTypes(projectionWeightsInfo, weightsSupportedTypes, descriptorName);
3375
3376 if (m_ProjectionBias)
3377 {
3378 auto projectionBiasInfo = m_ProjectionBias->GetTensorInfo();
Sadik Armagand6f06492020-05-22 08:36:33 +01003379 ValidateTensorNumDimNumElem(projectionBiasInfo, 1, outputSize, "ProjectionBias");
James Conroy4f1f8992020-04-29 20:01:10 +01003380 ValidateDataTypes(projectionBiasInfo, biasSupportedTypes, descriptorName);
3381 }
3382
3383 }
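// Without a projection layer the output tensor is the hidden state itself, so its quantization
// parameters must match m_HiddenStateScale and m_HiddenStateZeroPoint.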
3384 else if ((outputInfo.GetQuantizationScale() != m_Parameters.m_HiddenStateScale) ||
3385 (outputInfo.GetQuantizationOffset() != m_Parameters.m_HiddenStateZeroPoint)) {
3386 throw InvalidArgumentException(descriptorName +
3387 ": If projection is disabled, output quantization info (scale, offset) "
3388 "should match HiddenStateScale and HiddenStateZeroPoint.");
3389 }
3390
3391}
3392
James Conroy9c3cae82019-08-01 16:01:48 +01003393void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3394{
3395 const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
3396
3397 // Validate number of inputs/outputs
3398 ValidateNumInputs(workloadInfo, descriptorName, 3);
3399 ValidateNumOutputs(workloadInfo, descriptorName, 2);
3400
3401 // Input/output tensor infos
3402 auto inputInfo = workloadInfo.m_InputTensorInfos[0];
3403 auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
3404 auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
3405
3406 auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
3407 auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
3408
3409 std::vector<DataType> inputOutputSupportedTypes =
3410 {
Derek Lambertif90c56d2020-01-10 17:14:08 +00003411 DataType::QAsymmU8
James Conroy9c3cae82019-08-01 16:01:48 +01003412 };
3413
3414 std::vector<DataType> cellStateSupportedTypes =
3415 {
Derek Lambertif90c56d2020-01-10 17:14:08 +00003416 DataType::QSymmS16
James Conroy9c3cae82019-08-01 16:01:48 +01003417 };
3418
3419 std::vector<DataType> weightsSupportedTypes =
3420 {
Derek Lambertif90c56d2020-01-10 17:14:08 +00003421 DataType::QAsymmU8
James Conroy9c3cae82019-08-01 16:01:48 +01003422 };
3423
3424 std::vector<DataType> biasSupportedTypes =
3425 {
3426 DataType::Signed32
3427 };
3428
3429 // Validate types of input/output tensors
3430 ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3431 ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3432 ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3433
3434 ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3435 ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3436
3437 // Validate matching types of input/output tensors
3438 ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3439 ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3440 "outputStateIn", "outputStateOut");
3441 ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3442
3443 // Validate matching quantization info for input/output tensors
3444 ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3445 ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
3446 ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
Aron Virginas-Tar636ab402019-09-16 14:27:45 +01003447
James Conroy9c3cae82019-08-01 16:01:48 +01003448 // Infer number of batches, input size and output size from tensor dimensions
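// This legacy quantized LSTM has no projection layer, so the output width matches the number of
// cells and outputSize can be read directly from cellStateIn.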
3449 const uint32_t numBatches = inputInfo.GetShape()[0];
3450 const uint32_t inputSize = inputInfo.GetShape()[1];
3451 const uint32_t outputSize = cellStateInInfo.GetShape()[1];
3452
3453 // Validate number of dimensions and number of elements for input/output tensors
3454 ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
3455 ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
3456 ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
3457 ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
3458 ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
3459
3460 // Validate number of dimensions and number of elements for weights tensors
3461 ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
3462 auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3463 ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
3464
3465 ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
3466 auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3467 ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
3468
3469 ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
3470 auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3471 ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
3472
3473 ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
3474 auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3475 ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
3476
3477 ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
3478 auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3479 ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
3480
3481 ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
3482 auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3483 ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
3484 " RecurrentToForgetWeights");
3485
3486 ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
3487 auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3488 ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
3489
3490 ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
3491 auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3492 ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToOutputWeights");
3493
3494 // Validate data types for weights tensors (all should match each other)
3495 ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
3496
3497 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
3498 "inputToInputWeights", "inputToForgetWeights");
3499 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
3500 "inputToInputWeights", "inputToCellWeights");
3501 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3502 "inputToInputWeights", "inputToOutputWeights");
3503
3504 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3505 "inputToInputWeights", "recurrentToInputWeights");
3506 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3507 "inputToInputWeights", "recurrentToForgetWeights");
3508 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3509 "inputToInputWeights", "recurrentToCellWeights");
3510 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3511 "inputToInputWeights", "recurrentToOutputWeights");
3512
3513 // Validate matching quantization info for weight tensors (all should match each other)
3514 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
3515 descriptorName, "inputToInputWeights", "inputToForgetWeights");
3516 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
3517 descriptorName, "inputToInputWeights", "inputToCellWeights");
3518 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
3519 descriptorName, "inputToInputWeights", "inputToOutputWeights");
3520
3521 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
3522 descriptorName, "inputToInputWeights", "recurrentToInputWeights");
3523 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
3524 descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
3525 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
3526 descriptorName, "inputToInputWeights", "recurrentToCellWeights");
3527 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
3528 descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
3529
3530 // Validate number of dimensions and number of elements in bias tensors
3531 ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
3532 auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3533 ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
3534
3535 ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
3536 auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3537 ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
3538
3539 ValidatePointer(m_CellBias, descriptorName, "CellBias");
3540 auto cellBiasInfo = m_CellBias->GetTensorInfo();
3541 ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
3542
3543 ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
3544 auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3545 ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
3546
3547 // Validate data types for bias tensors (all should match each other)
3548 ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
3549
3550 ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
3551 "inputGateBias", "forgetGateBias");
3552 ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
3553 "inputGateBias", "cellBias");
3554 ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
3555 "inputGateBias", "outputGateBias");
3556
3557 // Validate bias tensor quantization info
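// Signed32 bias tensors are expected to carry a quantization scale equal to the product of the
// input and weight scales, e.g. an input scale of 0.5 and a weight scale of 0.1 give an expected
// bias scale of 0.05.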
3558 ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3559 ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3560 ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3561 ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3562}
3563
Kevin May868eb142019-09-04 17:29:31 +01003564void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3565{
3566 const std::string descriptorName{"AbsQueueDescriptor"};
3567
3568 ValidateNumInputs(workloadInfo, descriptorName, 1);
3569 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3570
3571 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3572 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3573
3574 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3575
3576 std::vector<DataType> supportedTypes =
James Conroyd47a0642019-09-17 14:22:06 +01003577 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003578 DataType::BFloat16,
James Conroyd47a0642019-09-17 14:22:06 +01003579 DataType::Float16,
3580 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01003581 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00003582 DataType::QAsymmU8,
Kevin Mayec52c3a2020-04-24 09:42:31 +01003583 DataType::QSymmS16,
3584 DataType::Signed32
James Conroyd47a0642019-09-17 14:22:06 +01003585 };
Kevin May868eb142019-09-04 17:29:31 +01003586
3587 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3588 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3589}
3590
Aron Virginas-Tar636ab402019-09-16 14:27:45 +01003591void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3592{
3593 const std::string descriptorName{"SliceQueueDescriptor"};
3594
3595 ValidateNumInputs(workloadInfo, descriptorName, 1);
3596 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3597
3598 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3599 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3600
3601 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3602
3603 const unsigned int rank = inputTensorInfo.GetNumDimensions();
3604 if (rank > 4)
3605 {
3606 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
3607 }
3608
3609 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
3610
3611 // Check if m_Begin and m_Size have the expected length
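// Slice extracts one region per dimension, described by a start offset (m_Begin) and an element
// count (m_Size), so both descriptors need exactly one entry per input dimension.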
3612 if (m_Parameters.m_Begin.size() != rank)
3613 {
3614 throw InvalidArgumentException(descriptorName +
3615 ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
3616 }
3617 if (m_Parameters.m_Size.size() != rank)
3618 {
3619 throw InvalidArgumentException(descriptorName +
3620 ": Length of size descriptor must equal rank " + std::to_string(rank));
3621 }
3622
3623 // Check if the shape of the output tensor matches m_Size
3624 const TensorShape& outputShape = outputTensorInfo.GetShape();
3625 for (unsigned int i = 0u; i < rank; ++i)
3626 {
3627 if (m_Parameters.m_Size[i] != outputShape[i])
3628 {
3629 throw InvalidArgumentException(descriptorName + ": Size descriptor does not match output tensor.");
3630 }
3631 }
3632
3633 // Check if the sum of begin offset and size in a given dimension
3634 // does not exceed the size of corresponding input
3635 const TensorShape& inputShape = inputTensorInfo.GetShape();
3636 for(unsigned int i = 0u; i < rank; ++i)
3637 {
Aron Virginas-Tar92b9f872019-09-17 17:27:04 +01003638 if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
Aron Virginas-Tar636ab402019-09-16 14:27:45 +01003639 {
3640 throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
3641 std::to_string(i) + " exceeds input size.");
3642 }
3643 }
3644}
3645
Aron Virginas-Tardd6247f2019-09-19 14:31:17 +01003646void DepthToSpaceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3647{
3648 const std::string descriptorName{"DepthToSpaceQueueDescriptor"};
3649
3650 ValidateNumInputs(workloadInfo, descriptorName, 1);
3651 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3652
3653 const TensorInfo& inputInfo = workloadInfo.m_InputTensorInfos[0];
3654 const TensorInfo& outputInfo = workloadInfo.m_OutputTensorInfos[0];
3655
3656 ValidateTensorNumDimensions(inputInfo, descriptorName, 4, "input");
3657 ValidateTensorNumDimensions(outputInfo, descriptorName, 4, "output");
3658
3659 std::vector<DataType> supportedTypes =
3660 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003661 DataType::BFloat16,
Aron Virginas-Tardd6247f2019-09-19 14:31:17 +01003662 DataType::Float32,
3663 DataType::Float16,
Sadik Armagan303980c2020-04-17 12:45:14 +01003664 DataType::QAsymmS8,
Derek Lambertif90c56d2020-01-10 17:14:08 +00003665 DataType::QAsymmU8,
3666 DataType::QSymmS16
Aron Virginas-Tardd6247f2019-09-19 14:31:17 +01003667 };
3668
3669 ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
3670 ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
3671
3672 ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName, "input", "output");
3673
3674 if (m_Parameters.m_BlockSize == 0)
3675 {
3676 throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
3677 }
3678
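// DepthToSpace moves blockSize x blockSize groups of channel values out into the spatial
// dimensions, e.g. with a block size of 2 an NHWC input of shape [1, 2, 2, 4] becomes
// [1, 4, 4, 1], so the output width/height must be multiples of the block size and the input
// depth must be divisible by the square of the block size.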
3679 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
3680 const unsigned int wIndex = dimensionIndices.GetWidthIndex();
3681 const unsigned int hIndex = dimensionIndices.GetHeightIndex();
3682 const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
3683
3684 const TensorShape& outputShape = outputInfo.GetShape();
3685 if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
3686 {
3687 throw InvalidArgumentException(descriptorName + ": Output width and height "
3688 "must be divisible by block size.");
3689 }
3690
3691 const TensorShape& inputShape = inputInfo.GetShape();
3692 if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
3693 {
3694 throw InvalidArgumentException(descriptorName + ": The depth of the input tensor "
3695 "must be divisible by the square of block size.");
3696 }
3697}
3698
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +01003699void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3700{
3701 const std::string descriptorName{"ComparisonQueueDescriptor"};
3702
3703 ValidateNumInputs(workloadInfo, descriptorName, 2);
3704 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3705
3706 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3707 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3708 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3709
3710 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3711 inputTensorInfo1,
3712 outputTensorInfo,
3713 descriptorName,
3714 "input_0",
3715 "input_1");
3716
3717 if (outputTensorInfo.GetDataType() != DataType::Boolean)
3718 {
3719 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3720 }
3721}
3722
josh minor4a3c6102020-01-06 16:40:46 -06003723void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3724{
3725 const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};
3726
3727 ValidateNumInputs(workloadInfo, descriptorName, 1);
3728 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3729
3730 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3731 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3732
3733 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3734
3735 std::vector<DataType> supportedTypes =
3736 {
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003737 DataType::BFloat16,
josh minor4a3c6102020-01-06 16:40:46 -06003738 DataType::Float16,
3739 DataType::Float32,
Sadik Armagan303980c2020-04-17 12:45:14 +01003740 DataType::QAsymmS8,
josh minor4a3c6102020-01-06 16:40:46 -06003741 DataType::QAsymmU8,
Sadik Armaganac472102020-03-24 09:54:36 +00003742 DataType::QSymmS16,
3743 DataType::Signed32
josh minor4a3c6102020-01-06 16:40:46 -06003744 };
3745
James Conroyaba90cd2020-11-06 16:28:18 +00003746 std::vector<DataType> logicalSupportedTypes =
3747 {
3748 DataType::Boolean
3749 };
3750
3751 if (m_Parameters.m_Operation == UnaryOperation::LogicalNot)
3752 {
3753 ValidateDataTypes(inputTensorInfo, logicalSupportedTypes, descriptorName);
3754 }
3755 else
3756 {
3757 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3758 }
3759
3760
josh minor4a3c6102020-01-06 16:40:46 -06003761 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3762}
3763
Finn Williams2605b232020-06-10 15:53:46 +01003764void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3765{
3766 const std::string descriptorName{"RankQueueDescriptor"};
3767
3768 ValidateNumInputs(workloadInfo, descriptorName, 1);
3769 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3770
3771 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3772 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3773
3774 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
3775 ValidateTensorNumElements(outputTensorInfo, descriptorName, 1, "output");
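// Rank always produces a single Signed32 value, hence the 1-D, single-element output constraint.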
3776
3777 std::vector<DataType> supportedTypes =
3778 {
3779 DataType::BFloat16,
3780 DataType::Float16,
3781 DataType::Float32,
3782 DataType::QAsymmS8,
3783 DataType::QAsymmU8,
3784 DataType::QSymmS8,
3785 DataType::QSymmS16,
3786 DataType::Signed32
3787 };
3788
3789 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3790 ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
3791}
3792
James Conroyaba90cd2020-11-06 16:28:18 +00003793void LogicalBinaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3794{
3795 const std::string descriptorName{"LogicalBinaryQueueDescriptor"};
3796
3797 ValidateNumInputs(workloadInfo, descriptorName, 2);
3798 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3799
3800 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3801 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3802 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3803
3804 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3805 inputTensorInfo1,
3806 outputTensorInfo,
3807 descriptorName,
3808 "input_0",
3809 "input_1");
3810
3811 if (inputTensorInfo0.GetDataType() != DataType::Boolean)
3812 {
3813 throw InvalidArgumentException(descriptorName + ": Input tensor 0 type must be Boolean.");
3814 }
3815
3816 if (inputTensorInfo1.GetDataType() != DataType::Boolean)
3817 {
3818 throw InvalidArgumentException(descriptorName + ": Input tensor 1 type must be Boolean.");
3819 }
3820
3821 if (outputTensorInfo.GetDataType() != DataType::Boolean)
3822 {
3823 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3824 }
3825}
3826
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003827void ReduceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3828{
3829 const std::string descriptorName{"ReduceQueueDescriptor"};
3830
3831 ValidateNumInputs(workloadInfo, descriptorName, 1);
3832 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3833
3834 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3835 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3836
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00003837 std::vector<DataType> supportedTypes =
3838 {
3839 DataType::BFloat16,
3840 DataType::Float16,
3841 DataType::Float32,
3842 DataType::QAsymmS8,
3843 DataType::QAsymmU8,
3844 DataType::QSymmS16,
3845 DataType::Signed32
3846 };
3847
3848 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3849 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3850}
3851
Narumol Prangnawarat8ed39ae2021-07-15 16:16:25 +01003852void UnidirectionalSequenceLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3853{
3854 // Modified from LstmQueueDescriptor::Validate to support UnidirectionalSequenceLstm
3855
3856 const std::string descriptorName{"UnidirectionalSequenceLstmQueueDescriptor"};
3857
3858 // Check the number of inputs and outputs
3859 if (workloadInfo.m_InputTensorInfos.size() != 3)
3860 {
3861 throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
3862 }
3863 if (workloadInfo.m_OutputTensorInfos.size() != 1)
3864 {
3865 throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
3866 }
3867
3868 std::vector<DataType> supportedTypes =
3869 {
Narumol Prangnawarate5339e72021-07-28 17:33:28 +01003870 DataType::Float32
Narumol Prangnawarat8ed39ae2021-07-15 16:16:25 +01003871 };
3872
3873 // Check that the first input has a supported type and that all other inputs and outputs match it
3874 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
3875
3876 // type matches all other inputs
3877 for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
3878 {
3879 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
3880 workloadInfo.m_InputTensorInfos[i],
3881 descriptorName,
3882 "input_0",
3883 "input_" + std::to_string(i));
3884 }
3885 // type matches all other outputs
3886 for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
3887 {
3888 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
3889 workloadInfo.m_OutputTensorInfos[i],
3890 descriptorName,
3891 "input_0",
3892 "output_" + std::to_string(i));
3893 }
3894
3895 // Making sure clipping parameters have valid values.
3896 // == 0 means no clipping
3897 // > 0 means clipping
3898 if (m_Parameters.m_ClippingThresCell < 0.0f)
3899 {
3900 throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
3901 }
3902 if (m_Parameters.m_ClippingThresProj < 0.0f)
3903 {
3904 throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
3905 }
3906
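// The input tensor is laid out as [batchSize, timeSteps, inputSize] when TimeMajor is disabled,
// or [timeSteps, batchSize, inputSize] when it is enabled.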
3907 const unsigned int batchIndx = m_Parameters.m_TimeMajor ? 1u : 0u;
3908 const unsigned int timeIndx = m_Parameters.m_TimeMajor ? 0u : 1u;
3909 const unsigned int inputIndx = 2u;
3910 const uint32_t timeStep = workloadInfo.m_InputTensorInfos[0].GetShape()[timeIndx];
3919
3920 // Infer the input size, batch size, number of cells and number of outputs from the inputs and weights.
3921 const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[inputIndx];
3922 const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[batchIndx];
3923 ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
3924 const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
3925 ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
3926 const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
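// InputToOutputWeights is laid out as [n_cell, n_input] and RecurrentToOutputWeights as
// [n_cell, n_output], which is where n_cell and n_output are read from.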
3927
3928 // input tensor
3929 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 3, (timeStep * n_batch * n_input),
3930 descriptorName + " input_0");
3931 // outputStateInTensor
3932 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
3933 descriptorName + " input_1");
3934 // cellStateInTensor
3935 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
3936 descriptorName + " input_2");
3937
3938 // outputTensor
3939 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 3, (timeStep * n_batch * n_output),
3940 descriptorName + " output_0");
3941
3942 // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
3943 if ( m_InputToInputWeights )
3944 {
3945 ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
3946 (n_cell * n_input), "InputToInputWeights");
3947 }
3948
3949 ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
3950 ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
3951 (n_cell * n_input), "InputToForgetWeights");
3952
3953 ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
3954 ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
3955 (n_cell * n_input), "InputToCellWeights");
3956
3957 if ( m_RecurrentToInputWeights )
3958 {
3959 ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
3960 (n_cell * n_output), "RecurrentToInputWeights");
3961 }
3962
3963 ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
3964 ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
3965 (n_cell * n_output), "RecurrentToForgetWeights");
3966
3967 ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
3968 ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
3969 (n_cell * n_output), "RecurrentToCellWeights");
3970
3971 // Make sure the input-gate's parameters are either both present (regular
3972 // LSTM) or not at all (CIFG-LSTM). And CifgEnabled must be set accordingly.
3973 bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
3974 !m_Parameters.m_CifgEnabled) ||
3975 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3976 m_Parameters.m_CifgEnabled));
3977 if (!cifg_weights_all_or_none)
3978 {
3979 throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
3980 "RecurrentToInputWeights must either both be present (regular LSTM) "
3981 "or both not present (CIFG-LSTM). In addition CifgEnabled must be set "
3982 "accordingly.");
3983 }
3984
3985 if ( m_CellToInputWeights )
3986 {
3987 ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
3988 n_cell, "CellToInputWeights");
3989 }
3990 if ( m_CellToForgetWeights )
3991 {
3992 ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
3993 n_cell, "CellToForgetWeights");
3994 }
3995 if ( m_CellToOutputWeights )
3996 {
3997 ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
3998 n_cell, "CellToOutputWeights");
3999 }
4000
4001 // Make sure the peephole weights are either all present or all absent, and that PeepholeEnabled is set accordingly.
4002 bool peephole_weights_all_or_none =
4003 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
4004 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
4005 || ( !m_CellToInputWeights && !m_CellToForgetWeights
4006 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
4007 if (!peephole_weights_all_or_none)
4008 {
4009 throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
4010 }
4011
4012 // Make sure the input gate bias is present only when not a CIFG-LSTM.
4013 if (m_Parameters.m_CifgEnabled)
4014 {
4015 if (m_InputGateBias)
4016 {
4017 throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
4018 }
4019 }
4020 else
4021 {
4022 if (!m_InputGateBias)
4023 {
4024 throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
4025 "must be present.");
4026 }
4027 ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
4028 n_cell, "InputGateBias");
4029 }
4030
4031 ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
4032 ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
4033
4034 ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
4035 ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
4036
4037 ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
4038 ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
4039
4040 if (m_ProjectionWeights)
4041 {
4042 ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
4043 (n_cell * n_output), "ProjectionWeights");
4044 }
4045 if (m_ProjectionBias)
4046 {
4047 ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
4048 }
4049
4050 // Making sure the projection tensors are consistent:
4051 // 1) If projection weight is not present, then projection bias should not be
4052 // present.
4053 // 2) If projection weight is present, then projection bias is optional.
4054 bool projection_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
4055 !m_Parameters.m_ProjectionEnabled)
4056 || (m_ProjectionWeights && !m_ProjectionBias &&
4057 m_Parameters.m_ProjectionEnabled)
4058 || (m_ProjectionWeights && m_ProjectionBias &&
4059 m_Parameters.m_ProjectionEnabled));
4060 if (!projection_tensors_consistent)
4061 {
4062 throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
4063 }
4064
4065 // The four layer normalization weights either all have values or none of them have values. Additionally, if
4066 // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
4067 // either all have values or none of them have values. Layer normalization is used when the values of all the
4068 // layer normalization weights are present
4069 if (m_InputLayerNormWeights)
4070 {
4071 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
4072 }
4073 if (m_ForgetLayerNormWeights)
4074 {
4075 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
4076 }
4077 if (m_CellLayerNormWeights)
4078 {
4079 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
4080 }
4081 if (m_OutputLayerNormWeights)
4082 {
4083 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
4084 }
4085
4086 if (m_Parameters.m_LayerNormEnabled)
4087 {
4088 if (!m_Parameters.m_CifgEnabled)
4089 {
4090 if (!m_InputLayerNormWeights)
4091 {
4092 throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
4093 "disabled but InputLayerNormWeights are not present");
4094 }
4095 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
4096 1, n_cell, "InputLayerNormWeights");
4097 }
4098 else if (m_InputLayerNormWeights)
4099 {
4100 throw InvalidArgumentException(descriptorName + ": InputLayerNormWeights are present while CIFG is "
4101 "enabled");
4102 }
4103
4104 ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
4105 "ForgetLayerNormWeights");
4106 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
4107
4108 ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
4109 "OutputLayerNormWeights");
4110 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
4111
4112 ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
4113 "CellLayerNormWeights");
4114 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
4115 }
4116 else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
4117 {
4118 throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
4119 "normalisation weights are present.");
4120 }
4121}
4122
4123
mathad01df9a3222021-04-28 11:42:57 +01004124} // namespace armnn