blob: df66b4e4c1c09bf9be17849ad7fe639cccc32adf [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "WorkloadData.hpp"
6
7#include "CpuTensorHandle.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
Matteo Martincigh21350152018-11-28 16:22:22 +00009#include <DataLayoutIndexed.hpp>
Matthew Bentham8800c002018-11-19 13:19:28 +000010
telsoa014fcda012018-03-09 14:13:49 +000011#include <algorithm>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <iomanip>
telsoa014fcda012018-03-09 14:13:49 +000013#include <string>
14#include <sstream>
telsoa014fcda012018-03-09 14:13:49 +000015
16#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010017#include <boost/numeric/conversion/cast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Matteo Martincigh21350152018-11-28 16:22:22 +000019using namespace armnnUtils;
20
telsoa014fcda012018-03-09 14:13:49 +000021namespace armnn
22{
23
24//---------------------------------------------------------------
25DataType GetBiasDataType(DataType inputDataType)
26{
27 switch (inputDataType)
28 {
telsoa01c577f2c2018-08-31 09:22:23 +010029 case DataType::Float16:
30 return DataType::Float16;
telsoa014fcda012018-03-09 14:13:49 +000031 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QuantisedAsymm8:
34 return DataType::Signed32;
35 default:
36 BOOST_ASSERT_MSG(false, "Invalid input data type");
37 return DataType::Float32;
38 }
39}
40
41namespace
42{
43
//---------------------------------------------------------------
// The Android NDK does not provide std::to_string, so supply a stream-based
// replacement that works for any streamable type.
template <typename T>
std::string to_string(T value)
{
    std::stringstream ss;
    ss << value;
    return ss.str();
}
53
54//---------------------------------------------------------------
55void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
56{
57 if (!ptr)
58 {
59 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
60 paramName + " parameter must be set.");
61 }
62}
63
//---------------------------------------------------------------
// Throws InvalidArgumentException if the two tensors do not have exactly the
// same shape. Used by workloads whose output must mirror their input shape.
void ValidateTensorShapesMatch(const TensorInfo& first,
                               const TensorInfo& second,
                               std::string const& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (first.GetShape() != second.GetShape())
    {
        throw InvalidArgumentException(descName + ": "
                                       + firstName + " & " + secondName + " must have identical shapes");
    }
}
77
78//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010079void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000080{
Sadik Armaganeff363d2019-04-05 15:25:46 +010081 if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000082 {
83 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010084 ": Requires exactly " + to_string(expectedSize) + "input(s). " +
telsoa014fcda012018-03-09 14:13:49 +000085 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
86 }
87}
88
89//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010090void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000091{
Sadik Armaganeff363d2019-04-05 15:25:46 +010092 if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000093 {
94 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010095 ": Requires exactly " + to_string(expectedSize) + " output(s). " +
telsoa014fcda012018-03-09 14:13:49 +000096 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
97 }
98}
99
100//---------------------------------------------------------------
101void ValidateTensorNumDimensions(const TensorInfo& tensor,
102 std::string const& descName,
103 unsigned int numDimensions,
104 std::string const& tensorName)
105{
106 if (tensor.GetNumDimensions() != numDimensions)
107 {
108 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
109 to_string(tensor.GetNumDimensions()) + " dimensions for " +
110 tensorName + " tensor.");
111 }
112}
113
//---------------------------------------------------------------
// Throws InvalidArgumentException if the tensor's data type differs from the
// expected one; the message includes both type names via GetDataTypeName.
void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
    const std::string& descName, std::string const& tensorName)
{
    if (tensor.GetDataType() != dataType)
    {
        throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
            GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
    }
}
124
//---------------------------------------------------------------
// Checks the quantization parameters of a bias tensor: its offset must be
// zero and its scale must equal inputScale * weightsScale, the convention
// required by the quantized convolution / fully-connected implementations.
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
    const TensorInfo& weightsTensorInfo, const std::string& descName)
{
    if (biasTensor.GetQuantizationOffset() != 0)
    {
        throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
            to_string(biasTensor.GetQuantizationOffset()));
    }
    const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
    // Compare with a small absolute tolerance to allow for float rounding.
    if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
    {
        // Print the float values with extra precision to see very small differences
        std::stringstream msg;
        msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
            " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
            biasTensor.GetQuantizationScale();
        throw InvalidArgumentException(msg.str());
    }
}
145
146//---------------------------------------------------------------
147void ValidateTensors(const std::vector<ITensorHandle*>& vec,
148 unsigned int numExpected,
149 const std::string& descName,
150 const std::string& varName)
151{
152 if (vec.empty() && numExpected > 0)
153 {
154 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
155 }
156
157 for (unsigned int i = 0; i < numExpected; ++i)
158 {
159 if (!vec[i])
160 {
161 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
162 }
163 }
164}
165
//---------------------------------------------------------------
// Validates that two input tensors can be broadcast together and that the
// resulting broadcast shape matches the given output tensor. Both inputs must
// have the same rank; each dimension must either match or be 1 on one side.
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName + ": Tensors "
            + firstName + " & " + secondName
            + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        // A pair of dimensions is compatible when they are equal, or when at
        // least one of them is 1 (that side gets broadcast).
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        // The broadcast dimension is the larger of the two input dimensions.
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
            + firstName + " & " + secondName
            + " does not match the output shape");
    }
}
202
//---------------------------------------------------------------
/// Validates that the output tensor's quantization scale is greater than the product
/// of the two input tensors' quantization scales. This is a requirement of the implementation of
/// the quantized multiplication.
void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
    const TensorInfo& outputTensorInfo, std::string const& descName,
    const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
{
    // Only enforced for asymmetric 8-bit quantized outputs; other data types
    // are not checked here.
    if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
    {
        if (outputTensorInfo.GetQuantizationScale() <=
            inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
        {
            std::stringstream msg;
            msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
                "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
            throw InvalidArgumentException(msg.str());
        }
    }
}
223
Sadik Armaganeff363d2019-04-05 15:25:46 +0100224//---------------------------------------------------------------
225void ValidateDataTypes(const TensorInfo& info,
226 const std::vector<armnn::DataType>& supportedTypes,
227 std::string const& descName)
228{
229 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
230 if (iterator == supportedTypes.end())
231 {
232 throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
233 }
234}
235
telsoa014fcda012018-03-09 14:13:49 +0000236} //namespace
237
// Validates both the input and output tensor-handle arrays of a queue
// descriptor: each must contain the expected number of entries with no nulls.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
244
//---------------------------------------------------------------
// Validates a memory-copy workload: input/output counts must match pairwise,
// each input/output tensor pair must have the same number of elements, and
// every tensor handle must be non-null.
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemCopyQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemCopyQueueDescriptor" , 1);

    // Tensor infos must come in matched input/output pairs.
    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
                % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    // A copy only requires equal element counts, not identical shapes.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                    % i ));
        }
    }

    // The tensor-handle arrays must also pair up one-to-one.
    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
                % m_Inputs.size() % m_Outputs.size()));
    }

    // No null handles on either side.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}
289
//---------------------------------------------------------------
// Activation is applied element-wise, so the single input and output must
// have identical shapes.
void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "ActivationQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "ActivationQueueDescriptor", 1);
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "ActivationQueueDescriptor",
                              "input",
                              "output");
}
301
//---------------------------------------------------------------
// Softmax preserves the input shape, so the single input and output must
// have identical shapes.
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "SoftmaxQueueDescriptor", 1);

    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "SoftmaxQueueDescriptor",
                              "input",
                              "output");
}
314
//---------------------------------------------------------------
// Validates a splitter: one view origin per output, each origin with the same
// dimensionality as the input, and each output window lying fully inside the
// input tensor.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "SplitterQueueDescriptor", 1);

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
    }

    // Each output tensor corresponds to exactly one split window.
    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "SplitterQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }
        // Origin + output extent must not exceed the input size in any
        // coordinate, i.e. each window fits inside the input tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
362
//---------------------------------------------------------------
// Validates a concatenation (merger): at least one input, a valid concat
// axis, and — unless concatenating along the last dimension — one view origin
// per input, each with the output's dimensionality and lying fully inside the
// output tensor.
void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumOutputs(workloadInfo, "MergerQueueDescriptor", 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
    }

    // NOTE(review): this rejects an axis strictly greater than the input rank
    // but still accepts an axis equal to the rank — confirm whether `>` should
    // be `>=` here.
    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException("Invalid Concatenation Axis provided");
    }

    // When concatenating along the last dimension, the window checks below
    // are skipped entirely.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    // Each input tensor corresponds to exactly one merge window.
    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "MergerQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }
}
434
//---------------------------------------------------------------
// Validates a fully-connected layer: 2D or 4D input, 2D output, a present 2D
// weight tensor, and — when bias is enabled — a present 1D bias with the
// correct data type and quantization parameters.
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");

    if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
          workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
    }

    if (m_Weight == nullptr)
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
    }

    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        if (m_Bias == nullptr)
        {
            throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
                                           "bias value tensor descriptor is missing.");
        }

        // Validates type and quantization values.
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "FullyConnectedQueueDescriptor", "bias");

        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
    }

    // Quantization scales must satisfy outputScale > inputScale * weightScale.
    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
}
477
//---------------------------------------------------------------
// Normalization preserves the input shape, so the single input and output
// must have identical shapes.
void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "NormalizationQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "NormalizationQueueDescriptor", 1);
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "NormalizationQueueDescriptor",
                              "input",
                              "output");
}
489
// Validates an element-wise addition: two inputs and one output, all of a
// supported data type, with shapes compatible under broadcasting.
void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "AdditionQueueDescriptor", 2);
    ValidateNumOutputs(workloadInfo, "AdditionQueueDescriptor", 1);

    // Data types accepted by the addition workload.
    std::vector<DataType> supportedTypes = {
        DataType::Float32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16,
        DataType::Float16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                      supportedTypes,
                      "AdditionQueueDescriptor");

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
                      supportedTypes,
                      "AdditionQueueDescriptor");

    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      supportedTypes,
                      "AdditionQueueDescriptor");

    // Addition supports broadcasting: per-dimension sizes must either match
    // or be 1 on one side, and the result must equal the output shape.
    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "AdditionQueueDescriptor",
                                       "first input",
                                       "second input");
}
521
//---------------------------------------------------------------
// Validates an element-wise multiplication: two inputs and one output, all of
// a supported data type, with shapes compatible under broadcasting.
void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MultiplicationQueueDescriptor", 2);
    ValidateNumOutputs(workloadInfo, "MultiplicationQueueDescriptor", 1);

    // Data types accepted by the multiplication workload.
    std::vector<DataType> supportedTypes = {
        DataType::Float32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16,
        DataType::Float16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                      supportedTypes,
                      "MultiplicationQueueDescriptor");

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
                      supportedTypes,
                      "MultiplicationQueueDescriptor");

    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      supportedTypes,
                      "MultiplicationQueueDescriptor");

    // Multiplication supports broadcasting: per-dimension sizes must either
    // match or be 1 on one side, and the result must equal the output shape.
    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "MultiplicationQueueDescriptor",
                                       "first input",
                                       "second input");
}
554
// Validates batch normalization: input and output shapes must match, and the
// four parameter tensors (mean, variance, beta, gamma) must all be present,
// one-dimensional, and of identical shape.
void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "BatchNormalizationQueueDescriptor",
                              "input",
                              "output");
    // All four parameter tensors are mandatory.
    ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
    ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
    ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
    ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");


    ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
    ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
    ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
    ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");

    // The parameter tensors must all have the same shape as the mean.
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
}
582
583void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
584{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100585 ValidateNumInputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
586 ValidateNumOutputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000587
588 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
589 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
590
591 ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
592 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
593 ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
594 "Convolution2dQueueDescriptor", "weight");
595 if (m_Parameters.m_BiasEnabled)
596 {
597 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
598 ValidateTensorDataType(m_Bias->GetTensorInfo(),
599 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
600 "Convolution2dQueueDescriptor", "bias");
601 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
602 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
603 }
604
605 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
606 workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
607}
608
// Validates a depthwise 2D convolution: 4D input/output, a present 4D weight
// tensor whose channel-multiplier/input-channel product equals the output
// channel count, and — when bias is enabled — a present 1D bias with the
// correct data type and quantization parameters.
void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);

    ValidateTensorNumDimensions(
        workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");

    ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");

    // The output's channel dimension position depends on the data layout.
    const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;

    // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
    // inputChannels * channelMultiplier should be equal to outputChannels.
    const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
    const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
    const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelIndex];
    if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
                                     "equal to input_channels (provided %2%) multiplied by channel_multiplier "
                                     "(provided %3%).")
                                     % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
    }

    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "DepthwiseConvolution2dQueueDescriptor", "bias");
    }

    // Quantization scales must satisfy outputScale > inputScale * weightScale.
    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
}
653
// Validates a permute: both tensors must have as many dimensions as the
// permutation vector, and each source dimension's size must equal the size
// of the destination dimension it maps to.
void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "PermuteQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "PermuteQueueDescriptor", 1);

    const PermutationVector& mapping = m_Parameters.m_DimMappings;

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
    ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");

    // Each source dimension i maps to destination dimension mapping[i];
    // the sizes on both ends must agree.
    for (unsigned int i = 0; i < mapping.GetSize(); ++i)
    {
        if (input.GetShape()[i] != output.GetShape()[mapping[i]])
        {
            throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
                                           " (=" + to_string(input.GetShape()[i]) + ") " +
                                           "must match dst dimension " + to_string(mapping[i]) +
                                           " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
        }
    }
}
678
// Validates 2D pooling: exactly one 4D input and one 4D output.
void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "Pooling2dQueueDescriptor", 1);

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
}
687
// Validates a bilinear resize: 4D input and output whose batch and channel
// counts match (only width and height may change).
void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");

    // Resizes bilinear only changes width and height: batch and channel count must match.
    {
        const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
        const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
        if (inputBatchSize != outputBatchSize)
        {
            throw InvalidArgumentException(
                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
                    "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
        }
    }

    {
        // The channel dimension's index depends on the configured data layout.
        DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
        const unsigned int inputChannelCount =
            workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
        const unsigned int outputChannelCount =
            workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
        if (inputChannelCount != outputChannelCount)
        {
            throw InvalidArgumentException(
                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
                    "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
        }
    }
}
722
723void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
724{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100725 ValidateNumInputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
726 ValidateNumOutputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000727
728 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
729 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
730 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
731 workloadInfo.m_OutputTensorInfos[0],
732 "FakeQuantizationQueueDescriptor",
733 "input",
734 "output");
735 if (m_Parameters.m_Min > m_Parameters.m_Max)
736 {
737 throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
738 }
739
740}
741
742void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
743{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100744 ValidateNumInputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
745 ValidateNumOutputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000746
747 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
748 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
749 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
750 workloadInfo.m_OutputTensorInfos[0],
751 "L2NormalizationQueueDescriptor",
752 "input",
753 "output");
754}
755
756void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
757{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100758 ValidateNumInputs(workloadInfo, "ConstantQueueDescriptor", 0);
759 ValidateNumOutputs(workloadInfo, "ConstantQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000760
761 if (!m_LayerOutput)
762 {
763 throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
764 }
765
766 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
767 workloadInfo.m_OutputTensorInfos[0],
768 "ConstantQueueDescriptor",
769 "constant",
770 "output");
771}
772
773void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
774{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100775 ValidateNumInputs(workloadInfo, "ReshapeQueueDescriptor", 1);
776 ValidateNumOutputs(workloadInfo, "ReshapeQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000777
778 if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
779 {
780 throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
781 to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
782 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
783 }
784}
785
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000786void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
787{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100788 ValidateNumInputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
789 ValidateNumOutputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000790
791 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "input");
792 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "output");
793
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000794 if (m_Parameters.m_BlockShape.size() != 2)
795 {
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000796 throw InvalidArgumentException("Block Shape must contain 2 spatial dimensions");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000797 }
798
799 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
800 {
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000801 throw InvalidArgumentException("Pad List must contain the same number of dimensions as Block Shape.");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000802 }
803
804 const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();
805
806 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
807 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
808
Matthew Bentham8800c002018-11-19 13:19:28 +0000809 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
810 unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000811 + heightPad.first + heightPad.second;
812
Matthew Bentham8800c002018-11-19 13:19:28 +0000813 unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000814 + widthPad.first + widthPad.second;
815
816 unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
Matthew Bentham8800c002018-11-19 13:19:28 +0000817 * inputShape[dimensionIndices.GetChannelsIndex()];
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000818
819 if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
820 {
821 throw InvalidArgumentException("SpaceToBatchNdQueueDescriptor: Input tensor has " +
822 to_string(numInputElements) + " after padding but output tensor has " +
823 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
824 }
825
826 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000827 {
828 throw InvalidArgumentException(
829 "Input shape after padding must be divisible by Block Shape in all spatial dimensions");
830 }
831}
832
telsoa014fcda012018-03-09 14:13:49 +0000833void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
834{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100835 ValidateNumInputs(workloadInfo, "FloorQueueDescriptor", 1);
836 ValidateNumOutputs(workloadInfo, "FlootQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000837
838 if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
839 {
840 throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
841 }
842}
843
telsoa01c577f2c2018-08-31 09:22:23 +0100844void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
845{
846 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "LstmQueueDescriptor", 2, "input");
847 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "LstmQueueDescriptor", 2, "output");
848}
849
850void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
851{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100852 ValidateNumInputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
853 ValidateNumOutputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100854
855 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
856 {
857 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Input tensor type must be Float32.");
858 }
859
860 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float16)
861 {
862 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Output tensor type must be Float16.");
863 }
864
865 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
866 workloadInfo.m_OutputTensorInfos[0],
867 "ConvertFp32ToFp16QueueDescriptor",
868 "input",
869 "output");
870}
871
872void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
873{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100874 ValidateNumInputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
875 ValidateNumOutputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100876
877 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float16)
878 {
879 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Input tensor type must be Float16.");
880 }
881 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
882 {
883 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Output tensor type must be Float32.");
884 }
885
886 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
887 workloadInfo.m_OutputTensorInfos[0],
888 "ConvertFp16ToFp32QueueDescriptor",
889 "input",
890 "output");
891}
892
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100893void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
894{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100895 ValidateNumInputs(workloadInfo, "DivisionQueueDescriptor", 2);
896 ValidateNumOutputs(workloadInfo, "DivisionQueueDescriptor", 1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100897
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100898 std::vector<DataType> supportedTypes = {
899 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100900 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100901 DataType::QuantisedSymm16,
902 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100903 };
904
905 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
906 supportedTypes,
907 "DivisionQueueDescriptor");
908
909 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
910 supportedTypes,
911 "DivisionQueueDescriptor");
912
913 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
914 supportedTypes,
915 "DivisionQueueDescriptor");
916
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100917 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
918 workloadInfo.m_InputTensorInfos[1],
919 workloadInfo.m_OutputTensorInfos[0],
920 "DivisionQueueDescriptor",
921 "first input",
922 "second input");
923}
924
David Beckc2044fe2018-09-05 15:00:38 +0100925void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
926{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100927 ValidateNumInputs(workloadInfo, "SubtractionQueueDescriptor", 2);
928 ValidateNumOutputs(workloadInfo, "SubtractionQueueDescriptor", 1);
David Beckc2044fe2018-09-05 15:00:38 +0100929
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100930 std::vector<DataType> supportedTypes = {
931 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100932 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100933 DataType::QuantisedSymm16,
934 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100935 };
936
937 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
938 supportedTypes,
939 "SubtractionQueueDescriptor");
940
941 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
942 supportedTypes,
943 "SubtractionQueueDescriptor");
944
945 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
946 supportedTypes,
947 "SubtractionQueueDescriptor");
948
David Beckc2044fe2018-09-05 15:00:38 +0100949 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
950 workloadInfo.m_InputTensorInfos[1],
951 workloadInfo.m_OutputTensorInfos[0],
952 "SubtractionQueueDescriptor",
953 "first input",
954 "second input");
955}
956
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000957void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
958{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100959 ValidateNumInputs(workloadInfo, "MaximumQueueDescriptor", 2);
960 ValidateNumOutputs(workloadInfo, "MaximumQueueDescriptor", 1);
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000961
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100962 std::vector<DataType> supportedTypes = {
963 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100964 DataType::QuantisedAsymm8,
965 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100966 };
967
968 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
969 supportedTypes,
970 "MaximumQueueDescriptor");
971
972 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
973 supportedTypes,
974 "MaximumQueueDescriptor");
975
976 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
977 supportedTypes,
978 "MaximumQueueDescriptor");
979
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000980 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
981 workloadInfo.m_InputTensorInfos[1],
982 workloadInfo.m_OutputTensorInfos[0],
983 "MaximumQueueDescriptor",
984 "first input",
985 "second input");
986}
987
narpra01a6bf9122018-09-10 09:50:09 +0100988void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
989{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100990 ValidateNumInputs(workloadInfo, "MeanQueueDescriptor", 1);
991 ValidateNumOutputs(workloadInfo, "MeanQueueDescriptor", 1);
narpra01eb061912018-09-10 17:35:27 +0100992
993 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
994 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
995
narpra0132b90462018-09-13 11:07:48 +0100996 if (m_Parameters.m_KeepDims)
narpra01eb061912018-09-10 17:35:27 +0100997 {
998 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
999 }
narpra0132b90462018-09-13 11:07:48 +01001000 else if (m_Parameters.m_Axis.empty())
narpra01eb061912018-09-10 17:35:27 +01001001 {
1002 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
1003 }
1004 else
1005 {
narpra0132b90462018-09-13 11:07:48 +01001006 auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
narpra01eb061912018-09-10 17:35:27 +01001007 ValidateTensorNumDimensions(output,
1008 "MeanQueueDescriptor",
1009 outputDim > 0 ? outputDim : 1,
1010 "output");
1011 }
narpra01a6bf9122018-09-10 09:50:09 +01001012}
1013
jimfly012c9322a2018-09-19 10:59:49 +01001014void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1015{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001016 ValidateNumInputs(workloadInfo, "PadQueueDescriptor", 1);
1017 ValidateNumOutputs(workloadInfo, "PadQueueDescriptor", 1);
jimfly012c9322a2018-09-19 10:59:49 +01001018
1019 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
Nina Drozd661dfa72018-10-02 11:14:17 +01001020 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1021
jimfly012c9322a2018-09-19 10:59:49 +01001022 // input and output should have the same number of dimensions
1023 ValidateTensorNumDimensions(output, "PadQueueDescriptor", input.GetNumDimensions(), "output");
1024 // there should be entry in the pad list for each dimension in the input tensor
1025 if (m_Parameters.m_PadList.size() != input.GetNumDimensions()) {
1026 throw InvalidArgumentException("Pad List should contain the same number of entries as there"
1027 " are dimensions in the input tensor that is " +
1028 to_string(input.GetNumDimensions()) + " entries " +
1029 " not " + to_string(m_Parameters.m_PadList.size()) + " entries.");
1030 }
1031}
1032
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001033void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1034{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001035 ValidateNumInputs(workloadInfo, "QuantizeQueueDescriptor", 1);
1036 ValidateNumOutputs(workloadInfo, "QuantizeQueueDescriptor", 1);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001037
1038
1039 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
1040 {
1041 throw InvalidArgumentException("Quantize only accepts Float32 inputs.");
1042 }
1043
1044 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1045 workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1046 {
1047 throw InvalidArgumentException("Output of quantized layer must be quantized type.");
1048 }
1049}
1050
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001051void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1052{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001053 ValidateNumInputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
1054 ValidateNumOutputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001055}
1056
Conor Kennedy430b5d82018-11-14 15:28:28 +00001057void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1058{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001059 ValidateNumInputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
1060 ValidateNumOutputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
Conor Kennedy430b5d82018-11-14 15:28:28 +00001061
1062 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
1063 const uint32_t rank = input.GetNumDimensions();
1064
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001065 if (rank > 4)
1066 {
1067 throw InvalidArgumentException(
1068 "StridedSliceLayer: Input tensors with rank greater than 4 are not supported");
1069 }
1070
Conor Kennedy430b5d82018-11-14 15:28:28 +00001071 // Begin, End & Stride length must be of rank(input0)
1072 if (m_Parameters.m_Begin.size() != rank)
1073 {
1074 throw InvalidArgumentException("StridedSliceLayer: Begin length must be of rank input0("
1075 + to_string(rank) + ")");
1076 }
1077
1078 if (m_Parameters.m_End.size() != rank)
1079 {
1080 throw InvalidArgumentException("StridedSliceLayer: End length must be of rank input0("
1081 + to_string(rank) + ")");
1082 }
1083
1084 if (m_Parameters.m_Stride.size() != rank)
1085 {
1086 throw InvalidArgumentException("StridedSliceLayer: Stride length must be of rank input0("
1087 + to_string(rank) + ")");
1088 }
1089
1090 // Stride entries must be non-zero
1091 for (auto& stride : m_Parameters.m_Stride)
1092 {
1093 if (stride == 0)
1094 {
1095 throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
1096 }
1097 }
1098}
1099
kevmay0190539692018-11-29 08:40:19 +00001100void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1101{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001102 ValidateNumInputs(workloadInfo, "MinimumQueueDescriptor", 2);
1103 ValidateNumOutputs(workloadInfo, "MinimumQueueDescriptor", 1);
kevmay0190539692018-11-29 08:40:19 +00001104
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001105 std::vector<DataType> supportedTypes = {
1106 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001107 DataType::QuantisedAsymm8,
1108 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001109 };
1110
1111 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1112 supportedTypes,
1113 "MinimumQueueDescriptor");
1114
1115 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1116 supportedTypes,
1117 "MinimumQueueDescriptor");
1118
1119 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1120 supportedTypes,
1121 "MinimumQueueDescriptor");
1122
kevmay0190539692018-11-29 08:40:19 +00001123 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1124 workloadInfo.m_InputTensorInfos[1],
1125 workloadInfo.m_OutputTensorInfos[0],
1126 "MinimumQueueDescriptor",
1127 "first input",
1128 "second input");
1129}
1130
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001131void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1132{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001133 ValidateNumInputs(workloadInfo, "DebugQueueDescriptor", 1);
1134 ValidateNumOutputs(workloadInfo, "DebugQueueDescriptor", 1);
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001135}
1136
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001137void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1138{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001139 ValidateNumInputs(workloadInfo, "EqualQueueDescriptor", 2);
1140 ValidateNumOutputs(workloadInfo, "EqualQueueDescriptor", 1);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001141
1142 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1143 workloadInfo.m_InputTensorInfos[1],
1144 workloadInfo.m_OutputTensorInfos[0],
1145 "EqualQueueDescriptor",
1146 "first input",
1147 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001148
1149 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1150 {
1151 throw InvalidArgumentException("EqualQueueDescriptor: Output tensor type must be Boolean.");
1152 }
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001153}
1154
FrancisMurtagh878f0232018-12-19 10:56:15 +00001155void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1156{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001157 ValidateNumInputs(workloadInfo, "GreaterQueueDescriptor", 2);
1158 ValidateNumOutputs(workloadInfo, "GreaterQueueDescriptor", 1);
FrancisMurtagh878f0232018-12-19 10:56:15 +00001159
1160 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1161 workloadInfo.m_InputTensorInfos[1],
1162 workloadInfo.m_OutputTensorInfos[0],
1163 "GreaterQueueDescriptor",
1164 "first input",
1165 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001166
1167 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1168 {
1169 throw InvalidArgumentException("GreaterQueueDescriptor: Output tensor type must be Boolean.");
1170 }
FrancisMurtagh878f0232018-12-19 10:56:15 +00001171}
1172
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001173void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1174{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001175 ValidateNumInputs(workloadInfo, "RsqrtQueueDescriptor", 1);
1176 ValidateNumOutputs(workloadInfo, "RsqrtQueueDescriptor", 1);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001177 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1178 workloadInfo.m_OutputTensorInfos[0],
1179 "RsqrtQueueDescriptor",
1180 "input",
1181 "output");
1182}
1183
narpra01b89b05f2019-01-16 09:53:09 +00001184void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1185{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001186 ValidateNumInputs(workloadInfo, "GatherQueueDescriptor", 2);
1187 ValidateNumOutputs(workloadInfo, "GatherQueueDescriptor", 1);
narpra014951d842019-01-18 16:53:53 +00001188
1189 const TensorInfo& indices = workloadInfo.m_InputTensorInfos[1];
1190
1191 if (indices.GetDataType() != DataType::Signed32)
1192 {
1193 throw InvalidArgumentException("GatherQueueDescriptor: Indices tensor type must be int32.");
1194 }
1195
1196 const TensorInfo& params = workloadInfo.m_InputTensorInfos[0];
1197 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1198 unsigned int paramsDim = params.GetNumDimensions();
1199 unsigned int indicesDim = indices.GetNumDimensions();
1200 unsigned int outputDim = paramsDim - 1 + indicesDim;
1201
1202 ValidateTensorNumDimensions(output, "GatherQueueDescriptor", outputDim, "output");
narpra01b89b05f2019-01-16 09:53:09 +00001203}
1204
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00001205void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1206{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001207 ValidateNumInputs(workloadInfo, "DetectionPostProcessQueueDescriptor", 2);
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00001208
1209 if (workloadInfo.m_OutputTensorInfos.size() != 4)
1210 {
1211 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Requires exactly four outputs. " +
1212 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
1213 }
1214
1215 if (m_Anchors == nullptr)
1216 {
1217 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Anchors tensor descriptor is missing.");
1218 }
1219
1220 const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
1221 const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
1222 const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
1223 const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00001224 const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
1225 const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00001226 const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
1227
1228 ValidateTensorNumDimensions(boxEncodingsInfo, "DetectionPostProcessQueueDescriptor", 3, "box encodings");
1229 ValidateTensorNumDimensions(scoresInfo, "DetectionPostProcessQueueDescriptor", 3, "scores");
1230 ValidateTensorNumDimensions(anchorsInfo, "DetectionPostProcessQueueDescriptor", 2, "anchors");
1231
1232 ValidateTensorNumDimensions(detectionBoxesInfo, "DetectionPostProcessQueueDescriptor", 3, "detection boxes");
1233 ValidateTensorNumDimensions(detectionScoresInfo, "DetectionPostProcessQueueDescriptor", 2, "detection scores");
1234 ValidateTensorNumDimensions(detectionClassesInfo, "DetectionPostProcessQueueDescriptor", 2, "detection classes");
1235 ValidateTensorNumDimensions(numDetectionsInfo, "DetectionPostProcessQueueDescriptor", 1, "num detections");
1236
1237 ValidateTensorDataType(detectionBoxesInfo, DataType::Float32,
1238 "DetectionPostProcessQueueDescriptor", "detection boxes");
1239 ValidateTensorDataType(detectionScoresInfo, DataType::Float32,
1240 "DetectionPostProcessQueueDescriptor", "detection scores");
1241 ValidateTensorDataType(detectionClassesInfo, DataType::Float32,
1242 "DetectionPostProcessQueueDescriptor", "detection classes");
1243 ValidateTensorDataType(numDetectionsInfo, DataType::Float32,
1244 "DetectionPostProcessQueueDescriptor", "num detections");
1245
1246 if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
1247 {
1248 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Intersection over union threshold "
1249 "must be positive and less than or equal to 1.");
1250 }
1251 if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
1252 {
1253 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Number of classes with background "
1254 "should be equal to number of classes + 1.");
1255 }
1256}
1257
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001258void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1259{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001260 ValidateNumInputs(workloadInfo, "DequantizeQueueDescriptor", 1);
1261 ValidateNumOutputs(workloadInfo, "DequantizeQueueDescriptor", 1);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001262
1263 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1264 workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1265 {
1266 throw InvalidArgumentException("Input to dequantize layer must be quantized type.");
1267 }
1268
1269 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
1270 {
1271 throw InvalidArgumentException("Output of dequantize layer must be Float32 type.");
1272 }
1273}
1274
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001275void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1276{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001277 ValidateNumInputs(workloadInfo, "MergeQueueDescriptor", 2);
1278 ValidateNumOutputs(workloadInfo, "MergeQueueDescriptor", 1);
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001279
1280 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1281 workloadInfo.m_InputTensorInfos[1],
1282 "MergeQueueDescriptor",
1283 "input0",
1284 "input1");
1285
1286 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1287 workloadInfo.m_OutputTensorInfos[0],
1288 "MergeQueueDescriptor",
1289 "input0",
1290 "output");
1291
1292 const DataType dataType = workloadInfo.m_InputTensorInfos[0].GetDataType();
1293 ValidateTensorDataType(workloadInfo.m_InputTensorInfos[1], dataType, "MergeQueueDescriptor", "input1");
1294 ValidateTensorDataType(workloadInfo.m_OutputTensorInfos[0], dataType, "MergeQueueDescriptor", "output");
1295}
1296
Sadik Armaganeff363d2019-04-05 15:25:46 +01001297void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1298{
1299 ValidateNumInputs(workloadInfo, "SwitchQueueDescriptor", 2);
1300 ValidateNumOutputs(workloadInfo, "SwitchQueueDescriptor", 2);
1301
1302 std::vector<DataType> supportedTypes = {
1303 DataType::Float32,
1304 DataType::QuantisedAsymm8,
1305 DataType::QuantisedSymm16
1306 };
1307
1308 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1309 supportedTypes,
1310 "SwitchQueueDescriptor");
1311
1312 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1313 supportedTypes,
1314 "SwitchQueueDescriptor");
1315
1316 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1317 supportedTypes,
1318 "SwitchQueueDescriptor");
1319
1320 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1321 workloadInfo.m_OutputTensorInfos[0],
1322 "SwitchQueueDescriptor",
1323 "input0",
1324 "output0");
1325
1326 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1327 workloadInfo.m_OutputTensorInfos[1],
1328 "SwitchQueueDescriptor",
1329 "input0",
1330 "output1");
1331}
1332
Matteo Martincigh49124022019-01-11 13:25:59 +00001333void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1334{
1335 // This is internally generated so it should not need validation.
1336}
1337
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001338} //namespace armnn