blob: e8e10d972a7b2d3e36c15798318e32a959d126b1 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "WorkloadData.hpp"
6
7#include "CpuTensorHandle.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
Matteo Martincigh21350152018-11-28 16:22:22 +00009#include <DataLayoutIndexed.hpp>
Matthew Bentham8800c002018-11-19 13:19:28 +000010
telsoa014fcda012018-03-09 14:13:49 +000011#include <algorithm>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <iomanip>
telsoa014fcda012018-03-09 14:13:49 +000013#include <string>
14#include <sstream>
telsoa014fcda012018-03-09 14:13:49 +000015
16#include <boost/format.hpp>
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +010017#include <boost/numeric/conversion/cast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Matteo Martincigh21350152018-11-28 16:22:22 +000019using namespace armnnUtils;
20
telsoa014fcda012018-03-09 14:13:49 +000021namespace armnn
22{
23
24//---------------------------------------------------------------
25DataType GetBiasDataType(DataType inputDataType)
26{
27 switch (inputDataType)
28 {
telsoa01c577f2c2018-08-31 09:22:23 +010029 case DataType::Float16:
30 return DataType::Float16;
telsoa014fcda012018-03-09 14:13:49 +000031 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QuantisedAsymm8:
34 return DataType::Signed32;
Ruomei Yan88d44b82019-05-23 14:29:06 +010035 case DataType::QuantisedSymm16:
36 return DataType::Signed32;
telsoa014fcda012018-03-09 14:13:49 +000037 default:
38 BOOST_ASSERT_MSG(false, "Invalid input data type");
39 return DataType::Float32;
40 }
41}
42
43namespace
44{
45
46//---------------------------------------------------------------
// The Android NDK does not provide std::to_string, so supply a stream-based
// replacement that formats any streamable value.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
55
56//---------------------------------------------------------------
57void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
58{
59 if (!ptr)
60 {
61 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
62 paramName + " parameter must be set.");
63 }
64}
65
66//---------------------------------------------------------------
67void ValidateTensorShapesMatch(const TensorInfo& first,
68 const TensorInfo& second,
69 std::string const& descName,
70 std::string const& firstName,
71 std::string const& secondName)
72{
73 if (first.GetShape() != second.GetShape())
74 {
75 throw InvalidArgumentException(descName + ": "
76 + firstName + " & " + secondName + " must have identical shapes");
77 }
78}
79
80//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010081void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000082{
Sadik Armaganeff363d2019-04-05 15:25:46 +010083 if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000084 {
85 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010086 ": Requires exactly " + to_string(expectedSize) + "input(s). " +
telsoa014fcda012018-03-09 14:13:49 +000087 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
88 }
89}
90
91//---------------------------------------------------------------
Sadik Armaganeff363d2019-04-05 15:25:46 +010092void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000093{
Sadik Armaganeff363d2019-04-05 15:25:46 +010094 if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
telsoa014fcda012018-03-09 14:13:49 +000095 {
96 throw InvalidArgumentException(descName +
Sadik Armaganeff363d2019-04-05 15:25:46 +010097 ": Requires exactly " + to_string(expectedSize) + " output(s). " +
telsoa014fcda012018-03-09 14:13:49 +000098 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
99 }
100}
101
102//---------------------------------------------------------------
103void ValidateTensorNumDimensions(const TensorInfo& tensor,
104 std::string const& descName,
105 unsigned int numDimensions,
106 std::string const& tensorName)
107{
108 if (tensor.GetNumDimensions() != numDimensions)
109 {
110 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
111 to_string(tensor.GetNumDimensions()) + " dimensions for " +
112 tensorName + " tensor.");
113 }
114}
115
116//---------------------------------------------------------------
117void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
118 const std::string& descName, std::string const& tensorName)
119{
120 if (tensor.GetDataType() != dataType)
121 {
122 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
123 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
124 }
125}
126
127//---------------------------------------------------------------
128void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
129 const TensorInfo& weightsTensorInfo, const std::string& descName)
130{
131 if (biasTensor.GetQuantizationOffset() != 0)
132 {
133 throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
134 to_string(biasTensor.GetQuantizationOffset()));
135 }
136 const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
kevmay016c46dd32018-12-17 15:32:45 +0000137 if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
telsoa014fcda012018-03-09 14:13:49 +0000138 {
139 // Print the float values with extra precision to see very small differences
140 std::stringstream msg;
141 msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
142 " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
143 biasTensor.GetQuantizationScale();
144 throw InvalidArgumentException(msg.str());
145 }
146}
147
148//---------------------------------------------------------------
149void ValidateTensors(const std::vector<ITensorHandle*>& vec,
150 unsigned int numExpected,
151 const std::string& descName,
152 const std::string& varName)
153{
154 if (vec.empty() && numExpected > 0)
155 {
156 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
157 }
158
159 for (unsigned int i = 0; i < numExpected; ++i)
160 {
161 if (!vec[i])
162 {
163 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
164 }
165 }
166}
167
168//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates that 'first' and 'second' are broadcast-compatible (same rank,
// and each dimension pair either equal or one of them 1), and that the
// broadcast result shape equals the declared output shape.
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName + ": Tensors "
                                       + firstName + " & " + secondName
                                       + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        // A dimension pair is broadcastable when the sizes match or at least one of them is 1.
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        // The broadcast result takes the larger extent of each dimension pair.
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    // The computed broadcast shape must match the output tensor's declared shape exactly.
    TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
                                       + firstName + " & " + secondName
                                       + " does not match the output shape");
    }
}
204
205//---------------------------------------------------------------
206/// Validates that the output tensor's quantization scale is greater than the product
207/// of the two input tensors' quantization scales. This is a requirement of the implementation of
208/// the quantized multiplication.
209void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
210 const TensorInfo& outputTensorInfo, std::string const& descName,
211 const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
212{
213 if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
214 {
215 if (outputTensorInfo.GetQuantizationScale() <=
216 inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
217 {
218 std::stringstream msg;
219 msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
220 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
221 throw InvalidArgumentException(msg.str());
222 }
223 }
224}
225
Sadik Armaganeff363d2019-04-05 15:25:46 +0100226//---------------------------------------------------------------
227void ValidateDataTypes(const TensorInfo& info,
228 const std::vector<armnn::DataType>& supportedTypes,
229 std::string const& descName)
230{
231 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
232 if (iterator == supportedTypes.end())
233 {
234 throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
235 }
236}
237
telsoa014fcda012018-03-09 14:13:49 +0000238} //namespace
239
// Validates that this queue descriptor carries the expected number of
// non-null input and output tensor handles. Inputs are checked first, so an
// input-count error takes precedence over an output-count error.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
246
247//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a memory-copy workload: one input and one output info, matching
// info counts, matching element counts per input/output pair, matching
// handle counts, and non-null handles throughout.
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemCopyQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemCopyQueueDescriptor" , 1);

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
                % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    // Each copied tensor only needs the same total element count — the shapes
    // themselves are not compared here.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                    % i ));
        }
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
                % m_Inputs.size() % m_Outputs.size()));
    }

    // All tensor handles must be non-null.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}
291
292//---------------------------------------------------------------
293void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
294{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100295 ValidateNumInputs(workloadInfo, "ActivationQueueDescriptor", 1);
296 ValidateNumOutputs(workloadInfo, "ActivationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000297 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
298 workloadInfo.m_OutputTensorInfos[0],
299 "ActivationQueueDescriptor",
300 "input",
301 "output");
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100302
303 std::vector<DataType> supportedTypes = {
304 DataType::Float32,
305 DataType::Float16,
Teresa Charlin18515e22019-04-24 10:17:46 +0100306 DataType::QuantisedAsymm8,
307 DataType::QuantisedSymm16
Nattapat Chaimanowongae2c5f02019-04-24 16:19:57 +0100308 };
309
310 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
311 supportedTypes,
312 "ActivationQueueDescriptor");
313
314 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
315 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
316 "ActivationQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000317}
318
319//---------------------------------------------------------------
320void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
321{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100322 ValidateNumInputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
323 ValidateNumOutputs(workloadInfo, "SoftmaxQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000324
325 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
326 workloadInfo.m_OutputTensorInfos[0],
327 "SoftmaxQueueDescriptor",
328 "input",
329 "output");
nikraj01248683f2019-05-29 16:46:50 +0100330
331 std::vector<DataType> supportedTypes =
332 {
333 DataType::Float16,
334 DataType::Float32,
335 DataType::QuantisedAsymm8,
336 DataType::QuantisedSymm16
337 };
338
339 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
340 supportedTypes,
341 "SoftmaxQueueDescriptor");
342
343 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
344 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
345 "SoftmaxQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000346}
347
348//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a splitter workload: one input, at least one output, one view
// origin per output, view origins of the same rank as the input, and every
// output window fitting inside the input tensor.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "SplitterQueueDescriptor", 1);

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "SplitterQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }
        // Each output window (origin + output extent) must lie entirely inside the input tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
395
396//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a concatenation workload: non-empty inputs/outputs, a valid
// concatenation axis, per-input view origins of the same rank as the output,
// merge windows fitting inside the output, and supported data types with the
// output type matching the first input's type.
void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumOutputs(workloadInfo, "ConcatQueueDescriptor", 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException("ConcatQueueDescriptor: At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException("ConcatQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("ConcatQueueDescriptor: At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("ConcatQueueDescriptor: At least one TensorInfo output needs to be provided.");
    }

    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException("Invalid Concatenation Axis provided");
    }

    // When concatenating along the last dimension the window checks below are
    // skipped entirely — note this also skips the data-type validation at the
    // end of this function.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "ConcatQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException("ConcatQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("ConcatQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::Boolean,
        DataType::Signed32,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16
    };

    for (unsigned long i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateDataTypes(workloadInfo.m_InputTensorInfos[i],
                          supportedTypes,
                          "ConcatQueueDescriptor");
    }
    // The output type must match the first input's type.
    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      {workloadInfo.m_InputTensorInfos[0].GetDataType()},
                      "ConcatQueueDescriptor");
}
488
489//---------------------------------------------------------------
//---------------------------------------------------------------
// Validates a fully-connected workload: one 2D/4D input, one 2D output, a 2D
// weight tensor, an optional 1D bias with the correct type and quantization,
// valid quantization multipliers, and supported data types.
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");

    if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
          workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
    }

    if (m_Weight == nullptr)
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
    }

    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        if (m_Bias == nullptr)
        {
            throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
                                           "bias value tensor descriptor is missing.");
        }

        // Validates type and quantization values.
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");

        // The bias type is derived from the input type (e.g. Signed32 for quantized inputs).
        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "FullyConnectedQueueDescriptor", "bias");

        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
    }

    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                      supportedTypes,
                      "FullyConnectedQueueDescriptor");

    // The output type must match the input type.
    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
                      {workloadInfo.m_InputTensorInfos[0].GetDataType()},
                      "FullyConnectedQueueDescriptor");
}
548
549//---------------------------------------------------------------
550void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
551{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100552 ValidateNumInputs(workloadInfo, "NormalizationQueueDescriptor", 1);
553 ValidateNumOutputs(workloadInfo, "NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000554 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
555 workloadInfo.m_OutputTensorInfos[0],
556 "NormalizationQueueDescriptor",
557 "input",
558 "output");
559}
560
561void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
562{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100563 ValidateNumInputs(workloadInfo, "AdditionQueueDescriptor", 2);
564 ValidateNumOutputs(workloadInfo, "AdditionQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000565
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100566 std::vector<DataType> supportedTypes = {
567 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100568 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100569 DataType::QuantisedSymm16,
570 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100571 };
572
573 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
574 supportedTypes,
575 "AdditionQueueDescriptor");
576
577 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
578 supportedTypes,
579 "AdditionQueueDescriptor");
580
581 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
582 supportedTypes,
583 "AdditionQueueDescriptor");
584
telsoa014fcda012018-03-09 14:13:49 +0000585 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
586 workloadInfo.m_InputTensorInfos[1],
587 workloadInfo.m_OutputTensorInfos[0],
588 "AdditionQueueDescriptor",
589 "first input",
590 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000591}
592
593//---------------------------------------------------------------
594void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
595{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100596 ValidateNumInputs(workloadInfo, "MultiplicationQueueDescriptor", 2);
597 ValidateNumOutputs(workloadInfo, "MultiplicationQueueDescriptor", 1);
surmeh01bceff2f2018-03-29 16:29:27 +0100598
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100599 std::vector<DataType> supportedTypes = {
600 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +0100601 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +0100602 DataType::QuantisedSymm16,
603 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +0100604 };
605
606 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
607 supportedTypes,
608 "MultiplicationQueueDescriptor");
609
610 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
611 supportedTypes,
612 "MultiplicationQueueDescriptor");
613
614 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
615 supportedTypes,
616 "MultiplicationQueueDescriptor");
617
surmeh01bceff2f2018-03-29 16:29:27 +0100618 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
619 workloadInfo.m_InputTensorInfos[1],
620 workloadInfo.m_OutputTensorInfos[0],
621 "MultiplicationQueueDescriptor",
622 "first input",
623 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000624}
625
// Validates a batch-normalization workload: one input and one output of the
// same shape, and mean/variance/beta/gamma parameter tensors that are all
// present, 1-dimensional, and shaped identically to each other.
void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "BatchNormalizationQueueDescriptor", 1);
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "BatchNormalizationQueueDescriptor",
                              "input",
                              "output");
    // All four parameter tensors are mandatory.
    ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
    ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
    ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
    ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");


    // Each parameter tensor must be 1-dimensional.
    ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
    ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
    ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
    ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");

    // All parameter tensors must share the mean tensor's shape.
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
}
653
654void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
655{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100656 ValidateNumInputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
657 ValidateNumOutputs(workloadInfo, "Convolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000658
659 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
660 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
661
662 ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
663 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
664 ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
665 "Convolution2dQueueDescriptor", "weight");
666 if (m_Parameters.m_BiasEnabled)
667 {
668 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
669 ValidateTensorDataType(m_Bias->GetTensorInfo(),
670 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
671 "Convolution2dQueueDescriptor", "bias");
672 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
673 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
674 }
675
676 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
677 workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
678}
679
680void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
681{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100682 ValidateNumInputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
683 ValidateNumOutputs(workloadInfo, "DepthwiseConvolution2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000684
685 ValidateTensorNumDimensions(
686 workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
687 ValidateTensorNumDimensions(
688 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");
689
690 ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
691 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");
692
Bruno Goncalves22972f02019-04-26 21:03:24 -0300693 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
694 {
695 throw InvalidArgumentException(
696 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: dilationX (provided %1%) "
697 "and dilationY (provided %2%) cannot be smaller than 1.")
698 % m_Parameters.m_DilationX % m_Parameters.m_DilationX));
699 }
700
Nikhil Rajcec6b652018-10-12 13:51:57 +0100701 const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
702
Matteo Martincigh747ef822018-12-18 09:26:39 +0000703 // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
704 // inputChannels * channelMultiplier should be equal to outputChannels.
telsoa014fcda012018-03-09 14:13:49 +0000705 const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
Matteo Martincigh747ef822018-12-18 09:26:39 +0000706 const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
Nikhil Rajcec6b652018-10-12 13:51:57 +0100707 const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelIndex];
telsoa014fcda012018-03-09 14:13:49 +0000708 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
709 {
710 throw InvalidArgumentException(
711 boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
712 "equal to input_channels (provided %2%) multiplied by channel_multiplier "
713 "(provided %3%).")
714 % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
715 }
716
717 if (m_Parameters.m_BiasEnabled)
718 {
719 ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
720 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
721 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
722 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");
723
724 ValidateTensorDataType(m_Bias->GetTensorInfo(),
725 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
726 "DepthwiseConvolution2dQueueDescriptor", "bias");
727 }
728
729 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
730 workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
Ruomei Yan88d44b82019-05-23 14:29:06 +0100731
732 // Check the supported data types
733 std::vector<DataType> supportedTypes = {
734 DataType::Float32,
735 DataType::QuantisedAsymm8,
736 DataType::QuantisedSymm16,
737 DataType::Float16
738 };
739
740 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
741 supportedTypes,
742 "DepthwiseConvolution2dQueueDescriptor");
743
744 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
745 {workloadInfo.m_InputTensorInfos[0].GetDataType()},
746 "DepthwiseConvolution2dQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000747}
748
749void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
750{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100751 ValidateNumInputs(workloadInfo, "PermuteQueueDescriptor", 1);
752 ValidateNumOutputs(workloadInfo, "PermuteQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000753
754 const PermutationVector& mapping = m_Parameters.m_DimMappings;
755
756 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
757 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
758
759 ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
760 ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
761
762 for (unsigned int i = 0; i < mapping.GetSize(); ++i)
763 {
764 if (input.GetShape()[i] != output.GetShape()[mapping[i]])
765 {
766 throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
767 " (=" + to_string(input.GetShape()[i]) + ") " +
768 "must match dst dimension " + to_string(mapping[i]) +
769 " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
770 }
771 }
772}
773
774void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
775{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100776 ValidateNumInputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
777 ValidateNumOutputs(workloadInfo, "Pooling2dQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000778
779 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
780 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
781}
782
783void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
784{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100785 ValidateNumInputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
786 ValidateNumOutputs(workloadInfo, "ResizeBilinearQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000787
788 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
789 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
790
telsoa01c577f2c2018-08-31 09:22:23 +0100791 // Resizes bilinear only changes width and height: batch and channel count must match.
telsoa014fcda012018-03-09 14:13:49 +0000792 {
793 const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
794 const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
795 if (inputBatchSize != outputBatchSize)
796 {
797 throw InvalidArgumentException(
798 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
799 "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
800 }
801 }
802
803 {
Matthew Bentham8800c002018-11-19 13:19:28 +0000804 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
James Conroy59540822018-10-11 12:39:05 +0100805 const unsigned int inputChannelCount =
Matthew Bentham8800c002018-11-19 13:19:28 +0000806 workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
James Conroy59540822018-10-11 12:39:05 +0100807 const unsigned int outputChannelCount =
Matthew Bentham8800c002018-11-19 13:19:28 +0000808 workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
telsoa014fcda012018-03-09 14:13:49 +0000809 if (inputChannelCount != outputChannelCount)
810 {
811 throw InvalidArgumentException(
812 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
813 "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
814 }
815 }
816}
817
818void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
819{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100820 ValidateNumInputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
821 ValidateNumOutputs(workloadInfo, "FakeQuantizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000822
823 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
824 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
825 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
826 workloadInfo.m_OutputTensorInfos[0],
827 "FakeQuantizationQueueDescriptor",
828 "input",
829 "output");
830 if (m_Parameters.m_Min > m_Parameters.m_Max)
831 {
832 throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
833 }
834
835}
836
837void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
838{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100839 ValidateNumInputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
840 ValidateNumOutputs(workloadInfo, "L2NormalizationQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000841
842 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
843 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
844 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
845 workloadInfo.m_OutputTensorInfos[0],
846 "L2NormalizationQueueDescriptor",
847 "input",
848 "output");
849}
850
851void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
852{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100853 ValidateNumInputs(workloadInfo, "ConstantQueueDescriptor", 0);
854 ValidateNumOutputs(workloadInfo, "ConstantQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000855
856 if (!m_LayerOutput)
857 {
858 throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
859 }
860
861 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
862 workloadInfo.m_OutputTensorInfos[0],
863 "ConstantQueueDescriptor",
864 "constant",
865 "output");
Nina Drozd58ef2c62019-05-16 12:09:18 +0100866
867 // Check the supported data types
868 std::vector<DataType> supportedTypes =
Nina Drozd2f2778f2019-05-27 10:37:05 +0100869 {
870 DataType::Float32,
871 DataType::Float16,
872 DataType::Signed32,
873 DataType::QuantisedAsymm8,
874 DataType::QuantisedSymm16
875 };
Nina Drozd58ef2c62019-05-16 12:09:18 +0100876
877 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], supportedTypes, "ConstantQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000878}
879
880void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
881{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100882 ValidateNumInputs(workloadInfo, "ReshapeQueueDescriptor", 1);
883 ValidateNumOutputs(workloadInfo, "ReshapeQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000884
885 if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
886 {
887 throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
888 to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
889 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
890 }
Nina Drozd2f2778f2019-05-27 10:37:05 +0100891
892 // Check the supported data types
893 std::vector<DataType> supportedTypes =
894 {
895 DataType::Float32,
896 DataType::Float16,
Nina Drozd8ed4b8c2019-05-29 10:41:04 +0100897 DataType::QuantisedAsymm8,
898 DataType::QuantisedSymm16
Nina Drozd2f2778f2019-05-27 10:37:05 +0100899 };
900
901 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, "ReshapeQueueDescriptor");
902 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], supportedTypes, "ReshapeQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000903}
904
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000905void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
906{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100907 ValidateNumInputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
908 ValidateNumOutputs(workloadInfo, "SpaceToBatchNdQueueDescriptor", 1);
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000909
910 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "input");
911 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "output");
912
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000913 if (m_Parameters.m_BlockShape.size() != 2)
914 {
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000915 throw InvalidArgumentException("Block Shape must contain 2 spatial dimensions");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000916 }
917
918 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
919 {
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000920 throw InvalidArgumentException("Pad List must contain the same number of dimensions as Block Shape.");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000921 }
922
923 const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();
924
925 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
926 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
927
Matthew Bentham8800c002018-11-19 13:19:28 +0000928 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
929 unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000930 + heightPad.first + heightPad.second;
931
Matthew Bentham8800c002018-11-19 13:19:28 +0000932 unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000933 + widthPad.first + widthPad.second;
934
935 unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
Matthew Bentham8800c002018-11-19 13:19:28 +0000936 * inputShape[dimensionIndices.GetChannelsIndex()];
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000937
938 if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
939 {
940 throw InvalidArgumentException("SpaceToBatchNdQueueDescriptor: Input tensor has " +
941 to_string(numInputElements) + " after padding but output tensor has " +
942 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
943 }
944
945 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000946 {
947 throw InvalidArgumentException(
948 "Input shape after padding must be divisible by Block Shape in all spatial dimensions");
949 }
950}
951
telsoa014fcda012018-03-09 14:13:49 +0000952void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
953{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100954 ValidateNumInputs(workloadInfo, "FloorQueueDescriptor", 1);
955 ValidateNumOutputs(workloadInfo, "FlootQueueDescriptor", 1);
telsoa014fcda012018-03-09 14:13:49 +0000956
957 if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
958 {
959 throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
960 }
961}
962
telsoa01c577f2c2018-08-31 09:22:23 +0100963void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
964{
965 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "LstmQueueDescriptor", 2, "input");
966 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "LstmQueueDescriptor", 2, "output");
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100967
968 std::vector<DataType> supportedTypes = {
Conor Kennedyb9971c92019-05-07 07:14:23 +0100969 DataType::Float16,
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100970 DataType::Float32,
Conor Kennedyb9971c92019-05-07 07:14:23 +0100971 DataType::QuantisedSymm16
Nattapat Chaimanowongeb2b3292019-05-07 12:02:30 +0100972 };
973
974 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
975 supportedTypes,
976 "LstmQueueDescriptor");
977
978 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
979 supportedTypes,
980 "LstmQueueDescriptor");
telsoa01c577f2c2018-08-31 09:22:23 +0100981}
982
983void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
984{
Sadik Armaganeff363d2019-04-05 15:25:46 +0100985 ValidateNumInputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
986 ValidateNumOutputs(workloadInfo, "ConvertFp32ToFp16QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +0100987
988 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
989 {
990 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Input tensor type must be Float32.");
991 }
992
993 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float16)
994 {
995 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Output tensor type must be Float16.");
996 }
997
998 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
999 workloadInfo.m_OutputTensorInfos[0],
1000 "ConvertFp32ToFp16QueueDescriptor",
1001 "input",
1002 "output");
1003}
1004
1005void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1006{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001007 ValidateNumInputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
1008 ValidateNumOutputs(workloadInfo, "ConvertFp16ToFp32QueueDescriptor", 1);
telsoa01c577f2c2018-08-31 09:22:23 +01001009
1010 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float16)
1011 {
1012 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Input tensor type must be Float16.");
1013 }
1014 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
1015 {
1016 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Output tensor type must be Float32.");
1017 }
1018
1019 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1020 workloadInfo.m_OutputTensorInfos[0],
1021 "ConvertFp16ToFp32QueueDescriptor",
1022 "input",
1023 "output");
1024}
1025
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001026void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1027{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001028 ValidateNumInputs(workloadInfo, "DivisionQueueDescriptor", 2);
1029 ValidateNumOutputs(workloadInfo, "DivisionQueueDescriptor", 1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001030
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001031 std::vector<DataType> supportedTypes = {
1032 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001033 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +01001034 DataType::QuantisedSymm16,
1035 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001036 };
1037
1038 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1039 supportedTypes,
1040 "DivisionQueueDescriptor");
1041
1042 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1043 supportedTypes,
1044 "DivisionQueueDescriptor");
1045
1046 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1047 supportedTypes,
1048 "DivisionQueueDescriptor");
1049
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001050 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1051 workloadInfo.m_InputTensorInfos[1],
1052 workloadInfo.m_OutputTensorInfos[0],
1053 "DivisionQueueDescriptor",
1054 "first input",
1055 "second input");
1056}
1057
David Beckc2044fe2018-09-05 15:00:38 +01001058void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1059{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001060 ValidateNumInputs(workloadInfo, "SubtractionQueueDescriptor", 2);
1061 ValidateNumOutputs(workloadInfo, "SubtractionQueueDescriptor", 1);
David Beckc2044fe2018-09-05 15:00:38 +01001062
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001063 std::vector<DataType> supportedTypes = {
1064 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001065 DataType::QuantisedAsymm8,
Jim Flynn82fbe7c2019-04-02 15:19:08 +01001066 DataType::QuantisedSymm16,
1067 DataType::Float16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001068 };
1069
1070 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1071 supportedTypes,
1072 "SubtractionQueueDescriptor");
1073
1074 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1075 supportedTypes,
1076 "SubtractionQueueDescriptor");
1077
1078 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1079 supportedTypes,
1080 "SubtractionQueueDescriptor");
1081
David Beckc2044fe2018-09-05 15:00:38 +01001082 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1083 workloadInfo.m_InputTensorInfos[1],
1084 workloadInfo.m_OutputTensorInfos[0],
1085 "SubtractionQueueDescriptor",
1086 "first input",
1087 "second input");
1088}
1089
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001090void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1091{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001092 ValidateNumInputs(workloadInfo, "MaximumQueueDescriptor", 2);
1093 ValidateNumOutputs(workloadInfo, "MaximumQueueDescriptor", 1);
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001094
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001095 std::vector<DataType> supportedTypes = {
1096 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001097 DataType::QuantisedAsymm8,
1098 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001099 };
1100
1101 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1102 supportedTypes,
1103 "MaximumQueueDescriptor");
1104
1105 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1106 supportedTypes,
1107 "MaximumQueueDescriptor");
1108
1109 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1110 supportedTypes,
1111 "MaximumQueueDescriptor");
1112
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +00001113 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1114 workloadInfo.m_InputTensorInfos[1],
1115 workloadInfo.m_OutputTensorInfos[0],
1116 "MaximumQueueDescriptor",
1117 "first input",
1118 "second input");
1119}
1120
narpra01a6bf9122018-09-10 09:50:09 +01001121void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1122{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001123 ValidateNumInputs(workloadInfo, "MeanQueueDescriptor", 1);
1124 ValidateNumOutputs(workloadInfo, "MeanQueueDescriptor", 1);
narpra01eb061912018-09-10 17:35:27 +01001125
1126 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
1127 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1128
narpra0132b90462018-09-13 11:07:48 +01001129 if (m_Parameters.m_KeepDims)
narpra01eb061912018-09-10 17:35:27 +01001130 {
1131 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
1132 }
narpra0132b90462018-09-13 11:07:48 +01001133 else if (m_Parameters.m_Axis.empty())
narpra01eb061912018-09-10 17:35:27 +01001134 {
1135 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
1136 }
1137 else
1138 {
narpra0132b90462018-09-13 11:07:48 +01001139 auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
narpra01eb061912018-09-10 17:35:27 +01001140 ValidateTensorNumDimensions(output,
1141 "MeanQueueDescriptor",
1142 outputDim > 0 ? outputDim : 1,
1143 "output");
1144 }
narpra01a6bf9122018-09-10 09:50:09 +01001145}
1146
jimfly012c9322a2018-09-19 10:59:49 +01001147void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1148{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001149 ValidateNumInputs(workloadInfo, "PadQueueDescriptor", 1);
1150 ValidateNumOutputs(workloadInfo, "PadQueueDescriptor", 1);
jimfly012c9322a2018-09-19 10:59:49 +01001151
1152 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
Nina Drozd661dfa72018-10-02 11:14:17 +01001153 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1154
jimfly012c9322a2018-09-19 10:59:49 +01001155 // input and output should have the same number of dimensions
1156 ValidateTensorNumDimensions(output, "PadQueueDescriptor", input.GetNumDimensions(), "output");
1157 // there should be entry in the pad list for each dimension in the input tensor
1158 if (m_Parameters.m_PadList.size() != input.GetNumDimensions()) {
1159 throw InvalidArgumentException("Pad List should contain the same number of entries as there"
1160 " are dimensions in the input tensor that is " +
1161 to_string(input.GetNumDimensions()) + " entries " +
1162 " not " + to_string(m_Parameters.m_PadList.size()) + " entries.");
1163 }
1164}
1165
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001166void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1167{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001168 ValidateNumInputs(workloadInfo, "QuantizeQueueDescriptor", 1);
1169 ValidateNumOutputs(workloadInfo, "QuantizeQueueDescriptor", 1);
Derek Lambertia9cca6a2019-03-25 15:41:58 +00001170
1171
1172 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
1173 {
1174 throw InvalidArgumentException("Quantize only accepts Float32 inputs.");
1175 }
1176
1177 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1178 workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1179 {
1180 throw InvalidArgumentException("Output of quantized layer must be quantized type.");
1181 }
1182}
1183
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001184void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1185{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001186 ValidateNumInputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
1187 ValidateNumOutputs(workloadInfo, "BatchToSpaceNdQueueDescriptor", 1);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00001188}
1189
Conor Kennedy430b5d82018-11-14 15:28:28 +00001190void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1191{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001192 ValidateNumInputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
1193 ValidateNumOutputs(workloadInfo, "StridedSliceQueueDescriptor", 1);
Conor Kennedy430b5d82018-11-14 15:28:28 +00001194
1195 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
1196 const uint32_t rank = input.GetNumDimensions();
1197
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001198 if (rank > 4)
1199 {
1200 throw InvalidArgumentException(
1201 "StridedSliceLayer: Input tensors with rank greater than 4 are not supported");
1202 }
1203
Conor Kennedy430b5d82018-11-14 15:28:28 +00001204 // Begin, End & Stride length must be of rank(input0)
1205 if (m_Parameters.m_Begin.size() != rank)
1206 {
1207 throw InvalidArgumentException("StridedSliceLayer: Begin length must be of rank input0("
1208 + to_string(rank) + ")");
1209 }
1210
1211 if (m_Parameters.m_End.size() != rank)
1212 {
1213 throw InvalidArgumentException("StridedSliceLayer: End length must be of rank input0("
1214 + to_string(rank) + ")");
1215 }
1216
1217 if (m_Parameters.m_Stride.size() != rank)
1218 {
1219 throw InvalidArgumentException("StridedSliceLayer: Stride length must be of rank input0("
1220 + to_string(rank) + ")");
1221 }
1222
1223 // Stride entries must be non-zero
1224 for (auto& stride : m_Parameters.m_Stride)
1225 {
1226 if (stride == 0)
1227 {
1228 throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
1229 }
1230 }
1231}
1232
kevmay0190539692018-11-29 08:40:19 +00001233void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1234{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001235 ValidateNumInputs(workloadInfo, "MinimumQueueDescriptor", 2);
1236 ValidateNumOutputs(workloadInfo, "MinimumQueueDescriptor", 1);
kevmay0190539692018-11-29 08:40:19 +00001237
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001238 std::vector<DataType> supportedTypes = {
1239 DataType::Float32,
Sadik Armagan2999a022019-04-09 14:20:12 +01001240 DataType::QuantisedAsymm8,
1241 DataType::QuantisedSymm16
Sadik Armagan2e6dc3a2019-04-03 17:48:18 +01001242 };
1243
1244 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1245 supportedTypes,
1246 "MinimumQueueDescriptor");
1247
1248 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1249 supportedTypes,
1250 "MinimumQueueDescriptor");
1251
1252 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1253 supportedTypes,
1254 "MinimumQueueDescriptor");
1255
kevmay0190539692018-11-29 08:40:19 +00001256 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1257 workloadInfo.m_InputTensorInfos[1],
1258 workloadInfo.m_OutputTensorInfos[0],
1259 "MinimumQueueDescriptor",
1260 "first input",
1261 "second input");
1262}
1263
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001264void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1265{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001266 ValidateNumInputs(workloadInfo, "DebugQueueDescriptor", 1);
1267 ValidateNumOutputs(workloadInfo, "DebugQueueDescriptor", 1);
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001268}
1269
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001270void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1271{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001272 ValidateNumInputs(workloadInfo, "EqualQueueDescriptor", 2);
1273 ValidateNumOutputs(workloadInfo, "EqualQueueDescriptor", 1);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001274
1275 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1276 workloadInfo.m_InputTensorInfos[1],
1277 workloadInfo.m_OutputTensorInfos[0],
1278 "EqualQueueDescriptor",
1279 "first input",
1280 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001281
1282 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1283 {
1284 throw InvalidArgumentException("EqualQueueDescriptor: Output tensor type must be Boolean.");
1285 }
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001286}
1287
FrancisMurtagh878f0232018-12-19 10:56:15 +00001288void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1289{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001290 ValidateNumInputs(workloadInfo, "GreaterQueueDescriptor", 2);
1291 ValidateNumOutputs(workloadInfo, "GreaterQueueDescriptor", 1);
FrancisMurtagh878f0232018-12-19 10:56:15 +00001292
1293 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1294 workloadInfo.m_InputTensorInfos[1],
1295 workloadInfo.m_OutputTensorInfos[0],
1296 "GreaterQueueDescriptor",
1297 "first input",
1298 "second input");
kevmay012b4d88e2019-01-24 14:05:09 +00001299
1300 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Boolean)
1301 {
1302 throw InvalidArgumentException("GreaterQueueDescriptor: Output tensor type must be Boolean.");
1303 }
FrancisMurtagh878f0232018-12-19 10:56:15 +00001304}
1305
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001306void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1307{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001308 ValidateNumInputs(workloadInfo, "RsqrtQueueDescriptor", 1);
1309 ValidateNumOutputs(workloadInfo, "RsqrtQueueDescriptor", 1);
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001310 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1311 workloadInfo.m_OutputTensorInfos[0],
1312 "RsqrtQueueDescriptor",
1313 "input",
1314 "output");
1315}
1316
narpra01b89b05f2019-01-16 09:53:09 +00001317void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1318{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001319 ValidateNumInputs(workloadInfo, "GatherQueueDescriptor", 2);
1320 ValidateNumOutputs(workloadInfo, "GatherQueueDescriptor", 1);
narpra014951d842019-01-18 16:53:53 +00001321
1322 const TensorInfo& indices = workloadInfo.m_InputTensorInfos[1];
1323
1324 if (indices.GetDataType() != DataType::Signed32)
1325 {
1326 throw InvalidArgumentException("GatherQueueDescriptor: Indices tensor type must be int32.");
1327 }
1328
1329 const TensorInfo& params = workloadInfo.m_InputTensorInfos[0];
1330 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1331 unsigned int paramsDim = params.GetNumDimensions();
1332 unsigned int indicesDim = indices.GetNumDimensions();
1333 unsigned int outputDim = paramsDim - 1 + indicesDim;
1334
1335 ValidateTensorNumDimensions(output, "GatherQueueDescriptor", outputDim, "output");
narpra01b89b05f2019-01-16 09:53:09 +00001336}
1337
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00001338void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1339{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001340 ValidateNumInputs(workloadInfo, "DetectionPostProcessQueueDescriptor", 2);
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00001341
1342 if (workloadInfo.m_OutputTensorInfos.size() != 4)
1343 {
1344 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Requires exactly four outputs. " +
1345 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
1346 }
1347
1348 if (m_Anchors == nullptr)
1349 {
1350 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Anchors tensor descriptor is missing.");
1351 }
1352
1353 const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
1354 const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
1355 const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
1356 const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +00001357 const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
1358 const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
Narumol Prangnawaratbc67cef2019-01-31 15:31:54 +00001359 const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
1360
1361 ValidateTensorNumDimensions(boxEncodingsInfo, "DetectionPostProcessQueueDescriptor", 3, "box encodings");
1362 ValidateTensorNumDimensions(scoresInfo, "DetectionPostProcessQueueDescriptor", 3, "scores");
1363 ValidateTensorNumDimensions(anchorsInfo, "DetectionPostProcessQueueDescriptor", 2, "anchors");
1364
1365 ValidateTensorNumDimensions(detectionBoxesInfo, "DetectionPostProcessQueueDescriptor", 3, "detection boxes");
1366 ValidateTensorNumDimensions(detectionScoresInfo, "DetectionPostProcessQueueDescriptor", 2, "detection scores");
1367 ValidateTensorNumDimensions(detectionClassesInfo, "DetectionPostProcessQueueDescriptor", 2, "detection classes");
1368 ValidateTensorNumDimensions(numDetectionsInfo, "DetectionPostProcessQueueDescriptor", 1, "num detections");
1369
1370 ValidateTensorDataType(detectionBoxesInfo, DataType::Float32,
1371 "DetectionPostProcessQueueDescriptor", "detection boxes");
1372 ValidateTensorDataType(detectionScoresInfo, DataType::Float32,
1373 "DetectionPostProcessQueueDescriptor", "detection scores");
1374 ValidateTensorDataType(detectionClassesInfo, DataType::Float32,
1375 "DetectionPostProcessQueueDescriptor", "detection classes");
1376 ValidateTensorDataType(numDetectionsInfo, DataType::Float32,
1377 "DetectionPostProcessQueueDescriptor", "num detections");
1378
1379 if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
1380 {
1381 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Intersection over union threshold "
1382 "must be positive and less than or equal to 1.");
1383 }
1384 if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
1385 {
1386 throw InvalidArgumentException("DetectionPostProcessQueueDescriptor: Number of classes with background "
1387 "should be equal to number of classes + 1.");
1388 }
1389}
1390
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001391void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1392{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001393 ValidateNumInputs(workloadInfo, "DequantizeQueueDescriptor", 1);
1394 ValidateNumOutputs(workloadInfo, "DequantizeQueueDescriptor", 1);
Nattapat Chaimanowonge4294fd2019-03-28 09:56:53 +00001395
1396 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
1397 workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
1398 {
1399 throw InvalidArgumentException("Input to dequantize layer must be quantized type.");
1400 }
1401
1402 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
1403 {
1404 throw InvalidArgumentException("Output of dequantize layer must be Float32 type.");
1405 }
1406}
1407
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001408void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1409{
Sadik Armaganeff363d2019-04-05 15:25:46 +01001410 ValidateNumInputs(workloadInfo, "MergeQueueDescriptor", 2);
1411 ValidateNumOutputs(workloadInfo, "MergeQueueDescriptor", 1);
Nattapat Chaimanowong1f886302019-04-05 13:37:19 +01001412
1413 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1414 workloadInfo.m_InputTensorInfos[1],
1415 "MergeQueueDescriptor",
1416 "input0",
1417 "input1");
1418
1419 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1420 workloadInfo.m_OutputTensorInfos[0],
1421 "MergeQueueDescriptor",
1422 "input0",
1423 "output");
1424
1425 const DataType dataType = workloadInfo.m_InputTensorInfos[0].GetDataType();
1426 ValidateTensorDataType(workloadInfo.m_InputTensorInfos[1], dataType, "MergeQueueDescriptor", "input1");
1427 ValidateTensorDataType(workloadInfo.m_OutputTensorInfos[0], dataType, "MergeQueueDescriptor", "output");
1428}
1429
Sadik Armaganeff363d2019-04-05 15:25:46 +01001430void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1431{
1432 ValidateNumInputs(workloadInfo, "SwitchQueueDescriptor", 2);
1433 ValidateNumOutputs(workloadInfo, "SwitchQueueDescriptor", 2);
1434
1435 std::vector<DataType> supportedTypes = {
1436 DataType::Float32,
1437 DataType::QuantisedAsymm8,
1438 DataType::QuantisedSymm16
1439 };
1440
1441 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
1442 supportedTypes,
1443 "SwitchQueueDescriptor");
1444
1445 ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
1446 supportedTypes,
1447 "SwitchQueueDescriptor");
1448
1449 ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
1450 supportedTypes,
1451 "SwitchQueueDescriptor");
1452
1453 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1454 workloadInfo.m_OutputTensorInfos[0],
1455 "SwitchQueueDescriptor",
1456 "input0",
1457 "output0");
1458
1459 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1460 workloadInfo.m_OutputTensorInfos[1],
1461 "SwitchQueueDescriptor",
1462 "input0",
1463 "output1");
1464}
1465
// Pre-compiled workloads are produced by the backend optimization pipeline,
// not from user-supplied descriptors, so no validation is performed here.
void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // This is internally generated so it should not need validation.
}
1470
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001471} //namespace armnn