blob: 05f4e317a9081d9b9dbf3704efd8495faa74bcb2 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "WorkloadData.hpp"
6
7#include "CpuTensorHandle.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
Matteo Martincigh21350152018-11-28 16:22:22 +00009#include <DataLayoutIndexed.hpp>
Matthew Bentham8800c002018-11-19 13:19:28 +000010
telsoa014fcda012018-03-09 14:13:49 +000011#include <algorithm>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <iomanip>
telsoa014fcda012018-03-09 14:13:49 +000013#include <string>
14#include <sstream>
telsoa014fcda012018-03-09 14:13:49 +000015
16#include <boost/format.hpp>
17
Matteo Martincigh21350152018-11-28 16:22:22 +000018using namespace armnnUtils;
19
telsoa014fcda012018-03-09 14:13:49 +000020namespace armnn
21{
22
23//---------------------------------------------------------------
24DataType GetBiasDataType(DataType inputDataType)
25{
26 switch (inputDataType)
27 {
telsoa01c577f2c2018-08-31 09:22:23 +010028 case DataType::Float16:
29 return DataType::Float16;
telsoa014fcda012018-03-09 14:13:49 +000030 case DataType::Float32:
31 return DataType::Float32;
32 case DataType::QuantisedAsymm8:
33 return DataType::Signed32;
34 default:
35 BOOST_ASSERT_MSG(false, "Invalid input data type");
36 return DataType::Float32;
37 }
38}
39
40namespace
41{
42
43//---------------------------------------------------------------
44//android ndk does not support std::to_string function.
//---------------------------------------------------------------
// Local replacement for std::to_string, which the Android NDK lacks.
// Formats any streamable value via an output string stream.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
52
53//---------------------------------------------------------------
54void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
55{
56 if (!ptr)
57 {
58 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
59 paramName + " parameter must be set.");
60 }
61}
62
63//---------------------------------------------------------------
64void ValidateTensorShapesMatch(const TensorInfo& first,
65 const TensorInfo& second,
66 std::string const& descName,
67 std::string const& firstName,
68 std::string const& secondName)
69{
70 if (first.GetShape() != second.GetShape())
71 {
72 throw InvalidArgumentException(descName + ": "
73 + firstName + " & " + secondName + " must have identical shapes");
74 }
75}
76
77//---------------------------------------------------------------
78void ValidateNoInputs(const WorkloadInfo& workloadInfo, std::string const& descName)
79{
80 if (workloadInfo.m_InputTensorInfos.size() != 0)
81 {
82 throw InvalidArgumentException(descName +
83 ": Requires no inputs. " +
84 to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided.");
85 }
86}
87
88//---------------------------------------------------------------
89void ValidateSingleInput(const WorkloadInfo& workloadInfo, std::string const& descName)
90{
91 if (workloadInfo.m_InputTensorInfos.size() != 1)
92 {
93 throw InvalidArgumentException(descName +
94 ": Requires exactly one input. " +
95 to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided." );
96 }
97}
98
99//---------------------------------------------------------------
100void ValidateTwoInputs(const WorkloadInfo& workloadInfo, std::string const& descName)
101{
102 if (workloadInfo.m_InputTensorInfos.size() != 2)
103 {
104 throw InvalidArgumentException(descName +
105 ": Requires exactly two workloadInfo.m_InputTensorInfos. " +
106 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
107 }
108}
109
110//---------------------------------------------------------------
111void ValidateSingleOutput(const WorkloadInfo& workloadInfo, std::string const& descName)
112{
113 if (workloadInfo.m_OutputTensorInfos.size() != 1)
114 {
115 throw InvalidArgumentException(descName +
116 ": Requires exactly one output. " +
117 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
118 }
119}
120
121//---------------------------------------------------------------
122void ValidateTensorNumDimensions(const TensorInfo& tensor,
123 std::string const& descName,
124 unsigned int numDimensions,
125 std::string const& tensorName)
126{
127 if (tensor.GetNumDimensions() != numDimensions)
128 {
129 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
130 to_string(tensor.GetNumDimensions()) + " dimensions for " +
131 tensorName + " tensor.");
132 }
133}
134
135//---------------------------------------------------------------
136void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
137 const std::string& descName, std::string const& tensorName)
138{
139 if (tensor.GetDataType() != dataType)
140 {
141 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
142 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
143 }
144}
145
146//---------------------------------------------------------------
147void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
148 const TensorInfo& weightsTensorInfo, const std::string& descName)
149{
150 if (biasTensor.GetQuantizationOffset() != 0)
151 {
152 throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
153 to_string(biasTensor.GetQuantizationOffset()));
154 }
155 const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
kevmay016c46dd32018-12-17 15:32:45 +0000156 if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
telsoa014fcda012018-03-09 14:13:49 +0000157 {
158 // Print the float values with extra precision to see very small differences
159 std::stringstream msg;
160 msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
161 " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
162 biasTensor.GetQuantizationScale();
163 throw InvalidArgumentException(msg.str());
164 }
165}
166
167//---------------------------------------------------------------
168void ValidateTensors(const std::vector<ITensorHandle*>& vec,
169 unsigned int numExpected,
170 const std::string& descName,
171 const std::string& varName)
172{
173 if (vec.empty() && numExpected > 0)
174 {
175 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
176 }
177
178 for (unsigned int i = 0; i < numExpected; ++i)
179 {
180 if (!vec[i])
181 {
182 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
183 }
184 }
185}
186
187//---------------------------------------------------------------
188void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
189 const TensorInfo& second,
190 const TensorInfo& output,
191 std::string const& descName,
192 std::string const& firstName,
193 std::string const& secondName)
194{
195 // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
196 // broadcasted.
197 if (first.GetNumDimensions() != second.GetNumDimensions())
198 {
199 throw InvalidArgumentException(descName + ": Tensors "
200 + firstName + " & " + secondName
201 + " must have the same number of dimensions in order to be broadcasted");
202 }
203 uint32_t numDims = first.GetNumDimensions();
204 std::vector<uint32_t> outputDims(numDims, 0u);
205 for (uint32_t i = 0; i < numDims; i++)
206 {
207 const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
208 const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
209 if (dimsNotEqual && dimsNotOne)
210 {
211 throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
212 }
213 outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
214 }
215 TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
216 if (broadcastShape != output.GetShape())
217 {
218 throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
219 + firstName + " & " + secondName
220 + " does not match the output shape");
221 }
222}
223
224//---------------------------------------------------------------
225/// Validates that the output tensor's quantization scale is greater than the product
226/// of the two input tensors' quantization scales. This is a requirement of the implementation of
227/// the quantized multiplication.
228void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
229 const TensorInfo& outputTensorInfo, std::string const& descName,
230 const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
231{
232 if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
233 {
234 if (outputTensorInfo.GetQuantizationScale() <=
235 inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
236 {
237 std::stringstream msg;
238 msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
239 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
240 throw InvalidArgumentException(msg.str());
241 }
242 }
243}
244
245} //namespace
246
// Validates that the input and output tensor-handle arrays each hold at least
// the expected number of handles and contain no null entries.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
253
254//---------------------------------------------------------------
255void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
256{
257 ValidateSingleInput(workloadInfo, "MemCopyQueueDescriptor");
258 ValidateSingleOutput(workloadInfo, "MemCopyQueueDescriptor");
259
260 if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
261 {
262 throw InvalidArgumentException(boost::str(
263 boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
264 % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
265 }
266
267 for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
268 {
269 if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
270 workloadInfo.m_OutputTensorInfos[i].GetNumElements())
271 {
272 throw InvalidArgumentException(boost::str(
273 boost::format("Number of elements for tensor input and output %1% does not match")
274 % i ));
275 }
276 }
277
278 if (m_Inputs.size() != m_Outputs.size())
279 {
280 throw InvalidArgumentException(boost::str(
281 boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
282 % m_Inputs.size() % m_Outputs.size()));
283 }
284
285 for (unsigned int i = 0; i < m_Inputs.size(); ++i)
286 {
287 if (!m_Inputs[i])
288 {
289 throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
290 }
291
292 if (!m_Outputs[i])
293 {
294 throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
295 }
296 }
297}
298
299//---------------------------------------------------------------
300void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
301{
302 ValidateSingleInput(workloadInfo, "ActivationQueueDescriptor");
303 ValidateSingleOutput(workloadInfo, "ActivationQueueDescriptor");
304 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
305 workloadInfo.m_OutputTensorInfos[0],
306 "ActivationQueueDescriptor",
307 "input",
308 "output");
309}
310
311//---------------------------------------------------------------
312void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
313{
314 ValidateSingleInput(workloadInfo, "SoftmaxQueueDescriptor");
315 ValidateSingleOutput(workloadInfo, "SoftmaxQueueDescriptor");
telsoa014fcda012018-03-09 14:13:49 +0000316
317 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
318 workloadInfo.m_OutputTensorInfos[0],
319 "SoftmaxQueueDescriptor",
320 "input",
321 "output");
322}
323
324//---------------------------------------------------------------
325void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
326{
327 ValidateSingleInput(workloadInfo, "SplitterQueueDescriptor");
328
329 if (workloadInfo.m_OutputTensorInfos.size() <= 0)
330 {
331 throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
332 }
333
334 if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
335 {
336 throw InvalidArgumentException(
337 "SplitterQueueDescriptor: Number of split windows "
338 "has to match number of workloadInfo.m_OutputTensorInfos. "
339 "Number of windows: " +
340 to_string(m_ViewOrigins.size()) +
341 ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
342 }
343
telsoa01c577f2c2018-08-31 09:22:23 +0100344 //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
telsoa014fcda012018-03-09 14:13:49 +0000345 std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
346 for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
347 {
telsoa01c577f2c2018-08-31 09:22:23 +0100348 //Checks that the dimensionality of input is same as the split windows.
telsoa014fcda012018-03-09 14:13:49 +0000349 ViewOrigin const& e = m_ViewOrigins[w];
350 if (e.m_Origin.size() != inputDims)
351 {
352 throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
353 "have the same dimensionality as the input tensor. "
354 "Window origin (index: " +
355 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
356 " dimensions, the input "
357 "tensor has " +
358 to_string(inputDims) + " dimensions.");
359 }
360 for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
361 {
362 if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
363 workloadInfo.m_InputTensorInfos[0].GetShape()[i])
364 {
365 throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
366 "be smaller or equal than the size of the input in that coord.");
367 }
368 }
369 }
370}
371
372//---------------------------------------------------------------
//---------------------------------------------------------------
// Merger (concatenation): several inputs are assembled into one output.
// Validates handle/info counts, the concatenation axis, and — when window
// validation applies — that each input's view origin matches the output's
// rank and that every merge window fits inside the output tensor.
void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleOutput(workloadInfo, "MergerQueueDescriptor");

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
    }

    // The concatenation axis must lie within the rank of the first input.
    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException("Invalid Concatenation Axis provided");
    }

    // NOTE(review): when concatenating along the last-but-one position
    // (rank - axis == 1) the view-origin checks below are skipped entirely —
    // presumably that case is handled by an axis-based implementation path;
    // confirm against the workload implementations.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    // One view origin is required per input tensor info.
    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "MergerQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }
}
443
444//---------------------------------------------------------------
445void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
446{
447 ValidateSingleInput(workloadInfo, "FullyConnectedQueueDescriptor");
448 ValidateSingleOutput(workloadInfo, "FullyConnectedQueueDescriptor");
449 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");
450
451 if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
452 workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
453 {
454 throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
455 }
456
457 if (m_Weight == nullptr)
458 {
459 throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
460 }
461
462 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");
463
464 if (m_Parameters.m_BiasEnabled)
465 {
466 if (m_Bias == nullptr)
467 {
468 throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
469 "bias value tensor descriptor is missing.");
470 }
471
telsoa01c577f2c2018-08-31 09:22:23 +0100472 // Validates type and quantization values.
telsoa014fcda012018-03-09 14:13:49 +0000473 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
474 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");
475
476 ValidateTensorDataType(m_Bias->GetTensorInfo(),
477 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
478 "FullyConnectedQueueDescriptor", "bias");
479
480 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
481 }
482
483 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
484 workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
485}
486
487//---------------------------------------------------------------
488void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
489{
490 ValidateSingleInput(workloadInfo, "NormalizationQueueDescriptor");
491 ValidateSingleOutput(workloadInfo, "NormalizationQueueDescriptor");
492 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
493 workloadInfo.m_OutputTensorInfos[0],
494 "NormalizationQueueDescriptor",
495 "input",
496 "output");
497}
498
499void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
500{
501 ValidateTwoInputs(workloadInfo, "AdditionQueueDescriptor");
502 ValidateSingleOutput(workloadInfo, "AdditionQueueDescriptor");
503
504 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
505 workloadInfo.m_InputTensorInfos[1],
506 workloadInfo.m_OutputTensorInfos[0],
507 "AdditionQueueDescriptor",
508 "first input",
509 "second input");
510
511}
512
513//---------------------------------------------------------------
514void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
515{
516 ValidateTwoInputs(workloadInfo, "MultiplicationQueueDescriptor");
517 ValidateSingleOutput(workloadInfo, "MultiplicationQueueDescriptor");
surmeh01bceff2f2018-03-29 16:29:27 +0100518
519 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
520 workloadInfo.m_InputTensorInfos[1],
521 workloadInfo.m_OutputTensorInfos[0],
522 "MultiplicationQueueDescriptor",
523 "first input",
524 "second input");
telsoa014fcda012018-03-09 14:13:49 +0000525}
526
527void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
528{
529 ValidateSingleInput(workloadInfo, "BatchNormalizationQueueDescriptor");
530 ValidateSingleOutput(workloadInfo, "BatchNormalizationQueueDescriptor");
531 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
532 workloadInfo.m_OutputTensorInfos[0],
533 "BatchNormalizationQueueDescriptor",
534 "input",
535 "output");
536 ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
537 ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
538 ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
539 ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");
540
541
542 ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
543 ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
544 ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
545 ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");
546
547 ValidateTensorShapesMatch(
548 m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
549 ValidateTensorShapesMatch(
550 m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
551 ValidateTensorShapesMatch(
552 m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
553}
554
555void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
556{
557 ValidateSingleInput(workloadInfo, "Convolution2dQueueDescriptor");
558 ValidateSingleOutput(workloadInfo, "Convolution2dQueueDescriptor");
559
560 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
561 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
562
563 ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
564 ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
565 ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
566 "Convolution2dQueueDescriptor", "weight");
567 if (m_Parameters.m_BiasEnabled)
568 {
569 ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
570 ValidateTensorDataType(m_Bias->GetTensorInfo(),
571 GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
572 "Convolution2dQueueDescriptor", "bias");
573 ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
574 workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
575 }
576
577 ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
578 workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
579}
580
// Depthwise 2-d convolution: one rank-4 input, one rank-4 output, a mandatory
// rank-4 weight tensor, and an optional rank-1 bias tensor. Additionally
// checks that outputChannels == inputChannels * channelMultiplier.
void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor");

    ValidateTensorNumDimensions(
        workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");

    ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");

    // Position of the channel dimension in the OUTPUT shape depends on the
    // data layout: NCHW -> index 1, otherwise (NHWC) -> index 3.
    const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;

    // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
    // inputChannels * channelMultiplier should be equal to outputChannels.
    const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
    const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
    const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelIndex];
    if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
                                     "equal to input_channels (provided %2%) multiplied by channel_multiplier "
                                     "(provided %3%).")
                                     % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
    }

    if (m_Parameters.m_BiasEnabled)
    {
        // Bias is optional; when enabled it must be present, rank-1, correctly
        // quantized, and of the bias type matching the input data type.
        ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "DepthwiseConvolution2dQueueDescriptor", "bias");
    }

    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
}
625
626void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
627{
628 ValidateSingleInput(workloadInfo, "PermuteQueueDescriptor");
629 ValidateSingleOutput(workloadInfo, "PermuteQueueDescriptor");
630
631 const PermutationVector& mapping = m_Parameters.m_DimMappings;
632
633 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
634 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
635
636 ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
637 ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
638
639 for (unsigned int i = 0; i < mapping.GetSize(); ++i)
640 {
641 if (input.GetShape()[i] != output.GetShape()[mapping[i]])
642 {
643 throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
644 " (=" + to_string(input.GetShape()[i]) + ") " +
645 "must match dst dimension " + to_string(mapping[i]) +
646 " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
647 }
648 }
649}
650
651void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
652{
653 ValidateSingleInput(workloadInfo, "Pooling2dQueueDescriptor");
654 ValidateSingleOutput(workloadInfo, "Pooling2dQueueDescriptor");
655
656 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
657 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
658}
659
660void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
661{
662 ValidateSingleInput(workloadInfo, "ResizeBilinearQueueDescriptor");
663 ValidateSingleOutput(workloadInfo, "ResizeBilinearQueueDescriptor");
664
665 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
666 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
667
telsoa01c577f2c2018-08-31 09:22:23 +0100668 // Resizes bilinear only changes width and height: batch and channel count must match.
telsoa014fcda012018-03-09 14:13:49 +0000669 {
670 const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
671 const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
672 if (inputBatchSize != outputBatchSize)
673 {
674 throw InvalidArgumentException(
675 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
676 "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
677 }
678 }
679
680 {
Matthew Bentham8800c002018-11-19 13:19:28 +0000681 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
James Conroy59540822018-10-11 12:39:05 +0100682 const unsigned int inputChannelCount =
Matthew Bentham8800c002018-11-19 13:19:28 +0000683 workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
James Conroy59540822018-10-11 12:39:05 +0100684 const unsigned int outputChannelCount =
Matthew Bentham8800c002018-11-19 13:19:28 +0000685 workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
telsoa014fcda012018-03-09 14:13:49 +0000686 if (inputChannelCount != outputChannelCount)
687 {
688 throw InvalidArgumentException(
689 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
690 "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
691 }
692 }
693}
694
695void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
696{
697 ValidateSingleInput(workloadInfo, "FakeQuantizationQueueDescriptor");
698 ValidateSingleOutput(workloadInfo, "FakeQuantizationQueueDescriptor");
699
700 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
701 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
702 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
703 workloadInfo.m_OutputTensorInfos[0],
704 "FakeQuantizationQueueDescriptor",
705 "input",
706 "output");
707 if (m_Parameters.m_Min > m_Parameters.m_Max)
708 {
709 throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
710 }
711
712}
713
714void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
715{
716 ValidateSingleInput(workloadInfo, "L2NormalizationQueueDescriptor");
717 ValidateSingleOutput(workloadInfo, "L2NormalizationQueueDescriptor");
718
719 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
720 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
721 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
722 workloadInfo.m_OutputTensorInfos[0],
723 "L2NormalizationQueueDescriptor",
724 "input",
725 "output");
726}
727
728void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
729{
730 ValidateNoInputs(workloadInfo, "ConstantQueueDescriptor");
731 ValidateSingleOutput(workloadInfo, "ConstantQueueDescriptor");
732
733 if (!m_LayerOutput)
734 {
735 throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
736 }
737
738 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
739 workloadInfo.m_OutputTensorInfos[0],
740 "ConstantQueueDescriptor",
741 "constant",
742 "output");
743}
744
745void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
746{
747 ValidateSingleInput(workloadInfo, "ReshapeQueueDescriptor");
748 ValidateSingleOutput(workloadInfo, "ReshapeQueueDescriptor");
749
750 if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
751 {
752 throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
753 to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
754 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
755 }
756}
757
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000758void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
759{
760 ValidateSingleInput(workloadInfo, "SpaceToBatchNdQueueDescriptor");
761 ValidateSingleOutput(workloadInfo, "SpaceToBatchNdQueueDescriptor");
762
763 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "input");
764 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "output");
765
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000766 if (m_Parameters.m_BlockShape.size() != 2)
767 {
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000768 throw InvalidArgumentException("Block Shape must contain 2 spatial dimensions");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000769 }
770
771 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
772 {
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000773 throw InvalidArgumentException("Pad List must contain the same number of dimensions as Block Shape.");
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000774 }
775
776 const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();
777
778 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
779 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
780
Matthew Bentham8800c002018-11-19 13:19:28 +0000781 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
782 unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000783 + heightPad.first + heightPad.second;
784
Matthew Bentham8800c002018-11-19 13:19:28 +0000785 unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000786 + widthPad.first + widthPad.second;
787
788 unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
Matthew Bentham8800c002018-11-19 13:19:28 +0000789 * inputShape[dimensionIndices.GetChannelsIndex()];
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +0000790
791 if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
792 {
793 throw InvalidArgumentException("SpaceToBatchNdQueueDescriptor: Input tensor has " +
794 to_string(numInputElements) + " after padding but output tensor has " +
795 to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
796 }
797
798 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
Nattapat Chaimanowong207ef9a2018-11-02 10:57:25 +0000799 {
800 throw InvalidArgumentException(
801 "Input shape after padding must be divisible by Block Shape in all spatial dimensions");
802 }
803}
804
telsoa014fcda012018-03-09 14:13:49 +0000805void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
806{
807 ValidateSingleInput(workloadInfo, "FloorQueueDescriptor");
808 ValidateSingleOutput(workloadInfo, "FlootQueueDescriptor");
809
810 if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
811 {
812 throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
813 }
814}
815
telsoa01c577f2c2018-08-31 09:22:23 +0100816void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
817{
818 ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "LstmQueueDescriptor", 2, "input");
819 ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "LstmQueueDescriptor", 2, "output");
820}
821
822void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
823{
824 ValidateSingleInput(workloadInfo, "ConvertFp32ToFp16QueueDescriptor");
825 ValidateSingleOutput(workloadInfo, "ConvertFp32ToFp16QueueDescriptor");
826
827 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
828 {
829 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Input tensor type must be Float32.");
830 }
831
832 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float16)
833 {
834 throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Output tensor type must be Float16.");
835 }
836
837 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
838 workloadInfo.m_OutputTensorInfos[0],
839 "ConvertFp32ToFp16QueueDescriptor",
840 "input",
841 "output");
842}
843
844void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
845{
846 ValidateSingleInput(workloadInfo, "ConvertFp16ToFp32QueueDescriptor");
847 ValidateSingleOutput(workloadInfo, "ConvertFp16ToFp32QueueDescriptor");
848
849 if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float16)
850 {
851 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Input tensor type must be Float16.");
852 }
853 if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
854 {
855 throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Output tensor type must be Float32.");
856 }
857
858 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
859 workloadInfo.m_OutputTensorInfos[0],
860 "ConvertFp16ToFp32QueueDescriptor",
861 "input",
862 "output");
863}
864
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100865void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
866{
867 ValidateTwoInputs(workloadInfo, "DivisionQueueDescriptor");
868 ValidateSingleOutput(workloadInfo, "DivisionQueueDescriptor");
869
870 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
871 workloadInfo.m_InputTensorInfos[1],
872 workloadInfo.m_OutputTensorInfos[0],
873 "DivisionQueueDescriptor",
874 "first input",
875 "second input");
876}
877
David Beckc2044fe2018-09-05 15:00:38 +0100878void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
879{
880 ValidateTwoInputs(workloadInfo, "SubtractionQueueDescriptor");
881 ValidateSingleOutput(workloadInfo, "SubtractionQueueDescriptor");
882
883 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
884 workloadInfo.m_InputTensorInfos[1],
885 workloadInfo.m_OutputTensorInfos[0],
886 "SubtractionQueueDescriptor",
887 "first input",
888 "second input");
889}
890
Nattapat Chaimanowong5a4304a2018-11-28 10:44:37 +0000891void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
892{
893 ValidateTwoInputs(workloadInfo, "MaximumQueueDescriptor");
894 ValidateSingleOutput(workloadInfo, "MaximumQueueDescriptor");
895
896 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
897 workloadInfo.m_InputTensorInfos[1],
898 workloadInfo.m_OutputTensorInfos[0],
899 "MaximumQueueDescriptor",
900 "first input",
901 "second input");
902}
903
narpra01a6bf9122018-09-10 09:50:09 +0100904void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
905{
906 ValidateSingleInput(workloadInfo, "MeanQueueDescriptor");
907 ValidateSingleOutput(workloadInfo, "MeanQueueDescriptor");
narpra01eb061912018-09-10 17:35:27 +0100908
909 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
910 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
911
narpra0132b90462018-09-13 11:07:48 +0100912 if (m_Parameters.m_KeepDims)
narpra01eb061912018-09-10 17:35:27 +0100913 {
914 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
915 }
narpra0132b90462018-09-13 11:07:48 +0100916 else if (m_Parameters.m_Axis.empty())
narpra01eb061912018-09-10 17:35:27 +0100917 {
918 ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
919 }
920 else
921 {
narpra0132b90462018-09-13 11:07:48 +0100922 auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
narpra01eb061912018-09-10 17:35:27 +0100923 ValidateTensorNumDimensions(output,
924 "MeanQueueDescriptor",
925 outputDim > 0 ? outputDim : 1,
926 "output");
927 }
narpra01a6bf9122018-09-10 09:50:09 +0100928}
929
jimfly012c9322a2018-09-19 10:59:49 +0100930void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
931{
932 ValidateSingleInput(workloadInfo, "PadQueueDescriptor");
933 ValidateSingleOutput(workloadInfo, "PadQueueDescriptor");
934
935 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
Nina Drozd661dfa72018-10-02 11:14:17 +0100936 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
937
jimfly012c9322a2018-09-19 10:59:49 +0100938 // input and output should have the same number of dimensions
939 ValidateTensorNumDimensions(output, "PadQueueDescriptor", input.GetNumDimensions(), "output");
940 // there should be entry in the pad list for each dimension in the input tensor
941 if (m_Parameters.m_PadList.size() != input.GetNumDimensions()) {
942 throw InvalidArgumentException("Pad List should contain the same number of entries as there"
943 " are dimensions in the input tensor that is " +
944 to_string(input.GetNumDimensions()) + " entries " +
945 " not " + to_string(m_Parameters.m_PadList.size()) + " entries.");
946 }
947}
948
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +0000949void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
950{
951 ValidateSingleInput(workloadInfo, "BatchToSpaceNdQueueDescriptor");
952 ValidateSingleOutput(workloadInfo, "BatchToSpaceNdQueueDescriptor");
953}
954
Conor Kennedy430b5d82018-11-14 15:28:28 +0000955void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
956{
957 ValidateSingleInput(workloadInfo, "StridedSliceQueueDescriptor");
958 ValidateSingleOutput(workloadInfo, "StridedSliceQueueDescriptor");
959
960 const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
961 const uint32_t rank = input.GetNumDimensions();
962
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +0000963 if (rank > 4)
964 {
965 throw InvalidArgumentException(
966 "StridedSliceLayer: Input tensors with rank greater than 4 are not supported");
967 }
968
Conor Kennedy430b5d82018-11-14 15:28:28 +0000969 // Begin, End & Stride length must be of rank(input0)
970 if (m_Parameters.m_Begin.size() != rank)
971 {
972 throw InvalidArgumentException("StridedSliceLayer: Begin length must be of rank input0("
973 + to_string(rank) + ")");
974 }
975
976 if (m_Parameters.m_End.size() != rank)
977 {
978 throw InvalidArgumentException("StridedSliceLayer: End length must be of rank input0("
979 + to_string(rank) + ")");
980 }
981
982 if (m_Parameters.m_Stride.size() != rank)
983 {
984 throw InvalidArgumentException("StridedSliceLayer: Stride length must be of rank input0("
985 + to_string(rank) + ")");
986 }
987
988 // Stride entries must be non-zero
989 for (auto& stride : m_Parameters.m_Stride)
990 {
991 if (stride == 0)
992 {
993 throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
994 }
995 }
996}
997
kevmay0190539692018-11-29 08:40:19 +0000998void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
999{
1000 ValidateTwoInputs(workloadInfo, "MinimumQueueDescriptor");
1001 ValidateSingleOutput(workloadInfo, "MinimumQueueDescriptor");
1002
1003 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1004 workloadInfo.m_InputTensorInfos[1],
1005 workloadInfo.m_OutputTensorInfos[0],
1006 "MinimumQueueDescriptor",
1007 "first input",
1008 "second input");
1009}
1010
Nattapat Chaimanowonga9a1cf12018-12-03 16:06:49 +00001011void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1012{
1013 ValidateSingleInput(workloadInfo, "DebugQueueDescriptor");
1014 ValidateSingleOutput(workloadInfo, "DebugQueueDescriptor");
1015}
1016
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001017void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1018{
1019 ValidateTwoInputs(workloadInfo, "EqualQueueDescriptor");
1020 ValidateSingleOutput(workloadInfo, "EqualQueueDescriptor");
1021
1022 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1023 workloadInfo.m_InputTensorInfos[1],
1024 workloadInfo.m_OutputTensorInfos[0],
1025 "EqualQueueDescriptor",
1026 "first input",
1027 "second input");
1028}
1029
FrancisMurtagh878f0232018-12-19 10:56:15 +00001030void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1031{
1032 ValidateTwoInputs(workloadInfo, "GreaterQueueDescriptor");
1033 ValidateSingleOutput(workloadInfo, "GreaterQueueDescriptor");
1034
1035 ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1036 workloadInfo.m_InputTensorInfos[1],
1037 workloadInfo.m_OutputTensorInfos[0],
1038 "GreaterQueueDescriptor",
1039 "first input",
1040 "second input");
1041}
1042
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00001043void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1044{
1045 ValidateSingleInput(workloadInfo, "RsqrtQueueDescriptor");
1046 ValidateSingleOutput(workloadInfo, "RsqrtQueueDescriptor");
1047 ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
1048 workloadInfo.m_OutputTensorInfos[0],
1049 "RsqrtQueueDescriptor",
1050 "input",
1051 "output");
1052}
1053
narpra01b89b05f2019-01-16 09:53:09 +00001054void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1055{
1056 ValidateTwoInputs(workloadInfo, "GatherQueueDescriptor");
1057 ValidateSingleOutput(workloadInfo, "GatherQueueDescriptor");
narpra014951d842019-01-18 16:53:53 +00001058
1059 const TensorInfo& indices = workloadInfo.m_InputTensorInfos[1];
1060
1061 if (indices.GetDataType() != DataType::Signed32)
1062 {
1063 throw InvalidArgumentException("GatherQueueDescriptor: Indices tensor type must be int32.");
1064 }
1065
1066 const TensorInfo& params = workloadInfo.m_InputTensorInfos[0];
1067 const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
1068 unsigned int paramsDim = params.GetNumDimensions();
1069 unsigned int indicesDim = indices.GetNumDimensions();
1070 unsigned int outputDim = paramsDim - 1 + indicesDim;
1071
1072 ValidateTensorNumDimensions(output, "GatherQueueDescriptor", outputDim, "output");
narpra01b89b05f2019-01-16 09:53:09 +00001073}
1074
Matteo Martincigh49124022019-01-11 13:25:59 +00001075void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1076{
1077 // This is internally generated so it should not need validation.
1078}
1079
Nattapat Chaimanowonga0d28442018-11-21 16:48:17 +00001080} //namespace armnn