//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "WorkloadData.hpp"

#include "CpuTensorHandle.hpp"

#include <backendsCommon/DataLayoutIndexed.hpp>

#include <algorithm>
#include <iomanip>
#include <string>
#include <sstream>

#include <boost/format.hpp>

namespace armnn
{

//---------------------------------------------------------------
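/// Returns the data type that a bias tensor is expected to have for a given input data type:
/// quantised asymmetric 8-bit inputs require Signed32 biases, while float inputs use a bias
/// of the same float type as the input.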
DataType GetBiasDataType(DataType inputDataType)
{
    switch (inputDataType)
    {
        case DataType::Float16:
            return DataType::Float16;
        case DataType::Float32:
            return DataType::Float32;
        case DataType::QuantisedAsymm8:
            return DataType::Signed32;
        default:
            BOOST_ASSERT_MSG(false, "Invalid input data type");
            return DataType::Float32;
    }
}

namespace
{

//---------------------------------------------------------------
// The Android NDK does not support the std::to_string function.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream os;
    os << value;
    return os.str();
}

//---------------------------------------------------------------
void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
{
    if (!ptr)
    {
        throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
            paramName + " parameter must be set.");
    }
}

//---------------------------------------------------------------
void ValidateTensorShapesMatch(const TensorInfo& first,
                               const TensorInfo& second,
                               std::string const& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (first.GetShape() != second.GetShape())
    {
        throw InvalidArgumentException(descName + ": "
            + firstName + " & " + secondName + " must have identical shapes");
    }
}

//---------------------------------------------------------------
void ValidateNoInputs(const WorkloadInfo& workloadInfo, std::string const& descName)
{
    if (workloadInfo.m_InputTensorInfos.size() != 0)
    {
        throw InvalidArgumentException(descName +
            ": Requires no inputs. " +
            to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided.");
    }
}

//---------------------------------------------------------------
void ValidateSingleInput(const WorkloadInfo& workloadInfo, std::string const& descName)
{
    if (workloadInfo.m_InputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(descName +
            ": Requires exactly one input. " +
            to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided.");
    }
}

//---------------------------------------------------------------
void ValidateTwoInputs(const WorkloadInfo& workloadInfo, std::string const& descName)
{
    if (workloadInfo.m_InputTensorInfos.size() != 2)
    {
        throw InvalidArgumentException(descName +
            ": Requires exactly two inputs. " +
            to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
    }
}

//---------------------------------------------------------------
void ValidateSingleOutput(const WorkloadInfo& workloadInfo, std::string const& descName)
{
    if (workloadInfo.m_OutputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(descName +
            ": Requires exactly one output. " +
            to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
    }
}

//---------------------------------------------------------------
void ValidateTensorNumDimensions(const TensorInfo& tensor,
                                 std::string const& descName,
                                 unsigned int numDimensions,
                                 std::string const& tensorName)
{
    if (tensor.GetNumDimensions() != numDimensions)
    {
        throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
            to_string(tensor.GetNumDimensions()) + " dimensions for " + tensorName + " tensor.");
    }
}

//---------------------------------------------------------------
void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
    const std::string& descName, std::string const& tensorName)
{
    if (tensor.GetDataType() != dataType)
    {
        throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
            GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
    }
}

//---------------------------------------------------------------
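/// Validates that a bias tensor has a zero quantization offset and a quantization scale equal to
/// the product of the input and weight quantization scales (within a small tolerance).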
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
    const TensorInfo& weightsTensorInfo, const std::string& descName)
{
    if (biasTensor.GetQuantizationOffset() != 0)
    {
        throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
            to_string(biasTensor.GetQuantizationOffset()));
    }
    const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
    if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.000000001f)
    {
        // Print the float values with extra precision to see very small differences.
        std::stringstream msg;
        msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
            " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
            biasTensor.GetQuantizationScale();
        throw InvalidArgumentException(msg.str());
    }
}

//---------------------------------------------------------------
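/// Checks that the first numExpected entries of a tensor handle array are non-null,
/// and that the array itself is not empty when at least one tensor is expected.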
void ValidateTensors(const std::vector<ITensorHandle*>& vec,
                     unsigned int numExpected,
                     const std::string& descName,
                     const std::string& varName)
{
    if (vec.empty() && numExpected > 0)
    {
        throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
    }

    for (unsigned int i = 0; i < numExpected; ++i)
    {
        if (!vec[i])
        {
            throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
        }
    }
}

//---------------------------------------------------------------
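/// Checks that two inputs of equal rank can be broadcast against each other (for each dimension
/// the sizes must either match or one of them must be 1) and that the resulting broadcast shape
/// equals the given output shape.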
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName + ": Tensors "
            + firstName + " & " + secondName
            + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from broadcasting "
            + firstName + " & " + secondName
            + " does not match the output shape");
    }
}

//---------------------------------------------------------------
/// Validates that the output tensor's quantization scale is greater than the product
/// of the two input tensors' quantization scales. This is a requirement of the implementation of
/// the quantized multiplication.
void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
    const TensorInfo& outputTensorInfo, std::string const& descName,
    const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
{
    if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
    {
        if (outputTensorInfo.GetQuantizationScale() <=
            inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
        {
            std::stringstream msg;
            msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
                "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
            throw InvalidArgumentException(msg.str());
        }
    }
}

} //namespace

void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}

//---------------------------------------------------------------
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "MemCopyQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "MemCopyQueueDescriptor");

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
                % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                    % i));
        }
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
                % m_Inputs.size() % m_Outputs.size()));
    }

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}

//---------------------------------------------------------------
void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "ActivationQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "ActivationQueueDescriptor");
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "ActivationQueueDescriptor",
                              "input",
                              "output");
}

//---------------------------------------------------------------
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "SoftmaxQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "SoftmaxQueueDescriptor");
    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SoftmaxQueueDescriptor", 2, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SoftmaxQueueDescriptor", 2, "output");

    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "SoftmaxQueueDescriptor",
                              "input",
                              "output");
}

//---------------------------------------------------------------
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "SplitterQueueDescriptor");

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "SplitterQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    // The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for (unsigned int w = 0; w < m_ViewOrigins.size(); ++w)
    {
        // Checks that the dimensionality of the input is the same as that of the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException("SplitterQueueDescriptor: Window origin has to "
                "have the same dimensionality as the input tensor. "
                "Window origin (index: " +
                to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                " dimensions, the input "
                "tensor has " +
                to_string(inputDims) + " dimensions.");
        }
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
                    "be smaller than or equal to the size of the input in that dimension.");
            }
        }
    }
}

//---------------------------------------------------------------
void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleOutput(workloadInfo, "MergerQueueDescriptor");

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
    }

    if (m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException("Invalid Concatenation Axis provided");
    }

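    // If the concatenation axis is the innermost dimension, the remaining view-origin checks are skipped.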
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "MergerQueueDescriptor: Number of merge windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    // The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for (unsigned int w = 0; w < m_ViewOrigins.size(); ++w)
    {
        // Checks that the dimensionality of the output is the same as that of the merge windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException("MergerQueueDescriptor: Window origin has to "
                "have the same dimensionality as the output tensor. "
                "Window origin (index: " +
                to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                " dimensions, the output "
                "tensor has " +
                to_string(outputDims) + " dimensions.");
        }
        // Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
                    "be smaller than or equal to the size of the output in that dimension.");
            }
        }
    }
}

//---------------------------------------------------------------
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "FullyConnectedQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "FullyConnectedQueueDescriptor");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");

    if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
          workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
    }

    if (m_Weight == nullptr)
    {
        throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
    }

    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        if (m_Bias == nullptr)
        {
            throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
                "bias value tensor descriptor is missing.");
        }

        // Validates type and quantization values.
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
            GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
            "FullyConnectedQueueDescriptor", "bias");

        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
    }

    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
}

//---------------------------------------------------------------
void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "NormalizationQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "NormalizationQueueDescriptor");
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "NormalizationQueueDescriptor",
                              "input",
                              "output");
}

void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateTwoInputs(workloadInfo, "AdditionQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "AdditionQueueDescriptor");

    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "AdditionQueueDescriptor",
                                       "first input",
                                       "second input");
}

//---------------------------------------------------------------
void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateTwoInputs(workloadInfo, "MultiplicationQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "MultiplicationQueueDescriptor");

    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "MultiplicationQueueDescriptor",
                                       "first input",
                                       "second input");
}

void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "BatchNormalizationQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "BatchNormalizationQueueDescriptor");
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "BatchNormalizationQueueDescriptor",
                              "input",
                              "output");
    ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
    ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
    ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
    ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");

    ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
    ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
    ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
    ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");

    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
    ValidateTensorShapesMatch(
        m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
}

void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "Convolution2dQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "Convolution2dQueueDescriptor");

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");

    ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
    ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
        "Convolution2dQueueDescriptor", "weight");
    if (m_Parameters.m_BiasEnabled)
    {
        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
        ValidateTensorDataType(m_Bias->GetTensorInfo(),
            GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
            "Convolution2dQueueDescriptor", "bias");
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
    }

    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
}

void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor");

    ValidateTensorNumDimensions(
        workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");

    ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");

    const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;

    // inputChannels * channelMultiplier should be equal to outputChannels.
    const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
    const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[channelIndex];
    const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[channelIndex];
    if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
                                     "equal to input_channels (provided %2%) multiplied by channel_multiplier "
                                     "(provided %3%).")
                       % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
    }

    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
            GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
            "DepthwiseConvolution2dQueueDescriptor", "bias");
    }

    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
}

void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "PermuteQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "PermuteQueueDescriptor");

    const PermutationVector& mapping = m_Parameters.m_DimMappings;

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
    ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");

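    // Each source dimension i must have the same size as the destination dimension it maps to, mapping[i].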
    for (unsigned int i = 0; i < mapping.GetSize(); ++i)
    {
        if (input.GetShape()[i] != output.GetShape()[mapping[i]])
        {
            throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
                " (=" + to_string(input.GetShape()[i]) + ") " +
                "must match dst dimension " + to_string(mapping[i]) +
                " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
        }
    }
}

void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "Pooling2dQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "Pooling2dQueueDescriptor");

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
}

void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "ResizeBilinearQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "ResizeBilinearQueueDescriptor");

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");

    // Resize bilinear only changes width and height: batch and channel count must match.
    {
        const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
        const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
        if (inputBatchSize != outputBatchSize)
        {
            throw InvalidArgumentException(
                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
                    "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
        }
    }

    {
        DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
        const unsigned int inputChannelCount =
            workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
        const unsigned int outputChannelCount =
            workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
        if (inputChannelCount != outputChannelCount)
        {
            throw InvalidArgumentException(
                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
                    "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
        }
    }
}

void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "FakeQuantizationQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "FakeQuantizationQueueDescriptor");

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "FakeQuantizationQueueDescriptor",
                              "input",
                              "output");
    if (m_Parameters.m_Min > m_Parameters.m_Max)
    {
        throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
    }
}

void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "L2NormalizationQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "L2NormalizationQueueDescriptor");

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "L2NormalizationQueueDescriptor",
                              "input",
                              "output");
}

void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNoInputs(workloadInfo, "ConstantQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "ConstantQueueDescriptor");

    if (!m_LayerOutput)
    {
        throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
    }

    ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
                              workloadInfo.m_OutputTensorInfos[0],
                              "ConstantQueueDescriptor",
                              "constant",
                              "output");
}

void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "ReshapeQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "ReshapeQueueDescriptor");

    if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
    {
        throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
            to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " elements but output tensor has " +
            to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
    }
}

void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "SpaceToBatchNdQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "SpaceToBatchNdQueueDescriptor");

    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SpaceToBatchNdQueueDescriptor", 4, "output");

    if (m_Parameters.m_BlockShape.size() != 2)
    {
        throw InvalidArgumentException("Block Shape must contain 2 spatial dimensions");
    }

    if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
    {
        throw InvalidArgumentException("Pad List must contain the same number of dimensions as Block Shape.");
    }

    const TensorShape inputShape = workloadInfo.m_InputTensorInfos[0].GetShape();

    std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()]
                               + heightPad.first + heightPad.second;

    unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()]
                              + widthPad.first + widthPad.second;

    unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth
                                    * inputShape[dimensionIndices.GetChannelsIndex()];

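    // The output must hold exactly the same number of elements as the zero-padded input.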
    if (workloadInfo.m_OutputTensorInfos[0].GetNumElements() != numInputElements)
    {
        throw InvalidArgumentException("SpaceToBatchNdQueueDescriptor: Input tensor has " +
            to_string(numInputElements) + " elements after padding but output tensor has " +
            to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
    }

    if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
    {
        throw InvalidArgumentException(
            "Input shape after padding must be divisible by Block Shape in all spatial dimensions");
    }
}

void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "FloorQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "FloorQueueDescriptor");

    if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
    {
        throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
    }
}

void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "LstmQueueDescriptor", 2, "input");
    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "LstmQueueDescriptor", 2, "output");
}

void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "ConvertFp32ToFp16QueueDescriptor");
    ValidateSingleOutput(workloadInfo, "ConvertFp32ToFp16QueueDescriptor");

    if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
    {
        throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Input tensor type must be Float32.");
    }

    if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float16)
    {
        throw InvalidArgumentException("ConvertFp32ToFp16QueueDescriptor: Output tensor type must be Float16.");
    }

    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "ConvertFp32ToFp16QueueDescriptor",
                              "input",
                              "output");
}

void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "ConvertFp16ToFp32QueueDescriptor");
    ValidateSingleOutput(workloadInfo, "ConvertFp16ToFp32QueueDescriptor");

    if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float16)
    {
        throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Input tensor type must be Float16.");
    }
    if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
    {
        throw InvalidArgumentException("ConvertFp16ToFp32QueueDescriptor: Output tensor type must be Float32.");
    }

    ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                              workloadInfo.m_OutputTensorInfos[0],
                              "ConvertFp16ToFp32QueueDescriptor",
                              "input",
                              "output");
}

void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateTwoInputs(workloadInfo, "DivisionQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "DivisionQueueDescriptor");

    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "DivisionQueueDescriptor",
                                       "first input",
                                       "second input");
}

void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateTwoInputs(workloadInfo, "SubtractionQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "SubtractionQueueDescriptor");

    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "SubtractionQueueDescriptor",
                                       "first input",
                                       "second input");
}

void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateTwoInputs(workloadInfo, "MaximumQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "MaximumQueueDescriptor");

    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "MaximumQueueDescriptor",
                                       "first input",
                                       "second input");
}

void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "MeanQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "MeanQueueDescriptor");

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];

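    // The expected output rank depends on the parameters: with KeepDims the output keeps the input rank,
    // with no reduction axes the output is reduced to rank 1, otherwise the rank shrinks by one per axis
    // (but never below 1).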
    if (m_Parameters.m_KeepDims)
    {
        ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
    }
    else if (m_Parameters.m_Axis.empty())
    {
        ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
    }
    else
    {
        auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
        ValidateTensorNumDimensions(output,
                                    "MeanQueueDescriptor",
                                    outputDim > 0 ? outputDim : 1,
                                    "output");
    }
}

void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "PadQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "PadQueueDescriptor");

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];

    // Input and output should have the same number of dimensions.
    ValidateTensorNumDimensions(output, "PadQueueDescriptor", input.GetNumDimensions(), "output");
    // There should be an entry in the pad list for each dimension in the input tensor.
    if (m_Parameters.m_PadList.size() != input.GetNumDimensions())
    {
        throw InvalidArgumentException("Pad List should contain the same number of entries as there"
            " are dimensions in the input tensor, that is " +
            to_string(input.GetNumDimensions()) + " entries, not " +
            to_string(m_Parameters.m_PadList.size()) + " entries.");
    }
}

void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "BatchToSpaceNdQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "BatchToSpaceNdQueueDescriptor");
}

void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "StridedSliceQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "StridedSliceQueueDescriptor");

    const TensorInfo& input = workloadInfo.m_InputTensorInfos[0];
    const uint32_t rank = input.GetNumDimensions();

    if (rank > 4)
    {
        throw InvalidArgumentException(
            "StridedSliceLayer: Input tensors with rank greater than 4 are not supported");
    }

    // Begin, End & Stride lengths must equal the rank of input0.
    if (m_Parameters.m_Begin.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: Begin length must match the rank of input0 ("
            + to_string(rank) + ")");
    }

    if (m_Parameters.m_End.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: End length must match the rank of input0 ("
            + to_string(rank) + ")");
    }

    if (m_Parameters.m_Stride.size() != rank)
    {
        throw InvalidArgumentException("StridedSliceLayer: Stride length must match the rank of input0 ("
            + to_string(rank) + ")");
    }

    // Stride entries must be non-zero.
    for (auto& stride : m_Parameters.m_Stride)
    {
        if (stride == 0)
        {
            throw InvalidArgumentException("StridedSliceLayer: Stride entries must be non-zero");
        }
    }
}

void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateTwoInputs(workloadInfo, "MinimumQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "MinimumQueueDescriptor");

    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
                                       workloadInfo.m_InputTensorInfos[1],
                                       workloadInfo.m_OutputTensorInfos[0],
                                       "MinimumQueueDescriptor",
                                       "first input",
                                       "second input");
}

void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "DebugQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "DebugQueueDescriptor");
}

} //namespace armnn