//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include "ExecuteNetworkProgramOptions.hpp"

#include <armnn/Logging.hpp>
#include <Filesystem.hpp>
#include <InferenceTest.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_CAFFE_PARSER)
#include "armnnCaffeParser/ICaffeParser.hpp"
#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

#include <future>

#if defined(ARMNN_TFLITE_DELEGATE)
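// Runs a .tflite model through the stock TfLite interpreter with the ArmNN
// delegate registered, so that supported operators execute on the requested
// compute devices. Note that the runtime argument is unused in this path; it
// is kept so the signature mirrors MainImpl below.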
int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace tflite;

    std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());
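
    // BuildFromFile returns a null pointer when the file cannot be read or is
    // not a valid flatbuffer, so fail fast before building the interpreter.
    if (model == nullptr)
    {
        ARMNN_LOG(fatal) << "Failed to load model from file: " << params.m_ModelPath;
        return EXIT_FAILURE;
    }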

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();
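    // Tensors are allocated for the default CPU kernels first; registering the
    // delegate below re-plans the graph and the interpreter re-allocates as
    // needed for the nodes the delegate claims.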

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    // Register armnn_delegate to TfLiteInterpreter
    int status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
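    // ModifyGraphWithDelegate returns a TfLiteStatus; if registration failed
    // the run would silently fall back to the reference CPU kernels, so treat
    // it as fatal instead.
    if (status == kTfLiteError)
    {
        ARMNN_LOG(fatal) << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter!";
        return EXIT_FAILURE;
    }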

    // params.m_InputNames already holds the input binding names, so its size
    // gives the number of inputs directly.
    const size_t numInputs = params.m_InputNames.size();

    // Note: only the first data file is used; every input tensor below is
    // populated from params.m_InputTensorDataFilePaths[0].
    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
                                            ? armnn::EmptyOptional()
                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);

    for (unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];

        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                 dataFile,
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);
            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
            return EXIT_FAILURE;
        }
    }

    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // Run the inference; Invoke() reports kTfLiteError if any kernel
        // (including delegate kernels) fails.
        if (tfLiteInterpreter->Invoke() != kTfLiteOk)
        {
            ARMNN_LOG(fatal) << "TfLite interpreter failed to invoke.";
            return EXIT_FAILURE;
        }

        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

            std::cout << params.m_OutputNames[outputIndex] << ": ";
            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    std::cout << tfLiteDelegateOutputData[i] << ", ";
                    if ((i + 1) % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    std::cout << tfLiteDelegateOutputData[i] << ", ";
                    if ((i + 1) % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    // int8_t streams as a character, so widen it for printing
                    std::cout << signed(tfLiteDelegateOutputData[i]) << ", ";
                    if ((i + 1) % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    // uint8_t also streams as a character, so widen it for printing
                    std::cout << unsigned(tfLiteDelegateOutputData[i]) << ", ";
                    if ((i + 1) % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported output tensor data type \"" << params.m_OutputTypes[outputIndex]
                                 << "\". Output type can be specified with the -z argument.";
                return EXIT_FAILURE;
            }
            std::cout << std::endl;
        }
    }

    return EXIT_SUCCESS;
}
#endif
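
// Parses the model with TParser, loads it into an InferenceModel (which owns
// the optimized network and the runtime it runs on), fills the input
// containers from file or with generated data, and runs params.m_Iterations
// inferences, printing every output tensor.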
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
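    // One TContainer per binding: the variant holds whichever element type the
    // tensor was declared with (float, int or quantized 8-bit).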

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath = params.m_EnableFastMath;

        for (const std::string& inputName : params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for (unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for (const std::string& outputName : params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
        for (unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams()) :
                armnn::EmptyOptional();

            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                armnn::EmptyOptional() :
                armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }

        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
                return EXIT_FAILURE;
            }
        }
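
        // Each container above is pre-sized from the model's reported output
        // size, so model.Run below can write results into it in place.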

        for (size_t x = 0; x < params.m_Iterations; x++)
        {
            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

            if (params.m_GenerateTensorData)
            {
                ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
            }

            // Print output tensors
            const auto& infosOut = model.GetOutputBindingInfos();
            for (size_t i = 0; i < numOutputs; i++)
            {
                const armnn::TensorInfo& infoOut = infosOut[i].second;
                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                      infoOut,
                                      outputTensorFile,
                                      params.m_DequantizeOutput);
                mapbox::util::apply_visitor(printer, outputDataContainers[i]);
            }

            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                            << std::fixed << inference_duration.count() << " ms\n";

            // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
            if (params.m_ThresholdTime != 0.0)
            {
                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                << std::fixed << params.m_ThresholdTime << " ms";
                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                << std::fixed << thresholdMinusInference << " ms" << "\n";

                if (thresholdMinusInference < 0)
                {
                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                    ARMNN_LOG(fatal) << errorMessage;
                }
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}

// MAIN
int main(int argc, const char* argv[])
{
    // Configures logging for both the ARMNN library and this test program.
    #ifdef NDEBUG
    armnn::LogSeverity level = armnn::LogSeverity::Info;
    #else
    armnn::LogSeverity level = armnn::LogSeverity::Debug;
    #endif
    armnn::ConfigureLogging(true, true, level);


    // Get ExecuteNetwork parameters and runtime options from the command line
    ProgramOptions programOptions(argc, argv);

    // Create runtime
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(programOptions.m_RuntimeOptions));

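    // A single IRuntime instance is created up front and handed to whichever
    // MainImpl instantiation is selected below, so backends are only
    // initialised once.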
    std::string modelFormat = programOptions.m_ExNetParams.m_ModelFormat;

    // Forward to the implementation based on the parser type
    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("caffe") != std::string::npos)
    {
    #if defined(ARMNN_CAFFE_PARSER)
        return MainImpl<armnnCaffeParser::ICaffeParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
    #if defined(ARMNN_TF_PARSER)
        return MainImpl<armnnTfParser::ITfParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
        if (programOptions.m_ExNetParams.m_EnableDelegate)
        {
    #if defined(ARMNN_TFLITE_DELEGATE)
            return TfLiteDelegateMainImpl(programOptions.m_ExNetParams, runtime);
    #else
            ARMNN_LOG(fatal) << "Not built with ArmNN TfLite delegate support.";
            return EXIT_FAILURE;
    #endif
        }
    #if defined(ARMNN_TF_LITE_PARSER)
        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'armnn', 'caffe', 'tensorflow', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}