//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <catch.hpp>
#include <opencv2/opencv.hpp>
#include "ArmnnNetworkExecutor.hpp"
#include "Types.hpp"
#include "ImageUtils.hpp"
#include "SSDResultDecoder.hpp"
#include "YoloResultDecoder.hpp"

using namespace std;

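/* Resolve a test resource file name against TEST_RESOURCE_DIR, adding a '/'
 * separator only when the directory path lacks a trailing one. */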
static string GetResourceFilePath(const string& filename)
{
    string testResources = TEST_RESOURCE_DIR;

    if (testResources.back() != '/')
    {
        return testResources + "/" + filename;
    }
    else
    {
        return testResources + filename;
    }
}

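/* End-to-end delegate test: run the quantized SSD MobileNet v1 model on the
 * CpuRef backend and validate the executor's metadata and the decoded detections. */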
TEST_CASE("Test Delegate Execution SSD_MOBILE")
{
    string testResources = TEST_RESOURCE_DIR;
    REQUIRE(testResources != "");

    vector<armnn::BackendId> m_backends = {"CpuRef"};
    string file_path = GetResourceFilePath("ssd_mobilenet_v1.tflite");
    common::InferenceResults<float> results;
    cv::Mat processed;
    cv::Mat cache;
    float detectionThreshold = 0.6f;
    common::Profiling profiling(true);

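    /* profile the whole test, from network construction through decoding;
     * the elapsed time is printed in microseconds at the end */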
    profiling.ProfilingStart();
    auto executor = make_unique<common::ArmnnNetworkExecutor<float>>(file_path, m_backends, true);
    int width = executor->GetImageAspectRatio().m_Width;
    int height = executor->GetImageAspectRatio().m_Height;
    od::SSDResultDecoder ssdResult(detectionThreshold);

    /* check GetInputDataType */
    CHECK(executor->GetInputDataType() == armnn::DataType::QAsymmU8);
    /* check GetImageAspectRatio */
    CHECK(width == 300);
    CHECK(height == 300);

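    /* read the image and convert it from OpenCV's default BGR channel order to RGB */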
    cv::Mat inputFrame = cv::imread(GetResourceFilePath("basketball1.png"), cv::IMREAD_COLOR);
    cv::cvtColor(inputFrame, inputFrame, cv::COLOR_BGR2RGB);
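    /* resize it according to the input tensor requirements; the quantized network
     * takes the 8-bit pixel data directly, so no dtype conversion is needed here */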
    ResizeWithPad(inputFrame, processed, cache, common::Size(width, height));
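    /* run the inference */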
    CHECK(executor->Run(processed.data, processed.total() * processed.elemSize(), results) == true);
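    /* decode the results */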
    od::DetectedObjects detections = ssdResult.Decode(results,
                                                      common::Size(inputFrame.size().width, inputFrame.size().height),
                                                      common::Size(width, height), {});

    /* Make sure we've found 2 persons in the image */
    CHECK(detections.size() == 2);
    CHECK(detections[0].GetLabel() == "0");
    CHECK(detections[1].GetLabel() == "0");
    /* check GetQuantizationScale */
    CHECK(to_string(executor->GetQuantizationScale()) == string("0.007812"));
    /* check GetQuantizationOffset */
    CHECK(executor->GetQuantizationOffset() == 128);
    /* check GetOutputQuantizationScale */
    CHECK(executor->GetOutputQuantizationScale(0) == 0.0f);
    /* check GetOutputQuantizationOffset */
    CHECK(executor->GetOutputQuantizationOffset(0) == 0);
    profiling.ProfilingStopAndPrintUs("Overall test");
}

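/* End-to-end delegate test: run the float32 tiny YOLO v3 model on the CpuRef
 * backend and validate the executor's metadata and the decoded detections. */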
TEST_CASE("Test Delegate Execution YOLO_V3")
{
    string testResources = TEST_RESOURCE_DIR;
    REQUIRE(testResources != "");

    vector<armnn::BackendId> m_backends = {"CpuRef"};
    string file_path = GetResourceFilePath("yolo_v3_tiny_darknet_fp32.tflite");
    common::InferenceResults<float> results;
    cv::Mat processed;
    cv::Mat cache;
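    /* decoder thresholds: non-maximum-suppression overlap, class score, and objectness */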
    float NMSThreshold = 0.3f;
    float ClsThreshold = 0.3f;
    float ObjectThreshold = 0.3f;

    auto executor = make_unique<common::ArmnnNetworkExecutor<float>>(file_path, m_backends);
    int width = executor->GetImageAspectRatio().m_Width;
    int height = executor->GetImageAspectRatio().m_Height;
    od::YoloResultDecoder yoloResult(NMSThreshold, ClsThreshold, ObjectThreshold);

    /* check GetInputDataType */
    CHECK(executor->GetInputDataType() == armnn::DataType::Float32);
    /* check GetImageAspectRatio */
    CHECK(width == 416);
    CHECK(height == 416);

    /* read the image */
    cv::Mat inputFrame = cv::imread(GetResourceFilePath("basketball1.png"), cv::IMREAD_COLOR);
    /* resize it according to the input tensor requirements */
    ResizeWithPad(inputFrame, processed, cache, common::Size(width, height));
    /* convert to a 3-channel matrix of 32-bit floats */
    processed.convertTo(processed, CV_32FC3);
    /* run the inference */
    CHECK(executor->Run(processed.data, processed.total() * processed.elemSize(), results) == true);
    /* decode the results */
    od::DetectedObjects detections = yoloResult.Decode(results,
                                                       common::Size(inputFrame.size().width, inputFrame.size().height),
                                                       common::Size(width, height), {});

    /* Make sure we've found 2 persons in the image */
    CHECK(detections.size() == 2);
    CHECK(detections[0].GetLabel() == "0");
    CHECK(detections[1].GetLabel() == "0");
    /* check GetQuantizationScale */
    CHECK(to_string(executor->GetQuantizationScale()) == string("0.000000"));
    /* check GetQuantizationOffset */
    CHECK(executor->GetQuantizationOffset() == 0);
    /* check GetOutputQuantizationScale */
    CHECK(executor->GetOutputQuantizationScale(0) == 0.0f);
    /* check GetOutputQuantizationOffset */
    CHECK(executor->GetOutputQuantizationOffset(0) == 0);
}