/*
* Copyright (c) 2021 Arm Limited. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef USECASE_COMMON_UTILS_HPP
#define USECASE_COMMON_UTILS_HPP
#include "hal.h"
#include "Model.hpp"
#include "AppContext.hpp"
#include "Profiler.hpp"
#include "UseCaseHandler.hpp" /* Handlers for different user options. */
#include "Classifier.hpp" /* Classifier. */
#include "InputFiles.hpp"
#include <inttypes.h>
/* Helper macro to convert RGB888 to RGB565 format. */
#define RGB888_TO_RGB565(R8, G8, B8)  (((((R8) >> 3) & 0x1F) << 11) | \
                                       ((((G8) >> 2) & 0x3F) << 5)  | \
                                       (((B8) >> 3) & 0x1F))
constexpr uint16_t COLOR_BLACK = 0;
constexpr uint16_t COLOR_GREEN = RGB888_TO_RGB565( 0, 255, 0); // 2016;
constexpr uint16_t COLOR_YELLOW = RGB888_TO_RGB565(255, 255, 0); // 65504;
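/* Compile-time sanity checks for the macro above (an added sketch, not part
 * of the original API): the expected packed values match the constants noted
 * in the comments. */
static_assert(COLOR_GREEN == 2016, "G=255 must map to the 6-bit field at bits [10:5]");
static_assert(COLOR_YELLOW == 65504, "R=G=255 must fill the red and green fields");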
void DisplayCommonMenu();
namespace image {
/**
* @brief Helper function to convert a UINT8 image to INT8 format.
* @param[in,out] data Pointer to the data start.
* @param[in] kMaxImageSize Total number of pixels in the image.
**/
void ConvertImgToInt8(void* data, size_t kMaxImageSize);
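/* For illustration, a minimal sketch of the conversion this declares (an
 * assumption based on the brief above, not the actual definition):
 *
 *     auto* p = static_cast<uint8_t*>(data);
 *     for (size_t i = 0; i < kMaxImageSize; ++i) {
 *         p[i] = static_cast<uint8_t>(static_cast<int32_t>(p[i]) - 128);
 *     }
 *
 * Each unsigned pixel is offset by -128 so the same buffer can be
 * reinterpreted as int8 input for a quantised model. */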
/**
* @brief Presents inference results using the data presentation
* object.
* @param[in] platform Reference to the hal platform object.
* @param[in] results Vector of classification results to be displayed.
* @return true if successful, false otherwise.
**/
bool PresentInferenceResult(hal_platform& platform,
                            const std::vector<arm::app::ClassificationResult>& results);
/**
 * @brief Presents inference results along with the inference time using the data presentation
 * object.
 * @param[in] platform Reference to the hal platform object.
 * @param[in] results Vector of classification results to be displayed.
 * @param[in] infTimeMs Inference time in ms.
 * @return true if successful, false otherwise.
 **/
bool PresentInferenceResult(hal_platform& platform,
                            const std::vector<arm::app::ClassificationResult>& results,
                            const time_t infTimeMs);
/**
 * @brief Presents inference results, optionally along with the inference
 * time, using the data presentation object.
 * @param[in] platform Reference to the hal platform object.
 * @param[in] results Vector of classification results to be displayed.
 * @param[in] profilingEnabled If true, the inference time is also shown.
 * @param[in] infTimeMs Inference time in ms.
 * @return true if successful, false otherwise.
 **/
bool PresentInferenceResult(hal_platform& platform,
                            const std::vector<arm::app::ClassificationResult>& results,
                            bool profilingEnabled,
                            const time_t infTimeMs = 0);
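/* Example usage (a hedged sketch; `platform` and the populated `results`
 * vector are assumed to come from the calling use-case handler):
 *
 *     std::vector<arm::app::ClassificationResult> results;
 *     // ... run inference and fill `results` ...
 *     if (!image::PresentInferenceResult(platform, results)) {
 *         // presentation failed; report the error
 *     }
 */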
} /* namespace image */
/**
* @brief Helper function to increment current input feature vector index.
 * @param[in,out] ctx Reference to the application context object.
 * @param[in] useCase Use case name.
**/
void IncrementAppCtxIfmIdx(arm::app::ApplicationContext& ctx, std::string useCase);
/**
* @brief Helper function to set the input feature map index.
 * @param[in,out] ctx Reference to the application context object.
 * @param[in] idx Value to be set.
 * @param[in] ctxIfmName Input Feature Map name.
* @return true if index is set, false otherwise.
**/
bool SetAppCtxIfmIdx(arm::app::ApplicationContext& ctx, uint32_t idx, std::string ctxIfmName);
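/* Example usage (a hedged sketch; "img" and "imgIndex" are hypothetical
 * use-case and context-variable names chosen for illustration):
 *
 *     arm::app::ApplicationContext caseContext;
 *     caseContext.Set<uint32_t>("imgIndex", 0);
 *     if (SetAppCtxIfmIdx(caseContext, 2, "imgIndex")) {
 *         IncrementAppCtxIfmIdx(caseContext, "img"); // moves on to index 3
 *     }
 */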
namespace common {
enum OPCODES {
MENU_OPT_RUN_INF_NEXT = 1, /* Run on next vector. */
MENU_OPT_RUN_INF_CHOSEN, /* Run on a user provided vector index. */
MENU_OPT_RUN_INF_ALL, /* Run inference on all. */
MENU_OPT_SHOW_MODEL_INFO, /* Show model info. */
MENU_OPT_LIST_IFM /* List the current IFM. */
};
} /* namespace common */
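/* A minimal dispatch sketch for the opcodes above (hedged; the concrete
 * handlers are declared in UseCaseHandler.hpp and may take extra arguments):
 *
 *     int menuOption = arm::app::ReadUserInputAsInt(platform);
 *     switch (menuOption) {
 *         case common::MENU_OPT_RUN_INF_NEXT:
 *             // run inference on the next baked-in input vector
 *             break;
 *         case common::MENU_OPT_SHOW_MODEL_INFO:
 *             // display model information
 *             break;
 *         default:
 *             DisplayCommonMenu(); // unknown option: show the menu again
 *             break;
 *     }
 */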
namespace arm {
namespace app {
/**
 * @brief Run inference using the given model. If profiling
 * is enabled, it will also log the statistics.
 * @param[in] model Reference to the initialised model.
 * @param[in] profiler Reference to the initialised profiler.
 * @return true if inference succeeds, false otherwise.
 **/
bool RunInference(arm::app::Model& model, Profiler& profiler);
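/* Example usage (a hedged sketch; the Profiler constructor arguments are an
 * assumption, and the model's input tensor is presumed already populated):
 *
 *     arm::app::Profiler profiler{&platform, "my_usecase"};
 *     if (!arm::app::RunInference(model, profiler)) {
 *         // inference failed
 *     }
 */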
/**
 * @brief Read user input and return it as an integer.
* @param[in] platform Reference to the hal platform object.
* @return Integer value corresponding to the user input.
**/
int ReadUserInputAsInt(hal_platform& platform);
#if VERIFY_TEST_OUTPUT
/**
 * @brief Helper function to dump a tensor to stdout.
 * @param[in] tensor Tensor to be dumped.
 * @param[in] lineBreakForNumElements Number of elements
 * after which a line break will be added.
 **/
void DumpTensor(const TfLiteTensor* tensor,
size_t lineBreakForNumElements = 16);
/**
 * @brief Helper function to dump raw tensor data to stdout.
 * @param[in] tensorData Pointer to the tensor data.
 * @param[in] size Number of bytes to dump.
 * @param[in] lineBreakForNumElements Number of elements
 * after which a line break will be added.
 **/
void DumpTensorData(const uint8_t* tensorData,
                    size_t size,
                    size_t lineBreakForNumElements = 16);
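/* Example usage (a hedged sketch; fetching the output tensor through
 * Model::GetOutputTensor is an assumption about the Model interface):
 *
 *     DumpTensor(model.GetOutputTensor(0));
 */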
#endif /* VERIFY_TEST_OUTPUT */
/**
 * @brief List the files baked into the application.
 * @param[in] ctx Reference to the application context.
 * @return true if the event was handled, false otherwise.
**/
bool ListFilesHandler(ApplicationContext& ctx);
} /* namespace app */
} /* namespace arm */
#endif /* USECASE_COMMON_UTILS_HPP */