Remove parsing of EthosN profiling data, as it is too specific for pyarmnn

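The Ethos-N trace.json parsing has been dropped from the profiling helper,
and get_profiling_data() no longer takes a list of preferred backends.
Existing callers only need to drop the second argument; a minimal sketch,
assuming pyarmnn is imported as ann and `profiler` was obtained from the
runtime as before:

    # old call site (no longer valid): the backends list is gone
    # profiling_data = ann.get_profiling_data(profiler, preferred_backends)

    # new call site: only the IProfiler object is passed in
    profiling_data = ann.get_profiling_data(profiler)
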
Signed-off-by: Nina Drozd <nina.drozd@arm.com>
Change-Id: I04d82dd3a9759c660fdb908433becda915a3f5b5
diff --git a/python/pyarmnn/src/pyarmnn/_utilities/profiling_helper.py b/python/pyarmnn/src/pyarmnn/_utilities/profiling_helper.py
index 6988c52..c6e0718 100644
--- a/python/pyarmnn/src/pyarmnn/_utilities/profiling_helper.py
+++ b/python/pyarmnn/src/pyarmnn/_utilities/profiling_helper.py
@@ -5,11 +5,6 @@
 """
 import json
 from collections import namedtuple
-from operator import itemgetter
-import itertools
-
-"""Profiling data is in cycles, to get duration in us, divide by clock frequency. Expected clock frequency is 5 MHz."""
-ClockFrequencyDivider = 5
 
 ProfilerData = namedtuple('ProfilerData', ['inference_data', 'per_workload_execution_data'])
 ProfilerData.__doc__ = """Container to hold the profiling inference data, and the profiling data per workload.
@@ -45,14 +40,12 @@
 """
 
 
-def get_profiling_data(profiler: 'IProfiler', backends) -> ProfilerData:
-    """Reads IProfiler object passed in, extracts the relevant data.
-        If EthosNAcc backend is enabled and trace.json profiling file present
-        adds EthosN profiling data and returns all profiling data in a ProfilerData container.
+def get_profiling_data(profiler: 'IProfiler') -> ProfilerData:
+    """Reads IProfiler object passed in, extracts the relevant data
+        and returns it in a ProfilerData container.
 
         Args:
             profiler (IProfiler): The IProfiler object to be parsed.
-            backends: List of preferred backends.
 
         Returns:
             ProfilerData: A container containing the relevant data extracted from the Profiler output.
@@ -77,62 +70,8 @@
         if exec_key.startswith("Wall clock time_#") and exec_value["type"] == "Measurement":
             time_data = __get_wall_clock_times__(exec_value)
             inference_data.update(time_data)
-        ethosn_backend = [backend for backend in backends if "EthosNAcc" == str(backend)]
-        if ethosn_backend:
-            ethosn_profiling_data = get_ethosn_profiling_data()
-            if ethosn_profiling_data:
-                workload_data.update(ethosn_profiling_data)
-
     return ProfilerData(inference_data=inference_data, per_workload_execution_data=workload_data)
 
-def get_ethosn_profiling_data(profiling_json_file = 'trace.json'):
-    """If profiling is enabled, profiling data will be recorded in the current directory in trace.json file.
-    Read the trace.json file to get timings and operation names.
-
-    Args:
-        profiling_json_file (str): Name of profiling json file, defaults to trace.json created in current directory.
-
-    Returns:
-        dictionary containing EthosN workload_data of the same structure as per_workload_execution_data.
-            Each operation has
-            'time_unit' - timer units.
-            'execution_time' - list of total execution times for each inference run.
-            'backend' - backend used for this operation.
-"""
-    try:
-        with open(profiling_json_file, 'r') as trace_file:
-            json_objects = json.loads(trace_file.read())
-
-            # Filter python objects with list comprehensions
-            per_workload_execution_data = {}
-            commands = [command for command in json_objects if command['name'].startswith("COMMAND")]
-
-            mce_ple_commands = [mce_ple_command for mce_ple_command in commands
-                                if "OPERATION_MCE_PLE" in mce_ple_command['args']['command_xml'].keys()]
-            per_workload_execution_data.update(__get_command_timings_with_op_info__(mce_ple_commands,
-                                                                                    "OPERATION_MCE_PLE", "MCE_OP_INFO"))
-
-            ple_only_commands = [mce_ple_command for mce_ple_command in commands
-                                if "OPERATION_PLE_ONLY" in mce_ple_command['args']['command_xml'].keys()]
-            per_workload_execution_data.update(__get_command_timings_with_op_info__(ple_only_commands,
-                                                                                "OPERATION_PLE_ONLY", "PLE_OP_INFO"))
-
-            other_command_names = {"OPERATION_SOFTMAX", "OPERATION_CONVERT", "OPERATION_DUMP_DRAM",
-                                   "OPERATION_DUMP_SRAM", "OPERATION_FENCE", "OPERATION_SECTION", "OPERATION_DELAY"}
-
-            for command_name in other_command_names:
-                commands_to_parse = [command for command in commands
-                                     if command_name in command['args']['command_xml'].keys()]
-                per_workload_execution_data.update(__get_command_timings__(commands_to_parse, command_name))
-
-            return per_workload_execution_data
-    except FileNotFoundError:
-        print("EthosN profiling file not found, not adding profiling data:", profiling_json_file)
-        return None
-    except Exception as e:
-        print("Got exception while trying to parse EthosN profiling data:", e)
-        return None
-
 
 def __get_wall_clock_times__(wall_clock_item):
     execution_times = wall_clock_item["raw"]
@@ -155,40 +94,4 @@
     elif "ethos" in exec_key.lower():
         return "EthosNAcc"
     else:
-        return "Unknown"
-
-def __get_command_timings_with_op_info__(json_objects, operation_name, op_info_name):
-    commands_data = {}
-    sorted_objects = sorted(json_objects, key=itemgetter('name'))
-    for key, group in itertools.groupby(sorted_objects, key=lambda x:x['name']):
-        command_objects = list(group)
-        time_data = {"time_unit": 'us'}
-        raw_data = []
-        for command_object in command_objects:
-            duration = ( command_object['ts_end'] - command_object['ts_start'] ) / ClockFrequencyDivider
-            raw_data.append(duration)
-            time_data["execution_time"] = raw_data
-        mce_ple_operation_name = command_objects[0]['args']['command_xml'][operation_name][op_info_name]['OPERATION']
-        layer_name = "EthosnCommand#" + str(command_objects[0]['args']['command_idx']) + "_" + \
-                     mce_ple_operation_name.capitalize()
-        time_data["backend"] = __get_backend(layer_name)
-        commands_data[layer_name] = time_data
-    return commands_data
-
-def __get_command_timings__(json_objects, operation_name):
-    commands_data = {}
-    sorted_objects = sorted(json_objects, key=itemgetter('name'))
-    for key, group in itertools.groupby(sorted_objects, key=lambda x:x['name']):
-        command_objects = list(group)
-        time_data = {"time_unit": 'us'}
-        raw_data = []
-        for command_object in command_objects:
-            # Profiling data is in cycles, to get duration in us, divide by clock frequency
-            duration = ( command_object['ts_end'] - command_object['ts_start'] ) / ClockFrequencyDivider
-            raw_data.append(duration)
-            time_data["execution_time"] = raw_data
-        layer_name = "EthosnCommand#" + str(command_objects[0]['args']['command_idx']) + "_" + \
-                     operation_name.capitalize()
-        time_data["backend"] = __get_backend(layer_name)
-        commands_data[layer_name] = time_data
-    return commands_data
\ No newline at end of file
+        return "Unknown"
diff --git a/python/pyarmnn/test/test_profiling_utilities.py b/python/pyarmnn/test/test_profiling_utilities.py
index 9fb7fd8..642d293 100644
--- a/python/pyarmnn/test/test_profiling_utilities.py
+++ b/python/pyarmnn/test/test_profiling_utilities.py
@@ -24,9 +24,7 @@
 
 
 def test_inference_exec(mock_profiler):
-    preferred_backends = [ann.BackendId('CpuRef'), ann.BackendId('CpuAcc'),
-                          ann.BackendId('GpuAcc'), ann.BackendId('EthosNAcc')]
-    profiling_data_obj = ann.get_profiling_data(mock_profiler, preferred_backends)
+    profiling_data_obj = ann.get_profiling_data(mock_profiler)
 
     assert (len(profiling_data_obj.inference_data) > 0)
     assert (len(profiling_data_obj.per_workload_execution_data) > 0)
@@ -62,11 +60,9 @@
                                                                   'EthosNSomeMock4dWorkload_Execute_#8')
                                                                  ])
 def test_profiler_workloads(mock_profiler, exec_times, unit, backend, workload):
-    preferred_backends = [ann.BackendId('CpuRef'), ann.BackendId('CpuAcc'),
-                          ann.BackendId('GpuAcc'), ann.BackendId('EthosNAcc')]
-    profiling_data_obj = ann.get_profiling_data(mock_profiler, preferred_backends)
+    profiling_data_obj = ann.get_profiling_data(mock_profiler)
 
     work_load_exec = profiling_data_obj.per_workload_execution_data[workload]
     assert work_load_exec["execution_time"] == exec_times
     assert work_load_exec["time_unit"] == unit
     assert work_load_exec["backend"] == backend