# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
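"""Tests for the tosa_checker TOSA compatibility tool."""
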
import os
import tempfile

import pytest
import tensorflow as tf

import tosa_checker


@pytest.fixture(scope="module")
def build_tosa_non_compat_model():
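    """Build a Keras model using non_max_suppression_with_scores, which is not TOSA compatible."""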
    num_boxes = 6
    max_output_size = 5
    iou_threshold = 0.5
    score_threshold = 0.1

    def non_max_suppression(x):
        boxes = x[0]
        scores = x[1]
        output = tf.image.non_max_suppression_with_scores(
            boxes[0],
            scores[0],
            max_output_size=max_output_size,
            iou_threshold=iou_threshold,
            score_threshold=score_threshold,
            soft_nms_sigma=1.0,
        )
        return output

    boxes_in = tf.keras.layers.Input(
        shape=(num_boxes, 4), batch_size=1, dtype=tf.float32, name="boxes"
    )
    scores_in = tf.keras.layers.Input(
        shape=(num_boxes,), batch_size=1, dtype=tf.float32, name="scores"
    )
    outputs = tf.keras.layers.Lambda(non_max_suppression)([boxes_in, scores_in])
    model = tf.keras.models.Model(inputs=[boxes_in, scores_in], outputs=outputs)
    return model


@pytest.fixture(scope="module")
def build_tosa_compat_model():
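    """Build a small fully-connected Keras model that is TOSA compatible."""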
    input = tf.keras.layers.Input(shape=(16,))
    x = tf.keras.layers.Dense(8, activation="relu")(input)
    model = tf.keras.models.Model(inputs=[input], outputs=x)
    return model


def create_tflite(model):
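    """Convert a Keras model to a TFLite flatbuffer."""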
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()
    return tflite_model


@pytest.fixture(scope="module")
def non_compat_file(build_tosa_non_compat_model):
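    """Write the non-compatible model to a temporary .tflite file and yield its path."""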
    tflite_model = create_tflite(build_tosa_non_compat_model)
    with tempfile.TemporaryDirectory() as tmp_dir:
        file = os.path.join(tmp_dir, "test.tflite")
        with open(file, "wb") as f:
            f.write(tflite_model)
        yield file


@pytest.fixture(scope="module")
def compat_file(build_tosa_compat_model):
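    """Write the compatible model to a temporary .tflite file and yield its path."""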
    tflite_model = create_tflite(build_tosa_compat_model)
    with tempfile.TemporaryDirectory() as tmp_dir:
        file = os.path.join(tmp_dir, "test.tflite")
        with open(file, "wb") as f:
            f.write(tflite_model)
        yield file


class TestTosaCompatibilityTool:
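    """Tests for the TOSAChecker API against known compatible and non-compatible models."""
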
    def test_bad_tflite_file(self):
        make_bad_tfile = os.path.join(tempfile.mkdtemp(), "test.tflite")
        with open(make_bad_tfile, "wb") as f:
            f.write("bad tflite file".encode("ASCII"))
        with pytest.raises(RuntimeError):
            tosa_checker.TOSAChecker(model_path=make_bad_tfile)

    def test_tosa_non_compat_model(self, non_compat_file):
        checker = tosa_checker.TOSAChecker(model_path=non_compat_file)
        tosa_compatible = checker.is_tosa_compatible()
        assert tosa_compatible == False

        ops = checker._get_tosa_compatibility_for_ops()
        assert type(ops) == list
        assert [[op.name, op.is_tosa_compatible] for op in ops] == [
            ["tfl.pseudo_const", True],
            ["tfl.pseudo_const", True],
            ["tfl.pseudo_const", True],
            ["tfl.strided_slice", True],
            ["tfl.pseudo_const", True],
            ["tfl.pseudo_const", True],
            ["tfl.pseudo_const", True],
            ["tfl.strided_slice", True],
            ["tfl.pseudo_const", True],
            ["tfl.pseudo_const", True],
            ["tfl.pseudo_const", True],
            ["tfl.pseudo_const", True],
            ["tfl.non_max_suppression_v5", False],
        ]

        tosa_ops = checker._get_used_tosa_ops()
        assert type(tosa_ops) == list
        assert [[op.name, op.is_tosa_compatible] for op in tosa_ops] == [
            ["tosa.const", True],
            ["tosa.const", True],
            ["tosa.const", True],
            ["tosa.const", True],
            ["tosa.reshape", True],
            ["tosa.reshape", True],
        ]

    def test_tosa_compat_model(self, compat_file):
        checker = tosa_checker.TOSAChecker(model_path=compat_file)
        tosa_compatible = checker.is_tosa_compatible()
        assert tosa_compatible == True

        ops = checker._get_tosa_compatibility_for_ops()
        assert type(ops) == list
        assert [[op.name, op.is_tosa_compatible] for op in ops] == [
            ["tfl.pseudo_const", True],
            ["tfl.no_value", True],
            ["tfl.fully_connected", True],
        ]

        tosa_ops = checker._get_used_tosa_ops()
        assert type(tosa_ops) == list
        assert [[op.name, op.is_tosa_compatible] for op in tosa_ops] == [
            ["tosa.const", True],
            ["tosa.const", True],
            ["tosa.fully_connected", True],
            ["tosa.clamp", True],
        ]

    def test_tosa_non_compat_model_mlir_representation(self, non_compat_file):
        checker = tosa_checker.TOSAChecker(model_path=non_compat_file)

        tfl_mlir_representation = checker._get_mlir_model_representation(
            elide_large_elements_attrs=True
        )
        expected_mlir_representation = """\
module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
  func @main(%arg0: tensor<1x6x4xf32> {tf_saved_model.index_path = ["boxes"]}, %arg1: tensor<1x6xf32> {tf_saved_model.index_path = ["scores"]}) -> (tensor<?xf32> {tf_saved_model.index_path = ["lambda_1"]}, tensor<?xi32> {tf_saved_model.index_path = ["lambda"]}) attributes {tf.entry_function = {inputs = "serving_default_boxes:0,serving_default_scores:0", outputs = "PartitionedCall:1,PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
    %0 = "tfl.pseudo_const"() {value = dense<0> : tensor<3xi32>} : () -> tensor<3xi32>
    %1 = "tfl.pseudo_const"() {value = dense<[1, 6, 4]> : tensor<3xi32>} : () -> tensor<3xi32>
    %2 = "tfl.pseudo_const"() {value = dense<1> : tensor<3xi32>} : () -> tensor<3xi32>
    %3 = "tfl.strided_slice"(%arg0, %0, %1, %2) {begin_mask = 6 : i32, ellipsis_mask = 0 : i32, end_mask = 6 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<1x6x4xf32>, tensor<3xi32>, tensor<3xi32>, tensor<3xi32>) -> tensor<6x4xf32>
    %4 = "tfl.pseudo_const"() {value = dense<0> : tensor<2xi32>} : () -> tensor<2xi32>
    %5 = "tfl.pseudo_const"() {value = dense<[1, 6]> : tensor<2xi32>} : () -> tensor<2xi32>
    %6 = "tfl.pseudo_const"() {value = dense<1> : tensor<2xi32>} : () -> tensor<2xi32>
    %7 = "tfl.strided_slice"(%arg1, %4, %5, %6) {begin_mask = 2 : i32, ellipsis_mask = 0 : i32, end_mask = 2 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<1x6xf32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<6xf32>
    %8 = "tfl.pseudo_const"() {value = dense<5> : tensor<i32>} : () -> tensor<i32>
    %9 = "tfl.pseudo_const"() {value = dense<5.000000e-01> : tensor<f32>} : () -> tensor<f32>
    %10 = "tfl.pseudo_const"() {value = dense<1.000000e-01> : tensor<f32>} : () -> tensor<f32>
    %11 = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<f32>} : () -> tensor<f32>
    %selected_indices, %selected_scores, %valid_outputs = "tfl.non_max_suppression_v5"(%3, %7, %8, %9, %10, %11) : (tensor<6x4xf32>, tensor<6xf32>, tensor<i32>, tensor<f32>, tensor<f32>, tensor<f32>) -> (tensor<?xi32>, tensor<?xf32>, tensor<*xi32>)
    return %selected_scores, %selected_indices : tensor<?xf32>, tensor<?xi32>
  }
}
"""
        assert tfl_mlir_representation == expected_mlir_representation

        tosa_mlir_representation = checker._get_mlir_tosa_model_representation(
            elide_large_elements_attrs=True
        )
        expected_tosa_mlir_representation = """\
module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
  func @main(%arg0: tensor<1x6x4xf32> {tf_saved_model.index_path = ["boxes"]}, %arg1: tensor<1x6xf32> {tf_saved_model.index_path = ["scores"]}) -> (tensor<?xf32> {tf_saved_model.index_path = ["lambda_1"]}, tensor<?xi32> {tf_saved_model.index_path = ["lambda"]}) attributes {tf.entry_function = {inputs = "serving_default_boxes:0,serving_default_scores:0", outputs = "PartitionedCall:1,PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
    %0 = "tosa.const"() {value = dense<5> : tensor<i32>} : () -> tensor<i32>
    %1 = "tosa.const"() {value = dense<5.000000e-01> : tensor<f32>} : () -> tensor<f32>
    %2 = "tosa.const"() {value = dense<1.000000e-01> : tensor<f32>} : () -> tensor<f32>
    %3 = "tosa.const"() {value = dense<1.000000e+00> : tensor<f32>} : () -> tensor<f32>
    %4 = "tosa.reshape"(%arg0) {new_shape = [6, 4]} : (tensor<1x6x4xf32>) -> tensor<6x4xf32>
    %5 = "tosa.reshape"(%arg1) {new_shape = [6]} : (tensor<1x6xf32>) -> tensor<6xf32>
    %selected_indices, %selected_scores, %valid_outputs = "tfl.non_max_suppression_v5"(%4, %5, %0, %1, %2, %3) : (tensor<6x4xf32>, tensor<6xf32>, tensor<i32>, tensor<f32>, tensor<f32>, tensor<f32>) -> (tensor<?xi32>, tensor<?xf32>, tensor<*xi32>)
    return %selected_scores, %selected_indices : tensor<?xf32>, tensor<?xi32>
  }
}
"""
        assert tosa_mlir_representation == expected_tosa_mlir_representation

    def test_tosa_compat_model_mlir_representation(self, compat_file):
        checker = tosa_checker.TOSAChecker(model_path=compat_file)

        tfl_mlir_representation = checker._get_mlir_model_representation(
            elide_large_elements_attrs=True
        )
        expected_mlir_representation = """\
module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
  func @main(%arg0: tensor<?x16xf32> {tf_saved_model.index_path = ["input_1"]}) -> (tensor<?x8xf32> {tf_saved_model.index_path = ["dense"]}) attributes {tf.entry_function = {inputs = "serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
    %0 = "tfl.pseudo_const"() {value = opaque<"elided_large_const", "0xDEADBEEF"> : tensor<8x16xf32>} : () -> tensor<8x16xf32>
    %1 = "tfl.no_value"() {value} : () -> none
    %2 = "tfl.fully_connected"(%arg0, %0, %1) {asymmetric_quantize_inputs = false, fused_activation_function = "RELU", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<?x16xf32>, tensor<8x16xf32>, none) -> tensor<?x8xf32>
    return %2 : tensor<?x8xf32>
  }
}
"""
        assert tfl_mlir_representation == expected_mlir_representation

        tosa_mlir_representation = checker._get_mlir_tosa_model_representation(
            elide_large_elements_attrs=True
        )
        expected_tosa_mlir_representation = """\
module attributes {tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32} {
  func @main(%arg0: tensor<?x16xf32> {tf_saved_model.index_path = ["input_1"]}) -> (tensor<?x8xf32> {tf_saved_model.index_path = ["dense"]}) attributes {tf.entry_function = {inputs = "serving_default_input_1:0", outputs = "StatefulPartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
    %0 = "tosa.const"() {value = opaque<"elided_large_const", "0xDEADBEEF"> : tensor<8x16xf32>} : () -> tensor<8x16xf32>
    %1 = "tosa.const"() {value = dense<0.000000e+00> : tensor<8xf32>} : () -> tensor<8xf32>
    %2 = "tosa.fully_connected"(%arg0, %0, %1) : (tensor<?x16xf32>, tensor<8x16xf32>, tensor<8xf32>) -> tensor<?x8xf32>
    %3 = "tosa.clamp"(%2) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<?x8xf32>) -> tensor<?x8xf32>
    return %3 : tensor<?x8xf32>
  }
}
"""
        assert tosa_mlir_representation == expected_tosa_mlir_representation