Pavel Macenauer | 59e057f | 2020-04-15 14:17:26 +0000 | [diff] [blame] | 1 | #!/usr/bin/env python3 |
Pavel Macenauer | d0fedae | 2020-04-15 14:52:57 +0000 | [diff] [blame] | 2 | # Copyright 2020 NXP |
| 3 | # SPDX-License-Identifier: MIT |
| 4 | |
| 5 | import pyarmnn as ann |
| 6 | import numpy as np |
| 7 | from PIL import Image |
| 8 | import example_utils as eu |
| 9 | |
| 10 | |
def preprocess_onnx(img: Image, width: int, height: int, data_type, scale: float, mean: list,
                    stddev: list):
    """Preprocessing function for ONNX imagenet models based on:
    https://github.com/onnx/models/blob/master/vision/classification/imagenet_inference.ipynb

    Args:
        img (PIL.Image): Loaded PIL.Image
        width (int): Target image width
        height (int): Target image height
        data_type: Image datatype (np.uint8 or np.float32)
        scale (float): Scaling factor
        mean: RGB mean values
        stddev: RGB standard deviation

    Returns:
        np.array: Preprocessed image as a flattened 1D Numpy array (NCHW order)
    """
    # First rescale to a fixed 256x256, then center crop to the target size.
    img = img.resize((256, 256), Image.BILINEAR)
    # Integer floor division keeps the crop box exact even for odd target
    # sizes, instead of passing float halves for PIL to truncate.
    left = (256 - width) // 2
    top = (256 - height) // 2
    right = (256 + width) // 2
    bottom = (256 + height) // 2
    img = img.crop((left, top, right, bottom))
    img = img.convert('RGB')
    img = np.array(img)
    img = np.reshape(img, (-1, 3))  # reshape to [RGB][RGB]...
    # Normalize: divide by scale, subtract per-channel mean, divide by stddev.
    img = ((img / scale) - mean) / stddev
    # NHWC to NCHW conversion, by default NHWC is expected
    # image is loaded as [RGB][RGB][RGB]... transposing it makes it [RRR...][GGG...][BBB...]
    img = np.transpose(img)
    # Flatten into a 1D tensor and convert to the requested data type
    # (the original comment said "float32", but data_type may be np.uint8).
    img = img.flatten().astype(data_type)
    return img
| 44 | |
| 45 | |
if __name__ == "__main__":
    # Fetch the example image, the ImageNet class labels and the ONNX model.
    kitten_filename = eu.download_file('https://s3.amazonaws.com/model-server/inputs/kitten.jpg')
    labels_filename = eu.download_file('https://s3.amazonaws.com/onnx-model-zoo/synset.txt')
    model_filename = eu.download_file(
        'https://s3.amazonaws.com/onnx-model-zoo/mobilenet/mobilenetv2-1.0/mobilenetv2-1.0.onnx')

    # Parse the ONNX model and load it onto the Arm NN runtime.
    net_id, parser, runtime = eu.create_onnx_network(model_filename)

    # Query binding info for the model's input layer.
    input_binding_info = parser.GetNetworkInputBindingInfo("data")

    # Query binding info for the output layer and build reusable output tensors.
    output_binding_info = parser.GetNetworkOutputBindingInfo("mobilenetv20_output_flatten0_reshape0")
    output_tensors = ann.make_output_tensors([output_binding_info])

    # Human-readable class names, one per line.
    labels = eu.load_labels(labels_filename)

    # Preprocess the input image(s) into the 224x224 normalized layout
    # the model expects, using the ONNX-specific preprocessing above.
    image_names = [kitten_filename]
    images = eu.load_images(image_names,
                            224, 224,
                            np.float32,
                            255.0,
                            [0.485, 0.456, 0.406],
                            [0.229, 0.224, 0.225],
                            preprocess_onnx)

    for image_name, image in zip(image_names, images):
        # Wrap the preprocessed image in Arm NN input tensors.
        input_tensors = ann.make_input_tensors([input_binding_info], [image])

        # Execute the network.
        print("Running inference on '{0}' ...".format(image_name))
        runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

        # Rank class scores in descending order and report the five best matches.
        out_tensor = ann.workload_tensors_to_ndarray(output_tensors)[0][0]
        results = np.argsort(out_tensor)[::-1]
        eu.print_top_n(5, results, labels, out_tensor)