Qortal/Brooklyn (mirror of https://github.com/Qortal/Brooklyn.git)

commit 47a7970dc5 (parent f55982be23)

    cleaning up the git
@@ -0,0 +1,53 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

"""
Contains functions specific to decoding and processing inference results for SSD Mobilenet V1 models.
"""

import cv2
import numpy as np


def ssd_processing(output: np.ndarray, confidence_threshold=0.60):
    """
    Gets class, bounding box positions and confidence from the four outputs of the SSD model.

    Args:
        output: Vector of outputs from network.
        confidence_threshold: Selects only strong detections above this value.

    Returns:
        A list of detected objects in the form [class, [box positions], confidence]
    """
    if len(output) != 4:
        raise RuntimeError('Number of outputs from SSD model does not equal 4')

    position, classification, confidence, num_detections = [index[0] for index in output]

    detections = []
    for i in range(int(num_detections)):
        if confidence[i] > confidence_threshold:
            class_idx = classification[i]
            box = position[i, :4]
            # Reorder positions in format [x_min, y_min, x_max, y_max]
            box[0], box[1], box[2], box[3] = box[1], box[0], box[3], box[2]
            confidence_value = confidence[i]
            detections.append((class_idx, box, confidence_value))
    return detections


def ssd_resize_factor(video: cv2.VideoCapture):
    """
    Gets a multiplier to scale the bounding box positions to
    their correct position in the frame.

    Args:
        video: Video capture object, contains information about data source.

    Returns:
        Resizing factor to scale box coordinates to output frame size.
    """
    frame_height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
    return max(frame_height, frame_width)
@@ -0,0 +1,98 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

"""
Contains functions specific to decoding and processing inference results for YOLO V3 Tiny models.
"""

import cv2
import numpy as np


def iou(box1: list, box2: list):
    """
    Calculates the intersection-over-union (IoU) value for two bounding boxes.

    Args:
        box1: Array of positions for first bounding box
              in the form [x_min, y_min, x_max, y_max].
        box2: Array of positions for second bounding box.

    Returns:
        Calculated intersection-over-union (IoU) value for two bounding boxes.
    """
    area_box1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area_box2 = (box2[2] - box2[0]) * (box2[3] - box2[1])

    if area_box1 <= 0 or area_box2 <= 0:
        iou_value = 0
    else:
        y_min_intersection = max(box1[1], box2[1])
        x_min_intersection = max(box1[0], box2[0])
        y_max_intersection = min(box1[3], box2[3])
        x_max_intersection = min(box1[2], box2[2])

        area_intersection = max(0, y_max_intersection - y_min_intersection) * \
                            max(0, x_max_intersection - x_min_intersection)
        area_union = area_box1 + area_box2 - area_intersection

        try:
            iou_value = area_intersection / area_union
        except ZeroDivisionError:
            iou_value = 0

    return iou_value


def yolo_processing(output: np.ndarray, confidence_threshold=0.40, iou_threshold=0.40):
    """
    Performs non-maximum suppression on input detections. Any detections
    with IOU value greater than given threshold are suppressed.

    Args:
        output: Vector of outputs from network.
        confidence_threshold: Selects only strong detections above this value.
        iou_threshold: Filters out boxes with IOU values above this value.

    Returns:
        A list of detected objects in the form [class, [box positions], confidence]
    """
    if len(output) != 1:
        raise RuntimeError('Number of outputs from YOLO model does not equal 1')

    # Find the array index of detections with confidence value above threshold
    confidence_det = output[0][:, :, 4][0]
    detections = list(np.where(confidence_det > confidence_threshold)[0])
    all_det, nms_det = [], []

    # Create list of all detections above confidence threshold
    for d in detections:
        box_positions = list(output[0][:, d, :4][0])
        confidence_score = output[0][:, d, 4][0]
        class_idx = np.argmax(output[0][:, d, 5:])
        all_det.append((class_idx, box_positions, confidence_score))

    # Suppress detections with IOU value above threshold
    while all_det:
        element = int(np.argmax([all_det[i][2] for i in range(len(all_det))]))
        nms_det.append(all_det.pop(element))
        all_det = [det for det in all_det if iou(det[1], nms_det[-1][1]) <= iou_threshold]
    return nms_det


def yolo_resize_factor(video: cv2.VideoCapture, input_binding_info: tuple):
    """
    Gets a multiplier to scale the bounding box positions to
    their correct position in the frame.

    Args:
        video: Video capture object, contains information about data source.
        input_binding_info: Contains shape of model input layer.

    Returns:
        Resizing factor to scale box coordinates to output frame size.
    """
    frame_height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
    model_height, model_width = list(input_binding_info[1].GetShape())[1:3]
    return max(frame_height, frame_width) / max(model_height, model_width)
@@ -0,0 +1,175 @@
# Automatic Speech Recognition with PyArmNN

This sample application guides the user through performing automatic speech recognition (ASR) with the PyArmNN API.

## Prerequisites

### PyArmNN

Before proceeding to the next steps, make sure that you have successfully installed the newest version of PyArmNN on your system by following the instructions in the README of the PyArmNN root directory.

You can verify that the PyArmNN library is installed and check the PyArmNN version using:

```bash
$ pip show pyarmnn
```

You can also verify it by running the following and getting output similar to below:

```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
'28.0.0'
```

### Dependencies

Install the PortAudio package:

```bash
$ sudo apt-get install libsndfile1 libportaudio2
```

Install the required Python modules:

```bash
$ pip install -r requirements.txt
```

### Model

The model we are using is [Wav2Letter](https://github.com/ARM-software/ML-zoo/tree/master/models/speech_recognition/wav2letter/tflite_int8), which can be found in the [Arm Model Zoo repository](https://github.com/ARM-software/ML-zoo/tree/master/models).

A small selection of suitable wav files containing human speech can be found [here](https://github.com/Azure-Samples/cognitive-services-speech-sdk/tree/master/sampledata/audiofiles).

Labels for this model are defined within run_audio_file.py.
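
For reference, this is the character mapping that run_audio_file.py (shown later in this change) uses; index 28 (`$`) is the blank token that the decoder filters out:

```python
labels = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i', 9: 'j', 10: 'k', 11: 'l', 12: 'm',
          13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r', 18: 's', 19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y',
          25: 'z', 26: "'", 27: ' ', 28: '$'}
```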

## Performing Automatic Speech Recognition

### Processing Audio Files

Please ensure that your audio file has a sampling rate of 16000Hz.

To run ASR on an audio file, use the following command:

```bash
$ python run_audio_file.py --audio_file_path <path/to/your_audio> --model_file_path <path/to/your_model>
```

You may also add the optional flags:

* `--preferred_backends`

  * Takes the preferred backends in preference order, separated by whitespace. For example, passing in "CpuAcc CpuRef" will be read as the list ["CpuAcc", "CpuRef"] (the default list)

  * CpuAcc represents the CPU backend

  * GpuAcc represents the GPU backend

  * CpuRef represents the CPU reference kernels

* `--help` prints all available options to screen
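
For example, the following invocation (the paths are placeholders) prefers GPU acceleration and falls back to the accelerated and then reference CPU kernels:

```bash
$ python run_audio_file.py --audio_file_path samples/speech.wav \
    --model_file_path wav2letter_int8.tflite \
    --preferred_backends GpuAcc CpuAcc CpuRef
```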

## Application Overview

1. [Initialization](#initialization)

2. [Creating a network](#creating-a-network)

3. [Automatic speech recognition pipeline](#automatic-speech-recognition-pipeline)

### Initialization

The application parses the supplied user arguments and loads the audio file in chunks through the `capture_audio()` method, which accepts sampling criteria as an `AudioCaptureParams` tuple.

With ASR from an audio file, the application creates a generator object to yield blocks of audio data from the file, with a minimum sample size defined in `AudioCaptureParams`.

MFCC features are extracted from each block based on criteria defined in the `MFCCParams` tuple. These extracted features constitute the input tensors for the model.

To interpret the inference result of the loaded network, the application passes the label dictionary defined in run_audio_file.py to a decoder and displays the result.
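
A minimal sketch of this set-up, using the parameter values from run_audio_file.py later in this change (the bare import paths assume the script's `sys.path` manipulation has made the common folder importable):

```python
import numpy as np
from audio_capture import AudioCaptureParams, capture_audio

# Model-specific sampling criteria: 16 kHz mono blocks of at least
# 47712 samples, overlapping by 31712 samples between blocks
capture_params = AudioCaptureParams(dtype=np.float32, overlap=31712,
                                    min_samples=47712, sampling_freq=16000, mono=True)

# capture_audio() returns a generator that yields one block per inference
buffer = capture_audio("speech.wav", capture_params)
for audio_data in buffer:
    ...  # extract MFCC features from the block and run inference
```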

### Creating a network

A PyArmNN application must import a graph from file using an appropriate parser. Arm NN provides parsers for various model file types, including TFLite and ONNX. These parsers are libraries for loading neural networks of various formats into the Arm NN runtime.

Arm NN supports optimized execution on multiple CPU, GPU, and Ethos-N devices. Before executing a graph, the application must select the appropriate device context by using `IRuntime()` to create a runtime context with default options. We can optimize the imported graph by specifying a list of backends in order of preference and implementing backend-specific optimizations. Each backend is identified by a unique string; for example, CpuAcc, GpuAcc, and CpuRef represent the accelerated CPU backend, the accelerated GPU backend, and the CPU reference kernels respectively.

Arm NN splits the entire graph into subgraphs based on these backends. Each subgraph is then optimized, and the corresponding subgraph in the original graph is substituted with its optimized version.

The `Optimize()` function optimizes the graph for inference, then `LoadNetwork()` loads the optimized network onto the compute device. The `LoadNetwork()` function also creates the backend-specific workloads for the layers and a backend-specific workload factory.

Parsers extract the input information for the network. The `GetSubgraphInputTensorNames()` function extracts all the input names and the `GetNetworkInputBindingInfo()` function obtains the input binding information of the graph. The input binding information contains all the essential information about the input. This information is a tuple consisting of integer identifiers for bindable layers and tensor information (data type, quantization info, dimension count, total elements).

Similarly, we can get the output binding information for an output layer by using the parser to retrieve output tensor names and calling the `GetNetworkOutputBindingInfo()` function.
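
To make that tuple layout concrete, here is a rough sketch of how the binding information is consumed elsewhere in these samples (both lines appear in the object detection and audio code in this change; treating element `[1]` as the tensor information is inferred from that usage):

```python
# The second element of the binding info tuple carries the tensor information,
# e.g. the model input shape (as in yolo_resize_factor() earlier in this change)
model_height, model_width = list(input_binding_info[1].GetShape())[1:3]

# The same tuple is what make_input_tensors() expects when building input tensors
input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor])
```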

For this application, the main point of contact with PyArmNN is through the `ArmnnNetworkExecutor` class, which will handle the network creation step for you.

```python
# common/network_executor.py
# The provided wav2letter model is in .tflite format so we use TfLiteParser() to import the graph
if ext == '.tflite':
    parser = ann.ITfLiteParser()
network = parser.CreateNetworkFromBinaryFile(model_file)
...
# Optimize the network for the list of preferred backends
opt_network, messages = ann.Optimize(
    network, preferred_backends, self.runtime.GetDeviceSpec(), ann.OptimizerOptions()
)
# Load the optimized network onto the runtime device
self.network_id, _ = self.runtime.LoadNetwork(opt_network)
# Get the input and output binding information
self.input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])
self.output_binding_info = parser.GetNetworkOutputBindingInfo(graph_id, output_name)
```

### Automatic speech recognition pipeline

Mel-frequency Cepstral Coefficients (MFCCs, [see Wikipedia](https://en.wikipedia.org/wiki/Mel-frequency_cepstrum)) are extracted based on criteria defined in the MFCCParams tuple and the associated `MFCC` class. MFCCs are the result of computing the dot product of the Discrete Cosine Transform (DCT) matrix and the log of the Mel energy.

The `MFCC` class is used in conjunction with the `AudioPreProcessor` class to extract and process MFCC features from a given audio frame.

After all the MFCCs needed for an inference have been extracted from the audio data, we convolve them with 1-dimensional Savitzky-Golay filters to compute the first and second MFCC derivatives with respect to time. The MFCCs and the derivatives constitute the input tensors that will be classified by an `ArmnnNetworkExecutor` object.

```python
# mfcc.py & wav2lettermfcc.py
# Extract MFCC features
log_mel_energy = np.maximum(log_mel_energy, log_mel_energy.max() - top_db)
mfcc_feats = np.dot(self.__dct_matrix, log_mel_energy)
...
# Compute first and second derivatives (delta and delta-delta respectively) by passing a
# Savitzky-Golay filter as a 1D convolution over the features
for i in range(features.shape[1]):
    idelta = np.convolve(features[:, i], self.savgol_order1_coeffs, 'same')
    mfcc_delta_np[:, i] = idelta
    ideltadelta = np.convolve(features[:, i], self.savgol_order2_coeffs, 'same')
    mfcc_delta2_np[:, i] = ideltadelta
```

```python
# audio_utils.py
# Quantize the input data and create input tensors with PyArmNN
input_tensor = quantize_input(input_tensor, input_binding_info)
input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor])
```

Note: `ArmnnNetworkExecutor` has already created the output tensors for you.

After creating the workload tensors, the compute device performs inference for the loaded network by using the `EnqueueWorkload()` function of the runtime context. Calling the `workload_tensors_to_ndarray()` function obtains the inference results as a list of ndarrays.

```python
# common/network_executor.py
status = runtime.EnqueueWorkload(net_id, input_tensors, self.output_tensors)
self.output_result = ann.workload_tensors_to_ndarray(self.output_tensors)
```

The output from the inference must be decoded to obtain the recognised characters from the speech. A simple greedy decoder classifies the results by taking the highest element of the output as a key for the labels dictionary. The value returned is a character which is appended to a list, and the list is filtered to remove unwanted characters. The produced string is displayed on the console.
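
A sketch of that greedy decoding step (the real implementation is the `decode()`/`filter_characters()` pair in audio_utils.py, shown later in this change):

```python
import numpy as np

def greedy_decode(model_output, labels):
    # Take the most likely character at each output step
    top1 = [labels[int(np.argmax(row))] for row in model_output]
    text = ""
    for i, char in enumerate(top1):
        if char == "$":                                  # drop the blank token
            continue
        if i + 1 < len(top1) and char == top1[i + 1]:    # collapse repeated characters
            continue
        text += char
    return text
```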

## Next steps

Having now gained a solid understanding of performing automatic speech recognition with PyArmNN, you are able to take control and create your own application. For your next steps we suggest first implementing your own network, which can be done by updating the parameters of `ModelParams` and `MfccParams` to match your custom model. The `ArmnnNetworkExecutor` class will handle the network optimisation and loading for you.

An important step towards improving the accuracy of the generated output sentences is to provide cleaner data to the network. This can be done by including additional preprocessing steps, such as noise reduction of your audio data.

In this application we used a greedy decoder to decode the integer-encoded output; however, better results can be achieved by implementing a beam search decoder. You may even try adding a language model at the end to correct any spelling mistakes the model may produce.
@@ -0,0 +1,82 @@
# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

"""Utilities for speech recognition apps."""

import numpy as np


def decode(model_output: np.ndarray, labels: dict) -> str:
    """Decodes the integer encoded results from inference into a string.

    Args:
        model_output: Results from running inference.
        labels: Dictionary of labels keyed on the classification index.

    Returns:
        Decoded string.
    """
    top1_results = [labels[np.argmax(row)] for row in model_output]
    return filter_characters(top1_results)


def filter_characters(results: list) -> str:
    """Filters unwanted and duplicate characters.

    Args:
        results: List of top 1 results from inference.

    Returns:
        Final output string to present to user.
    """
    text = ""
    for i in range(len(results)):
        if results[i] == "$":
            continue
        elif i + 1 < len(results) and results[i] == results[i + 1]:
            continue
        else:
            text += results[i]
    return text


def display_text(text: str):
    """Presents the results on the console.

    Args:
        text: Results of performing ASR on the input audio data.
    """
    print(text, sep="", end="", flush=True)


def decode_text(is_first_window, labels, output_result):
    """
    Slices the text appropriately depending on the window, and decodes for wav2letter output.
        * First run, take the left context and inner context.
        * Every other run, take the inner context.
    Stores the current right context and updates it for each inference. It will be used after the last inference.

    Args:
        is_first_window: Boolean to show if it is the first window we are running inference on
        labels: the label set
        output_result: the output from the inference
    Returns:
        current_r_context: the current right context
        text: the current text string, with the latest output decoded and appended
    """
    # For wav2letter with 148 output steps:
    # Left context is index 0-48, inner context 49-99, right context 100-147
    inner_context_start = 49
    inner_context_end = 99
    right_context_start = 100

    if is_first_window:
        # Since it's the first inference, keep the left context and inner context, and decode
        text = decode(output_result[0][0][0][0:inner_context_end], labels)
    else:
        # Only decode the inner context
        text = decode(output_result[0][0][0][inner_context_start:inner_context_end], labels)

    # Store the right context, we will need it after the last inference
    current_r_context = decode(output_result[0][0][0][right_context_start:], labels)
    return current_r_context, text
@@ -0,0 +1,5 @@
numpy>=1.19.2
soundfile>=0.10.3
pytest==6.2.4
pytest-allclose==1.0.0
sounddevice==0.4.2
@@ -0,0 +1,98 @@
# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

"""Automatic speech recognition with PyArmNN demo for processing audio clips to text."""

import sys
import os
import numpy as np

script_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(script_dir, '..', 'common'))

from argparse import ArgumentParser
from network_executor import ArmnnNetworkExecutor
from utils import prepare_input_tensors
from audio_capture import AudioCaptureParams, capture_audio
from audio_utils import decode_text, display_text
from wav2letter_mfcc import Wav2LetterMFCC, W2LAudioPreprocessor
from mfcc import MFCCParams

# Model Specific Labels
labels = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i', 9: 'j', 10: 'k', 11: 'l', 12: 'm',
          13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r', 18: 's', 19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y',
          25: 'z', 26: "'", 27: ' ', 28: '$'}


def parse_args():
    parser = ArgumentParser(description="ASR with PyArmNN")
    parser.add_argument(
        "--audio_file_path",
        required=True,
        type=str,
        help="Path to the audio file to perform ASR",
    )
    parser.add_argument(
        "--model_file_path",
        required=True,
        type=str,
        help="Path to ASR model to use",
    )
    parser.add_argument(
        "--preferred_backends",
        type=str,
        nargs="+",
        default=["CpuAcc", "CpuRef"],
        help="""List of backends in order of preference for optimizing
        subgraphs, falling back to the next backend in the list on unsupported
        layers. Defaults to [CpuAcc, CpuRef]""",
    )
    return parser.parse_args()


def main(args):
    # Read command line args
    audio_file = args.audio_file_path

    # Create the ArmNN inference runner
    network = ArmnnNetworkExecutor(args.model_file_path, args.preferred_backends)

    # Specify model specific audio data requirements
    audio_capture_params = AudioCaptureParams(dtype=np.float32, overlap=31712, min_samples=47712, sampling_freq=16000,
                                              mono=True)

    buffer = capture_audio(audio_file, audio_capture_params)

    # Extract features and create the preprocessor
    mfcc_params = MFCCParams(sampling_freq=16000, num_fbank_bins=128, mel_lo_freq=0, mel_hi_freq=8000,
                             num_mfcc_feats=13, frame_len=512, use_htk_method=False, n_fft=512)

    wmfcc = Wav2LetterMFCC(mfcc_params)
    preprocessor = W2LAudioPreprocessor(wmfcc, model_input_size=296, stride=160)
    current_r_context = ""
    is_first_window = True

    print("Processing Audio Frames...")
    for audio_data in buffer:
        # Prepare the input Tensors
        input_tensors = prepare_input_tensors(audio_data, network.input_binding_info, preprocessor)

        # Run inference
        output_result = network.run(input_tensors)

        # Slice and decode the text, and store the right context
        current_r_context, text = decode_text(is_first_window, labels, output_result)

        is_first_window = False

        display_text(text)

    print(current_r_context, flush=True)


if __name__ == "__main__":
    args = parse_args()
    main(args)
@@ -0,0 +1,24 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

import os
import ntpath

import urllib.request

import pytest

script_dir = os.path.dirname(__file__)

@pytest.fixture(scope="session")
def test_data_folder(request):
    """
    This fixture returns the path to the folder of test resources shared among the ASR tests.
    """

    data_dir = os.path.join(script_dir, "testdata")

    if not os.path.exists(data_dir):
        os.mkdir(data_dir)

    return data_dir
@@ -0,0 +1,29 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

import os

import numpy as np

from context import audio_utils

labels = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i', 9: 'j', 10: 'k', 11: 'l', 12: 'm',
          13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r', 18: 's', 19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y',
          25: 'z', 26: "'", 27: ' ', 28: '$'}


def test_labels(test_data_folder):
    assert len(labels) == 29
    assert labels[26] == "\'"
    assert labels[27] == r" "
    assert labels[28] == "$"


def test_decoder(test_data_folder):
    output_tensor = os.path.join(test_data_folder, "inference_output.npy")
    encoded = np.load(output_tensor)
    decoded_text = audio_utils.decode(encoded, labels)
    assert decoded_text == "my voice is my pass"
BIN arch/arm/ARMnn/python/pyarmnn/examples/speech_recognition/tests/testdata/inference_output.npy (vendored, new file; binary file not shown)
@@ -0,0 +1,91 @@
# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

import numpy as np
import os
import sys

script_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(script_dir, '..', 'common'))

from mfcc import MFCC, AudioPreprocessor


class Wav2LetterMFCC(MFCC):
    """Extends base MFCC class to provide Wav2Letter-specific MFCC requirements."""

    def __init__(self, mfcc_params):
        super().__init__(mfcc_params)

    def spectrum_calc(self, audio_data):
        return np.abs(np.fft.rfft(np.hanning(self.mfcc_params.frame_len + 1)[0:self.mfcc_params.frame_len] * audio_data,
                                  self.mfcc_params.n_fft)) ** 2

    def log_mel(self, mel_energy):
        mel_energy += 1e-10
        log_mel_energy = 10.0 * np.log10(mel_energy)
        top_db = 80.0
        return np.maximum(log_mel_energy, log_mel_energy.max() - top_db)

    def create_dct_matrix(self, num_fbank_bins, num_mfcc_feats):
        """
        Creates the Discrete Cosine Transform matrix to be used in the compute function.

        Args:
            num_fbank_bins: The number of filter bank bins
            num_mfcc_feats: the number of MFCC features

        Returns:
            the DCT matrix
        """
        dct_m = np.zeros(num_fbank_bins * num_mfcc_feats)
        for k in range(num_mfcc_feats):
            for n in range(num_fbank_bins):
                if k == 0:
                    dct_m[(k * num_fbank_bins) + n] = 2 * np.sqrt(1 / (4 * num_fbank_bins)) * np.cos(
                        (np.pi / num_fbank_bins) * (n + 0.5) * k)
                else:
                    dct_m[(k * num_fbank_bins) + n] = 2 * np.sqrt(1 / (2 * num_fbank_bins)) * np.cos(
                        (np.pi / num_fbank_bins) * (n + 0.5) * k)

        dct_m = np.reshape(dct_m, [self.mfcc_params.num_mfcc_feats, self.mfcc_params.num_fbank_bins])
        return dct_m

    def mel_norm(self, weight, right_mel, left_mel):
        """Overrides the parent class with ASR-specific weight normalisation."""
        enorm = 2.0 / (self.inv_mel_scale(right_mel, False) - self.inv_mel_scale(left_mel, False))
        return weight * enorm


class W2LAudioPreprocessor(AudioPreprocessor):

    def __init__(self, mfcc, model_input_size, stride):
        self.model_input_size = model_input_size
        self.stride = stride

        super().__init__(self, model_input_size, stride)
        # Savitzky-Golay differential filters
        self.savgol_order1_coeffs = np.array([6.66666667e-02, 5.00000000e-02, 3.33333333e-02,
                                              1.66666667e-02, -3.46944695e-18, -1.66666667e-02,
                                              -3.33333333e-02, -5.00000000e-02, -6.66666667e-02])

        self.savgol_order2_coeffs = np.array([0.06060606, 0.01515152, -0.01731602,
                                              -0.03679654, -0.04329004, -0.03679654,
                                              -0.01731602, 0.01515152, 0.06060606])
        self._mfcc_calc = mfcc

    def mfcc_delta_calc(self, features):
        """Overrides the parent class with ASR-specific MFCC derivative features."""
        mfcc_delta_np = np.zeros_like(features)
        mfcc_delta2_np = np.zeros_like(features)

        for i in range(features.shape[1]):
            idelta = np.convolve(features[:, i], self.savgol_order1_coeffs, 'same')
            mfcc_delta_np[:, i] = idelta
            ideltadelta = np.convolve(features[:, i], self.savgol_order2_coeffs, 'same')
            mfcc_delta2_np[:, i] = ideltadelta

        features = np.concatenate((self._normalize(features), self._normalize(mfcc_delta_np),
                                   self._normalize(mfcc_delta2_np)), axis=1)

        return features
arch/arm/ARMnn/python/pyarmnn/examples/tests/conftest.py (new file, 39 lines)
@@ -0,0 +1,39 @@
# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

import os
import ntpath

import urllib.request
import zipfile
import pytest

script_dir = os.path.dirname(__file__)


@pytest.fixture(scope="session")
def test_data_folder():
    """
    This fixture returns the path to the folder of test resources shared among all tests.
    """

    data_dir = os.path.join(script_dir, "testdata")
    if not os.path.exists(data_dir):
        os.mkdir(data_dir)
    files_to_download = ["https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/messi5.jpg",
                         "https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/basketball1.png",
                         "https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/Megamind.avi",
                         "https://github.com/ARM-software/ML-zoo/raw/master/models/object_detection/ssd_mobilenet_v1/tflite_uint8/ssd_mobilenet_v1.tflite",
                         "https://git.mlplatform.org/ml/ethos-u/ml-embedded-evaluation-kit.git/plain/resources/kws/samples/yes.wav",
                         "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-speech-sdk/master/sampledata/audiofiles/myVoiceIsMyPassportVerifyMe04.wav"
                         ]

    for file in files_to_download:
        path, filename = ntpath.split(file)
        file_path = os.path.join(data_dir, filename)
        if not os.path.exists(file_path):
            print("\nDownloading test file: " + file_path + "\n")
            urllib.request.urlretrieve(file, file_path)

    return data_dir
arch/arm/ARMnn/python/pyarmnn/examples/tests/context.py (new file, 22 lines)
@@ -0,0 +1,22 @@
# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

import os
import sys
import numpy as np

script_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(script_dir, '..'))

import common.cv_utils as cv_utils
import common.network_executor as network_executor
import common.utils as utils
import common.audio_capture as audio_capture
import common.mfcc as mfcc

import speech_recognition.wav2letter_mfcc as wav2letter_mfcc
import speech_recognition.audio_utils as audio_utils
@@ -0,0 +1,19 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

import os

from context import cv_utils
from context import utils


def test_get_source_encoding(test_data_folder):
    video_file = os.path.join(test_data_folder, "Megamind.avi")
    video, video_writer, frame_count = cv_utils.init_video_file_capture(video_file, "/tmp")
    assert cv_utils.get_source_encoding_int(video) == 1145656920


def test_read_existing_labels_file(test_data_folder):
    label_file = os.path.join(test_data_folder, "labelmap.txt")
    labels_map = utils.dict_labels(label_file)
    assert labels_map is not None
arch/arm/ARMnn/python/pyarmnn/examples/tests/test_mfcc.py (new file, 247 lines)
@@ -0,0 +1,247 @@
# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

import os
import numpy as np
import pytest
import collections

from context import mfcc
from context import wav2letter_mfcc
from context import audio_capture

# Elements relevant to MFCC filter bank & feature extraction
MFCC_TEST_PARAMS = collections.namedtuple('mfcc_test_params',
                                          ['algo_params', 'mfcc_constructor', 'audio_proc_constructor'])


def kws_test_params():
    kws_algo_params = mfcc.MFCCParams(sampling_freq=16000, num_fbank_bins=40, mel_lo_freq=20, mel_hi_freq=4000,
                                      num_mfcc_feats=10, frame_len=640, use_htk_method=True, n_fft=1024)
    return MFCC_TEST_PARAMS(kws_algo_params, mfcc.MFCC, mfcc.AudioPreprocessor)


def asr_test_params():
    asr_algo_params = mfcc.MFCCParams(sampling_freq=16000, num_fbank_bins=128, mel_lo_freq=0, mel_hi_freq=8000,
                                      num_mfcc_feats=13, frame_len=512, use_htk_method=False, n_fft=512)
    return MFCC_TEST_PARAMS(asr_algo_params, wav2letter_mfcc.Wav2LetterMFCC, wav2letter_mfcc.W2LAudioPreprocessor)


def kws_cap_params():
    return audio_capture.AudioCaptureParams(dtype=np.float32, overlap=0, min_samples=16000, sampling_freq=16000,
                                            mono=True)


def asr_cap_params():
    return audio_capture.AudioCaptureParams(dtype=np.float32, overlap=31712, min_samples=47712,
                                            sampling_freq=16000, mono=True)


@pytest.fixture()
def audio_data(test_data_folder, file, audio_cap_params):
    audio_file = os.path.join(test_data_folder, file)
    capture = audio_capture.capture_audio(audio_file, audio_cap_params)
    yield next(capture)


@pytest.mark.parametrize("file", ["yes.wav", "myVoiceIsMyPassportVerifyMe04.wav"])
@pytest.mark.parametrize("audio_cap_params", [kws_cap_params(), asr_cap_params()])
def test_audio_file(audio_data, test_data_folder, file, audio_cap_params):
    assert audio_data.shape == (audio_cap_params.min_samples,)
    assert audio_data.dtype == audio_cap_params.dtype


@pytest.mark.parametrize("mfcc_test_params, test_out", [(kws_test_params(), 25.470010570730597),
                                                        (asr_test_params(), 0.24)])
def test_mel_scale_function(mfcc_test_params, test_out):
    mfcc_inst = mfcc_test_params.mfcc_constructor(mfcc_test_params.algo_params)
    mel = mfcc_inst.mel_scale(16, mfcc_test_params.algo_params.use_htk_method)
    assert np.isclose(mel, test_out)


@pytest.mark.parametrize("mfcc_test_params, test_out", [(kws_test_params(), 10.008767240008943),
                                                        (asr_test_params(), 1071.170287494467)])
def test_inverse_mel_scale_function(mfcc_test_params, test_out):
    mfcc_inst = mfcc_test_params.mfcc_constructor(mfcc_test_params.algo_params)
    mel = mfcc_inst.inv_mel_scale(16, mfcc_test_params.algo_params.use_htk_method)
    assert np.isclose(mel, test_out)


mel_filter_test_data_kws = {0: [0.33883214, 0.80088392, 0.74663128, 0.30332531],
                            1: [0.25336872, 0.69667469, 0.86883317, 0.44281119, 0.02493546],
                            2: [0.13116683, 0.55718881, 0.97506454, 0.61490026, 0.21241678],
                            5: [0.32725038, 0.69579596, 0.9417706, 0.58524989, 0.23445207],
                            -1: [0.02433275, 0.10371618, 0.1828123, 0.26162319, 0.34015089, 0.41839743,
                                 0.49636481, 0.57405503, 0.65147004, 0.72861179, 0.8054822, 0.88208318,
                                 0.95841659, 0.96551568, 0.88971181, 0.81416996, 0.73888833, 0.66386514,
                                 0.58909861, 0.514587, 0.44032856, 0.3663216, 0.29256441, 0.21905531,
                                 0.14579264, 0.07277474]}

mel_filter_test_data_asr = {0: [0.02837754],
                            1: [0.01438901, 0.01398853],
                            2: [0.02877802],
                            5: [0.01478948, 0.01358806],
                            -1: [4.82151203e-05, 9.48791110e-04, 1.84569875e-03, 2.73896782e-03,
                                 3.62862771e-03, 4.51470746e-03, 5.22215439e-03, 4.34314914e-03,
                                 3.46763895e-03, 2.59559614e-03, 1.72699334e-03, 8.61803536e-04]}


@pytest.mark.parametrize("mfcc_test_params, test_out",
                         [(kws_test_params(), mel_filter_test_data_kws),
                          (asr_test_params(), mel_filter_test_data_asr)])
def test_create_mel_filter_bank(mfcc_test_params, test_out):
    mfcc_inst = mfcc_test_params.mfcc_constructor(mfcc_test_params.algo_params)
    mel_filter_bank = mfcc_inst.create_mel_filter_bank()
    assert len(mel_filter_bank) == mfcc_test_params.algo_params.num_fbank_bins
    for indx, data in test_out.items():
        assert np.allclose(mel_filter_bank[indx], data)


mfcc_test_data_kws = (-22.671347398982626, -0.6161543999707211, 2.072326974167832,
                      0.5813741475362223, 1.0165529747334272, 0.8581560719988703,
                      0.4603911069624896, 0.03392820944377398, 1.1651093266902361,
                      0.007200025869960908)

mfcc_test_data_asr = (-735.46345398, 69.50331943, 16.39159347, 22.74874819, 24.84782893,
                      10.67559303, 12.82828618, -3.51084271, 4.66633677, 10.20079095, 11.34782948, 3.90499354,
                      9.32322384)


@pytest.mark.parametrize("mfcc_test_params, test_out, file, audio_cap_params",
                         [(kws_test_params(), mfcc_test_data_kws, "yes.wav", kws_cap_params()),
                          (asr_test_params(), mfcc_test_data_asr, "myVoiceIsMyPassportVerifyMe04.wav",
                           asr_cap_params())])
def test_mfcc_compute_first_frame(audio_data, mfcc_test_params, test_out, file, audio_cap_params):
    audio_data = np.array(audio_data)[0:mfcc_test_params.algo_params.frame_len]
    mfcc_inst = mfcc_test_params.mfcc_constructor(mfcc_test_params.algo_params)
    mfcc_feats = mfcc_inst.mfcc_compute(audio_data)
    assert np.allclose((mfcc_feats[0:mfcc_test_params.algo_params.num_mfcc_feats]), test_out)


extract_test_data_kws = {0: [-2.2671347e+01, -6.1615437e-01, 2.0723269e+00, 5.8137417e-01,
                             1.0165529e+00, 8.5815609e-01, 4.6039110e-01, 3.3928208e-02,
                             1.1651093e+00, 7.2000260e-03],
                         1: [-23.488806, -1.1687667, 3.0548365, 1.5129884, 1.4142203,
                             0.6869772, 1.1875846, 0.5743369, 1.202258, -0.12133602],
                         2: [-23.909292, -1.5186096, 1.8721082, 0.7378916, 0.44974303,
                             0.17609395, 0.5183161, 0.37109664, 0.14186797, 0.58400506],
                         -1: [-23.752186, -0.1796912, 1.9514247, 0.32554424, 1.8425112,
                              0.8763608, 0.78326845, 0.27808753, 0.73788685, 0.30338883]}

extract_test_data_asr = {0: [-4.98830318e+00, 6.86444461e-01, 3.12024504e-01, 3.56840312e-01,
                             3.71638149e-01, 2.71728605e-01, 2.86904365e-01, 1.71718955e-01,
                             2.29365349e-01, 2.68381387e-01, 2.76467651e-01, 2.23998129e-01,
                             2.62194842e-01, -1.48247385e+01, 1.21875501e+00, 4.20235842e-01,
                             5.39400637e-01, 6.09882712e-01, 1.68513224e-01, 3.75330061e-01,
                             8.57576132e-02, 1.92831963e-01, 1.41814977e-01, 1.57615796e-01,
                             7.19076321e-02, 1.98729336e-01, 3.92199278e+00, -5.76856315e-01,
                             1.17938723e-02, -9.25096497e-02, -3.59488949e-02, 1.13284402e-03,
                             1.51282102e-01, 1.13404110e-01, -8.69824737e-02, -1.48449212e-01,
                             -1.24230251e-01, -1.90728232e-01, -5.37525006e-02],
                         1: [-4.96694946e+00, 6.69411421e-01, 2.86189795e-01, 3.65071595e-01,
                             3.92671198e-01, 2.44258150e-01, 2.52177566e-01, 2.16024980e-01,
                             2.79812217e-01, 2.79687315e-01, 2.95228422e-01, 2.83991724e-01,
                             2.46358261e-01, -1.33618221e+01, 1.08920455e+00, 3.88707787e-01,
                             5.05674303e-01, 6.08285785e-01, 1.68113053e-01, 3.54529470e-01,
                             6.68609440e-02, 1.52882755e-01, 6.89579248e-02, 1.18375972e-01,
                             5.86742274e-02, 1.15678251e-01, 1.07892036e+01, -1.07193100e+00,
                             -2.18140319e-01, -3.35950345e-01, -2.57241666e-01, -5.54431602e-02,
                             -8.38544443e-02, -5.79114584e-03, -2.23973781e-01, -2.91451365e-01,
                             -2.11069033e-01, -1.90297231e-01, -2.76504964e-01],
                         2: [-4.98664522e+00, 6.54802263e-01, 3.70355755e-01, 4.06837821e-01,
                             4.05175537e-01, 2.29149669e-01, 2.83312678e-01, 2.17573136e-01,
                             3.07824671e-01, 2.48388007e-01, 2.25399241e-01, 2.52003014e-01,
                             2.83968121e-01, -1.05043650e+01, 7.91533887e-01, 3.11546475e-01,
                             4.36079264e-01, 5.93271911e-01, 2.02480286e-01, 3.24254721e-01,
                             6.29674867e-02, 9.67641100e-02, -1.62826646e-02, 5.47595806e-02,
                             2.90475693e-02, 2.62522381e-02, 1.38787737e+01, -1.32597208e+00,
                             -3.73900205e-01, -4.38065380e-01, -3.05983245e-01, 1.14390980e-02,
                             -2.10821658e-01, -6.22789040e-02, -2.88273603e-01, -3.29794526e-01,
                             -2.43764088e-01, -1.70954674e-01, -3.65193188e-01],
                         -1: [-2.1894817, 1.583355, -0.45024827, 0.11657667, 0.08940444, 0.09041209,
                              0.2003613, 0.11800499, 0.18838657, 0.29271516, 0.22758003, 0.10634928,
                              -0.04019014, 7.203311, -2.414309, 0.28750962, -0.24222863, 0.04680864,
                              -0.12129474, 0.18059334, 0.06250379, 0.11363743, -0.2561094, -0.08132717,
                              -0.08500769, 0.18916495, 1.3529671, -3.7919693, 1.937804, 0.6845761,
                              0.15381853, 0.41106734, -0.28207013, 0.2195526, 0.06716935, -0.02886542,
                              -0.22860551, 0.24788341, 0.63940096]}


@pytest.mark.parametrize("mfcc_test_params, model_input_size, stride, min_samples, file, audio_cap_params, test_out",
                         [(kws_test_params(), 49, 320, 16000, "yes.wav", kws_cap_params(),
                           extract_test_data_kws),
                          (asr_test_params(), 296, 160, 47712, "myVoiceIsMyPassportVerifyMe04.wav", asr_cap_params(),
                           extract_test_data_asr)])
def test_feat_extraction_full_sized_input(audio_data,
                                          mfcc_test_params,
                                          model_input_size,
                                          stride,
                                          min_samples, file, audio_cap_params,
                                          test_out):
    """
    Test out values were gathered by printing the MFCC features collected during the first full inference
    on the test wav files. Note the extract_features() function simply calls the mfcc_compute() from the previous
    test but feeds in enough samples for an inference rather than a single frame. It also computes the 1st & 2nd
    derivative features, hence the shape (13*3 = 39).
    Specific model_input_size and stride parameters are also required as additional arguments.
    """
    audio_data = np.array(audio_data)
    # Pad with zeros to ensure min_samples for inference
    audio_data.resize(min_samples)
    mfcc_inst = mfcc_test_params.mfcc_constructor(mfcc_test_params.algo_params)
    preprocessor = mfcc_test_params.audio_proc_constructor(mfcc_inst, model_input_size, stride)
    # extract_features passes the audio data to mfcc_compute frame by frame and concatenates the results
    input_tensor = preprocessor.extract_features(audio_data)
    assert len(input_tensor) == model_input_size
    for indx, data in test_out.items():
        assert np.allclose(input_tensor[indx], data)


# Expected contents of input tensors for inference on a silent wav file
extract_features_zeros_kws = {0: [-2.05949466e+02, -4.88498131e-15, 8.15428020e-15, -5.77315973e-15,
                                  7.03142511e-15, -1.11022302e-14, 2.18015108e-14, -1.77635684e-15,
                                  1.06581410e-14, 2.75335310e-14],
                              -1: [-2.05949466e+02, -4.88498131e-15, 8.15428020e-15, -5.77315973e-15,
                                   7.03142511e-15, -1.11022302e-14, 2.18015108e-14, -1.77635684e-15,
                                   1.06581410e-14, 2.75335310e-14]}

extract_features_zeros_asr = {
    0: [-3.46410162e+00, 2.88675135e-01, 2.88675135e-01, 2.88675135e-01,
        2.88675135e-01, 2.88675135e-01, 2.88675135e-01, 2.88675135e-01,
        2.88675135e-01, 2.88675135e-01, 2.88675135e-01, 2.88675135e-01,
        2.88675135e-01, 2.79662980e+01, 1.75638694e-15, -9.41313626e-16,
        9.66012817e-16, -1.23221521e-15, 1.75638694e-15, -1.59035349e-15,
        2.41503204e-15, -1.64798493e-15, 4.39096735e-16, -4.95356004e-16,
        -2.19548368e-16, -3.55668355e-15, 8.19843971e+00, -4.28340672e-02,
        -4.28340672e-02, -4.28340672e-02, -4.28340672e-02, -4.28340672e-02,
        -4.28340672e-02, -4.28340672e-02, -4.28340672e-02, -4.28340672e-02,
        -4.28340672e-02, -4.28340672e-02, -4.28340672e-02],
    -1: [-3.46410162e+00, 2.88675135e-01, 2.88675135e-01, 2.88675135e-01,
         2.88675135e-01, 2.88675135e-01, 2.88675135e-01, 2.88675135e-01,
         2.88675135e-01, 2.88675135e-01, 2.88675135e-01, 2.88675135e-01,
         2.88675135e-01, 2.79662980e+01, 1.75638694e-15, -9.41313626e-16,
         9.66012817e-16, -1.23221521e-15, 1.75638694e-15, -1.59035349e-15,
         2.41503204e-15, -1.64798493e-15, 4.39096735e-16, -4.95356004e-16,
         -2.19548368e-16, -3.55668355e-15, 8.19843971e+00, -4.28340672e-02,
         -4.28340672e-02, -4.28340672e-02, -4.28340672e-02, -4.28340672e-02,
         -4.28340672e-02, -4.28340672e-02, -4.28340672e-02, -4.28340672e-02,
         -4.28340672e-02, -4.28340672e-02, -4.28340672e-02]}


@pytest.mark.parametrize("mfcc_test_params, model_input_size, stride, min_samples, test_out",
                         [(kws_test_params(), 49, 320, 16000, extract_features_zeros_kws),
                          (asr_test_params(), 296, 160, 47712, extract_features_zeros_asr)])
def test_feat_extraction_full_sized_input_zeros(mfcc_test_params, model_input_size, stride, min_samples, test_out):
    audio_data = np.zeros(min_samples).astype(np.float32)
    mfcc_inst = mfcc_test_params.mfcc_constructor(mfcc_test_params.algo_params)

    preprocessor = mfcc_test_params.audio_proc_constructor(mfcc_inst, model_input_size,
                                                           stride)
    input_tensor = preprocessor.extract_features(audio_data)
    assert len(input_tensor) == model_input_size
    for indx, data in test_out.items():
        # Element 14 of the feature extraction vector differs minutely during
        # inference on a silent wav file compared to an array of 0's.
        # Workarounds were to skip this sample or add a large tolerance argument (atol=10).
        assert np.allclose(input_tensor[indx][0:13], data[0:13])
        assert np.allclose(input_tensor[indx][15:], data[15:])
@@ -0,0 +1,24 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

import os

import cv2

from context import network_executor
from context import cv_utils


def test_execute_network(test_data_folder):
    model_path = os.path.join(test_data_folder, "ssd_mobilenet_v1.tflite")
    backends = ["CpuAcc", "CpuRef"]

    executor = network_executor.ArmnnNetworkExecutor(model_path, backends)
    img = cv2.imread(os.path.join(test_data_folder, "messi5.jpg"))
    input_tensors = cv_utils.preprocess(img, executor.input_binding_info, True)

    output_result = executor.run(input_tensors)

    # Ensure it detects a person
    classes = output_result[1]
    assert classes[0][0] == 0
arch/arm/ARMnn/python/pyarmnn/examples/tests/testdata/labelmap.txt (vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
person
motorcycle
airplane
bicycle
train
boat
truck
bus

@@ -0,0 +1,47 @@
#!/usr/bin/env python3
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
"""Downloads and extracts resources for unit tests.

It is mandatory to run this script prior to running unit tests. Resources are stored as a tar.gz or a tar.bz2
archive and extracted into the test/testdata/shared folder.
"""

import tarfile
import requests
import os
import uuid

SCRIPTS_DIR = os.path.dirname(os.path.realpath(__file__))
EXTRACT_DIR = os.path.join(SCRIPTS_DIR, "..", "test")
ARCHIVE_URL = "https://snapshots.linaro.org/components/pyarmnn-tests/pyarmnn_testdata_201100_20201022.tar.bz2"


def download_resources(url, save_path):
    # Download the archive - only tar.gz and tar.bz2 are supported
    print("Downloading '{}'".format(url))
    temp_filename = str(uuid.uuid4())
    if url.endswith(".tar.bz2"):
        temp_filename += ".tar.bz2"
    elif url.endswith(".tar.gz"):
        temp_filename += ".tar.gz"
    else:
        raise RuntimeError("Unsupported file.")
    try:
        r = requests.get(url, stream=True)
    except requests.exceptions.RequestException as e:
        raise RuntimeError("Unable to download file: {}".format(e))
    file_path = os.path.join(save_path, temp_filename)
    with open(file_path, 'wb') as f:
        f.write(r.content)

    # Extract and delete the temporary file
    with tarfile.open(file_path, "r:bz2" if temp_filename.endswith(".tar.bz2") else "r:gz") as tar:
        print("Extracting '{}'".format(file_path))
        tar.extractall(save_path)
    if os.path.exists(file_path):
        print("Removing '{}'".format(file_path))
        os.remove(file_path)


download_resources(ARCHIVE_URL, EXTRACT_DIR)
arch/arm/ARMnn/python/pyarmnn/scripts/generate_docs.py (new file, 52 lines)
@@ -0,0 +1,52 @@
#!/usr/bin/env python3
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
"""Generate PyArmNN documentation."""

import os
import tarfile

import pyarmnn as ann
import shutil

from typing import List, Union

from pdoc.cli import main


def __copy_file_to_dir(file_paths: Union[List[str], str], target_dir_path: str):
    file_paths = [] + file_paths

    if not (os.path.exists(target_dir_path) and os.path.isdir(target_dir_path)):
        os.makedirs(target_dir_path)

    for file_path in file_paths:
        if not (os.path.exists(file_path) and os.path.isfile(file_path)):
            raise RuntimeError('Not a file: {}'.format(file_path))

        file_name = os.path.basename(file_path)
        shutil.copyfile(file_path, os.path.join(str(target_dir_path), file_name))


def copy_doc_images():
    __copy_file_to_dir(file_paths=['../../docs/pyarmnn.png'],
                       target_dir_path='docs')


def archive_docs(path, version):
    output_filename = f'pyarmnn_docs-{version}.tar'

    with tarfile.open(output_filename, "w") as tar:
        tar.add(path)


if __name__ == "__main__":
    with open('./README.md', 'r') as readme_file:
        top_level_pyarmnn_doc = ''.join(readme_file.readlines())
        ann.__doc__ = top_level_pyarmnn_doc

    main()

    copy_doc_images()
    archive_docs('./docs', ann.__version__)
arch/arm/ARMnn/python/pyarmnn/scripts/requirements.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
requests==2.23.0
pdoc3==0.6.3
arch/arm/ARMnn/python/pyarmnn/src/pyarmnn/__init__.py (new file, 141 lines)
@@ -0,0 +1,141 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import inspect
import sys
import logging

from ._generated.pyarmnn_version import GetVersion, GetMajorVersion, GetMinorVersion

# Parsers

try:
    from ._generated.pyarmnn_onnxparser import IOnnxParser
except ImportError as err:
    logger = logging.getLogger(__name__)
    message = "Your ArmNN library instance does not support Onnx models parser functionality. "
    logger.warning("%s Skipped IOnnxParser import.", message)
    logger.debug(str(err))

    def IOnnxParser():
        """In case people try importing without having Arm NN built with this parser."""
        raise RuntimeError(message)

try:
    from ._generated.pyarmnn_tfliteparser import ITfLiteParser, TfLiteParserOptions
except ImportError as err:
    logger = logging.getLogger(__name__)
    message = "Your ArmNN library instance does not support TF lite models parser functionality. "
    logger.warning("%s Skipped ITfLiteParser import.", message)
    logger.debug(str(err))

    def ITfLiteParser():
        """In case people try importing without having Arm NN built with this parser."""
        raise RuntimeError(message)

try:
    from ._generated.pyarmnn_deserializer import IDeserializer
except ImportError as err:
    logger = logging.getLogger(__name__)
    message = "Your ArmNN library instance does not have an armnn models parser functionality. "
    logger.warning("%s Skipped IDeserializer import.", message)
    logger.debug(str(err))

    def IDeserializer():
        """In case people try importing without having ArmNN built with this parser."""
        raise RuntimeError(message)

# Network
from ._generated.pyarmnn import Optimize, OptimizerOptions, IOptimizedNetwork, IInputSlot, \
    IOutputSlot, IConnectableLayer, INetwork

# Backend
from ._generated.pyarmnn import BackendId
from ._generated.pyarmnn import IDeviceSpec
from ._generated.pyarmnn import BackendOptions, BackendOption

# Tensors
from ._generated.pyarmnn import TensorInfo, TensorShape

# Runtime
from ._generated.pyarmnn import IRuntime, CreationOptions, INetworkProperties

# Profiler
from ._generated.pyarmnn import IProfiler

# Types
from ._generated.pyarmnn import DataType_Float16, DataType_Float32, DataType_QAsymmU8, DataType_Signed32, \
    DataType_Boolean, DataType_QSymmS16, DataType_QSymmS8, DataType_QAsymmS8, ShapeInferenceMethod_ValidateOnly, \
    ShapeInferenceMethod_InferAndValidate
from ._generated.pyarmnn import DataLayout_NCHW, DataLayout_NHWC, DataLayout_NCDHW, DataLayout_NDHWC
from ._generated.pyarmnn import MemorySource_Malloc, MemorySource_Undefined, MemorySource_DmaBuf, \
    MemorySource_DmaBufProtected
from ._generated.pyarmnn import ProfilingDetailsMethod_Undefined, ProfilingDetailsMethod_DetailsWithEvents, \
    ProfilingDetailsMethod_DetailsOnly

from ._generated.pyarmnn import ActivationFunction_Abs, ActivationFunction_BoundedReLu, ActivationFunction_LeakyReLu, \
    ActivationFunction_Linear, ActivationFunction_ReLu, ActivationFunction_Sigmoid, ActivationFunction_SoftReLu, \
    ActivationFunction_Sqrt, ActivationFunction_Square, ActivationFunction_TanH, ActivationDescriptor
from ._generated.pyarmnn import ArgMinMaxFunction_Max, ArgMinMaxFunction_Min, ArgMinMaxDescriptor
from ._generated.pyarmnn import BatchNormalizationDescriptor, BatchToSpaceNdDescriptor
from ._generated.pyarmnn import ChannelShuffleDescriptor, ComparisonDescriptor, ComparisonOperation_Equal, \
    ComparisonOperation_Greater, ComparisonOperation_GreaterOrEqual, ComparisonOperation_Less, \
    ComparisonOperation_LessOrEqual, ComparisonOperation_NotEqual
from ._generated.pyarmnn import UnaryOperation_Abs, UnaryOperation_Exp, UnaryOperation_Sqrt, UnaryOperation_Rsqrt, \
    UnaryOperation_Neg, ElementwiseUnaryDescriptor
from ._generated.pyarmnn import LogicalBinaryOperation_LogicalAnd, LogicalBinaryOperation_LogicalOr, \
    LogicalBinaryDescriptor
from ._generated.pyarmnn import Convolution2dDescriptor, Convolution3dDescriptor, DepthToSpaceDescriptor, \
    DepthwiseConvolution2dDescriptor, DetectionPostProcessDescriptor, FakeQuantizationDescriptor, FillDescriptor, \
    FullyConnectedDescriptor, GatherDescriptor, InstanceNormalizationDescriptor, LstmDescriptor, \
    L2NormalizationDescriptor, MeanDescriptor
from ._generated.pyarmnn import NormalizationAlgorithmChannel_Across, NormalizationAlgorithmChannel_Within, \
    NormalizationAlgorithmMethod_LocalBrightness, NormalizationAlgorithmMethod_LocalContrast, NormalizationDescriptor
from ._generated.pyarmnn import PaddingMode_Constant, PaddingMode_Reflect, PaddingMode_Symmetric, PadDescriptor
from ._generated.pyarmnn import PermutationVector, PermuteDescriptor
from ._generated.pyarmnn import OutputShapeRounding_Ceiling, OutputShapeRounding_Floor, \
    PaddingMethod_Exclude, PaddingMethod_IgnoreValue, PoolingAlgorithm_Average, PoolingAlgorithm_L2, \
    PoolingAlgorithm_Max, Pooling2dDescriptor
from ._generated.pyarmnn import ReduceDescriptor, ReduceOperation_Prod, ReduceOperation_Max, ReduceOperation_Mean, \
    ReduceOperation_Min, ReduceOperation_Sum
from ._generated.pyarmnn import ResizeMethod_Bilinear, ResizeMethod_NearestNeighbor, ResizeDescriptor, \
    ReshapeDescriptor, SliceDescriptor, SpaceToBatchNdDescriptor, SpaceToDepthDescriptor, StandInDescriptor, \
    StackDescriptor, StridedSliceDescriptor, SoftmaxDescriptor, TransposeConvolution2dDescriptor, \
    TransposeDescriptor, SplitterDescriptor
from ._generated.pyarmnn import ConcatDescriptor, CreateDescriptorForConcatenation

from ._generated.pyarmnn import LstmInputParams, QuantizedLstmInputParams

# Public API
# Quantization
from ._quantization.quantize_and_dequantize import quantize, dequantize

# Tensor
from ._tensor.tensor import Tensor
from ._tensor.const_tensor import ConstTensor
from ._tensor.workload_tensors import make_input_tensors, make_output_tensors, workload_tensors_to_ndarray

# Utilities
from ._utilities.profiling_helper import ProfilerData, get_profiling_data

from ._version import __version__, __arm_ml_version__

ARMNN_VERSION = GetVersion()


def __check_version():
    from ._version import check_armnn_version
    check_armnn_version(ARMNN_VERSION)


__check_version()

__all__ = []

__private_api_names = ['__check_version']

for name, obj in inspect.getmembers(sys.modules[__name__]):
    if inspect.isclass(obj) or inspect.isfunction(obj):
        if name not in __private_api_names:
            __all__.append(name)
@ -0,0 +1,2 @@
|
||||
# Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
# SPDX-License-Identifier: MIT
|
@ -0,0 +1,4 @@
|
||||
# Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
from .quantize_and_dequantize import quantize, dequantize
|
@ -0,0 +1,74 @@
|
||||
# Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
# SPDX-License-Identifier: MIT
|
||||
"""
|
||||
This file contains functions relating to quantizing and dequantizing values.
|
||||
"""
|
||||
from .._generated.pyarmnn import Quantize_uint8_t, Quantize_int8_t, Quantize_int16_t, Quantize_int32_t, \
|
||||
Dequantize_uint8_t, Dequantize_int8_t, Dequantize_int16_t, Dequantize_int32_t
|
||||
|
||||
__DTYPE_TO_QUANTIZE_FUNCTION = {
|
||||
'uint8': Quantize_uint8_t,
|
||||
'int8': Quantize_int8_t,
|
||||
'int16': Quantize_int16_t,
|
||||
'int32': Quantize_int32_t
|
||||
}
|
||||
|
||||
__DTYPE_TO_DEQUANTIZE_FUNCTION = {
|
||||
'uint8': ((0, 255), Dequantize_uint8_t),
|
||||
'int8': ((-128, 127), Dequantize_int8_t),
|
||||
'int16': ((-32768, 32767), Dequantize_int16_t),
|
||||
'int32': ((-2147483648, 2147483647), Dequantize_int32_t)
|
||||
}
|
||||
|
||||
|
||||
def quantize(value: float, scale: float, offset: int, target_dtype: str) -> int:
|
||||
"""Quantize the given value to the given target datatype using Arm NN.
|
||||
|
||||
This function can be used to convert a 32-bit floating point value into 8/16/32-bit signed
|
||||
integer or 8-bit unsigned integer values.
|
||||
|
||||
Args:
|
||||
value (float): The value to be quantized.
|
||||
scale (float): A numeric constant that the value is multiplied by.
|
||||
offset (int): A 'zero-point' used to 'shift' the integer range.
|
||||
target_dtype (str): The target data type. Supported values: 'uint8', 'int8', 'int16', 'int32'.
|
||||
|
||||
Returns:
|
||||
int: A quantized 8-bit unsigned integer value or 8/16/32-bit signed integer value.
|
||||
"""
|
||||
|
||||
if target_dtype not in __DTYPE_TO_QUANTIZE_FUNCTION:
|
||||
raise ValueError("""Unexpected target datatype {} given.
|
||||
Armnn currently supports quantization to {} values.""".format(target_dtype, list(__DTYPE_TO_QUANTIZE_FUNCTION.keys())))
|
||||
|
||||
return __DTYPE_TO_QUANTIZE_FUNCTION[target_dtype](float(value), scale, offset)
|
||||
|
||||
|
||||
def dequantize(value: int, scale: float, offset: float, from_dtype: str) -> float:
|
||||
"""Dequantize the given value from the given datatype using Arm NN.
|
||||
|
||||
This function can be used to convert an 8-bit unsigned integer value or 8/16/32-bit signed
|
||||
integer value into a 32-bit floating point value. Typically used when decoding an
|
||||
output value from an output tensor on a quantized model.
|
||||
|
||||
Args:
|
||||
value (int): The value to be dequantized. The value may also be a numpy numeric type.
|
||||
scale (float): A numeric constant that the value is multiplied by.
|
||||
offset (float): A 'zero-point' used to 'shift' the integer range.
|
||||
from_dtype (str): The data type 'value' represents. Supported values: 'uint8', 'int8', 'int16', 'int32'.
|
||||
|
||||
Returns:
|
||||
float: A dequantized 32-bit floating-point value.
|
||||
"""
|
||||
|
||||
# __DTYPE_TO_DEQUANTIZE_FUNCTION maps each datatype to its valid value range and dequantize function.
|
||||
if from_dtype not in __DTYPE_TO_DEQUANTIZE_FUNCTION:
|
||||
raise ValueError("""Unexpected value datatype {} given.
|
||||
Armnn currently supports dequantization from {} values.""".format(from_dtype, list(__DTYPE_TO_DEQUANTIZE_FUNCTION.keys())))
|
||||
|
||||
input_range = __DTYPE_TO_DEQUANTIZE_FUNCTION[from_dtype][0]
|
||||
|
||||
if not input_range[0] <= value <= input_range[1]:
|
||||
raise ValueError('Value is not within range of the given datatype {}'.format(from_dtype))
|
||||
|
||||
return __DTYPE_TO_DEQUANTIZE_FUNCTION[from_dtype][1](int(value), scale, offset)
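# A minimal round-trip sketch (the scale and offset values below are
# illustrative, not taken from any particular model):
#
#     from pyarmnn import quantize, dequantize
#
#     q = quantize(0.5, scale=1.0 / 255, offset=0, target_dtype='uint8')
#     f = dequantize(q, scale=1.0 / 255, offset=0, from_dtype='uint8')
#     # f approximates 0.5 to within one quantization step (the scale).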
|
@ -0,0 +1,6 @@
|
||||
# Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
from .const_tensor import ConstTensor
|
||||
from .tensor import Tensor
|
||||
from .workload_tensors import make_input_tensors, make_output_tensors, workload_tensors_to_ndarray
|
@ -0,0 +1,178 @@
|
||||
# Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
# SPDX-License-Identifier: MIT
|
||||
"""
|
||||
This file contains the custom python implementation for Arm NN Const Tensor objects.
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
from .._generated.pyarmnn import DataType_QAsymmU8, DataType_QSymmS8, DataType_QSymmS16, DataType_Signed32, \
|
||||
DataType_QAsymmS8, DataType_Float32, DataType_Float16
|
||||
from .._generated.pyarmnn import ConstTensor as AnnConstTensor, TensorInfo, Tensor
|
||||
|
||||
|
||||
class ConstTensor(AnnConstTensor):
|
||||
"""Creates a PyArmNN ConstTensor object.
|
||||
|
||||
A ConstTensor is a Tensor with an immutable data store. Typically, a ConstTensor
|
||||
is used to input data into a network when running inference.
|
||||
|
||||
This class overrides the SWIG-generated ConstTensor class. The aim of
|
||||
this is to have an easy-to-use public API for the ConstTensor objects.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, *args):
|
||||
"""
|
||||
Supported tensor data types:
|
||||
`DataType_QAsymmU8`,
|
||||
`DataType_QAsymmS8`,
|
||||
`DataType_QSymmS16`,
|
||||
`DataType_QSymmS8`,
|
||||
`DataType_Signed32`,
|
||||
`DataType_Float32`,
|
||||
`DataType_Float16`
|
||||
|
||||
Examples:
|
||||
Create empty ConstTensor
|
||||
>>> import pyarmnn as ann
|
||||
>>> import numpy as np
|
||||
>>> ann.ConstTensor()
|
||||
|
||||
Create ConstTensor given tensor info and input data
|
||||
>>> input_data = np.array(...)
|
||||
>>> ann.ConstTensor(ann.TensorInfo(...), input_data)
|
||||
|
||||
Create ConstTensor from another ConstTensor i.e. copy ConstTensor
|
||||
>>> ann.ConstTensor(ann.ConstTensor())
|
||||
|
||||
Create ConstTensor from tensor
|
||||
>>> ann.ConstTensor(ann.Tensor())
|
||||
|
||||
Args:
|
||||
tensor (Tensor, optional): Create a ConstTensor from a Tensor.
|
||||
const_tensor (ConstTensor, optional): Create a ConstTensor from a ConstTensor i.e. copy.
|
||||
tensor_info (TensorInfo, optional): Tensor information.
|
||||
input_data (ndarray): The numpy array will be transformed into a
|
||||
buffer according to the data type returned by `TensorInfo.GetDataType`.
|
||||
The dtype of the input data must correspond to the data type returned by
|
||||
`TensorInfo.GetDataType`.
|
||||
|
||||
Raises:
|
||||
TypeError: Unsupported input data type.
|
||||
ValueError: Unsupported tensor data type, incorrect input data size, or creation of a ConstTensor from a non-constant TensorInfo.
|
||||
"""
|
||||
self.__memory_area = None
|
||||
|
||||
# TensorInfo as first argument and numpy array as second
|
||||
if len(args) > 1 and isinstance(args[0], TensorInfo):
|
||||
if not isinstance(args[1], np.ndarray):
|
||||
raise TypeError('Data must be provided as a numpy array.')
|
||||
# if TensorInfo IsConstant is false
|
||||
elif not args[0].IsConstant():
|
||||
raise ValueError('TensorInfo when initializing ConstTensor must be set to constant.')
|
||||
else:
|
||||
self.__create_memory_area(args[0].GetDataType(), args[0].GetNumBytes(), args[0].GetNumElements(),
|
||||
args[1])
|
||||
super().__init__(args[0], self.__memory_area.data)
|
||||
|
||||
# copy constructor - reference to memory area is passed from copied const
|
||||
# tensor and armnn's copy constructor is called
|
||||
elif len(args) > 0 and isinstance(args[0], (ConstTensor, Tensor)):
|
||||
# if TensorInfo IsConstant is false
|
||||
if not args[0].GetInfo().IsConstant():
|
||||
raise ValueError('TensorInfo of Tensor when initializing ConstTensor must be set to constant.')
|
||||
else:
|
||||
self.__memory_area = args[0].get_memory_area()
|
||||
super().__init__(args[0])
|
||||
|
||||
# empty tensor
|
||||
elif len(args) == 0:
|
||||
super().__init__()
|
||||
|
||||
else:
|
||||
raise ValueError('Incorrect number of arguments or type of arguments provided to create ConstTensor.')
|
||||
|
||||
def __copy__(self) -> 'ConstTensor':
|
||||
""" Make copy of a const tensor.
|
||||
|
||||
Make const tensor copyable using the python copy operation.
|
||||
|
||||
Note:
|
||||
The tensor memory area is NOT copied. Instead, the new tensor maintains a
|
||||
reference to the same memory area as the old tensor.
|
||||
|
||||
Example:
|
||||
Copy empty tensor
|
||||
>>> from copy import copy
|
||||
>>> import pyarmnn as ann
|
||||
>>> tensor = ann.ConstTensor()
|
||||
>>> copied_tensor = copy(tensor)
|
||||
|
||||
Returns:
|
||||
ConstTensor: a copy of the const tensor object provided.
|
||||
|
||||
"""
|
||||
return ConstTensor(self)
|
||||
|
||||
@staticmethod
|
||||
def __check_size(data: np.ndarray, num_bytes: int, num_elements: int):
|
||||
""" Check the size of the input data against the number of bytes provided by tensor info.
|
||||
|
||||
Args:
|
||||
data (ndarray): Input data.
|
||||
num_bytes (int): Number of bytes required by tensor info.
|
||||
num_elements (int): Number of elements required by tensor info.
|
||||
|
||||
Raises:
|
||||
ValueError: number of bytes in input data does not match tensor info.
|
||||
|
||||
"""
|
||||
size_in_bytes = data.nbytes
|
||||
elements = data.size
|
||||
|
||||
if size_in_bytes != num_bytes:
|
||||
raise ValueError(
|
||||
"ConstTensor requires {} bytes, {} provided. "
|
||||
"Is your input array data type ({}) aligned with TensorInfo?".format(num_bytes, size_in_bytes,
|
||||
data.dtype))
|
||||
if elements != num_elements:
|
||||
raise ValueError("ConstTensor requires {} elements, {} provided.".format(num_elements, elements))
|
||||
|
||||
def __create_memory_area(self, data_type: int, num_bytes: int, num_elements: int, data: np.ndarray):
|
||||
""" Create the memory area used by the tensor to output its results.
|
||||
|
||||
Args:
|
||||
data_type (int): The type of data that will be stored in the memory area.
|
||||
See DataType_*.
|
||||
num_bytes (int): Determines the size of the memory area that will be created.
|
||||
num_elements (int): Determines number of elements in memory area.
|
||||
data (ndarray): Input data as numpy array.
|
||||
|
||||
"""
|
||||
np_data_type_mapping = {DataType_QAsymmU8: np.uint8,
|
||||
DataType_QAsymmS8: np.int8,
|
||||
DataType_QSymmS8: np.int8,
|
||||
DataType_Float32: np.float32,
|
||||
DataType_QSymmS16: np.int16,
|
||||
DataType_Signed32: np.int32,
|
||||
DataType_Float16: np.float16}
|
||||
|
||||
if data_type not in np_data_type_mapping:
|
||||
raise ValueError("The data type provided for this Tensor is not supported: {}".format(data_type))
|
||||
|
||||
if np_data_type_mapping[data_type] != data.dtype:
|
||||
raise TypeError("Expected data to have type {} for type {} but instead got numpy.{}".format(np_data_type_mapping[data_type], data_type, data.dtype))
|
||||
|
||||
self.__check_size(data, num_bytes, num_elements)
|
||||
|
||||
self.__memory_area = data
|
||||
self.__memory_area.flags.writeable = False
|
||||
|
||||
def get_memory_area(self) -> np.ndarray:
|
||||
""" Get values that are stored by the tensor.
|
||||
|
||||
Returns:
|
||||
ndarray: Tensor data (as numpy array).
|
||||
|
||||
"""
|
||||
return self.__memory_area
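# A minimal usage sketch (assumes an NHWC float32 input; the shape here is
# illustrative only):
#
#     import numpy as np
#     import pyarmnn as ann
#
#     info = ann.TensorInfo(ann.TensorShape((1, 8, 8, 1)), ann.DataType_Float32)
#     info.SetConstant()                               # ConstTensor needs a constant TensorInfo
#     data = np.zeros((1, 8, 8, 1), dtype=np.float32)  # dtype must match DataType_Float32
#     const_tensor = ann.ConstTensor(info, data)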
|
126
arch/arm/ARMnn/python/pyarmnn/src/pyarmnn/_tensor/tensor.py
Normal file
@ -0,0 +1,126 @@
|
||||
# Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
# SPDX-License-Identifier: MIT
|
||||
"""
|
||||
This file contains the custom python implementation for Arm NN Tensor objects.
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
from .._generated.pyarmnn import Tensor as annTensor, TensorInfo, DataType_QAsymmU8, DataType_QSymmS8, \
|
||||
DataType_QAsymmS8, DataType_Float32, DataType_QSymmS16, DataType_Signed32, DataType_Float16
|
||||
|
||||
|
||||
class Tensor(annTensor):
|
||||
"""Creates a PyArmNN Tensor object.
|
||||
|
||||
This class overrides the SWIG-generated Tensor class. The aim of
|
||||
this is to create an easy-to-use public API for the Tensor object.
|
||||
|
||||
Memory is allocated and managed by this class, avoiding the need to manage
|
||||
a separate memory area for the tensor, as the SWIG-generated API requires.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, *args):
|
||||
""" Create Tensor object.
|
||||
|
||||
Supported tensor data types:
|
||||
`DataType_QAsymmU8`,
|
||||
`DataType_QAsymmS8`,
|
||||
`DataType_QSymmS16`,
|
||||
`DataType_QSymmS8`,
|
||||
`DataType_Signed32`,
|
||||
`DataType_Float32`,
|
||||
`DataType_Float16`
|
||||
|
||||
Examples:
|
||||
Create an empty tensor
|
||||
>>> import pyarmnn as ann
|
||||
>>> ann.Tensor()
|
||||
|
||||
Create tensor given tensor information
|
||||
>>> ann.Tensor(ann.TensorInfo(...))
|
||||
|
||||
Create tensor from another tensor i.e. copy a tensor
|
||||
>>> ann.Tensor(ann.Tensor())
|
||||
|
||||
Args:
|
||||
tensor(Tensor, optional): Create Tensor from a Tensor i.e. copy.
|
||||
tensor_info (TensorInfo, optional): Tensor information.
|
||||
|
||||
Raises:
|
||||
TypeError: Unsupported input data type.
|
||||
ValueError: An appropriate constructor could not be found with the provided arguments.
|
||||
|
||||
"""
|
||||
self.__memory_area = None
|
||||
|
||||
# TensorInfo as first argument, we need to create memory area manually
|
||||
if len(args) > 0 and isinstance(args[0], TensorInfo):
|
||||
self.__create_memory_area(args[0].GetDataType(), args[0].GetNumElements())
|
||||
super().__init__(args[0], self.__memory_area.data)
|
||||
|
||||
# copy constructor - reference to memory area is passed from copied tensor
|
||||
# and armnn's copy constructor is called
|
||||
elif len(args) > 0 and isinstance(args[0], Tensor):
|
||||
self.__memory_area = args[0].get_memory_area()
|
||||
super().__init__(args[0])
|
||||
|
||||
# empty constructor
|
||||
elif len(args) == 0:
|
||||
super().__init__()
|
||||
|
||||
else:
|
||||
raise ValueError('Incorrect number of arguments or type of arguments provided to create Tensor.')
|
||||
|
||||
def __copy__(self) -> 'Tensor':
|
||||
""" Make copy of a tensor.
|
||||
|
||||
Make tensor copyable using the python copy operation.
|
||||
|
||||
Note:
|
||||
The tensor memory area is NOT copied. Instead, the new tensor maintains a
|
||||
reference to the same memory area as the old tensor.
|
||||
|
||||
Example:
|
||||
Copy empty tensor
|
||||
>>> from copy import copy
|
||||
>>> import pyarmnn as ann
|
||||
>>> tensor = ann.Tensor()
|
||||
>>> copied_tensor = copy(tensor)
|
||||
|
||||
Returns:
|
||||
Tensor: a copy of the tensor object provided.
|
||||
|
||||
"""
|
||||
return Tensor(self)
|
||||
|
||||
def __create_memory_area(self, data_type: int, num_elements: int):
|
||||
""" Create the memory area used by the tensor to output its results.
|
||||
|
||||
Args:
|
||||
data_type (int): The type of data that will be stored in the memory area.
|
||||
See DataType_*.
|
||||
num_elements (int): Determines the size of the memory area that will be created.
|
||||
|
||||
"""
|
||||
np_data_type_mapping = {DataType_QAsymmU8: np.uint8,
|
||||
DataType_QAsymmS8: np.int8,
|
||||
DataType_QSymmS8: np.int8,
|
||||
DataType_Float32: np.float32,
|
||||
DataType_QSymmS16: np.int16,
|
||||
DataType_Signed32: np.int32,
|
||||
DataType_Float16: np.float16}
|
||||
|
||||
if data_type not in np_data_type_mapping:
|
||||
raise ValueError("The data type provided for this Tensor is not supported.")
|
||||
|
||||
self.__memory_area = np.empty(shape=(num_elements,), dtype=np_data_type_mapping[data_type])
|
||||
|
||||
def get_memory_area(self) -> np.ndarray:
|
||||
""" Get values that are stored by the tensor.
|
||||
|
||||
Returns:
|
||||
ndarray : Tensor data (as numpy array).
|
||||
|
||||
"""
|
||||
return self.__memory_area
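# A minimal usage sketch (the output shape is illustrative only):
#
#     import pyarmnn as ann
#
#     out_info = ann.TensorInfo(ann.TensorShape((1, 1001)), ann.DataType_Float32)
#     out_tensor = ann.Tensor(out_info)         # memory area is allocated internally
#     results = out_tensor.get_memory_area()    # numpy view over that same memory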
|
@ -0,0 +1,128 @@
|
||||
# Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
# SPDX-License-Identifier: MIT
|
||||
"""
|
||||
This file contains functions relating to WorkloadTensors.
|
||||
WorkloadTensors are the inputTensors and outputTensors that are consumed by IRuntime.EnqueueWorkload.
|
||||
"""
|
||||
from typing import Union, List, Tuple
|
||||
import logging
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .tensor import Tensor
|
||||
from .const_tensor import ConstTensor
|
||||
|
||||
|
||||
def make_input_tensors(inputs_binding_info: List[Tuple],
|
||||
input_data: List[np.ndarray]) -> List[Tuple[int, ConstTensor]]:
|
||||
"""Returns `inputTensors` to be used with `IRuntime.EnqueueWorkload`.
|
||||
|
||||
This is the primary function to call when you want to produce `inputTensors` for `IRuntime.EnqueueWorkload`.
|
||||
The output is a list of tuples containing ConstTensors with a corresponding input tensor id.
|
||||
The output should be used directly with `IRuntime.EnqueueWorkload`.
|
||||
This function works for single or multiple input data and binding information.
|
||||
|
||||
Examples:
|
||||
Creating inputTensors.
|
||||
>>> import pyarmnn as ann
|
||||
>>> import numpy as np
|
||||
>>>
|
||||
>>> parser = ann.ITfLiteParser()
|
||||
>>> ...
|
||||
>>> example_image = np.array(...)
|
||||
>>> input_binding_info = parser.GetNetworkInputBindingInfo(...)
|
||||
>>>
|
||||
>>> input_tensors = ann.make_input_tensors([input_binding_info], [example_image])
|
||||
|
||||
Args:
|
||||
inputs_binding_info (list of tuples): (int, `TensorInfo`) Binding information for input tensors obtained from
|
||||
`GetNetworkInputBindingInfo`.
|
||||
input_data (list of ndarrays): Tensor data to be used for inference.
|
||||
|
||||
Returns:
|
||||
list: `inputTensors` - A list of tuples (`int` , `ConstTensor`).
|
||||
|
||||
|
||||
Raises:
|
||||
ValueError: If length of `inputs_binding_info` and `input_data` are not the same.
|
||||
"""
|
||||
if len(inputs_binding_info) != len(input_data):
|
||||
raise ValueError("Length of 'inputs_binding_info' does not match length of 'input_data'")
|
||||
|
||||
input_tensors = []
|
||||
|
||||
for in_bind_info, in_data in zip(inputs_binding_info, input_data):
|
||||
in_tensor_id = in_bind_info[0]
|
||||
in_tensor_info = in_bind_info[1]
|
||||
in_tensor_info.SetConstant()
|
||||
input_tensors.append((in_tensor_id, ConstTensor(in_tensor_info, in_data)))
|
||||
|
||||
return input_tensors
|
||||
|
||||
|
||||
def make_output_tensors(outputs_binding_info: List[Tuple]) -> List[Tuple[int, Tensor]]:
|
||||
"""Returns `outputTensors` to be used with `IRuntime.EnqueueWorkload`.
|
||||
|
||||
This is the primary function to call when you want to produce `outputTensors` for `IRuntime.EnqueueWorkload`.
|
||||
The output is a list of tuples containing Tensors with a corresponding output tensor id.
|
||||
The output should be used directly with `IRuntime.EnqueueWorkload`.
|
||||
|
||||
Examples:
|
||||
Creating outputTensors.
|
||||
>>> import pyarmnn as ann
|
||||
>>>
|
||||
>>> parser = ann.ITfLiteParser()
|
||||
>>> ...
|
||||
>>> output_binding_info = parser.GetNetworkOutputBindingInfo(...)
|
||||
>>>
|
||||
>>> output_tensors = ann.make_output_tensors([output_binding_info])
|
||||
|
||||
Args:
|
||||
outputs_binding_info (list of tuples): (int, `TensorInfo`) Binding information for output tensors obtained from
|
||||
`GetNetworkOutputBindingInfo`.
|
||||
|
||||
Returns:
|
||||
list: `outputTensors` - A list of tuples (`int`, `Tensor`).
|
||||
"""
|
||||
output_tensors = []
|
||||
|
||||
for out_bind_info in outputs_binding_info:
|
||||
out_tensor_id = out_bind_info[0]
|
||||
out_tensor_info = out_bind_info[1]
|
||||
output_tensors.append((out_tensor_id, Tensor(out_tensor_info)))
|
||||
|
||||
return output_tensors
|
||||
|
||||
|
||||
def workload_tensors_to_ndarray(workload_tensors: List[Tuple[int, Union[Tensor, ConstTensor]]]) -> List[np.ndarray]:
|
||||
"""Returns a list of the underlying tensor data as ndarrays from `inputTensors` or `outputTensors`.
|
||||
|
||||
We refer to `inputTensors` and `outputTensors` as workload tensors because
|
||||
they are used with `IRuntime.EnqueueWorkload`.
|
||||
Although this function can be used on either `inputTensors` or `outputTensors` the main use of this function
|
||||
is to collect results from `outputTensors` after `IRuntime.EnqueueWorkload` has been called.
|
||||
|
||||
Examples:
|
||||
Getting results after inference.
|
||||
>>> import pyarmnn as ann
|
||||
>>>
|
||||
>>> ...
|
||||
>>> runtime = ann.IRuntime(...)
|
||||
>>> ...
|
||||
>>> runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
|
||||
>>>
|
||||
>>> inference_results = workload_tensors_to_ndarray(output_tensors)
|
||||
|
||||
Args:
|
||||
workload_tensors (inputTensors or outputTensors): `inputTensors` or `outputTensors` to get data from. See
|
||||
`make_input_tensors` and `make_output_tensors`.
|
||||
|
||||
Returns:
|
||||
list: List of `ndarrays` for the underlying tensor data from given `inputTensors` or `outputTensors`.
|
||||
"""
|
||||
arrays = []
|
||||
for index, (_, tensor) in enumerate(workload_tensors):
|
||||
arrays.append(tensor.get_memory_area().reshape(list(tensor.GetShape())))
|
||||
logging.info("Workload tensor {} shape: {}".format(index, tensor.GetShape()))
|
||||
|
||||
return arrays
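# A sketch tying these helpers together for one inference (assumes a runtime
# with a loaded, optimized network and a preprocessed input image; the names
# here are illustrative):
#
#     input_tensors = make_input_tensors([input_binding_info], [image])
#     output_tensors = make_output_tensors([output_binding_info])
#     runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
#     results = workload_tensors_to_ndarray(output_tensors)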
|
@ -0,0 +1,4 @@
|
||||
# Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
from .profiling_helper import ProfilerData, get_profiling_data
|
@ -0,0 +1,100 @@
|
||||
# Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
# SPDX-License-Identifier: MIT
|
||||
"""
|
||||
This file contains functions relating to the use of the Arm NN profiler within PyArmNN.
|
||||
"""
|
||||
import json
|
||||
from collections import namedtuple
|
||||
|
||||
ProfilerData = namedtuple('ProfilerData', ['inference_data', 'per_workload_execution_data'])
|
||||
ProfilerData.__doc__ = """Container to hold the profiling inference data, and the profiling data per workload.
|
||||
|
||||
Contains:
|
||||
inference_data (dict): holds end-to-end inference performance data. Keys:
|
||||
'time_unit' - timer units.
|
||||
'execution_time' - list of total inference execution times for each inference run.
|
||||
per_workload_execution_data (dict): holds per-operation performance data, keyed by operation name.
|
||||
Each operation entry contains:
|
||||
'time_unit' - timer units.
|
||||
'execution_time' - list of total execution times for each inference run.
|
||||
'backend' - backend used for this operation.
|
||||
|
||||
Examples:
|
||||
|
||||
>>> data = get_profiling_data(profiler)
|
||||
>>> print(data)
|
||||
>>> ProfilerData(inference_data={'time_unit': 'us',
|
||||
'execution_time': [8901372.972]},
|
||||
per_workload_execution_data={'CopyMemGeneric_Execute_#3': {'time_unit': 'us',
|
||||
'execution_time': [28.941],
|
||||
'backend': 'Unknown'},
|
||||
'RefConvolution2dWorkload_Execute_#5': {'time_unit': 'us',
|
||||
'execution_time': [126838.071],
|
||||
'backend': 'CpuRef'},
|
||||
'RefDepthwiseConvolution2dWorkload_Execute_#6': {'time_unit': 'us',
|
||||
'execution_time': [49886.208],
|
||||
'backend': 'CpuRef'}
|
||||
...etc
|
||||
}
|
||||
)
|
||||
"""
|
||||
|
||||
|
||||
def get_profiling_data(profiler: 'IProfiler') -> ProfilerData:
|
||||
"""Reads IProfiler object passed in, extracts the relevant data
|
||||
and returns it in a ProfilerData container.
|
||||
|
||||
Args:
|
||||
profiler (IProfiler): The IProfiler object to be parsed.
|
||||
|
||||
Returns:
|
||||
ProfilerData: A container containing the relevant data extracted from the Profiler output.
|
||||
"""
|
||||
|
||||
top_level_dict = json.loads(profiler.as_json())
|
||||
armnn_data = top_level_dict["ArmNN"]
|
||||
# Get the inference measurements dict; there will be just one value, for the key starting with "inference_measurements_".
|
||||
inference_measurements = [v for k, v in armnn_data.items() if k.startswith("inference_measurements_")][0]
|
||||
|
||||
# Get the execution data dict; there will be just one value, for the key starting with "Execute_".
|
||||
execution_data = [v for k, v in inference_measurements.items() if k.startswith("Execute_")][0]
|
||||
|
||||
workload_data = {}
|
||||
inference_data = {}
|
||||
for exec_key, exec_value in execution_data.items():
|
||||
# Check all items with a type.
|
||||
if "type" in exec_value and exec_value["type"] == "Event":
|
||||
for event_key, event_value in exec_value.items():
|
||||
if event_key.startswith("Wall clock time_#") and event_value["type"] == "Measurement":
|
||||
time_data = __get_wall_clock_times__(event_value)
|
||||
time_data["backend"] = __get_backend(exec_key)
|
||||
workload_data[exec_key] = time_data
|
||||
# This is the total inference time map
|
||||
if exec_key.startswith("Wall clock time_#") and exec_value["type"] == "Measurement":
|
||||
time_data = __get_wall_clock_times__(exec_value)
|
||||
inference_data.update(time_data)
|
||||
return ProfilerData(inference_data=inference_data, per_workload_execution_data=workload_data)
|
||||
|
||||
|
||||
def __get_wall_clock_times__(wall_clock_item):
|
||||
# Copy the raw measurements so callers cannot mutate the profiler's data.
|
||||
return {"time_unit": wall_clock_item["unit"],
|
||||
"execution_time": list(wall_clock_item["raw"])}
|
||||
|
||||
|
||||
def __get_backend(exec_key):
|
||||
if "ref" in exec_key.lower():
|
||||
return "CpuRef"
|
||||
elif "neon" in exec_key.lower():
|
||||
return "CpuAcc"
|
||||
elif "cl" in exec_key.lower():
|
||||
return "GpuAcc"
|
||||
elif "ethos" in exec_key.lower():
|
||||
return "EthosNAcc"
|
||||
else:
|
||||
return "Unknown"
|
38
arch/arm/ARMnn/python/pyarmnn/src/pyarmnn/_version.py
Normal file
@ -0,0 +1,38 @@
|
||||
# Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
# Copyright 2020 NXP
|
||||
# SPDX-License-Identifier: MIT
|
||||
import os
|
||||
|
||||
version_info = (28, 0, 0)
|
||||
|
||||
__dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
|
||||
|
||||
if __dev_version_env:
|
||||
__dev_version = "dev0"
|
||||
try:
|
||||
__dev_version = "dev{}".format(int(__dev_version_env))
|
||||
except ValueError:
|
||||
__dev_version = str(__dev_version_env)
|
||||
|
||||
version_info = (*version_info, __dev_version)
|
||||
|
||||
__version__ = '.'.join(str(c) for c in version_info)
|
||||
__arm_ml_version__ = '{}.{}.{}'.format(version_info[0], version_info[1], version_info[2])
|
||||
|
||||
|
||||
def check_armnn_version(installed_armnn_version: str, expected_armnn_version: str = __arm_ml_version__):
|
||||
"""Compares expected Arm NN version and Arm NN version used to build the package.
|
||||
|
||||
Args:
|
||||
installed_armnn_version (str): Arm NN version installed on the system (e.g. 28.0.0).
|
||||
expected_armnn_version (str): Arm NN version the package was built against.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
expected = expected_armnn_version.split('.', 2)
|
||||
installed = installed_armnn_version.split('.', 2)
|
||||
|
||||
# only compare major and minor versions, not patch
|
||||
assert (expected[0] == installed[0]) and (expected[1] == installed[1]), \
|
||||
"Expected ArmNN version is {} but installed ArmNN version is {}".format(expected_armnn_version, installed_armnn_version)
|
29
arch/arm/ARMnn/python/pyarmnn/src/pyarmnn/swig/armnn.i
Normal file
@ -0,0 +1,29 @@
|
||||
//
|
||||
// Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
//
|
||||
%module pyarmnn
|
||||
%{
|
||||
#define SWIG_FILE_WITH_INIT
|
||||
#include "armnn/Types.hpp"
|
||||
#include "ProfilingGuid.hpp"
|
||||
%}
|
||||
|
||||
//typemap definitions and other common stuff
|
||||
%include "standard_header.i"
|
||||
|
||||
//armnn api submodules
|
||||
%include "modules/armnn_backend.i"
|
||||
%include "modules/armnn_backend_opt.i"
|
||||
%include "modules/armnn_types.i"
|
||||
%include "modules/armnn_descriptors.i"
|
||||
%include "modules/armnn_lstmparam.i"
|
||||
%include "modules/armnn_network.i"
|
||||
%include "modules/armnn_profiler.i"
|
||||
%include "modules/armnn_runtime.i"
|
||||
%include "modules/armnn_tensor.i"
|
||||
%include "modules/armnn_types_utils.i"
|
||||
|
||||
// Clear exception typemap.
|
||||
%exception;
|
||||
|
@ -0,0 +1,120 @@
|
||||
//
|
||||
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
//
|
||||
%module pyarmnn_deserializer
|
||||
%{
|
||||
#include "armnnDeserializer/IDeserializer.hpp"
|
||||
#include "armnn/Types.hpp"
|
||||
#include "armnn/INetwork.hpp"
|
||||
#include "armnn/Exceptions.hpp"
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
%}
|
||||
|
||||
//typemap definitions and other common stuff
|
||||
%include "standard_header.i"
|
||||
|
||||
namespace std {
|
||||
%template(BindingPointInfo) pair<int, armnn::TensorInfo>;
|
||||
%template(MapStringTensorShape) map<std::string, armnn::TensorShape>;
|
||||
%template(StringVector) vector<string>;
|
||||
}
|
||||
|
||||
namespace armnnDeserializer
|
||||
{
|
||||
%feature("docstring",
|
||||
"
|
||||
Interface for creating a parser object for serialized Arm NN model files.
|
||||
|
||||
Parsers are used to automatically construct ArmNN graphs from model files.
|
||||
|
||||
") IDeserializer;
|
||||
%nodefaultctor IDeserializer;
|
||||
class IDeserializer
|
||||
{
|
||||
public:
|
||||
};
|
||||
|
||||
%extend IDeserializer {
|
||||
// This does not replace the default constructor of the Arm NN class. It tells SWIG to create a custom __init__
|
||||
// method for the IDeserializer Python object that uses the static factory method to do the job.
|
||||
|
||||
IDeserializer() {
|
||||
return armnnDeserializer::IDeserializer::CreateRaw();
|
||||
}
|
||||
|
||||
// The following does not replace the real destructor of the Arm NN class.
|
||||
// It creates a function that is called when the SWIG object goes out of scope to clean up resources,
|
||||
// so the user doesn't need to call IDeserializer::Destroy themselves.
|
||||
// `$self` is a pointer to the extracted ArmNN IDeserializer object.
|
||||
|
||||
~IDeserializer() {
|
||||
armnnDeserializer::IDeserializer::Destroy($self);
|
||||
}
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Create the network from an Arm NN binary file.
|
||||
|
||||
Args:
|
||||
graphFile (str): Path to the armnn model to be parsed.
|
||||
|
||||
Returns:
|
||||
INetwork: Parsed network.
|
||||
|
||||
Raises:
|
||||
RuntimeError: If model file was not found.
|
||||
") CreateNetworkFromBinaryFile;
|
||||
|
||||
%newobject CreateNetworkFromBinaryFile;
|
||||
armnn::INetwork* CreateNetworkFromBinaryFile(const char *graphFile) {
|
||||
std::ifstream is(graphFile, std::ifstream::binary);
|
||||
if (!is.good()) {
|
||||
std::string locationString = CHECK_LOCATION().AsString();
|
||||
std::stringstream msg;
|
||||
msg << "Cannot read the file " << graphFile << locationString;
|
||||
throw armnn::FileNotFoundException(msg.str());
|
||||
}
|
||||
return $self->CreateNetworkFromBinary(is).release();
|
||||
}
|
||||
|
||||
// Make both GetNetworkInputBindingInfo and GetNetworkOutputBindingInfo return a std::pair like other parsers instead of struct.
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.
|
||||
Args:
|
||||
layerId (int): The layer id. Any value is acceptable since it is unused in the current implementation.
|
||||
name (str): Name of the input.
|
||||
|
||||
Returns:
|
||||
tuple: (`int`, `TensorInfo`).
|
||||
") GetNetworkInputBindingInfo;
|
||||
std::pair<int, armnn::TensorInfo> GetNetworkInputBindingInfo(unsigned int layerId, const std::string& name){
|
||||
armnnDeserializer::BindingPointInfo info = $self->GetNetworkInputBindingInfo(layerId, name);
|
||||
return std::make_pair(info.m_BindingId, info.m_TensorInfo);
|
||||
}
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Retrieve binding info (layer id and `TensorInfo`) for the network output identified by the given layer name and subgraph id.
|
||||
|
||||
Args:
|
||||
layerId (int): The layer id. Any value is acceptable since it is unused in the current implementation.
|
||||
name (str): Name of the output.
|
||||
|
||||
Returns:
|
||||
tuple: (`int`, `TensorInfo`).
|
||||
") GetNetworkOutputBindingInfo;
|
||||
std::pair<int, armnn::TensorInfo> GetNetworkOutputBindingInfo(unsigned int layerId, const std::string& name){
|
||||
armnnDeserializer::BindingPointInfo info = $self->GetNetworkOutputBindingInfo(layerId, name);
|
||||
return std::make_pair(info.m_BindingId, info.m_TensorInfo);
|
||||
}
|
||||
}
|
||||
|
||||
} // end of namespace armnnDeserializer
|
||||
|
||||
// Clear exception typemap.
|
||||
%exception;
|
@ -0,0 +1,96 @@
|
||||
//
|
||||
// Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
//
|
||||
%module pyarmnn_onnxparser
|
||||
%{
|
||||
#define SWIG_FILE_WITH_INIT
|
||||
#include "armnnOnnxParser/IOnnxParser.hpp"
|
||||
#include "armnn/INetwork.hpp"
|
||||
%}
|
||||
|
||||
//typemap definitions and other common stuff
|
||||
%include "standard_header.i"
|
||||
|
||||
namespace std {
|
||||
%template(BindingPointInfo) pair<int, armnn::TensorInfo>;
|
||||
%template(MapStringTensorShape) map<std::string, armnn::TensorShape>;
|
||||
%template(StringVector) vector<string>;
|
||||
}
|
||||
|
||||
namespace armnnOnnxParser
|
||||
{
|
||||
%feature("docstring",
|
||||
"
|
||||
Interface for creating a parser object for ONNX (https://onnx.ai/) model files.
|
||||
|
||||
Parsers are used to automatically construct Arm NN graphs from model files.
|
||||
|
||||
") IOnnxParser;
|
||||
|
||||
%nodefaultctor IOnnxParser;
|
||||
class IOnnxParser
|
||||
{
|
||||
public:
|
||||
%feature("docstring",
|
||||
"
|
||||
Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name.
|
||||
|
||||
Args:
|
||||
name (string): Name of the input node.
|
||||
|
||||
Returns:
|
||||
tuple: (`int`, `TensorInfo`)
|
||||
") GetNetworkInputBindingInfo;
|
||||
std::pair<int, armnn::TensorInfo> GetNetworkInputBindingInfo(const std::string& name);
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Retrieve binding info (layer id and `TensorInfo`) for the network output identified by the given layer name.
|
||||
|
||||
Args:
|
||||
name (string): Name of the output node.
|
||||
|
||||
Returns:
|
||||
tuple: (`int`, `TensorInfo`)
|
||||
") GetNetworkOutputBindingInfo;
|
||||
std::pair<int, armnn::TensorInfo> GetNetworkOutputBindingInfo(const std::string& name);
|
||||
};
|
||||
|
||||
%extend IOnnxParser {
|
||||
// This does not replace the default constructor of the Arm NN class. It tells SWIG to create a custom __init__
|
||||
// method for the IOnnxParser Python object that uses the static factory method to do the job.
|
||||
IOnnxParser() {
|
||||
return armnnOnnxParser::IOnnxParser::CreateRaw();
|
||||
}
|
||||
|
||||
// The following does not replace the real destructor of the Arm NN class.
|
||||
// It creates a function that is called when the SWIG object goes out of scope to clean up resources,
|
||||
// so the user doesn't need to call IOnnxParser::Destroy themselves.
|
||||
// `$self` is a pointer to the extracted ArmNN IOnnxParser object.
|
||||
~IOnnxParser() {
|
||||
armnnOnnxParser::IOnnxParser::Destroy($self);
|
||||
}
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Create the network from a binary file on disk.
|
||||
|
||||
Args:
|
||||
graphFile (str): Path to the onnx model to be parsed.
|
||||
|
||||
Returns:
|
||||
INetwork: Parsed network.
|
||||
|
||||
Raises:
|
||||
RuntimeError: If model file was not found.
|
||||
") CreateNetworkFromBinaryFile;
|
||||
%newobject CreateNetworkFromBinaryFile;
|
||||
armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile) {
|
||||
return $self->CreateNetworkFromBinaryFile(graphFile).release();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
// Clear exception typemap.
|
||||
%exception;
|
@ -0,0 +1,156 @@
|
||||
//
|
||||
// Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
//
|
||||
%module pyarmnn_tfliteparser
|
||||
%{
|
||||
#include "armnnTfLiteParser/ITfLiteParser.hpp"
|
||||
#include "armnn/Types.hpp"
|
||||
#include "armnn/INetwork.hpp"
|
||||
%}
|
||||
|
||||
//typemap definitions and other common stuff
|
||||
%include "standard_header.i"
|
||||
|
||||
namespace std {
|
||||
%template(BindingPointInfo) pair<int, armnn::TensorInfo>;
|
||||
%template(MapStringTensorShape) map<std::string, armnn::TensorShape>;
|
||||
%template(StringVector) vector<string>;
|
||||
}
|
||||
|
||||
namespace armnnTfLiteParser
|
||||
{
|
||||
%feature("docstring",
|
||||
"
|
||||
Interface for creating a parser object for TfLite (https://www.tensorflow.org/lite) model files.
|
||||
|
||||
Parsers are used to automatically construct Arm NN graphs from model files.
|
||||
|
||||
") ITfLiteParser;
|
||||
%nodefaultctor ITfLiteParser;
|
||||
class ITfLiteParser
|
||||
{
|
||||
public:
|
||||
%feature("docstring",
|
||||
"
|
||||
Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.
|
||||
Args:
|
||||
subgraphId (int): The subgraph id.
|
||||
name (str): Name of the input.
|
||||
|
||||
Returns:
|
||||
tuple: (`int`, `TensorInfo`).
|
||||
") GetNetworkInputBindingInfo;
|
||||
std::pair<int, armnn::TensorInfo> GetNetworkInputBindingInfo(size_t subgraphId, const std::string& name);
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Retrieve binding info (layer id and `TensorInfo`) for the network output identified by the given layer name and subgraph id.
|
||||
|
||||
Args:
|
||||
subgraphId (int): The subgraph id.
|
||||
name (str): Name of the output.
|
||||
|
||||
Returns:
|
||||
tuple: (`int`, `TensorInfo`).
|
||||
") GetNetworkOutputBindingInfo;
|
||||
std::pair<int, armnn::TensorInfo> GetNetworkOutputBindingInfo(size_t subgraphId, const std::string& name);
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Return the number of subgraphs in the parsed model.
|
||||
Returns:
|
||||
int: The number of subgraphs.
|
||||
") GetSubgraphCount;
|
||||
size_t GetSubgraphCount();
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Return the input tensor names for a given subgraph.
|
||||
|
||||
Args:
|
||||
subgraphId (int): The subgraph id.
|
||||
|
||||
Returns:
|
||||
list: A list of the input tensor names for the given subgraph.
|
||||
") GetSubgraphInputTensorNames;
|
||||
std::vector<std::string> GetSubgraphInputTensorNames(size_t subgraphId);
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Return the output tensor names for a given subgraph.
|
||||
|
||||
Args:
|
||||
subgraphId (int): The subgraph id.
|
||||
|
||||
Returns:
|
||||
list: A list of the output tensor names for the given subgraph.
|
||||
") GetSubgraphOutputTensorNames;
|
||||
std::vector<std::string> GetSubgraphOutputTensorNames(size_t subgraphId);
|
||||
|
||||
%feature("flatnested");
|
||||
%feature("docstring",
|
||||
"
|
||||
Options for TfLiteParser.
|
||||
|
||||
Contains:
|
||||
m_StandInLayerForUnsupported (bool): Add StandInLayers as placeholders for unsupported operators.
|
||||
Default: False
|
||||
m_InferAndValidate (bool): Infer output shape of operations based on their input shape. Default: False
|
||||
")TfLiteParserOptions;
|
||||
struct TfLiteParserOptions
|
||||
{
|
||||
TfLiteParserOptions();
|
||||
|
||||
bool m_StandInLayerForUnsupported;
|
||||
bool m_InferAndValidate;
|
||||
};
|
||||
};
|
||||
|
||||
%extend ITfLiteParser {
|
||||
// This does not replace the default constructor of the Arm NN class. It tells SWIG to create a custom __init__
|
||||
// method for the ITfLiteParser Python object that uses the static factory method to do the job.
|
||||
|
||||
ITfLiteParser(const armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions* options = nullptr) {
|
||||
if (options) {
|
||||
return armnnTfLiteParser::ITfLiteParser::CreateRaw(
|
||||
armnn::Optional<armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions>(*options));
|
||||
} else {
|
||||
return armnnTfLiteParser::ITfLiteParser::CreateRaw();
|
||||
}
|
||||
}
|
||||
|
||||
// The following does not replace the real destructor of the Arm NN class.
|
||||
// It creates a function that is called when the SWIG object goes out of scope to clean up resources,
|
||||
// so the user doesn't need to call ITfLiteParser::Destroy themselves.
|
||||
// `$self` is a pointer to the extracted ArmNN ITfLiteParser object.
|
||||
|
||||
~ITfLiteParser() {
|
||||
armnnTfLiteParser::ITfLiteParser::Destroy($self);
|
||||
}
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Create the network from a flatbuffers binary file.
|
||||
|
||||
Args:
|
||||
graphFile (str): Path to the tflite model to be parsed.
|
||||
|
||||
Returns:
|
||||
INetwork: Parsed network.
|
||||
|
||||
Raises:
|
||||
RuntimeError: If model file was not found.
|
||||
") CreateNetworkFromBinaryFile;
|
||||
|
||||
%newobject CreateNetworkFromBinaryFile;
|
||||
armnn::INetwork* CreateNetworkFromBinaryFile(const char* graphFile) {
|
||||
return $self->CreateNetworkFromBinaryFile(graphFile).release();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
} // end of namespace armnnTfLiteParser
|
||||
|
||||
// Clear exception typemap.
|
||||
%exception;
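// Typical Python usage of the generated parser (a sketch; the model path and
// binding lookups are illustrative):
//
//     import pyarmnn as ann
//
//     parser = ann.ITfLiteParser()
//     network = parser.CreateNetworkFromBinaryFile('./model.tflite')
//     graph_id = parser.GetSubgraphCount() - 1
//     input_names = parser.GetSubgraphInputTensorNames(graph_id)
//     input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])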
|
@ -0,0 +1,58 @@
|
||||
//
|
||||
// Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
//
|
||||
%module pyarmnn_version
|
||||
|
||||
%include "std_string.i"
|
||||
|
||||
%{
|
||||
#define SWIG_FILE_WITH_INIT
|
||||
#include "armnn/Version.hpp"
|
||||
%}
|
||||
|
||||
%{
|
||||
std::string GetVersion()
|
||||
{
|
||||
return ARMNN_VERSION;
|
||||
};
|
||||
|
||||
std::string GetMajorVersion()
|
||||
{
|
||||
return STRINGIFY_VALUE(ARMNN_MAJOR_VERSION);
|
||||
};
|
||||
|
||||
std::string GetMinorVersion()
|
||||
{
|
||||
return STRINGIFY_VALUE(ARMNN_MINOR_VERSION);
|
||||
};
|
||||
%}
|
||||
%feature("docstring",
|
||||
"
|
||||
Returns Arm NN library full version: MAJOR + MINOR + INCREMENTAL.
|
||||
|
||||
Returns:
|
||||
str: Full version of Arm NN installed.
|
||||
|
||||
") GetVersion;
|
||||
std::string GetVersion();
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Returns Arm NN library major version.
|
||||
|
||||
Returns:
|
||||
str: Major version of Arm NN installed.
|
||||
|
||||
") GetMajorVersion;
|
||||
std::string GetMajorVersion();
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Returns Arm NN library minor version.
|
||||
|
||||
Returns:
|
||||
str: Minor version of Arm NN installed.
|
||||
|
||||
") GetMinorVersion;
|
||||
std::string GetMinorVersion();
|
@ -0,0 +1,65 @@
|
||||
//
|
||||
// Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
//
|
||||
%{
|
||||
#include "armnn/BackendId.hpp"
|
||||
%}
|
||||
|
||||
namespace std {
|
||||
%template(BackendIdVector) vector<armnn::BackendId>;
|
||||
%template(BackendIdSet) unordered_set<armnn::BackendId>;
|
||||
}
|
||||
|
||||
namespace armnn
|
||||
{
|
||||
|
||||
class BackendId
|
||||
{
|
||||
public:
|
||||
%feature("docstring",
|
||||
"
|
||||
Creates backend id instance.
|
||||
Supported backend ids: 'CpuRef', 'CpuAcc', 'GpuAcc', 'EthosNAcc'.
|
||||
|
||||
Args:
|
||||
id (str): Computation backend identification.
|
||||
") BackendId;
|
||||
|
||||
BackendId(const std::string& id);
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Checks if the backend is the CPU reference implementation.
|
||||
Returns:
|
||||
bool: True if the backend is the CPU reference implementation, False otherwise.
|
||||
|
||||
") IsCpuRef;
|
||||
bool IsCpuRef();
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Returns backend identification.
|
||||
|
||||
>>> backendId = BackendId('CpuRef')
|
||||
>>> assert 'CpuRef' == str(backendId)
|
||||
>>> assert 'CpuRef' == backendId.Get()
|
||||
|
||||
Returns:
|
||||
str: Backend identification.
|
||||
|
||||
") Get;
|
||||
const std::string& Get();
|
||||
};
|
||||
|
||||
%extend BackendId {
|
||||
|
||||
std::string __str__() {
|
||||
return $self->Get();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
using BackendIdVector = std::vector<armnn::BackendId>;
|
||||
using BackendIdSet = std::unordered_set<armnn::BackendId>;
|
||||
}
|
@ -0,0 +1,103 @@
|
||||
//
|
||||
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
//
|
||||
%{
|
||||
#include "armnn/BackendId.hpp"
|
||||
#include "armnn/BackendOptions.hpp"
|
||||
%}
|
||||
|
||||
#pragma SWIG nowarn=SWIGWARN_PARSE_NESTED_CLASS
|
||||
|
||||
%{
|
||||
typedef armnn::BackendOptions::BackendOption BackendOption;
|
||||
%}
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Struct that allows users to pass a backend-specific option.
|
||||
") BackendOption;
|
||||
%nodefaultctor BackendOption;
|
||||
struct BackendOption
|
||||
{
|
||||
BackendOption(std::string name, bool value);
|
||||
BackendOption(std::string name, int value);
|
||||
BackendOption(std::string name, unsigned int value);
|
||||
BackendOption(std::string name, float value);
|
||||
BackendOption(std::string name, std::string value);
|
||||
|
||||
std::string GetName();
|
||||
};
|
||||
|
||||
namespace armnn
|
||||
{
|
||||
%feature("docstring",
|
||||
"
|
||||
Struct for backend-specific options, see `BackendOption`.
|
||||
Options are assigned to a specific backend by providing a backend id.
|
||||
|
||||
") BackendOptions;
|
||||
%nodefaultctor BackendOptions;
|
||||
struct BackendOptions
|
||||
{
|
||||
BackendOptions(BackendId backend);
|
||||
|
||||
BackendOptions(const BackendOptions& other);
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Add backend option.
|
||||
|
||||
Args:
|
||||
option (`BackendOption`): backend option
|
||||
") AddOption;
|
||||
void AddOption(const BackendOption& option);
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Get a backend id.
|
||||
|
||||
Returns:
|
||||
BackendId: assigned backend id.
|
||||
") GetBackendId;
|
||||
const BackendId& GetBackendId();
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Get backend options count.
|
||||
|
||||
Returns:
|
||||
int: number of options for a backend.
|
||||
") GetOptionCount;
|
||||
size_t GetOptionCount();
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Get backend option by index.
|
||||
|
||||
Args:
|
||||
idx (int): backend option index
|
||||
|
||||
Returns:
|
||||
BackendOption: backend option.
|
||||
") GetOption;
|
||||
const BackendOption& GetOption(size_t idx);
|
||||
|
||||
%pythoncode %{
|
||||
def __iter__(self):
|
||||
for count in range(self.GetOptionCount()):
|
||||
yield self[count]
|
||||
%}
|
||||
};
|
||||
|
||||
%extend BackendOptions {
|
||||
|
||||
const BackendOption& __getitem__(size_t i) const {
|
||||
return $self->GetOption(i);
|
||||
}
|
||||
|
||||
size_t __len__() const {
|
||||
return $self->GetOptionCount();
|
||||
}
|
||||
}
|
||||
}
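// Typical Python usage (a sketch; 'TuningLevel' is just an example option
// name, valid options depend on the backend):
//
//     options = ann.BackendOptions(ann.BackendId('GpuAcc'))
//     options.AddOption(ann.BackendOption('TuningLevel', 2))
//     for option in options:        # __iter__ yields each added BackendOption
//         print(option.GetName())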
|
File diff suppressed because it is too large
@ -0,0 +1,159 @@
|
||||
//
|
||||
// Copyright © 2020 Arm Ltd. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
//
|
||||
%{
|
||||
#include "armnn/LstmParams.hpp"
|
||||
#include "armnn/QuantizedLstmParams.hpp"
|
||||
%}
|
||||
|
||||
namespace armnn
|
||||
{
|
||||
|
||||
%feature("docstring",
|
||||
"
|
||||
Long Short-Term Memory layer input parameters.
|
||||
|
||||
See `INetwork.AddLstmLayer()`.
|
||||
Operation described by the following equations:
|
||||
|
||||
\[i_t=\sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) \\\\
|
||||
f_t=\sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) \\\\
|
||||
C_t=clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) \\\\
|
||||
o_t = \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) \\\\
|
||||
h_t = clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})\ if\ there\ is\ a\ projection; \\\\
|
||||
h_t = o_t \odot g(C_t)\ otherwise. \]
|
||||
Where:
|
||||
\(x_t\) - input;
|
||||
\(i_t\) - input gate;
|
||||
\(f_t\) - forget gate;
|
||||
\(C_t\) - cell state;
|
||||
\(o_t\) - output;
|
||||
\(h_t\) - output state;
|
||||
\(\sigma\) - logistic sigmoid function;
|
||||
\(g\) - cell input and cell output activation function, see `LstmDescriptor.m_ActivationFunc`;
|
||||
\(t_{cell}\) - threshold for clipping the cell state, see `LstmDescriptor.m_ClippingThresCell`;
|
||||
\(t_{proj}\) - threshold for clipping the projected output, see `LstmDescriptor.m_ClippingThresProj`;
|
||||
|
||||
Contains:
|
||||
m_InputToInputWeights (ConstTensor): \(W_{xi}\), input-to-input weight matrix.
|
||||
m_InputToForgetWeights (ConstTensor): \(W_{xf}\), input-to-forget weight matrix.
|
||||
m_InputToCellWeights (ConstTensor): \(W_{xc}\), input-to-cell weight matrix.
|
||||
m_InputToOutputWeights (ConstTensor): \(W_{xo}\), input-to-output weight matrix.
|
||||
|
||||
m_RecurrentToInputWeights (ConstTensor): \(W_{hi}\), recurrent-to-input weight matrix.
|
||||
m_RecurrentToForgetWeights (ConstTensor): \(W_{hf}\), recurrent-to-forget weight matrix.
|
||||
m_RecurrentToCellWeights (ConstTensor): \(W_{hc}\), recurrent-to-cell weight matrix.
|
||||
m_RecurrentToOutputWeights (ConstTensor): \(W_{ho}\), recurrent-to-output weight matrix.
|
||||
|
||||
m_CellToInputWeights (ConstTensor): \(W_{ci}\), cell-to-input weight matrix. Has effect if `LstmDescriptor.m_PeepholeEnabled`.
|
||||
m_CellToForgetWeights (ConstTensor): \(W_{cf}\), cell-to-forget weight matrix. Has effect if `LstmDescriptor.m_PeepholeEnabled`.
|
||||
m_CellToOutputWeights (ConstTensor): \(W_{co}\), cell-to-output weight matrix. Has effect if `LstmDescriptor.m_PeepholeEnabled`.
|
||||
|
||||
m_InputGateBias (ConstTensor): \(b_i\), input gate bias.
|
||||
m_ForgetGateBias (ConstTensor): \(b_f\), forget gate bias.
|
||||
m_CellBias (ConstTensor): \(b_c\), cell bias.
|
||||
m_OutputGateBias (ConstTensor): \(b_o\), output gate bias.
|
||||
|
||||
m_ProjectionWeights (ConstTensor): \(W_{proj}\), projection weight matrix.
|
||||
Has effect if `LstmDescriptor.m_ProjectionEnabled` is set to True.
|
||||
m_ProjectionBias (ConstTensor): \(b_{proj}\), projection bias.
|
||||
Has effect if `LstmDescriptor.m_ProjectionEnabled` is set to True.
|
||||
m_InputLayerNormWeights (ConstTensor): normalisation weights for input,
|
||||
has effect if `LstmDescriptor.m_LayerNormEnabled` set to True.
|
||||
m_ForgetLayerNormWeights (ConstTensor): normalisation weights for forget gate,
|
||||
has effect if `LstmDescriptor.m_LayerNormEnabled` set to True.
|
||||
m_CellLayerNormWeights (ConstTensor): normalisation weights for current cell,
|
||||
has effect if `LstmDescriptor.m_LayerNormEnabled` set to True.
|
||||
m_OutputLayerNormWeights (ConstTensor): normalisation weights for output gate,
|
||||
has effect if `LstmDescriptor.m_LayerNormEnabled` set to True.
|
||||
|
||||
") LstmInputParams;
|
||||
struct LstmInputParams
|
||||
{
|
||||
LstmInputParams();
|
||||
|
||||
const armnn::ConstTensor* m_InputToInputWeights;
|
||||
const armnn::ConstTensor* m_InputToForgetWeights;
|
||||
const armnn::ConstTensor* m_InputToCellWeights;
|
||||
const armnn::ConstTensor* m_InputToOutputWeights;
|
||||
const armnn::ConstTensor* m_RecurrentToInputWeights;
|
||||
const armnn::ConstTensor* m_RecurrentToForgetWeights;
|
||||
const armnn::ConstTensor* m_RecurrentToCellWeights;
|
||||
const armnn::ConstTensor* m_RecurrentToOutputWeights;
|
||||
const armnn::ConstTensor* m_CellToInputWeights;
|
||||
const armnn::ConstTensor* m_CellToForgetWeights;
|
||||
const armnn::ConstTensor* m_CellToOutputWeights;
|
||||
const armnn::ConstTensor* m_InputGateBias;
|
||||
const armnn::ConstTensor* m_ForgetGateBias;
|
||||
const armnn::ConstTensor* m_CellBias;
|
||||
const armnn::ConstTensor* m_OutputGateBias;
|
||||
const armnn::ConstTensor* m_ProjectionWeights;
|
||||
const armnn::ConstTensor* m_ProjectionBias;
|
||||
const armnn::ConstTensor* m_InputLayerNormWeights;
|
||||
const armnn::ConstTensor* m_ForgetLayerNormWeights;
|
||||
const armnn::ConstTensor* m_CellLayerNormWeights;
|
||||
const armnn::ConstTensor* m_OutputLayerNormWeights;
|
||||
};
|
||||
|
||||
%feature("docstring",
    "
    Quantized Long Short-Term Memory layer input parameters.

    See `INetwork.AddQuantizedLstmLayer()`.
    Operation described by the following equations:

    \[ i_t = \sigma(W_{xi}x_t + W_{hi}h_{t-1} + W_{ci}C_{t-1} + b_i) \\\\
       f_t = \sigma(W_{xf}x_t + W_{hf}h_{t-1} + W_{cf}C_{t-1} + b_f) \\\\
       C_t = clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t + W_{hc}h_{t-1} + b_c),\ t_{cell}) \\\\
       o_t = \sigma(W_{xo}x_t + W_{ho}h_{t-1} + W_{co}C_t + b_o) \\\\
       h_t = clip(W_{proj}(o_t \odot g(C_t)) + b_{proj},\ t_{proj})\ if\ there\ is\ a\ projection; \\\\
       h_t = o_t \odot g(C_t)\ otherwise. \]

    Where:
        \(x_t\) - input;
        \(i_t\) - input gate;
        \(f_t\) - forget gate;
        \(C_t\) - cell state;
        \(o_t\) - output;
        \(h_t\) - output state;
        \(\sigma\) - logistic sigmoid function;
        \(g\) - cell input and cell output activation function, see `LstmDescriptor.m_ActivationFunc`;
        \(t_{cell}\) - threshold for clipping the cell state, see `LstmDescriptor.m_ClippingThresCell`;
        \(t_{proj}\) - threshold for clipping the projected output, see `LstmDescriptor.m_ClippingThresProj`.

    Contains:
        m_InputToInputWeights (ConstTensor): \(W_{xi}\), input-to-input weight matrix.
        m_InputToForgetWeights (ConstTensor): \(W_{xf}\), input-to-forget weight matrix.
        m_InputToCellWeights (ConstTensor): \(W_{xc}\), input-to-cell weight matrix.
        m_InputToOutputWeights (ConstTensor): \(W_{xo}\), input-to-output weight matrix.

        m_RecurrentToInputWeights (ConstTensor): \(W_{hi}\), recurrent-to-input weight matrix.
        m_RecurrentToForgetWeights (ConstTensor): \(W_{hf}\), recurrent-to-forget weight matrix.
        m_RecurrentToCellWeights (ConstTensor): \(W_{hc}\), recurrent-to-cell weight matrix.
        m_RecurrentToOutputWeights (ConstTensor): \(W_{ho}\), recurrent-to-output weight matrix.

        m_InputGateBias (ConstTensor): \(b_i\), input gate bias.
        m_ForgetGateBias (ConstTensor): \(b_f\), forget gate bias.
        m_CellBias (ConstTensor): \(b_c\), cell bias.
        m_OutputGateBias (ConstTensor): \(b_o\), output gate bias.

    ") QuantizedLstmInputParams;
struct QuantizedLstmInputParams
{
    QuantizedLstmInputParams();

    const armnn::ConstTensor* m_InputToInputWeights;
    const armnn::ConstTensor* m_InputToForgetWeights;
    const armnn::ConstTensor* m_InputToCellWeights;
    const armnn::ConstTensor* m_InputToOutputWeights;

    const armnn::ConstTensor* m_RecurrentToInputWeights;
    const armnn::ConstTensor* m_RecurrentToForgetWeights;
    const armnn::ConstTensor* m_RecurrentToCellWeights;
    const armnn::ConstTensor* m_RecurrentToOutputWeights;

    const armnn::ConstTensor* m_InputGateBias;
    const armnn::ConstTensor* m_ForgetGateBias;
    const armnn::ConstTensor* m_CellBias;
    const armnn::ConstTensor* m_OutputGateBias;
};

}
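For orientation, a minimal PyArmNN sketch of how these parameters might be populated. The shapes, scales and offsets below are hypothetical, and all twelve weight and bias members must be assigned before the struct is passed to `INetwork.AddQuantizedLstmLayer()`; only two are shown.

import numpy as np
import pyarmnn as ann

def _const(shape, np_dtype, ann_dtype, scale, offset):
    # Illustrative helper: wraps a zero-filled numpy array as a constant tensor.
    info = ann.TensorInfo(ann.TensorShape(shape), ann_dtype, scale, offset, True)
    return ann.ConstTensor(info, np.zeros(shape, dtype=np_dtype))

params = ann.QuantizedLstmInputParams()
# Weights are 8-bit quantized, biases 32-bit; the remaining members follow the same pattern.
params.m_InputToInputWeights = _const((4, 2), np.uint8, ann.DataType_QAsymmU8, 0.1, 0)
params.m_InputGateBias = _const((4,), np.int32, ann.DataType_Signed32, 0.01, 0)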
File diff suppressed because it is too large
@ -0,0 +1,82 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%{
#include "armnn/IProfiler.hpp"
%}

namespace armnn
{

%feature("docstring",
    "
    Interface for profiling Arm NN. See `IRuntime.GetProfiler`.

    IProfiler object allows you to enable profiling and get various profiling results.

    ") IProfiler;
%nodefaultctor IProfiler;
%nodefaultdtor IProfiler;
class IProfiler
{
public:

    %feature("docstring",
        "
        Sets the profiler to start/stop profiling.

        Args:
            enableProfiling (bool): Flag to enable/disable profiling.

        ") EnableProfiling;

    void EnableProfiling(bool enableProfiling);

    %feature("docstring",
        "
        Checks if profiling is enabled.

        Returns:
            bool: If profiling is enabled or not.

        ") IsProfilingEnabled;

    bool IsProfilingEnabled();
};

%extend IProfiler {

    %feature("docstring",
        "
        Gets the string value of the profiling events analysis log.

        Returns:
            str: The profiling events analysis log.

        ") event_log;

    std::string event_log()
    {
        std::ostringstream oss;
        $self->AnalyzeEventsAndWriteResults(oss);
        return oss.str();
    }

    %feature("docstring",
        "
        Gets the profiling log as a JSON string.

        Returns:
            str: The profiling log as a JSON-formatted string.

        ") as_json;

    std::string as_json()
    {
        std::ostringstream oss;
        $self->Print(oss);
        return oss.str();
    }
}
}
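A short usage sketch of the profiler interface (assuming `runtime` and `net_id` were obtained via `IRuntime`/`LoadNetwork`, as in the runtime section below; profiling must be enabled before the inferences you want to measure):

import pyarmnn as ann  # assumes an existing `runtime` and a loaded network id `net_id`

profiler = runtime.GetProfiler(net_id)
profiler.EnableProfiling(True)

# ... run inferences with runtime.EnqueueWorkload(...) ...

print(profiler.event_log())     # human-readable analysis of the recorded events
json_text = profiler.as_json()  # the same log serialized as a JSON string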
@ -0,0 +1,316 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%{
#include "armnn/IRuntime.hpp"
#include "armnn/Deprecated.hpp"
#include <iostream>
#include <ostream>
#include <sstream>
%}

namespace std {
    %template() pair<int, string>;
    %template(IntPair) pair<int, int>;
    %template(ConstTensorPair) pair<int, armnn::ConstTensor>;
    %template(TensorPair) pair<int, armnn::Tensor>;

    %template(InputTensorsVector) vector<pair<int, armnn::ConstTensor>>;
    %template(OutputTensorsVector) vector<pair<int, armnn::Tensor>>;
}

%include <std_shared_ptr.i>

%shared_ptr(IGpuAccTunedParameters);

#pragma SWIG nowarn=SWIGWARN_PARSE_NESTED_CLASS

%{
typedef armnn::IRuntime::CreationOptions::ExternalProfilingOptions ExternalProfilingOptions;
%}

struct ExternalProfilingOptions
{
    %feature("docstring",
        "
        Structure for holding ExternalProfiling options.

        Contains:
            m_EnableProfiling (bool): If set, enables profiling in Arm NN.

            m_OutgoingCaptureFile (string): If specified, outgoing external profiling packets will be captured
                                            in this file, in the specified format.

            m_IncomingCaptureFile (string): If specified, incoming external profiling packets will be
                                            captured in this file.

            m_FileOnly (bool): If enabled, the 'file-only' test mode of external profiling will be enabled.

            m_CapturePeriod (uint32_t): If profiling is enabled in 'file-only' mode, this is the
                                        capture period that will be used in the test.

            m_FileFormat (string): If profiling is enabled, this specifies the output file format.

            m_TimelineEnabled (bool): Set if timeline reporting is enabled or not.

        ") ExternalProfilingOptions;

    ExternalProfilingOptions();
    bool m_EnableProfiling;
    std::string m_OutgoingCaptureFile;
    std::string m_IncomingCaptureFile;
    bool m_FileOnly;
    uint32_t m_CapturePeriod;
    std::string m_FileFormat;
    bool m_TimelineEnabled;
};
%{
typedef armnn::IRuntime::CreationOptions CreationOptions;
%}

struct CreationOptions
{
    %feature("docstring",
        "
        Structure for holding creation options. For the majority of cases it is fine to leave values at default.

        Contains:
            m_GpuAccTunedParameters (IGpuAccTunedParameters): If set, uses the GpuAcc tuned parameters from the given object
                                                              when executing GPU workloads. It will also be updated with new
                                                              tuned parameters if it is configured to do so.

            m_EnableGpuProfiling (bool): Setting this flag will allow the user to obtain GPU profiling information from
                                         the runtime.

            m_DynamicBackendsPath (string): Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS
                                            compiler directive. Only a single path is allowed for the override.

            m_ProfilingOptions (ExternalProfilingOptions): Struct to set the profiling options.

        ") CreationOptions;

    CreationOptions();
    std::shared_ptr<armnn::IGpuAccTunedParameters> m_GpuAccTunedParameters;
    bool m_EnableGpuProfiling;
    std::string m_DynamicBackendsPath;
    ExternalProfilingOptions m_ProfilingOptions;
};
%{
typedef armnn::INetworkProperties INetworkProperties;
%}

namespace armnn
{

%nodefaultctor INetworkProperties;
struct INetworkProperties
{
    %feature("docstring",
        "
        Structure for holding network properties.

        Contains:
            m_AsyncEnabled (bool): Enable asynchronous execution of multiple networks.
            m_InputSource (MemorySource): When inputs are imported, this defines the type of the imported memory.
            m_OutputSource (MemorySource): When outputs are imported, this defines the type of the imported memory.
            m_ProfilingEnabled (bool): Enable profiling.
            m_OutputNetworkDetailsMethod (ProfilingDetailsMethod): Customize profiling details.

        ") INetworkProperties;
    INetworkProperties(bool asyncEnabled,
                       MemorySource inputSource,
                       MemorySource outputSource,
                       bool profilingEnabled = false,
                       ProfilingDetailsMethod detailsMethod = ProfilingDetailsMethod::Undefined);

    const bool m_AsyncEnabled;

    const bool m_ProfilingEnabled;

    const ProfilingDetailsMethod m_OutputNetworkDetailsMethod;

    const MemorySource m_InputSource;
    const MemorySource m_OutputSource;
};
%feature("docstring",
    "
    Interface for runtime objects.

    Runtime objects are responsible for performing inference on an `IOptimizedNetwork`.

    Args:
        options (CreationOptions): CreationOptions data struct.

    ") IRuntime;
%nodefaultctor IRuntime;
class IRuntime
{
public:

    %ignore
    armnn::IRuntime::UnloadNetwork(NetworkId networkId);

    %ignore
    armnn::IRuntime::EnqueueWorkload(NetworkId networkId,
                                     const std::vector<std::pair<int, armnn::ConstTensor>>& inputTensors,
                                     const std::vector<std::pair<int, armnn::Tensor>>& outputTensors);

    %feature("docstring",
        "
        Get information relating to a network's input tensor.

        Args:
            networkId (int): Unique ID of the network being run.
            layerId (int): Unique ID of the input layer.

        Returns:
            TensorInfo: Information relating to the input tensor of a network.
        ") GetInputTensorInfo;
    armnn::TensorInfo GetInputTensorInfo(int networkId, int layerId);

    %feature("docstring",
        "
        Get information relating to a network's output tensor.

        Args:
            networkId (int): Unique ID of the network being run.
            layerId (int): Unique ID of the output layer.

        Returns:
            TensorInfo: Information relating to the output tensor of a network.
        ") GetOutputTensorInfo;
    armnn::TensorInfo GetOutputTensorInfo(int networkId, int layerId);

    %feature("docstring",
        "
        Get information relating to the supported compute backends on the current device.

        Returns:
            IDeviceSpec: Device spec information detailing all supported backends on the current platform.
        ") GetDeviceSpec;
    const IDeviceSpec& GetDeviceSpec();
};
%extend IRuntime {
    // Tell Python to disown the IOptimizedNetwork pointer
    // because IRuntime takes ownership.
    %typemap(in) armnn::IOptimizedNetwork* {
        if (!SWIG_IsOK(SWIG_ConvertPtr($input, (void **) &$1, $1_descriptor, SWIG_POINTER_DISOWN))) {
            SWIG_exception_fail(SWIG_TypeError, "in method '$symname', argument 2 of type armnn::IOptimizedNetwork*");
        }
    }

    %feature("docstring",
        "
        Loads a complete network into the IRuntime.
        The runtime takes ownership of the network once passed in.
        Args:
            network (IOptimizedNetwork): An optimized network to load into the IRuntime.
            networkProperties (INetworkProperties): Properties that allow the user to opt in to import/export behavior. Default: None.
        Returns:
            tuple: (int, str) Network id and non-fatal failure or warning messages.
        Raises:
            RuntimeError: If loading the network fails.
        ") LoadNetwork;

    std::pair<int, std::string> LoadNetwork(armnn::IOptimizedNetwork* network,
                                            const INetworkProperties* networkProperties = nullptr)
    {
        armnn::IOptimizedNetworkPtr netPtr(network, &armnn::IOptimizedNetwork::Destroy);
        armnn::NetworkId networkIdOut;
        std::string errorString;
        armnn::Status status;

        if (networkProperties) {
            status = $self->LoadNetwork(networkIdOut, std::move(netPtr), errorString, *networkProperties);
        } else {
            status = $self->LoadNetwork(networkIdOut, std::move(netPtr), errorString);
        }

        if (status == armnn::Status::Failure)
        {
            throw armnn::Exception(errorString);
        }

        auto net_id_int = static_cast<int>(networkIdOut);
        return std::make_pair(net_id_int, errorString);
    };

    %typemap(in) armnn::IOptimizedNetwork*;
    %feature("docstring",
        "
        Calling this function will perform an inference on your network.

        Args:
            networkId (int): Unique ID of the network to run.
            inputTensors (list): A list of tuples (int, `ConstTensor`), see `make_input_tensors`.
            outputTensors (list): A list of tuples (int, `Tensor`), see `make_output_tensors`.

        ") EnqueueWorkload;
    void EnqueueWorkload(int networkId, const std::vector<std::pair<int, armnn::ConstTensor>>& inputTensors,
                         const std::vector<std::pair<int, armnn::Tensor>>& outputTensors) {
        armnn::Status status = $self->EnqueueWorkload(networkId, inputTensors, outputTensors);

        if (status == armnn::Status::Failure)
        {
            throw armnn::Exception("Failed to enqueue workload for network.");
        }
    };

    %feature("docstring",
        "
        Unload a currently loaded network from the runtime.

        Args:
            networkId (int): Unique ID of the network to unload.

        ") UnloadNetwork;
    void UnloadNetwork(int networkId) {
        armnn::Status status = $self->UnloadNetwork(networkId);
        if (status == armnn::Status::Failure)
        {
            throw armnn::Exception("Failed to unload network.");
        }
    };

    %feature("docstring",
        "
        Returns the IProfiler instance registered against the working thread, and stored on the loaded network.
        Be aware that if the runtime has unloaded the network, or if the runtime is destroyed,
        the IProfiler instance will also be destroyed, and using it will cause a segmentation fault.

        Args:
            networkId (int): The ID of the loaded network you want to profile.

        Returns:
            IProfiler: IProfiler instance the given loaded network has stored.

        Raises:
            RuntimeError: If no profiler is found.
        ") GetProfiler;

    armnn::IProfiler* GetProfiler(int networkId) {
        std::shared_ptr<armnn::IProfiler> profiler = $self->GetProfiler(networkId);
        if (nullptr == profiler) {
            throw armnn::Exception("Failed to get profiler");
        }
        return profiler.get();
    };

    ~IRuntime() {
        armnn::IRuntime::Destroy($self);
    }

    IRuntime(const CreationOptions& options) {
        return armnn::IRuntime::CreateRaw(options);
    }

}

}
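To tie the runtime pieces together, a minimal end-to-end sketch on the CpuRef reference backend (layer names and shapes are arbitrary; the calls follow the wrappers above and the public PyArmNN helpers):

import numpy as np
import pyarmnn as ann

# Build a trivial graph: input -> sigmoid activation -> output.
network = ann.INetwork()
in_layer = network.AddInputLayer(0, "input")
act_layer = network.AddActivationLayer(ann.ActivationDescriptor(), "sigmoid")
out_layer = network.AddOutputLayer(0, "output")
in_layer.GetOutputSlot(0).Connect(act_layer.GetInputSlot(0))
act_layer.GetOutputSlot(0).Connect(out_layer.GetInputSlot(0))

info = ann.TensorInfo(ann.TensorShape((1, 4)), ann.DataType_Float32)
in_layer.GetOutputSlot(0).SetTensorInfo(info)
act_layer.GetOutputSlot(0).SetTensorInfo(info)

runtime = ann.IRuntime(ann.CreationOptions())
opt_net, _ = ann.Optimize(network, [ann.BackendId('CpuRef')],
                          runtime.GetDeviceSpec(), ann.OptimizerOptions())
net_id, _ = runtime.LoadNetwork(opt_net)  # (network id, warning messages)

# Inputs must be constant tensors; Tensor(info) allocates writable output memory.
const_info = ann.TensorInfo(ann.TensorShape((1, 4)), ann.DataType_Float32, 0.0, 0, True)
in_data = np.array([[-1.0, 0.0, 0.5, 2.0]], dtype=np.float32)
input_tensors = [(0, ann.ConstTensor(const_info, in_data))]
output_tensors = [(0, ann.Tensor(info))]

runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
print(output_tensors[0][1].get_memory_area())  # sigmoid of the input values

runtime.UnloadNetwork(net_id)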
@ -0,0 +1,355 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%{
#include "armnn/Tensor.hpp"
%}

%include <typemaps/tensor_memory.i>
%include <typemaps/tensor_shape.i>

namespace armnn
{

%feature("docstring",
    "
    Class for holding the shape information of an Arm NN tensor.

    This class is iterable. You can iterate over it to get each value of the Tensor shape.

    Examples:
        Obtain tensor shape information as a list.
        >>> import pyarmnn as ann
        >>> import numpy as np
        >>>
        >>> tensor_info = ann.TensorInfo(ann.TensorShape((4, 2, 1, 3)), ann.DataType_Float32)
        >>> tensor = ann.ConstTensor(tensor_info, np.ones([4, 2, 1, 3], dtype=np.float32))
        >>> print(list(tensor.GetShape()))
        [4, 2, 1, 3]

    ") TensorShape;
class TensorShape
{
    // Make TensorShape iterable so we can return shape dims easily.
    %pythoncode %{
    def __iter__(self):
        for dim in range(self.GetNumDimensions()):
            yield self[dim]
    %}


public:
    %tensor_shape_typemap(unsigned int numDimensions, const unsigned int* dimensionSizes);
    TensorShape(unsigned int numDimensions, const unsigned int* dimensionSizes);
    %clear_tensor_shape_typemap(unsigned int numDimensions, const unsigned int* dimensionSizes);

    %feature("docstring",
        "
        Returns the number of dimensions in this TensorShape.

        Returns:
            int: The number of dimensions in this TensorShape.

        ") GetNumDimensions;
    unsigned int GetNumDimensions() const;

    %feature("docstring",
        "
        Returns the total number of elements for a tensor with this TensorShape.

        Returns:
            int: The total number of elements for a tensor with this TensorShape.

        ") GetNumElements;
    unsigned int GetNumElements() const;

};
%extend TensorShape {

    unsigned int __getitem__(unsigned int i) const {
        return $self->operator[](i);
    }
    void __setitem__(unsigned int i, unsigned int val) {
        $self->operator[](i) = val;
    }

    std::string __str__() {
        std::string dim = "NumDimensions: " + std::to_string($self->GetNumDimensions());
        std::string elm = "NumElements: " + std::to_string($self->GetNumElements());

        std::string shapeStr = "TensorShape{Shape(";

        auto numDimensions = $self->GetNumDimensions();
        for (unsigned int i = 0; i < numDimensions; i++) {
            shapeStr += std::to_string($self->operator[](i));
            // Separate dimensions with a comma, except after the last one.
            if (i + 1 < numDimensions) {
                shapeStr += ", ";
            }
        }
        shapeStr = shapeStr + "), " + dim + ", " + elm + "}";
        return shapeStr;
    }

}
%feature("docstring",
    "
    Class for holding the tensor information of an Arm NN tensor, such as quantization, datatype, shape etc.

    ") TensorInfo;
class TensorInfo
{
public:
    TensorInfo();

    TensorInfo(const TensorInfo& other);

    TensorInfo(const TensorShape& shape, DataType dataType,
               float quantizationScale = 0.0f, int32_t quantizationOffset = 0,
               bool isConstant = false);

    %feature("docstring",
        "
        Get the tensor shape.

        Returns:
            TensorShape: Current shape of the tensor.

        ") GetShape;
    TensorShape& GetShape();

    %feature("docstring",
        "
        Set the tensor shape. Must have the same number of elements as the current tensor.

        Args:
            newShape (TensorShape): New tensor shape to reshape to.

        ") SetShape;
    void SetShape(const TensorShape& newShape);

    %feature("docstring",
        "
        Returns the number of dimensions in this Tensor.

        Returns:
            int: The number of dimensions in this Tensor.

        ") GetNumDimensions;
    unsigned int GetNumDimensions() const;

    %feature("docstring",
        "
        Returns the total number of elements for this Tensor.

        Returns:
            int: The total number of elements for this Tensor.

        ") GetNumElements;
    unsigned int GetNumElements() const;

    %feature("docstring",
        "
        Get the tensor datatype.

        Returns:
            DataType: Current tensor DataType.

        ") GetDataType;
    DataType GetDataType() const;

    %feature("docstring",
        "
        Set the tensor datatype.

        Args:
            type (DataType): DataType to set the tensor to.

        ") SetDataType;
    void SetDataType(DataType type);

    %feature("docstring",
        "
        Get the value of the tensor's quantization scale.

        Returns:
            float: Tensor quantization scale value.

        ") GetQuantizationScale;
    float GetQuantizationScale() const;

    %feature("docstring",
        "
        Get the value of the tensor's quantization offset.

        Returns:
            int: Tensor quantization offset value.

        ") GetQuantizationOffset;
    int32_t GetQuantizationOffset() const;

    %feature("docstring",
        "
        Set the value of the tensor's quantization scale.

        Args:
            scale (float): Scale value to set.

        ") SetQuantizationScale;
    void SetQuantizationScale(float scale);

    %feature("docstring",
        "
        Set the value of the tensor's quantization offset.

        Args:
            offset (int): Offset value to set.

        ") SetQuantizationOffset;
    void SetQuantizationOffset(int32_t offset);

    %feature("docstring",
        "
        Returns true if the tensor is a quantized data type.

        Returns:
            bool: True if the tensor is a quantized data type.

        ") IsQuantized;
    bool IsQuantized() const;

    %feature("docstring",
        "
        Returns true if the tensor info is constant.

        Returns:
            bool: True if the tensor info is constant.

        ") IsConstant;
    bool IsConstant() const;

    %feature("docstring",
        "
        Sets the tensor info to be constant.

        Args:
            IsConstant (bool): Sets tensor info to constant.

        ") SetConstant;
    void SetConstant(const bool IsConstant = true);



    %feature("docstring",
        "
        Check that the types are the same and, if quantized, that the quantization parameters are the same.

        Returns:
            bool: True if matched, else False.

        ") IsTypeSpaceMatch;
    bool IsTypeSpaceMatch(const TensorInfo& other) const;

    %feature("docstring",
        "
        Get the number of bytes needed for this tensor.

        Returns:
            int: Number of bytes consumed by this tensor.

        ") GetNumBytes;
    unsigned int GetNumBytes() const;

};
%extend TensorInfo {

    std::string __str__() {
        const std::string tmp = "TensorInfo{DataType: " + std::to_string(static_cast<int>($self->GetDataType()))
                              + ", IsQuantized: " + std::to_string($self->IsQuantized())
                              + ", QuantizationScale: " + std::to_string($self->GetQuantizationScale())
                              + ", QuantizationOffset: " + std::to_string($self->GetQuantizationOffset())
                              + ", IsConstant: " + std::to_string($self->IsConstant())
                              + ", NumDimensions: " + std::to_string($self->GetNumDimensions())
                              + ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
        return tmp;
    }

}

class Tensor
{
public:
    ~Tensor();
    Tensor();
    Tensor(const Tensor& other);

    %mutable_memory(void* memory);
    Tensor(const TensorInfo& info, void* memory);
    %clear_mutable_memory(void* memory);

    const TensorInfo& GetInfo() const;
    const TensorShape& GetShape() const;

    DataType GetDataType() const;
    unsigned int GetNumDimensions() const;
    unsigned int GetNumBytes() const;
    unsigned int GetNumElements() const;

    /* We want to disable getting the memory area from here - forcing use of get_memory_area() in the public API.
    void* GetMemoryArea() const;*/
};

%extend Tensor {

    std::string __str__() {
        const std::string tmp = "Tensor{DataType: " + std::to_string(static_cast<int>($self->GetDataType()))
                              + ", NumBytes: " + std::to_string($self->GetNumBytes())
                              + ", NumDimensions: " + std::to_string($self->GetNumDimensions())
                              + ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
        return tmp;
    }
}

class ConstTensor
{
public:
    ~ConstTensor();
    ConstTensor();
    ConstTensor(const Tensor& other);
    ConstTensor(const ConstTensor& other);

    %const_memory(const void* memory);
    ConstTensor(const TensorInfo& info, const void* memory);
    %clear_const_memory(const void* memory);

    const TensorInfo& GetInfo() const;
    const TensorShape& GetShape() const;

    DataType GetDataType() const;
    unsigned int GetNumDimensions() const;
    unsigned int GetNumBytes() const;
    unsigned int GetNumElements() const;

    /* We want to disable getting the memory area from here - forcing use of get_memory_area() in the public API.
    void* GetMemoryArea() const;*/
};

%extend ConstTensor {

    std::string __str__() {
        const std::string tmp = "ConstTensor{DataType: " + std::to_string(static_cast<int>($self->GetDataType()))
                              + ", NumBytes: " + std::to_string($self->GetNumBytes())
                              + ", NumDimensions: " + std::to_string($self->GetNumDimensions())
                              + ", NumElements: " + std::to_string($self->GetNumElements()) + "}";
        return tmp;
    }
}

}
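A brief sketch of the wrapped tensor classes (mirroring the Python tests later in this change; note that `ConstTensor` requires a `TensorInfo` flagged as constant):

import numpy as np
import pyarmnn as ann

info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32, 0.0, 0, True)
data = np.ones((2, 3), dtype=np.float32)
const_tensor = ann.ConstTensor(info, data)

print(const_tensor.GetNumElements(), const_tensor.GetNumBytes())  # 6 24
print(list(const_tensor.GetShape()))  # [2, 3] - TensorShape is iterable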
@ -0,0 +1,138 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%{
#include "armnn/Types.hpp"
%}

%include <typemaps/permutation_vector.i>

namespace armnn
{

%feature("docstring",
    "
    Vector used to permute a tensor.

    For a 4-d tensor laid out in memory with the format (Batch Element, Height, Width, Channels),
    which is to be passed as an input to Arm NN, each source dimension is mapped to the corresponding
    Arm NN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
    to the location of the Arm NN Height dimension (1 -> 2). Similar arguments are made for the Width and
    Channels (2 -> 3 and 3 -> 1). This will lead to m_DimMappings pointing to the following array:
    [ 0, 2, 3, 1 ].

    Note that the mapping should be reversed if considering the case of Arm NN 4-d outputs (Batch Element,
    Channels, Height, Width) being written to a destination with the format mentioned above. We now have
    0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, leads to the following m_DimMappings contents:
    [ 0, 3, 1, 2 ].

    Args:
        dimMappings (list): Indicates how to translate tensor elements from a given source into the target destination,
                            when source and target potentially have different memory layouts.
    ") PermutationVector;

class PermutationVector
{
public:
    using ValueType = unsigned int;
    using SizeType = unsigned int;

    %permutation_vector_typemap(const ValueType *dimMappings, SizeType numDimMappings);
    PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);
    %clear_permutation_vector_typemap(const ValueType *dimMappings, SizeType numDimMappings);


    %feature("docstring",
        "
        Get the PermutationVector size.

        Returns:
            SizeType: Current size of the PermutationVector.

        ") GetSize;
    SizeType GetSize();

    %feature("docstring",
        "
        Checks if a specified permutation vector is its inverse.

        Returns:
            bool: True if the specified permutation vector is its inverse.

        ") IsInverse;
    bool IsInverse(const PermutationVector& other);
};
%extend PermutationVector {

    unsigned int __getitem__(unsigned int i) const {
        return $self->operator[](i);
    }

    bool __eq__(PermutationVector other) {
        int size = $self->GetSize();
        int otherSize = other.GetSize();
        if (size != otherSize)
        {
            return false;
        }
        for (int i = 0; i < size; ++i) {
            if ($self->operator[](i) != other[i])
            {
                return false;
            }
        }
        // Only equal once every element has been compared.
        return true;
    }
}

}
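Following the NHWC example in the docstring above, a small usage sketch of the wrapped class:

import pyarmnn as ann

nhwc_to_armnn = ann.PermutationVector((0, 2, 3, 1))  # mapping from the docstring
armnn_to_nhwc = ann.PermutationVector((0, 3, 1, 2))  # the reverse mapping

print(nhwc_to_armnn.GetSize())                 # 4
print(nhwc_to_armnn.IsInverse(armnn_to_nhwc))  # True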
%feature("docstring",
    "
    Interface for device specifications. Main use is to get information relating to what compute capability the device being used has.
    ") IDeviceSpec;


%feature("docstring",
    "
    Returns the backends supported by this compute device.

    Returns:
        set: This device's supported backends.

    ") GetSupportedBackends;

%ignore PermutationVector;
#define ARMNN_DEPRECATED_ENUM // SWIG does not support C++ attributes, need this to help generate from Deprecated.hpp.
#define ARMNN_DEPRECATED_ENUM_MSG(message) // SWIG does not support C++ attributes, need this to help generate from Deprecated.hpp.
%include "armnn/Types.hpp"



%extend armnn::IDeviceSpec {


    std::string __str__() {

        std::string deviceStr = "IDeviceSpec { supportedBackends: [";

        auto bends = $self->GetSupportedBackends();
        auto sizeBends = bends.size();
        for (std::unordered_set<armnn::BackendId>::const_iterator p = bends.begin(); p != bends.end(); ++p) {

            deviceStr += p->Get();

            // Separate backend ids with a comma, except after the last one.
            if (sizeBends - 1 > 0) {
                deviceStr += ", ";
            }
            sizeBends--;

        }
        deviceStr = deviceStr + "]}";

        return deviceStr;
    }

}
@ -0,0 +1,28 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%{
#include "armnn/TypesUtils.hpp"
%}

namespace armnn
{
constexpr unsigned int GetDataTypeSize(DataType dataType);

constexpr const char* GetDataTypeName(DataType dataType);

template<typename QuantizedType>
QuantizedType Quantize(float value, float scale, int32_t offset);
%template(Quantize_uint8_t) Quantize<uint8_t>;
%template(Quantize_int8_t) Quantize<int8_t>;
%template(Quantize_int16_t) Quantize<int16_t>;
%template(Quantize_int32_t) Quantize<int32_t>;

template <typename QuantizedType>
float Dequantize(QuantizedType value, float scale, int32_t offset);
%template(Dequantize_uint8_t) Dequantize<uint8_t>;
%template(Dequantize_int8_t) Dequantize<int8_t>;
%template(Dequantize_int16_t) Dequantize<int16_t>;
%template(Dequantize_int32_t) Dequantize<int32_t>;
}
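A short sketch of the quantization helpers generated by the %template directives above (values chosen for illustration):

import pyarmnn as ann

scale, offset = 0.1, 128
q = ann.Quantize_uint8_t(1.0, scale, offset)   # round(1.0 / 0.1) + 128 == 138
f = ann.Dequantize_uint8_t(q, scale, offset)   # (138 - 128) * 0.1 == ~1.0
print(q, f)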
@ -0,0 +1,53 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%include "stl.i"
%include "cstring.i"
%include "std_string.i"
%include "std_vector.i"
%include "std_unordered_set.i"
%include "std_pair.i"
%include "stdint.i"
%include "carrays.i"
%include "exception.i"
%include "typemaps.i"
%include "std_iostream.i"

%ignore *::operator=;
%ignore *::operator[];


// Define exception typemaps to wrap armnn exceptions into Python exceptions.

%exception {
    try {
        $action
    } catch (const armnn::Exception& e) {
        SWIG_exception(SWIG_RuntimeError, const_cast<char*>(e.what()));
    }
};

%exception __getitem__ {
    try {
        $action
    } catch (const armnn::InvalidArgumentException &e) {
        SWIG_exception(SWIG_ValueError, const_cast<char*>(e.what()));
    } catch (const std::out_of_range &e) {
        SWIG_exception(SWIG_IndexError, const_cast<char*>(e.what()));
    } catch (const std::exception &e) {
        SWIG_exception(SWIG_RuntimeError, const_cast<char*>(e.what()));
    }
};

%exception __setitem__ {
    try {
        $action
    } catch (const armnn::InvalidArgumentException &e) {
        SWIG_exception(SWIG_ValueError, const_cast<char*>(e.what()));
    } catch (const std::out_of_range &e) {
        SWIG_exception(SWIG_IndexError, const_cast<char*>(e.what()));
    } catch (const std::exception &e) {
        SWIG_exception(SWIG_RuntimeError, const_cast<char*>(e.what()));
    }
};
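In practice these typemaps mean Arm NN failures surface as ordinary Python exceptions, e.g.:

import pyarmnn as ann

runtime = ann.IRuntime(ann.CreationOptions())
try:
    runtime.UnloadNetwork(999)  # no such network: armnn::Exception -> RuntimeError
except RuntimeError as e:
    print("caught:", e)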
@ -0,0 +1,55 @@
//
// Copyright © 2021 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%inline %{

static PyObject* from_model_options_to_python(std::vector<armnn::BackendOptions>* input) {
    Py_ssize_t size = input->size();
    PyObject* localList = PyList_New(size);

    if (!localList) {
        Py_XDECREF(localList);
        return PyErr_NoMemory();
    }

    for (Py_ssize_t i = 0; i < size; ++i) {

        PyObject* obj = SWIG_NewPointerObj(SWIG_as_voidptr(&input->at(i)), SWIGTYPE_p_armnn__BackendOptions, 0 | 0 );

        PyList_SET_ITEM(localList, i, obj);
    }
    return localList;
}
%}

%define %model_options_typemap

// This typemap works for struct argument get.

%typemap(out) std::vector<armnn::BackendOptions>* {
    $result = from_model_options_to_python($1);
}

// This typemap works for struct argument set.
%typemap(in) std::vector<armnn::BackendOptions>* {
    if (PySequence_Check($input)) {

        int res = swig::asptr($input, &$1);
        if (!SWIG_IsOK(res) || !$1) {
            SWIG_exception_fail(SWIG_ArgError(($1 ? res : SWIG_TypeError)),
                                "in method '" "OptimizerOptions_m_ModelOptions_set" "', argument " "2"" of type '" "std::vector< armnn::BackendOptions,std::allocator< armnn::BackendOptions > > *""'");
        }

    } else {
        PyErr_SetString(PyExc_TypeError, "Argument value object does not provide sequence protocol.");
        SWIG_fail;
    }
}

%enddef

%define %model_options_clear
%typemap(out) std::vector<armnn::BackendOptions>*;
%typemap(in) std::vector<armnn::BackendOptions>*;
%enddef
@ -0,0 +1,41 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%define %optimize_typemap_out
%typemap(out) (std::pair<armnn::IOptimizedNetwork*, std::vector<std::string>>) {
    PyObject* network = SWIG_NewPointerObj(SWIG_as_voidptr($1.first), SWIGTYPE_p_armnn__IOptimizedNetwork, SWIG_POINTER_OWN);
    $result = PyTuple_New(2);

    // Convert vector to fixed-size tuple.
    std::vector<std::string> strings = $1.second;
    Py_ssize_t size = strings.size();

    // New reference. Need to Py_DECREF.
    PyObject* errMsgTuple = PyTuple_New(size);

    if (!errMsgTuple) {
        Py_XDECREF(errMsgTuple);
        return PyErr_NoMemory();
    }

    for (Py_ssize_t i = 0; i < size; i++) {
        // New reference. Need to Py_DECREF.
        PyObject* string = PyString_FromString(strings[i].c_str());

        if (!string) {
            Py_XDECREF(string);
            return PyErr_NoMemory();
        }
        PyTuple_SetItem(errMsgTuple, i, string);
    }

    // Create result tuple.
    PyTuple_SetItem($result, 0, network);
    PyTuple_SetItem($result, 1, errMsgTuple);
}
%enddef

%define %clear_optimize_typemap_out
%typemap(out) (std::pair<armnn::IOptimizedNetwork*, std::vector<std::string>>)
%enddef
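On the Python side, the effect is that `Optimize` returns a plain tuple (a sketch, assuming `network` is a fully built `INetwork` as in the runtime example earlier):

import pyarmnn as ann

runtime = ann.IRuntime(ann.CreationOptions())
opt_net, messages = ann.Optimize(network, [ann.BackendId('CpuRef')],
                                 runtime.GetDeviceSpec(), ann.OptimizerOptions())
print(messages)  # tuple of warning strings gathered during optimization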
@ -0,0 +1,52 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%define %permutation_vector_typemap(TYPE1, TYPE2)
%typemap(in) (TYPE1, TYPE2) {
    if (PyTuple_Check($input)) {
        PyObject* seq = $input;

        $2 = PySequence_Fast_GET_SIZE(seq);
        $1 = (unsigned int*)PyMem_RawMalloc($2*sizeof(unsigned int));


        if (!$1) {
            PyErr_NoMemory();
            SWIG_fail;
        }
        int size = (int)$2;
        for (int i = 0; i < size; i++) {
            PyObject* longItem;
            // Borrowed reference. No need to Py_DECREF.
            PyObject* item = PySequence_Fast_GET_ITEM(seq, i);
            if (!item) {
                PyErr_SetString(PyExc_TypeError, "Failed to read data from tuple");
                SWIG_fail;
            }
            // New reference. Need to Py_DECREF.
            longItem = PyNumber_Long(item);
            if (!longItem) {
                Py_XDECREF(longItem);
                PyErr_SetString(PyExc_TypeError, "All elements must be numbers");
                SWIG_fail;
            }
            $1[i] = (unsigned int)PyLong_AsUnsignedLong(longItem);
            Py_XDECREF(longItem);
        }

    } else {
        PyErr_SetString(PyExc_TypeError, "Argument is not a tuple");
        SWIG_fail;
    }
}

%typemap(freearg) (TYPE1, TYPE2) {
    PyMem_RawFree($1);
}
%enddef

%define %clear_permutation_vector_typemap(TYPE1, TYPE2)
%typemap(in) (TYPE1, TYPE2);
%typemap(freearg) (TYPE1, TYPE2);
%enddef
@ -0,0 +1,52 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%define %mutable_memory(TYPEMAP)
%typemap(in) (TYPEMAP) {
    int res; void* buf = 0;
    Py_buffer view;
    res = PyObject_GetBuffer($input, &view, PyBUF_WRITABLE);
    buf = view.buf;
    PyBuffer_Release(&view);
    if (res < 0) {
        PyErr_Clear();
        %argument_fail(res, "(TYPEMAP)", $symname, $argnum);
    }
    $1 = buf;
}

%typemap(typecheck) (TYPEMAP) {
    $1 = PyObject_CheckBuffer($input) || PyTuple_Check($input) ? 1 : 0;
}
%enddef

%define %clear_mutable_memory(TYPEMAP)
%typemap(in) (TYPEMAP);
%typemap(typecheck) (TYPEMAP);
%enddef

%define %const_memory(TYPEMAP)
%typemap(in) (TYPEMAP) {
    int res; void* buf = 0;
    Py_buffer view;
    res = PyObject_GetBuffer($input, &view, PyBUF_CONTIG_RO);
    buf = view.buf;
    PyBuffer_Release(&view);
    if (res < 0) {
        PyErr_Clear();
        %argument_fail(res, "(TYPEMAP)", $symname, $argnum);
    }
    $1 = buf;
}

%typemap(typecheck) (TYPEMAP) {
    $1 = PyObject_CheckBuffer($input) || PyTuple_Check($input) ? 1 : 0;
}
%enddef

%define %clear_const_memory(TYPEMAP)
%typemap(in) (TYPEMAP);
%typemap(typecheck) (TYPEMAP);
%enddef
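These typemaps are what let any buffer-protocol object (most commonly a numpy array) back a tensor's memory, e.g.:

import numpy as np
import pyarmnn as ann

info = ann.TensorInfo(ann.TensorShape((2, 2)), ann.DataType_Float32)
buf = np.zeros((2, 2), dtype=np.float32)  # writable buffer for %mutable_memory
tensor = ann.Tensor(info, buf)            # the tensor reads/writes `buf` directly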
@ -0,0 +1,51 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%define %tensor_shape_typemap(TYPE1, TYPE2)
%typemap(in) (TYPE1, TYPE2) {
    if (PyTuple_Check($input)) {
        PyObject* seq = $input;

        $1 = PySequence_Fast_GET_SIZE(seq);
        $2 = (unsigned int*)PyMem_RawMalloc($1*sizeof(unsigned int));

        if (!$2) {
            PyErr_NoMemory();
            SWIG_fail;
        }
        int size = (int)$1;
        for (int i = 0; i < size; i++) {
            PyObject* longItem;
            // Borrowed reference. No need to Py_DECREF.
            PyObject* item = PySequence_Fast_GET_ITEM(seq, i);
            if (!item) {
                PyErr_SetString(PyExc_TypeError, "Failed to read data from tuple");
                SWIG_fail;
            }
            // New reference. Need to Py_DECREF.
            longItem = PyNumber_Long(item);
            if (!longItem) {
                Py_XDECREF(longItem);
                PyErr_SetString(PyExc_TypeError, "All elements must be numbers");
                SWIG_fail;
            }
            $2[i] = (unsigned int)PyLong_AsUnsignedLong(longItem);
            Py_XDECREF(longItem);
        }

    } else {
        PyErr_SetString(PyExc_TypeError, "Argument is not a tuple");
        SWIG_fail;
    }
}

%typemap(freearg) (TYPE1, TYPE2) {
    PyMem_RawFree($2);
}
%enddef

%define %clear_tensor_shape_typemap(TYPE1, TYPE2)
%typemap(in) (TYPE1, TYPE2);
%typemap(freearg) (TYPE1, TYPE2);
%enddef
@ -0,0 +1,235 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
%inline %{
//-------------------------from_python_to_cpp-----------------------------
int from_python_to_cpp(PyObject *obj, long* val) {
    return SWIG_AsVal_long(obj, val);
}

int from_python_to_cpp(PyObject *obj, int* val) {
    return SWIG_AsVal_int(obj, val);
}

int from_python_to_cpp(PyObject *obj, unsigned int* val) {
    return SWIG_AsVal_unsigned_SS_int(obj, val);
}

int from_python_to_cpp(PyObject *obj, unsigned short* val) {
    return SWIG_AsVal_unsigned_SS_short(obj, val);
}

int from_python_to_cpp(PyObject *obj, float* val) {
    return SWIG_AsVal_float(obj, val);
}

int from_python_to_cpp(PyObject *obj, double* val) {
    return SWIG_AsVal_double(obj, val);
}
#ifdef SWIG_LONG_LONG_AVAILABLE
int from_python_to_cpp(PyObject *obj, unsigned long long* val) {
    return SWIG_AsVal_unsigned_SS_long_SS_long(obj, val);
}

int from_python_to_cpp(PyObject *obj, long long* val) {
    return SWIG_AsVal_long_SS_long(obj, val);
}
#endif

int from_python_to_cpp(PyObject *obj, unsigned long* val) {
    return SWIG_AsVal_unsigned_SS_long(obj, val);
}

int from_python_to_cpp(PyObject *obj, short* val) {
    return SWIG_AsVal_short(obj, val);
}
//-------------------------from_cpp_to_python-----------------------------
PyObject* from_cpp_to_python(long& val){
    return PyLong_FromLong(val);
}

PyObject* from_cpp_to_python(unsigned long& val){
    return PyLong_FromUnsignedLong(val);
}
#ifdef SWIG_LONG_LONG_AVAILABLE
PyObject* from_cpp_to_python(long long& val){
    return PyLong_FromLongLong(val);
}

PyObject* from_cpp_to_python(unsigned long long& val){
    return PyLong_FromUnsignedLongLong(val);
}
#endif

PyObject* from_cpp_to_python(int& val){
    return PyLong_FromLong(static_cast<long>(val));
}

PyObject* from_cpp_to_python(unsigned int& val){
    return PyLong_FromUnsignedLong(static_cast<unsigned long>(val));
}

PyObject* from_cpp_to_python(unsigned short& val){
    return PyLong_FromUnsignedLong(static_cast<unsigned long>(val));
}

PyObject* from_cpp_to_python(float& val){
    return PyFloat_FromDouble(static_cast<double>(val));
}

PyObject* from_cpp_to_python(double& val){
    return PyFloat_FromDouble(val);
}

template<class U, class V>
PyObject* from_cpp_to_python(std::pair<U, V>& pair){

    PyObject* first = from_cpp_to_python(pair.first);
    PyObject* second = from_cpp_to_python(pair.second);

    PyObject* localTuple = PyTuple_New(2);

    if (!localTuple) {
        Py_XDECREF(localTuple);
        return PyErr_NoMemory();
    }

    PyTuple_SetItem(localTuple, 0, first);
    PyTuple_SetItem(localTuple, 1, second);

    return localTuple;
}

template<class K, class V>
static int from_python_to_cpp(PyObject* tuple, std::pair<K,V>* out) {

    if (PyTuple_Check(tuple)) {

        auto size = PyTuple_Size(tuple);

        if (size != 2) {
            return SWIG_ValueError;
        }

        PyObject* firstPy = PyTuple_GetItem(tuple, 0);
        PyObject* secondPy = PyTuple_GetItem(tuple, 1);

        if (!SWIG_IsOK(from_python_to_cpp(firstPy, &out->first))) {
            return SWIG_TypeError;
        }

        if (!SWIG_IsOK(from_python_to_cpp(secondPy, &out->second))) {
            return SWIG_TypeError;
        }

    } else {
        return SWIG_TypeError;
    }

    return SWIG_OK;
}
//---------------std::vector <-> python list ---------------------
template<class T>
static PyObject* from_vector_to_python(std::vector<T>* input) {
    Py_ssize_t size = input->size();
    PyObject* localList = PyList_New(size);

    if (!localList) {
        Py_XDECREF(localList);
        return PyErr_NoMemory();
    }

    for (Py_ssize_t i = 0; i < size; ++i) {

        PyObject* obj = from_cpp_to_python(input->at(i));

        PyList_SET_ITEM(localList, i, obj);
    }
    return localList;
}

template<class T>
int from_python_to_vector(PyObject* seq, std::vector<T>& out) {
    Py_ssize_t size = PySequence_Fast_GET_SIZE(seq);

    for (Py_ssize_t i = 0; i < size; i++) {
        PyObject* item = PySequence_Fast_GET_ITEM(seq, i);
        if (!item) {
            PyErr_SetString(PyExc_TypeError, "Failed to read data from given sequence");

            return SWIG_NullReferenceError;
        }

        T element;
        int res = from_python_to_cpp(item, &element);
        if (!SWIG_IsOK(res)) {
            PyObject* itemRepr = PyObject_Repr(item);
            PyObject* itemStrObj = PyUnicode_AsEncodedString(itemRepr, "utf-8", "replace");
            const char* itemStr = PyBytes_AS_STRING(itemStrObj);

            auto pythonType = Py_TYPE(item)->tp_name;

            PyErr_Format(PyExc_TypeError, "Failed to convert python input value %s of type '%s' to C type '%s'", itemStr, pythonType, typeid(T).name());
            Py_XDECREF(itemStrObj);
            Py_XDECREF(itemRepr);
            Py_DECREF(seq);
            return SWIG_TypeError;
        }
        out.push_back(element);
    }
    return SWIG_OK;
}

%}

%define %list_to_vector(TYPEMAP...)

// This typemap works for struct argument set.
%typemap(in) TYPEMAP* (TYPEMAP tmp) {
    if (PySequence_Check($input)) {

        if (from_python_to_vector($input, tmp) < 0) {
            SWIG_fail;
        }

        $1 = &tmp;

    } else {
        PyErr_SetString(PyExc_TypeError, "Argument value object does not provide sequence protocol, implement __getitem__() method.");
        SWIG_fail;
    }
}

// This typemap works for constructors.
%typemap(in) TYPEMAP {
    if (PySequence_Check($input)) {
        if (from_python_to_vector($input, $1) < 0) {
            SWIG_fail;
        }
    } else {
        PyErr_SetString(PyExc_TypeError, "Argument value object does not provide sequence protocol, implement __getitem__() method.");
        SWIG_fail;
    }
}

// This typemap works for struct argument get.

%typemap(out) TYPEMAP* {
    $result = from_vector_to_python($1);
}

// This typemap works for overloaded methods and ctors.
%typemap(typecheck) (TYPEMAP) {
    $1 = PySequence_Check($input) ? 1 : 0;
}

%enddef

%define %list_to_vector_clear(TYPEMAP...)
%typemap(in) (TYPEMAP);
%typemap(in) TYPEMAP* (TYPEMAP tmp);
%typemap(typecheck) (TYPEMAP);
%typemap(out) TYPEMAP*;
%enddef
275 arch/arm/ARMnn/python/pyarmnn/test/test_const_tensor.py Normal file
@ -0,0 +1,275 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import pytest
import numpy as np

import pyarmnn as ann


def _get_const_tensor_info(dt):
    tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), dt, 0.0, 0, True)

    return tensor_info


@pytest.mark.parametrize("dt, data",
                         [
                             (ann.DataType_Float32, np.random.randint(1, size=(2, 4)).astype(np.float32)),
                             (ann.DataType_Float16, np.random.randint(1, size=(2, 4)).astype(np.float16)),
                             (ann.DataType_QAsymmU8, np.random.randint(1, size=(2, 4)).astype(np.uint8)),
                             (ann.DataType_QAsymmS8, np.random.randint(1, size=(2, 4)).astype(np.int8)),
                             (ann.DataType_QSymmS8, np.random.randint(1, size=(2, 4)).astype(np.int8)),
                             (ann.DataType_Signed32, np.random.randint(1, size=(2, 4)).astype(np.int32)),
                             (ann.DataType_QSymmS16, np.random.randint(1, size=(2, 4)).astype(np.int16))
                         ], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
def test_const_tensor_too_many_elements(dt, data):
    tensor_info = _get_const_tensor_info(dt)
    num_bytes = tensor_info.GetNumBytes()

    with pytest.raises(ValueError) as err:
        ann.ConstTensor(tensor_info, data)

    assert 'ConstTensor requires {} bytes, {} provided.'.format(num_bytes, data.nbytes) in str(err.value)
@pytest.mark.parametrize("dt, data",
                         [
                             (ann.DataType_Float32, np.random.randint(1, size=(2, 2)).astype(np.float32)),
                             (ann.DataType_Float16, np.random.randint(1, size=(2, 2)).astype(np.float16)),
                             (ann.DataType_QAsymmU8, np.random.randint(1, size=(2, 2)).astype(np.uint8)),
                             (ann.DataType_QAsymmS8, np.random.randint(1, size=(2, 2)).astype(np.int8)),
                             (ann.DataType_QSymmS8, np.random.randint(1, size=(2, 2)).astype(np.int8)),
                             (ann.DataType_Signed32, np.random.randint(1, size=(2, 2)).astype(np.int32)),
                             (ann.DataType_QSymmS16, np.random.randint(1, size=(2, 2)).astype(np.int16))
                         ], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
def test_const_tensor_too_little_elements(dt, data):
    tensor_info = _get_const_tensor_info(dt)
    num_bytes = tensor_info.GetNumBytes()

    with pytest.raises(ValueError) as err:
        ann.ConstTensor(tensor_info, data)

    assert 'ConstTensor requires {} bytes, {} provided.'.format(num_bytes, data.nbytes) in str(err.value)


@pytest.mark.parametrize("dt, data",
                         [
                             (ann.DataType_Float32, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.float32)),
                             (ann.DataType_Float16, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.float16)),
                             (ann.DataType_QAsymmU8, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.uint8)),
                             (ann.DataType_QAsymmS8, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.int8)),
                             (ann.DataType_QSymmS8, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.int8)),
                             (ann.DataType_Signed32, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.int32)),
                             (ann.DataType_QSymmS16, np.random.randint(1, size=(2, 2, 3, 3)).astype(np.int16))
                         ], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
def test_const_tensor_multi_dimensional_input(dt, data):
    tensor = ann.ConstTensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt, 0.0, 0, True), data)

    assert data.size == tensor.GetNumElements()
    assert data.nbytes == tensor.GetNumBytes()
    assert dt == tensor.GetDataType()
    assert tensor.get_memory_area().data


def test_create_const_tensor_from_tensor():
    tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32, 0.0, 0, True)
    tensor = ann.Tensor(tensor_info)
    copied_tensor = ann.ConstTensor(tensor)

    assert copied_tensor != tensor, "Different objects"
    assert copied_tensor.GetInfo() != tensor.GetInfo(), "Different objects"
    assert copied_tensor.get_memory_area().ctypes.data == tensor.get_memory_area().ctypes.data, "Same memory area"
    assert copied_tensor.GetNumElements() == tensor.GetNumElements()
    assert copied_tensor.GetNumBytes() == tensor.GetNumBytes()
    assert copied_tensor.GetDataType() == tensor.GetDataType()
def test_const_tensor_from_tensor_has_memory_area_access_after_deletion_of_original_tensor():
    tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32, 0.0, 0, True)
    tensor = ann.Tensor(tensor_info)

    tensor.get_memory_area()[0] = 100

    copied_mem = tensor.get_memory_area().copy()

    assert 100 == copied_mem[0], "Memory was copied correctly"

    copied_tensor = ann.ConstTensor(tensor)

    tensor.get_memory_area()[0] = 200

    assert 200 == tensor.get_memory_area()[0], "Tensor and copied Tensor point to the same memory"
    assert 200 == copied_tensor.get_memory_area()[0], "Tensor and copied Tensor point to the same memory"

    assert 100 == copied_mem[0], "Copied test memory not affected"

    copied_mem[0] = 200  # modify test memory to equal copied Tensor

    del tensor
    np.testing.assert_array_equal(copied_tensor.get_memory_area(), copied_mem,
                                  "After initial tensor was deleted, copied Tensor still has its memory as expected")


def test_create_const_tensor_incorrect_args():
    with pytest.raises(ValueError) as err:
        ann.ConstTensor('something', 'something')

    expected_error_message = "Incorrect number of arguments or type of arguments provided to create Const Tensor."
    assert expected_error_message in str(err.value)
@pytest.mark.parametrize("dt, data",
                         [
                             # -1 not in data type enum
                             (-1, np.random.randint(1, size=(2, 3)).astype(np.float32)),
                         ], ids=['unknown'])
def test_const_tensor_unsupported_datatype(dt, data):
    tensor_info = _get_const_tensor_info(dt)

    with pytest.raises(ValueError) as err:
        ann.ConstTensor(tensor_info, data)

    assert 'The data type provided for this Tensor is not supported: -1' in str(err.value)


@pytest.mark.parametrize("dt, data",
                         [
                             (ann.DataType_Float32, [[1, 1, 1], [1, 1, 1]]),
                             (ann.DataType_Float16, [[1, 1, 1], [1, 1, 1]]),
                             (ann.DataType_QAsymmU8, [[1, 1, 1], [1, 1, 1]]),
                             (ann.DataType_QAsymmS8, [[1, 1, 1], [1, 1, 1]]),
                             (ann.DataType_QSymmS8, [[1, 1, 1], [1, 1, 1]])
                         ], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8'])
def test_const_tensor_incorrect_input_datatype(dt, data):
    tensor_info = _get_const_tensor_info(dt)

    with pytest.raises(TypeError) as err:
        ann.ConstTensor(tensor_info, data)

    assert 'Data must be provided as a numpy array.' in str(err.value)
@pytest.mark.parametrize("dt, data",
                         [
                             (ann.DataType_Float32, np.random.randint(1, size=(2, 3)).astype(np.float32)),
                             (ann.DataType_Float16, np.random.randint(1, size=(2, 3)).astype(np.float16)),
                             (ann.DataType_QAsymmU8, np.random.randint(1, size=(2, 3)).astype(np.uint8)),
                             (ann.DataType_QAsymmS8, np.random.randint(1, size=(2, 3)).astype(np.int8)),
                             (ann.DataType_QSymmS8, np.random.randint(1, size=(2, 3)).astype(np.int8)),
                             (ann.DataType_Signed32, np.random.randint(1, size=(2, 3)).astype(np.int32)),
                             (ann.DataType_QSymmS16, np.random.randint(1, size=(2, 3)).astype(np.int16))
                         ], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
class TestNumpyDataTypes:

    def test_copy_const_tensor(self, dt, data):
        tensor_info = _get_const_tensor_info(dt)
        tensor = ann.ConstTensor(tensor_info, data)
        copied_tensor = ann.ConstTensor(tensor)

        assert copied_tensor != tensor, "Different objects"
        assert copied_tensor.GetInfo() != tensor.GetInfo(), "Different objects"
        assert copied_tensor.get_memory_area().ctypes.data == tensor.get_memory_area().ctypes.data, "Same memory area"
        assert copied_tensor.GetNumElements() == tensor.GetNumElements()
        assert copied_tensor.GetNumBytes() == tensor.GetNumBytes()
        assert copied_tensor.GetDataType() == tensor.GetDataType()

    def test_const_tensor__str__(self, dt, data):
        tensor_info = _get_const_tensor_info(dt)
        d_type = tensor_info.GetDataType()
        num_dimensions = tensor_info.GetNumDimensions()
        num_bytes = tensor_info.GetNumBytes()
        num_elements = tensor_info.GetNumElements()
        tensor = ann.ConstTensor(tensor_info, data)

        assert str(tensor) == "ConstTensor{{DataType: {}, NumBytes: {}, NumDimensions: " \
                              "{}, NumElements: {}}}".format(d_type, num_bytes, num_dimensions, num_elements)

    def test_const_tensor_with_info(self, dt, data):
        tensor_info = _get_const_tensor_info(dt)
        elements = tensor_info.GetNumElements()
        num_bytes = tensor_info.GetNumBytes()
        d_type = dt

        tensor = ann.ConstTensor(tensor_info, data)

        assert tensor_info != tensor.GetInfo(), "Different objects"
        assert elements == tensor.GetNumElements()
        assert num_bytes == tensor.GetNumBytes()
        assert d_type == tensor.GetDataType()

    def test_immutable_memory(self, dt, data):
        tensor_info = _get_const_tensor_info(dt)

        tensor = ann.ConstTensor(tensor_info, data)

        with pytest.raises(ValueError) as err:
            tensor.get_memory_area()[0] = 0

        assert 'is read-only' in str(err.value)

    def test_numpy_dtype_matches_ann_dtype(self, dt, data):
        np_data_type_mapping = {ann.DataType_QAsymmU8: np.uint8,
                                ann.DataType_QAsymmS8: np.int8,
                                ann.DataType_QSymmS8: np.int8,
                                ann.DataType_Float32: np.float32,
                                ann.DataType_QSymmS16: np.int16,
                                ann.DataType_Signed32: np.int32,
                                ann.DataType_Float16: np.float16}

        tensor_info = _get_const_tensor_info(dt)
        tensor = ann.ConstTensor(tensor_info, data)
        assert np_data_type_mapping[tensor.GetDataType()] == data.dtype
# This test checks that mismatched numpy and PyArmNN datatypes with same number of bits raises correct error.
|
||||
@pytest.mark.parametrize("dt, data",
|
||||
[
|
||||
(ann.DataType_Float32, np.random.randint(1, size=(2, 3)).astype(np.int32)),
|
||||
(ann.DataType_Float16, np.random.randint(1, size=(2, 3)).astype(np.int16)),
|
||||
(ann.DataType_QAsymmU8, np.random.randint(1, size=(2, 3)).astype(np.int8)),
|
||||
(ann.DataType_QAsymmS8, np.random.randint(1, size=(2, 3)).astype(np.uint8)),
|
||||
(ann.DataType_QSymmS8, np.random.randint(1, size=(2, 3)).astype(np.uint8)),
|
||||
(ann.DataType_Signed32, np.random.randint(1, size=(2, 3)).astype(np.float32)),
|
||||
(ann.DataType_QSymmS16, np.random.randint(1, size=(2, 3)).astype(np.float16))
|
||||
], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
|
||||
def test_numpy_dtype_mismatch_ann_dtype(dt, data):
|
||||
np_data_type_mapping = {ann.DataType_QAsymmU8: np.uint8,
|
||||
ann.DataType_QAsymmS8: np.int8,
|
||||
ann.DataType_QSymmS8: np.int8,
|
||||
ann.DataType_Float32: np.float32,
|
||||
ann.DataType_QSymmS16: np.int16,
|
||||
ann.DataType_Signed32: np.int32,
|
||||
ann.DataType_Float16: np.float16}
|
||||
|
||||
tensor_info = _get_const_tensor_info(dt)
|
||||
with pytest.raises(TypeError) as err:
|
||||
ann.ConstTensor(tensor_info, data)
|
||||
|
||||
assert str(err.value) == "Expected data to have type {} for type {} but instead got numpy.{}".format(
|
||||
np_data_type_mapping[dt], dt, data.dtype)
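
# GetDataType() returns the generated DataType enum value; the mapping above
# pins each enum entry to the one numpy dtype the binding accepts for it, so a
# width-compatible but differently-typed array is still rejected.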


@pytest.mark.parametrize("dt, data",
                         [
                             (ann.DataType_Float32, np.random.randint(1, size=(2, 3)).astype(np.float32)),
                             (ann.DataType_Float16, np.random.randint(1, size=(2, 3)).astype(np.float16)),
                             (ann.DataType_QAsymmU8, np.random.randint(1, size=(2, 3)).astype(np.uint8)),
                             (ann.DataType_QAsymmS8, np.random.randint(1, size=(2, 3)).astype(np.int8)),
                             (ann.DataType_QSymmS8, np.random.randint(1, size=(2, 3)).astype(np.int8)),
                             (ann.DataType_Signed32, np.random.randint(1, size=(2, 3)).astype(np.int32)),
                             (ann.DataType_QSymmS16, np.random.randint(1, size=(2, 3)).astype(np.int16))
                         ], ids=['float32', 'float16', 'unsigned int8', 'signed int8', 'signed int8', 'int32', 'int16'])
class TestConstTensorConstructorErrors:

    def test_tensorinfo_isconstant_not_set(self, dt, data):
        with pytest.raises(ValueError) as err:
            ann.ConstTensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt, 0.0, 0, False), data)

        assert str(err.value) == "TensorInfo when initializing ConstTensor must be set to constant."

    def test_tensor_tensorinfo_isconstant_not_set(self, dt, data):
        with pytest.raises(ValueError) as err:
            ann.ConstTensor(ann.Tensor(ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), dt, 0.0, 0, False), data))

        assert str(err.value) == "TensorInfo of Tensor when initializing ConstTensor must be set to constant."
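
# For contrast with the failures above, a minimal sketch of the accepted path
# (the final True flags the TensorInfo as constant):
#     info = ann.TensorInfo(ann.TensorShape((2, 2, 3, 3)), ann.DataType_Float32, 0.0, 0, True)
#     tensor = ann.ConstTensor(info, np.ones((2, 2, 3, 3), dtype=np.float32))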
601  arch/arm/ARMnn/python/pyarmnn/test/test_descriptors.py  Normal file
@ -0,0 +1,601 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
import inspect

import pytest

import pyarmnn as ann
import numpy as np
import pyarmnn._generated.pyarmnn as generated


def test_activation_descriptor_default_values():
    desc = ann.ActivationDescriptor()
    assert desc.m_Function == ann.ActivationFunction_Sigmoid
    assert desc.m_A == 0
    assert desc.m_B == 0


def test_argminmax_descriptor_default_values():
    desc = ann.ArgMinMaxDescriptor()
    assert desc.m_Function == ann.ArgMinMaxFunction_Min
    assert desc.m_Axis == -1


def test_batchnormalization_descriptor_default_values():
    desc = ann.BatchNormalizationDescriptor()
    assert desc.m_DataLayout == ann.DataLayout_NCHW
    assert np.allclose(0.0001, desc.m_Eps)


def test_batchtospacend_descriptor_default_values():
    desc = ann.BatchToSpaceNdDescriptor()
    assert desc.m_DataLayout == ann.DataLayout_NCHW
    assert [1, 1] == desc.m_BlockShape
    assert [(0, 0), (0, 0)] == desc.m_Crops


def test_batchtospacend_descriptor_assignment():
    desc = ann.BatchToSpaceNdDescriptor()
    desc.m_BlockShape = (1, 2, 3)

    ololo = [(1, 2), (3, 4)]
    size_1 = len(ololo)
    desc.m_Crops = ololo

    assert size_1 == len(ololo)
    desc.m_DataLayout = ann.DataLayout_NHWC
    assert ann.DataLayout_NHWC == desc.m_DataLayout
    assert [1, 2, 3] == desc.m_BlockShape
    assert [(1, 2), (3, 4)] == desc.m_Crops
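
# Assigning a Python sequence to a descriptor field copies it into the
# underlying C++ container; the source list above is left untouched, which is
# what the length check verifies.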


@pytest.mark.parametrize("input_shape, value, vtype", [([-1], -1, 'int'), (("one", "two"), "'one'", 'str'),
                                                       ([1.33, 4.55], 1.33, 'float'),
                                                       ([{1: "one"}], "{1: 'one'}", 'dict')], ids=lambda x: str(x))
def test_batchtospacend_descriptor_rubbish_assignment_shape(input_shape, value, vtype):
    desc = ann.BatchToSpaceNdDescriptor()
    with pytest.raises(TypeError) as err:
        desc.m_BlockShape = input_shape

    assert "Failed to convert python input value {} of type '{}' to C type 'j'".format(value, vtype) in str(err.value)


@pytest.mark.parametrize("input_crops, value, vtype", [([(1, 2), (3, 4, 5)], '(3, 4, 5)', 'tuple'),
                                                       ([(1, 'one')], "(1, 'one')", 'tuple'),
                                                       ([-1], -1, 'int'),
                                                       ([(1, (1, 2))], '(1, (1, 2))', 'tuple'),
                                                       ([[1, [1, 2]]], '[1, [1, 2]]', 'list')
                                                       ], ids=lambda x: str(x))
def test_batchtospacend_descriptor_rubbish_assignment_crops(input_crops, value, vtype):
    desc = ann.BatchToSpaceNdDescriptor()
    with pytest.raises(TypeError) as err:
        desc.m_Crops = input_crops

    assert "Failed to convert python input value {} of type '{}' to C type".format(value, vtype) in str(err.value)


def test_batchtospacend_descriptor_empty_assignment():
    desc = ann.BatchToSpaceNdDescriptor()
    desc.m_BlockShape = []
    assert [] == desc.m_BlockShape


def test_batchtospacend_descriptor_ctor():
    desc = ann.BatchToSpaceNdDescriptor([1, 2, 3], [(4, 5), (6, 7)])
    assert desc.m_DataLayout == ann.DataLayout_NCHW
    assert [1, 2, 3] == desc.m_BlockShape
    assert [(4, 5), (6, 7)] == desc.m_Crops


def test_channelshuffle_descriptor_default_values():
    desc = ann.ChannelShuffleDescriptor()
    assert desc.m_Axis == 0
    assert desc.m_NumGroups == 0


def test_convolution2d_descriptor_default_values():
    desc = ann.Convolution2dDescriptor()
    assert desc.m_PadLeft == 0
    assert desc.m_PadTop == 0
    assert desc.m_PadRight == 0
    assert desc.m_PadBottom == 0
    assert desc.m_StrideX == 1
    assert desc.m_StrideY == 1
    assert desc.m_DilationX == 1
    assert desc.m_DilationY == 1
    assert desc.m_BiasEnabled == False
    assert desc.m_DataLayout == ann.DataLayout_NCHW


def test_convolution3d_descriptor_default_values():
    desc = ann.Convolution3dDescriptor()
    assert desc.m_PadLeft == 0
    assert desc.m_PadTop == 0
    assert desc.m_PadRight == 0
    assert desc.m_PadBottom == 0
    assert desc.m_PadFront == 0
    assert desc.m_PadBack == 0
    assert desc.m_StrideX == 1
    assert desc.m_StrideY == 1
    assert desc.m_StrideZ == 1
    assert desc.m_DilationX == 1
    assert desc.m_DilationY == 1
    assert desc.m_DilationZ == 1
    assert desc.m_BiasEnabled == False
    assert desc.m_DataLayout == ann.DataLayout_NDHWC


def test_depthtospace_descriptor_default_values():
    desc = ann.DepthToSpaceDescriptor()
    assert desc.m_BlockSize == 1
    assert desc.m_DataLayout == ann.DataLayout_NHWC


def test_depthwise_convolution2d_descriptor_default_values():
    desc = ann.DepthwiseConvolution2dDescriptor()
    assert desc.m_PadLeft == 0
    assert desc.m_PadTop == 0
    assert desc.m_PadRight == 0
    assert desc.m_PadBottom == 0
    assert desc.m_StrideX == 1
    assert desc.m_StrideY == 1
    assert desc.m_DilationX == 1
    assert desc.m_DilationY == 1
    assert desc.m_BiasEnabled == False
    assert desc.m_DataLayout == ann.DataLayout_NCHW


def test_detectionpostprocess_descriptor_default_values():
    desc = ann.DetectionPostProcessDescriptor()
    assert desc.m_MaxDetections == 0
    assert desc.m_MaxClassesPerDetection == 1
    assert desc.m_DetectionsPerClass == 1
    assert desc.m_NmsScoreThreshold == 0
    assert desc.m_NmsIouThreshold == 0
    assert desc.m_NumClasses == 0
    assert desc.m_UseRegularNms == False
    assert desc.m_ScaleH == 0
    assert desc.m_ScaleW == 0
    assert desc.m_ScaleX == 0
    assert desc.m_ScaleY == 0


def test_fakequantization_descriptor_default_values():
    desc = ann.FakeQuantizationDescriptor()
    assert np.allclose(6, desc.m_Max)
    assert np.allclose(-6, desc.m_Min)


def test_fill_descriptor_default_values():
    desc = ann.FillDescriptor()
    assert np.allclose(0, desc.m_Value)


def test_gather_descriptor_default_values():
    desc = ann.GatherDescriptor()
    assert desc.m_Axis == 0


def test_fully_connected_descriptor_default_values():
    desc = ann.FullyConnectedDescriptor()
    assert desc.m_BiasEnabled == False
    assert desc.m_TransposeWeightMatrix == False


def test_instancenormalization_descriptor_default_values():
    desc = ann.InstanceNormalizationDescriptor()
    assert desc.m_Gamma == 1
    assert desc.m_Beta == 0
    assert desc.m_DataLayout == ann.DataLayout_NCHW
    assert np.allclose(1e-12, desc.m_Eps)


def test_lstm_descriptor_default_values():
    desc = ann.LstmDescriptor()
    assert desc.m_ActivationFunc == 1
    assert desc.m_ClippingThresCell == 0
    assert desc.m_ClippingThresProj == 0
    assert desc.m_CifgEnabled == True
    assert desc.m_PeepholeEnabled == False
    assert desc.m_ProjectionEnabled == False
    assert desc.m_LayerNormEnabled == False


def test_l2normalization_descriptor_default_values():
    desc = ann.L2NormalizationDescriptor()
    assert desc.m_DataLayout == ann.DataLayout_NCHW
    assert np.allclose(1e-12, desc.m_Eps)


def test_mean_descriptor_default_values():
    desc = ann.MeanDescriptor()
    assert desc.m_KeepDims == False


def test_normalization_descriptor_default_values():
    desc = ann.NormalizationDescriptor()
    assert desc.m_NormChannelType == ann.NormalizationAlgorithmChannel_Across
    assert desc.m_NormMethodType == ann.NormalizationAlgorithmMethod_LocalBrightness
    assert desc.m_NormSize == 0
    assert desc.m_Alpha == 0
    assert desc.m_Beta == 0
    assert desc.m_K == 0
    assert desc.m_DataLayout == ann.DataLayout_NCHW


def test_origin_descriptor_default_values():
    desc = ann.ConcatDescriptor()
    assert 0 == desc.GetNumViews()
    assert 0 == desc.GetNumDimensions()
    assert 1 == desc.GetConcatAxis()


def test_origin_descriptor_incorrect_views():
    desc = ann.ConcatDescriptor(2, 2)
    with pytest.raises(RuntimeError) as err:
        desc.SetViewOriginCoord(1000, 100, 1000)
    assert "Failed to set view origin coordinates." in str(err.value)


def test_origin_descriptor_ctor():
    desc = ann.ConcatDescriptor(2, 2)
    value = 5
    for i in range(desc.GetNumViews()):
        for j in range(desc.GetNumDimensions()):
            desc.SetViewOriginCoord(i, j, value + i)
    desc.SetConcatAxis(1)

    assert 2 == desc.GetNumViews()
    assert 2 == desc.GetNumDimensions()
    assert [5, 5] == desc.GetViewOrigin(0)
    assert [6, 6] == desc.GetViewOrigin(1)
    assert 1 == desc.GetConcatAxis()
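
# ConcatDescriptor(2, 2) allocates two views of two dimensions each; writing
# value + i into every coordinate of view i is what yields the [5, 5] and
# [6, 6] origins checked above.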


def test_pad_descriptor_default_values():
    desc = ann.PadDescriptor()
    assert desc.m_PadValue == 0
    assert desc.m_PaddingMode == ann.PaddingMode_Constant


def test_permute_descriptor_default_values():
    pv = ann.PermutationVector((0, 2, 3, 1))
    desc = ann.PermuteDescriptor(pv)
    assert desc.m_DimMappings.GetSize() == 4
    assert desc.m_DimMappings[0] == 0
    assert desc.m_DimMappings[1] == 2
    assert desc.m_DimMappings[2] == 3
    assert desc.m_DimMappings[3] == 1


def test_pooling_descriptor_default_values():
    desc = ann.Pooling2dDescriptor()
    assert desc.m_PoolType == ann.PoolingAlgorithm_Max
    assert desc.m_PadLeft == 0
    assert desc.m_PadTop == 0
    assert desc.m_PadRight == 0
    assert desc.m_PadBottom == 0
    assert desc.m_PoolHeight == 0
    assert desc.m_PoolWidth == 0
    assert desc.m_StrideX == 0
    assert desc.m_StrideY == 0
    assert desc.m_OutputShapeRounding == ann.OutputShapeRounding_Floor
    assert desc.m_PaddingMethod == ann.PaddingMethod_Exclude
    assert desc.m_DataLayout == ann.DataLayout_NCHW


def test_reshape_descriptor_default_values():
    desc = ann.ReshapeDescriptor()
    # check the empty TargetShape
    assert desc.m_TargetShape.GetNumDimensions() == 0


def test_reduce_descriptor_default_values():
    desc = ann.ReduceDescriptor()
    assert desc.m_KeepDims == False
    assert desc.m_vAxis == []
    assert desc.m_ReduceOperation == ann.ReduceOperation_Sum


def test_resize_descriptor_default_values():
    desc = ann.ResizeDescriptor()
    assert desc.m_TargetWidth == 0
    assert desc.m_TargetHeight == 0
    assert desc.m_Method == ann.ResizeMethod_NearestNeighbor
    assert desc.m_DataLayout == ann.DataLayout_NCHW
    assert desc.m_AlignCorners == False


def test_spacetobatchnd_descriptor_default_values():
    desc = ann.SpaceToBatchNdDescriptor()
    assert desc.m_DataLayout == ann.DataLayout_NCHW


def test_spacetodepth_descriptor_default_values():
    desc = ann.SpaceToDepthDescriptor()
    assert desc.m_BlockSize == 1
    assert desc.m_DataLayout == ann.DataLayout_NHWC


def test_stack_descriptor_default_values():
    desc = ann.StackDescriptor()
    assert desc.m_Axis == 0
    assert desc.m_NumInputs == 0
    # check the empty InputShape
    assert desc.m_InputShape.GetNumDimensions() == 0


def test_slice_descriptor_default_values():
    desc = ann.SliceDescriptor()
    desc.m_Begin = [1, 2, 3, 4, 5]
    desc.m_Size = (1, 2, 3, 4)

    assert [1, 2, 3, 4, 5] == desc.m_Begin
    assert [1, 2, 3, 4] == desc.m_Size


def test_slice_descriptor_ctor():
    desc = ann.SliceDescriptor([1, 2, 3, 4, 5], (1, 2, 3, 4))

    assert [1, 2, 3, 4, 5] == desc.m_Begin
    assert [1, 2, 3, 4] == desc.m_Size


def test_strided_slice_descriptor_default_values():
    desc = ann.StridedSliceDescriptor()
    desc.m_Begin = [1, 2, 3, 4, 5]
    desc.m_End = [6, 7, 8, 9, 10]
    desc.m_Stride = (10, 10)
    desc.m_BeginMask = 1
    desc.m_EndMask = 2
    desc.m_ShrinkAxisMask = 3
    desc.m_EllipsisMask = 4
    desc.m_NewAxisMask = 5

    assert [1, 2, 3, 4, 5] == desc.m_Begin
    assert [6, 7, 8, 9, 10] == desc.m_End
    assert [10, 10] == desc.m_Stride
    assert 1 == desc.m_BeginMask
    assert 2 == desc.m_EndMask
    assert 3 == desc.m_ShrinkAxisMask
    assert 4 == desc.m_EllipsisMask
    assert 5 == desc.m_NewAxisMask


def test_strided_slice_descriptor_ctor():
    desc = ann.StridedSliceDescriptor([1, 2, 3, 4, 5], [6, 7, 8, 9, 10], (10, 10))
    desc.m_Begin = [1, 2, 3, 4, 5]
    desc.m_End = [6, 7, 8, 9, 10]
    desc.m_Stride = (10, 10)

    assert [1, 2, 3, 4, 5] == desc.m_Begin
    assert [6, 7, 8, 9, 10] == desc.m_End
    assert [10, 10] == desc.m_Stride
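
# The *Mask members behave like TensorFlow-style strided-slice bit masks (one
# bit per dimension); the small integers written above only exercise the
# setters and are not meaningful mask patterns.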


def test_softmax_descriptor_default_values():
    desc = ann.SoftmaxDescriptor()
    assert desc.m_Axis == -1
    assert np.allclose(1.0, desc.m_Beta)


def test_space_to_batch_nd_descriptor_default_values():
    desc = ann.SpaceToBatchNdDescriptor()
    assert [1, 1] == desc.m_BlockShape
    assert [(0, 0), (0, 0)] == desc.m_PadList
    assert ann.DataLayout_NCHW == desc.m_DataLayout


def test_space_to_batch_nd_descriptor_assigned_values():
    desc = ann.SpaceToBatchNdDescriptor()
    desc.m_BlockShape = (90, 100)
    desc.m_PadList = [(1, 2), (3, 4)]
    assert [90, 100] == desc.m_BlockShape
    assert [(1, 2), (3, 4)] == desc.m_PadList
    assert ann.DataLayout_NCHW == desc.m_DataLayout


def test_space_to_batch_nd_descriptor_ctor():
    desc = ann.SpaceToBatchNdDescriptor((1, 2, 3), [(1, 2), (3, 4)])
    assert [1, 2, 3] == desc.m_BlockShape
    assert [(1, 2), (3, 4)] == desc.m_PadList
    assert ann.DataLayout_NCHW == desc.m_DataLayout


def test_transpose_convolution2d_descriptor_default_values():
    desc = ann.TransposeConvolution2dDescriptor()
    assert desc.m_PadLeft == 0
    assert desc.m_PadTop == 0
    assert desc.m_PadRight == 0
    assert desc.m_PadBottom == 0
    assert desc.m_StrideX == 0
    assert desc.m_StrideY == 0
    assert desc.m_BiasEnabled == False
    assert desc.m_DataLayout == ann.DataLayout_NCHW
    assert desc.m_OutputShapeEnabled == False


def test_transpose_descriptor_default_values():
    pv = ann.PermutationVector((0, 3, 2, 1, 4))
    desc = ann.TransposeDescriptor(pv)
    assert desc.m_DimMappings.GetSize() == 5
    assert desc.m_DimMappings[0] == 0
    assert desc.m_DimMappings[1] == 3
    assert desc.m_DimMappings[2] == 2
    assert desc.m_DimMappings[3] == 1
    assert desc.m_DimMappings[4] == 4


def test_view_descriptor_default_values():
    desc = ann.SplitterDescriptor()
    assert 0 == desc.GetNumViews()
    assert 0 == desc.GetNumDimensions()


def test_elementwise_unary_descriptor_default_values():
    desc = ann.ElementwiseUnaryDescriptor()
    assert desc.m_Operation == ann.UnaryOperation_Abs


def test_logical_binary_descriptor_default_values():
    desc = ann.LogicalBinaryDescriptor()
    assert desc.m_Operation == ann.LogicalBinaryOperation_LogicalAnd


def test_view_descriptor_incorrect_input():
    desc = ann.SplitterDescriptor(2, 3)
    with pytest.raises(RuntimeError) as err:
        desc.SetViewOriginCoord(1000, 100, 1000)
    assert "Failed to set view origin coordinates." in str(err.value)

    with pytest.raises(RuntimeError) as err:
        desc.SetViewSize(1000, 100, 1000)
    assert "Failed to set view size." in str(err.value)


def test_view_descriptor_ctor():
    desc = ann.SplitterDescriptor(2, 3)
    value_size = 1
    value_orig_coord = 5
    for i in range(desc.GetNumViews()):
        for j in range(desc.GetNumDimensions()):
            desc.SetViewOriginCoord(i, j, value_orig_coord + i)
            desc.SetViewSize(i, j, value_size + i)

    assert 2 == desc.GetNumViews()
    assert 3 == desc.GetNumDimensions()
    assert [5, 5] == desc.GetViewOrigin(0)
    assert [6, 6] == desc.GetViewOrigin(1)
    assert [1, 1] == desc.GetViewSizes(0)
    assert [2, 2] == desc.GetViewSizes(1)


def test_createdescriptorforconcatenation_ctor():
    input_shape_vector = [ann.TensorShape((2, 1)), ann.TensorShape((3, 1)), ann.TensorShape((4, 1))]
    desc = ann.CreateDescriptorForConcatenation(input_shape_vector, 0)
    assert 3 == desc.GetNumViews()
    assert 0 == desc.GetConcatAxis()
    assert 2 == desc.GetNumDimensions()
    c = desc.GetViewOrigin(1)
    d = desc.GetViewOrigin(0)
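
# For concatenation along axis 0 the view origins accumulate the preceding
# sizes, so d (view 0) should be [0, 0] and c (view 1) [2, 0]; the test only
# exercises the getters without asserting those values.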


def test_createdescriptorforconcatenation_wrong_shape_for_axis():
    input_shape_vector = [ann.TensorShape((1, 2)), ann.TensorShape((3, 4)), ann.TensorShape((5, 6))]
    with pytest.raises(RuntimeError) as err:
        desc = ann.CreateDescriptorForConcatenation(input_shape_vector, 0)

    assert "All inputs to concatenation must be the same size along all dimensions except the concatenation dimension" in str(
        err.value)


@pytest.mark.parametrize("input_shape_vector", [([-1, "one"]),
                                                ([1.33, 4.55]),
                                                ([{1: "one"}])], ids=lambda x: str(x))
def test_createdescriptorforconcatenation_rubbish_assignment_shape_vector(input_shape_vector):
    with pytest.raises(TypeError) as err:
        desc = ann.CreateDescriptorForConcatenation(input_shape_vector, 0)

    assert "in method 'CreateDescriptorForConcatenation', argument 1 of type 'std::vector< armnn::TensorShape,std::allocator< armnn::TensorShape > >'" in str(
        err.value)


generated_classes = inspect.getmembers(generated, inspect.isclass)
generated_classes_names = list(map(lambda x: x[0], generated_classes))


@pytest.mark.parametrize("desc_name", ['ActivationDescriptor',
                                       'ArgMinMaxDescriptor',
                                       'PermuteDescriptor',
                                       'SoftmaxDescriptor',
                                       'ConcatDescriptor',
                                       'SplitterDescriptor',
                                       'Pooling2dDescriptor',
                                       'FullyConnectedDescriptor',
                                       'Convolution2dDescriptor',
                                       'Convolution3dDescriptor',
                                       'DepthwiseConvolution2dDescriptor',
                                       'DetectionPostProcessDescriptor',
                                       'NormalizationDescriptor',
                                       'L2NormalizationDescriptor',
                                       'BatchNormalizationDescriptor',
                                       'InstanceNormalizationDescriptor',
                                       'BatchToSpaceNdDescriptor',
                                       'FakeQuantizationDescriptor',
                                       'ReduceDescriptor',
                                       'ResizeDescriptor',
                                       'ReshapeDescriptor',
                                       'SpaceToBatchNdDescriptor',
                                       'SpaceToDepthDescriptor',
                                       'LstmDescriptor',
                                       'MeanDescriptor',
                                       'PadDescriptor',
                                       'SliceDescriptor',
                                       'StackDescriptor',
                                       'StridedSliceDescriptor',
                                       'TransposeConvolution2dDescriptor',
                                       'TransposeDescriptor',
                                       'ElementwiseUnaryDescriptor',
                                       'FillDescriptor',
                                       'GatherDescriptor',
                                       'LogicalBinaryDescriptor',
                                       'ChannelShuffleDescriptor'])
class TestDescriptorMassChecks:

    def test_desc_implemented(self, desc_name):
        assert desc_name in generated_classes_names

    def test_desc_equal(self, desc_name):
        desc_class = next(filter(lambda x: x[0] == desc_name, generated_classes))[1]

        assert desc_class() == desc_class()
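
    # desc_class() == desc_class() exercises the generated __eq__: two
    # default-constructed descriptors compare equal member by member.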
120  arch/arm/ARMnn/python/pyarmnn/test/test_deserializer.py  Normal file
@ -0,0 +1,120 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
import os

import pytest
import pyarmnn as ann
import numpy as np


@pytest.fixture()
def parser(shared_data_folder):
    """
    Parse and set up the test network to be used for the tests below
    """
    parser = ann.IDeserializer()
    parser.CreateNetworkFromBinary(os.path.join(shared_data_folder, 'mock_model.armnn'))

    yield parser


def test_deserializer_swig_destroy():
    assert ann.IDeserializer.__swig_destroy__, "There is a swig python destructor defined"
    assert ann.IDeserializer.__swig_destroy__.__name__ == "delete_IDeserializer"


def test_check_deserializer_swig_ownership(parser):
    # Check to see that SWIG has ownership for parser. This instructs SWIG to take
    # ownership of the return value. This allows the value to be automatically
    # garbage-collected when it is no longer in use.
    assert parser.thisown


def test_deserializer_get_network_input_binding_info(parser):
    # use 0 as a dummy value for layer_id, which is unused in the actual implementation
    layer_id = 0
    input_name = 'input_1'

    input_binding_info = parser.GetNetworkInputBindingInfo(layer_id, input_name)

    tensor = input_binding_info[1]
    assert tensor.GetDataType() == 2
    assert tensor.GetNumDimensions() == 4
    assert tensor.GetNumElements() == 784
    assert tensor.GetQuantizationOffset() == 128
    assert tensor.GetQuantizationScale() == 0.007843137718737125
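
# DataType 2 corresponds to ann.DataType_QAsymmU8 in the generated enum (an
# assumption from the enum ordering), i.e. the mock model's input is a
# quantized uint8 tensor of 1x28x28x1 = 784 elements.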


def test_deserializer_get_network_output_binding_info(parser):
    # use 0 as a dummy value for layer_id, which is unused in the actual implementation
    layer_id = 0
    output_name = "dense/Softmax"

    output_binding_info1 = parser.GetNetworkOutputBindingInfo(layer_id, output_name)

    # Check the tensor info retrieved from GetNetworkOutputBindingInfo
    tensor1 = output_binding_info1[1]

    assert tensor1.GetDataType() == 2
    assert tensor1.GetNumDimensions() == 2
    assert tensor1.GetNumElements() == 10
    assert tensor1.GetQuantizationOffset() == 0
    assert tensor1.GetQuantizationScale() == 0.00390625


def test_deserializer_filenotfound_exception(shared_data_folder):
    parser = ann.IDeserializer()

    with pytest.raises(RuntimeError) as err:
        parser.CreateNetworkFromBinary(os.path.join(shared_data_folder, 'some_unknown_network.armnn'))

    # Only check for part of the exception since the exception returns
    # an absolute path which will change on different machines.
    assert 'Cannot read the file' in str(err.value)


def test_deserializer_end_to_end(shared_data_folder):
    parser = ann.IDeserializer()

    network = parser.CreateNetworkFromBinary(os.path.join(shared_data_folder, "mock_model.armnn"))

    # use 0 as a dummy value for layer_id, which is unused in the actual implementation
    layer_id = 0
    input_name = 'input_1'
    output_name = 'dense/Softmax'

    input_binding_info = parser.GetNetworkInputBindingInfo(layer_id, input_name)

    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    assert 0 == len(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)
    assert "" == messages

    # Load test image data stored in input_lite.npy
    input_tensor_data = np.load(os.path.join(shared_data_folder, 'deserializer/input_lite.npy'))
    input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])

    output_tensors = []
    out_bind_info = parser.GetNetworkOutputBindingInfo(layer_id, output_name)
    out_tensor_info = out_bind_info[1]
    out_tensor_id = out_bind_info[0]
    output_tensors.append((out_tensor_id,
                           ann.Tensor(out_tensor_info)))

    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output_vectors = []
    for index, out_tensor in enumerate(output_tensors):
        output_vectors.append(out_tensor[1].get_memory_area())

    # Load golden output file for result comparison.
    expected_outputs = np.load(os.path.join(shared_data_folder, 'deserializer/golden_output_lite.npy'))

    # Check that output matches golden output
    assert (expected_outputs == output_vectors[0]).all()
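
# The flow above - deserialize, Optimize, LoadNetwork, make_input_tensors,
# EnqueueWorkload, then read each output Tensor's memory area - is the same
# inference recipe the ONNX and TFLite parser tests in this commit follow.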
50  arch/arm/ARMnn/python/pyarmnn/test/test_generated.py  Normal file
@ -0,0 +1,50 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
import inspect
from typing import Tuple

import pytest

import pyarmnn._generated.pyarmnn as generated_armnn
import pyarmnn._generated.pyarmnn as generated_deserializer
import pyarmnn._generated.pyarmnn_onnxparser as generated_onnx
import pyarmnn._generated.pyarmnn_tfliteparser as generated_tflite

swig_independent_classes = ('IBackend',
                            'IDeviceSpec',
                            'IConnectableLayer',
                            'IInputSlot',
                            'IOutputSlot',
                            'IProfiler')


def get_classes(swig_independent_classes: Tuple):
    # We need to ignore some swig generated_armnn classes. This is because some are abstract classes:
    # they cannot be created with the swig generated_armnn wrapper, therefore they don't need a destructor.
    # Swig also generates its own meta class - this needs to be ignored.
    ignored_class_names = (*swig_independent_classes, '_SwigNonDynamicMeta')
    return list(filter(lambda x: x[0] not in ignored_class_names,
                       inspect.getmembers(generated_armnn, inspect.isclass) +
                       inspect.getmembers(generated_deserializer, inspect.isclass) +
                       inspect.getmembers(generated_tflite, inspect.isclass) +
                       inspect.getmembers(generated_onnx, inspect.isclass)))


@pytest.mark.parametrize("class_instance", get_classes(swig_independent_classes), ids=lambda x: 'class={}'.format(x[0]))
class TestPyOwnedClasses:

    def test_destructors_exist_per_class(self, class_instance):
        assert getattr(class_instance[1], '__swig_destroy__', None)

    def test_owned(self, class_instance):
        assert getattr(class_instance[1], 'thisown', None)


@pytest.mark.parametrize("class_instance", swig_independent_classes)
class TestPyIndependentClasses:

    def test_destructors_does_not_exist_per_class(self, class_instance):
        assert not getattr(class_instance[1], '__swig_destroy__', None)

    def test_not_owned(self, class_instance):
        assert not getattr(class_instance[1], 'thisown', None)
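# Note: generated_armnn and generated_deserializer above alias the same module,
# and the parametrized class_instance in TestPyIndependentClasses is a plain
# name string, so class_instance[1] indexes a single character; resolving the
# name with getattr(generated_armnn, class_instance) would make the destructor
# check meaningful.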
142  arch/arm/ARMnn/python/pyarmnn/test/test_iconnectable.py  Normal file
@ -0,0 +1,142 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import pytest

import pyarmnn as ann


@pytest.fixture(scope="function")
def network():
    return ann.INetwork()


class TestIInputIOutputIConnectable:

    def test_input_slot(self, network):
        # Create input, addition & output layer
        input1 = network.AddInputLayer(0, "input1")
        input2 = network.AddInputLayer(1, "input2")
        add = network.AddAdditionLayer("addition")
        output = network.AddOutputLayer(0, "output")

        # Connect the input/output slots for each layer
        input1.GetOutputSlot(0).Connect(add.GetInputSlot(0))
        input2.GetOutputSlot(0).Connect(add.GetInputSlot(1))
        add.GetOutputSlot(0).Connect(output.GetInputSlot(0))

        # Check IInputSlot GetConnection()
        input_slot = add.GetInputSlot(0)
        input_slot_connection = input_slot.GetConnection()

        assert isinstance(input_slot_connection, ann.IOutputSlot)

        del input_slot_connection

        assert input_slot.GetConnection()
        assert isinstance(input_slot.GetConnection(), ann.IOutputSlot)

        del input_slot

        assert add.GetInputSlot(0)
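
    # Each GetConnection()/GetInputSlot() call returns a fresh non-owning
    # wrapper, so deleting one (as above) neither disconnects nor destroys the
    # underlying slot.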

    def test_output_slot(self, network):

        # Create input, addition & output layer
        input1 = network.AddInputLayer(0, "input1")
        input2 = network.AddInputLayer(1, "input2")
        add = network.AddAdditionLayer("addition")
        output = network.AddOutputLayer(0, "output")

        # Connect the input/output slots for each layer
        input1.GetOutputSlot(0).Connect(add.GetInputSlot(0))
        input2.GetOutputSlot(0).Connect(add.GetInputSlot(1))
        add.GetOutputSlot(0).Connect(output.GetInputSlot(0))

        # Check IInputSlot GetConnection()
        add_get_input_connection = add.GetInputSlot(0).GetConnection()
        output_get_input_connection = output.GetInputSlot(0).GetConnection()

        # Check IOutputSlot GetConnection()
        add_get_output_connect = add.GetOutputSlot(0).GetConnection(0)
        assert isinstance(add_get_output_connect.GetConnection(), ann.IOutputSlot)

        # Test IOutputSlot GetNumConnections() & CalculateIndexOnOwner()
        assert add_get_input_connection.GetNumConnections() == 1
        assert len(add_get_input_connection) == 1
        assert add_get_input_connection[0]
        assert add_get_input_connection.CalculateIndexOnOwner() == 0

        # Check GetOwningLayerGuid(). Check that it is different for add and output layer
        assert add_get_input_connection.GetOwningLayerGuid() != output_get_input_connection.GetOwningLayerGuid()

        # Set TensorInfo
        test_tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)

        # Check IsTensorInfoSet()
        assert not add_get_input_connection.IsTensorInfoSet()
        add_get_input_connection.SetTensorInfo(test_tensor_info)
        assert add_get_input_connection.IsTensorInfoSet()

        # Check GetTensorInfo()
        output_tensor_info = add_get_input_connection.GetTensorInfo()
        assert 2 == output_tensor_info.GetNumDimensions()
        assert 6 == output_tensor_info.GetNumElements()

        # Check Disconnect()
        assert output_get_input_connection.GetNumConnections() == 1  # 1 connection to OutputSlot0 from input1
        add.GetOutputSlot(0).Disconnect(output.GetInputSlot(0))  # disconnect add.OutputSlot0 from output.InputSlot0
        assert output_get_input_connection.GetNumConnections() == 0

    def test_output_slot__out_of_range(self, network):
        # Create input layer to check output slot get item handling
        input1 = network.AddInputLayer(0, "input1")

        outputSlot = input1.GetOutputSlot(0)
        with pytest.raises(ValueError) as err:
            outputSlot[1]

        assert "Invalid index 1 provided" in str(err.value)

    def test_iconnectable_guid(self, network):

        # Check IConnectable GetGuid()
        # Note: the Guid can change based on which tests are run, so
        # check here that each layer does not have the same guid
        add_id = network.AddAdditionLayer().GetGuid()
        output_id = network.AddOutputLayer(0).GetGuid()
        assert add_id != output_id

    def test_iconnectable_layer_functions(self, network):

        # Create input, addition & output layer
        input1 = network.AddInputLayer(0, "input1")
        input2 = network.AddInputLayer(1, "input2")
        add = network.AddAdditionLayer("addition")
        output = network.AddOutputLayer(0, "output")

        # Check GetNumInputSlots(), GetName() & GetNumOutputSlots()
        assert input1.GetNumInputSlots() == 0
        assert input1.GetName() == "input1"
        assert input1.GetNumOutputSlots() == 1

        assert input2.GetNumInputSlots() == 0
        assert input2.GetName() == "input2"
        assert input2.GetNumOutputSlots() == 1

        assert add.GetNumInputSlots() == 2
        assert add.GetName() == "addition"
        assert add.GetNumOutputSlots() == 1

        assert output.GetNumInputSlots() == 1
        assert output.GetName() == "output"
        assert output.GetNumOutputSlots() == 0

        # Check GetOutputSlot()
        input1_get_output = input1.GetOutputSlot(0)
        assert input1_get_output.GetNumConnections() == 0
        assert len(input1_get_output) == 0

        # Check GetInputSlot()
        add_get_input = add.GetInputSlot(0)
        add_get_input.GetConnection()
        assert isinstance(add_get_input, ann.IInputSlot)
149  arch/arm/ARMnn/python/pyarmnn/test/test_modeloption.py  Normal file
@ -0,0 +1,149 @@
# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
import pytest

from pyarmnn import BackendOptions, BackendOption, BackendId, OptimizerOptions, ShapeInferenceMethod_InferAndValidate


@pytest.mark.parametrize("data", (True, -100, 128, 0.12345, 'string'))
def test_backend_option_ctor(data):
    bo = BackendOption("name", data)
    assert "name" == bo.GetName()


def test_backend_options_ctor():
    backend_id = BackendId('a')
    bos = BackendOptions(backend_id)

    assert 'a' == str(bos.GetBackendId())

    another_bos = BackendOptions(bos)
    assert 'a' == str(another_bos.GetBackendId())


def test_backend_options_add():
    backend_id = BackendId('a')
    bos = BackendOptions(backend_id)
    bo = BackendOption("name", 1)
    bos.AddOption(bo)

    assert 1 == bos.GetOptionCount()
    assert 1 == len(bos)

    assert 'name' == bos[0].GetName()
    assert 'name' == bos.GetOption(0).GetName()
    for option in bos:
        assert 'name' == option.GetName()

    bos.AddOption(BackendOption("name2", 2))

    assert 2 == bos.GetOptionCount()
    assert 2 == len(bos)


def test_backend_option_ownership():
    backend_id = BackendId('b')
    bos = BackendOptions(backend_id)
    bo = BackendOption('option', True)
    bos.AddOption(bo)

    assert bo.thisown

    del bo

    assert 1 == bos.GetOptionCount()
    option = bos[0]
    assert not option.thisown
    assert 'option' == option.GetName()

    del option

    option_again = bos[0]
    assert not option_again.thisown
    assert 'option' == option_again.GetName()
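
# AddOption copies the option into the container (the Python wrapper above
# keeps ownership of its own object, hence bo.thisown), while indexing returns
# non-owning views, so deleting those views is safe.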


def test_optimizer_options_with_model_opt():
    a = BackendOptions(BackendId('a'))

    oo = OptimizerOptions(True,
                          False,
                          False,
                          ShapeInferenceMethod_InferAndValidate,
                          True,
                          [a])

    mo = oo.m_ModelOptions

    assert 1 == len(mo)
    assert 'a' == str(mo[0].GetBackendId())

    b = BackendOptions(BackendId('b'))

    c = BackendOptions(BackendId('c'))

    oo.m_ModelOptions = (a, b, c)

    mo = oo.m_ModelOptions

    assert 3 == len(oo.m_ModelOptions)

    assert 'a' == str(mo[0].GetBackendId())
    assert 'b' == str(mo[1].GetBackendId())
    assert 'c' == str(mo[2].GetBackendId())
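
# The positional OptimizerOptions arguments above are (ReduceFp32ToFp16, Debug,
# ReduceFp32ToBf16, shape inference method, ImportEnabled, ModelOptions), per
# the attribute checks in test_network.py; m_ModelOptions accepts any sequence
# of BackendOptions and copies it.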


def test_optimizer_option_default():
    oo = OptimizerOptions(True,
                          False,
                          False,
                          ShapeInferenceMethod_InferAndValidate,
                          True)

    assert 0 == len(oo.m_ModelOptions)


def test_optimizer_options_fail():
    a = BackendOptions(BackendId('a'))

    with pytest.raises(TypeError) as err:
        OptimizerOptions(True,
                         False,
                         False,
                         ShapeInferenceMethod_InferAndValidate,
                         True,
                         a)

    assert "Wrong number or type of arguments" in str(err.value)

    with pytest.raises(RuntimeError) as err:
        OptimizerOptions(True,
                         False,
                         True,
                         ShapeInferenceMethod_InferAndValidate,
                         True,
                         [a])

    assert "BFloat16 and Float16 optimization cannot be enabled at the same time" in str(err.value)

    with pytest.raises(TypeError) as err:
        oo = OptimizerOptions(True,
                              False,
                              False,
                              ShapeInferenceMethod_InferAndValidate,
                              True)

        oo.m_ModelOptions = 'nonsense'

    assert "in method 'OptimizerOptions_m_ModelOptions_set', argument 2" in str(err.value)

    with pytest.raises(TypeError) as err:
        oo = OptimizerOptions(True,
                              False,
                              False,
                              ShapeInferenceMethod_InferAndValidate,
                              True)

        oo.m_ModelOptions = ['nonsense', a]

    assert "in method 'OptimizerOptions_m_ModelOptions_set', argument 2" in str(err.value)
421  arch/arm/ARMnn/python/pyarmnn/test/test_network.py  Normal file
@ -0,0 +1,421 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
import os
import stat
import numpy as np

import pytest
import pyarmnn as ann


def test_optimizer_options_default_values():
    opt = ann.OptimizerOptions()
    assert opt.m_ReduceFp32ToFp16 == False
    assert opt.m_Debug == False
    assert opt.m_ReduceFp32ToBf16 == False
    assert opt.m_ImportEnabled == False
    assert opt.m_shapeInferenceMethod == ann.ShapeInferenceMethod_ValidateOnly


def test_optimizer_options_set_values1():
    opt = ann.OptimizerOptions(True, True)
    assert opt.m_ReduceFp32ToFp16 == True
    assert opt.m_Debug == True
    assert opt.m_ReduceFp32ToBf16 == False
    assert opt.m_ImportEnabled == False
    assert opt.m_shapeInferenceMethod == ann.ShapeInferenceMethod_ValidateOnly


def test_optimizer_options_set_values2():
    opt = ann.OptimizerOptions(False, False, True)
    assert opt.m_ReduceFp32ToFp16 == False
    assert opt.m_Debug == False
    assert opt.m_ReduceFp32ToBf16 == True
    assert opt.m_ImportEnabled == False
    assert opt.m_shapeInferenceMethod == ann.ShapeInferenceMethod_ValidateOnly


def test_optimizer_options_set_values3():
    opt = ann.OptimizerOptions(False, False, True, ann.ShapeInferenceMethod_InferAndValidate, True)
    assert opt.m_ReduceFp32ToFp16 == False
    assert opt.m_Debug == False
    assert opt.m_ReduceFp32ToBf16 == True
    assert opt.m_ImportEnabled == True
    assert opt.m_shapeInferenceMethod == ann.ShapeInferenceMethod_InferAndValidate


@pytest.fixture(scope="function")
def get_runtime(shared_data_folder, network_file):
    parser = ann.ITfLiteParser()
    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
    network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, network_file))
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    yield preferred_backends, network, runtime


@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite',
                         ],
                         ids=['mock_model'])
def test_optimize_executes_successfully(network_file, get_runtime):
    preferred_backends = [ann.BackendId('CpuRef')]
    network = get_runtime[1]
    runtime = get_runtime[2]

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    assert len(messages) == 0, 'With only CpuRef, there should be no warnings irrespective of architecture.'
    assert opt_network
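
# ann.Optimize returns the optimized network plus a list of diagnostic
# messages; an empty list means the optimizer had nothing to warn about for
# the chosen backends.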


@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite',
                         ],
                         ids=['mock_model'])
def test_optimize_owned_by_python(network_file, get_runtime):
    preferred_backends = get_runtime[0]
    network = get_runtime[1]
    runtime = get_runtime[2]

    opt_network, _ = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    assert opt_network.thisown


@pytest.mark.aarch64
@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite'
                         ],
                         ids=['mock_model'])
def test_optimize_executes_successfully_for_neon_backend_only(network_file, get_runtime):
    preferred_backends = [ann.BackendId('CpuAcc')]
    network = get_runtime[1]
    runtime = get_runtime[2]

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    assert 0 == len(messages)
    assert opt_network


@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite'
                         ],
                         ids=['mock_model'])
def test_optimize_fails_for_invalid_backends(network_file, get_runtime):
    invalid_backends = [ann.BackendId('Unknown')]
    network = get_runtime[1]
    runtime = get_runtime[2]

    with pytest.raises(RuntimeError) as err:
        ann.Optimize(network, invalid_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    expected_error_message = "None of the preferred backends [Unknown ] are supported."
    assert expected_error_message in str(err.value)


@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite'
                         ],
                         ids=['mock_model'])
def test_optimize_fails_for_no_backends_specified(network_file, get_runtime):
    empty_backends = []
    network = get_runtime[1]
    runtime = get_runtime[2]

    with pytest.raises(RuntimeError) as err:
        ann.Optimize(network, empty_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    expected_error_message = "Invoked Optimize with no backends specified"
    assert expected_error_message in str(err.value)


@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite'
                         ],
                         ids=['mock_model'])
def test_serialize_to_dot(network_file, get_runtime, tmpdir):
    preferred_backends = get_runtime[0]
    network = get_runtime[1]
    runtime = get_runtime[2]
    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())
    dot_file_path = os.path.join(tmpdir, 'mock_model.dot')
    # Check that the serialized file does not exist at the start, gets created after SerializeToDot and is not empty
    assert not os.path.exists(dot_file_path)
    opt_network.SerializeToDot(dot_file_path)

    assert os.path.exists(dot_file_path)

    with open(dot_file_path) as res_file:
        expected_data = res_file.read()
        assert len(expected_data) > 1
        assert '[label=< [1,28,28,1] >]' in expected_data


@pytest.mark.x86_64
@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite'
                         ],
                         ids=['mock_model'])
def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir):
    preferred_backends = get_runtime[0]
    network = get_runtime[1]
    runtime = get_runtime[2]
    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())
    # Create the file, write to it and change its mode to read-only
    dot_file_path = os.path.join(tmpdir, 'mock_model.dot')
    f = open(dot_file_path, "w+")
    f.write("test")
    f.close()
    os.chmod(dot_file_path, stat.S_IREAD)
    assert os.path.exists(dot_file_path)

    with pytest.raises(RuntimeError) as err:
        opt_network.SerializeToDot(dot_file_path)

    expected_error_message = "Failed to open dot file"
    assert expected_error_message in str(err.value)


@pytest.mark.parametrize("method", [
    'AddActivationLayer',
    'AddAdditionLayer',
    'AddArgMinMaxLayer',
    'AddBatchNormalizationLayer',
    'AddBatchToSpaceNdLayer',
    'AddCastLayer',
    'AddChannelShuffleLayer',
    'AddComparisonLayer',
    'AddConcatLayer',
    'AddConstantLayer',
    'AddConvolution2dLayer',
    'AddConvolution3dLayer',
    'AddDepthToSpaceLayer',
    'AddDepthwiseConvolution2dLayer',
    'AddDequantizeLayer',
    'AddDetectionPostProcessLayer',
    'AddDivisionLayer',
    'AddElementwiseUnaryLayer',
    'AddFloorLayer',
    'AddFillLayer',
    'AddFullyConnectedLayer',
    'AddGatherLayer',
    'AddInputLayer',
    'AddInstanceNormalizationLayer',
    'AddLogicalBinaryLayer',
    'AddLogSoftmaxLayer',
    'AddL2NormalizationLayer',
    'AddLstmLayer',
    'AddMaximumLayer',
    'AddMeanLayer',
    'AddMergeLayer',
    'AddMinimumLayer',
    'AddMultiplicationLayer',
    'AddNormalizationLayer',
    'AddOutputLayer',
    'AddPadLayer',
    'AddPermuteLayer',
    'AddPooling2dLayer',
    'AddPreluLayer',
    'AddQuantizeLayer',
    'AddQuantizedLstmLayer',
    'AddRankLayer',
    'AddReduceLayer',
    'AddReshapeLayer',
    'AddResizeLayer',
    'AddShapeLayer',
    'AddSliceLayer',
    'AddSoftmaxLayer',
    'AddSpaceToBatchNdLayer',
    'AddSpaceToDepthLayer',
    'AddSplitterLayer',
    'AddStackLayer',
    'AddStandInLayer',
    'AddStridedSliceLayer',
    'AddSubtractionLayer',
    'AddSwitchLayer',
    'AddTransposeConvolution2dLayer',
    'AddTransposeLayer'
])
def test_network_method_exists(method):
    assert getattr(ann.INetwork, method, None)


def test_fullyconnected_layer_optional_none():
    net = ann.INetwork()
    layer = net.AddFullyConnectedLayer(ann.FullyConnectedDescriptor(),
                                       ann.ConstTensor())

    assert layer


def test_fullyconnected_layer_optional_provided():
    net = ann.INetwork()
    layer = net.AddFullyConnectedLayer(ann.FullyConnectedDescriptor(),
                                       ann.ConstTensor(),
                                       ann.ConstTensor())

    assert layer


def test_fullyconnected_layer_all_args():
    net = ann.INetwork()
    layer = net.AddFullyConnectedLayer(ann.FullyConnectedDescriptor(),
                                       ann.ConstTensor(),
                                       ann.ConstTensor(),
                                       'NAME1')

    assert layer
    assert 'NAME1' == layer.GetName()


def test_DepthwiseConvolution2d_layer_optional_none():
    net = ann.INetwork()
    layer = net.AddDepthwiseConvolution2dLayer(convolution2dDescriptor=ann.DepthwiseConvolution2dDescriptor(),
                                               weights=ann.ConstTensor())

    assert layer


def test_DepthwiseConvolution2d_layer_optional_provided():
    net = ann.INetwork()
    layer = net.AddDepthwiseConvolution2dLayer(convolution2dDescriptor=ann.DepthwiseConvolution2dDescriptor(),
                                               weights=ann.ConstTensor(),
                                               biases=ann.ConstTensor())

    assert layer


def test_DepthwiseConvolution2d_layer_all_args():
    net = ann.INetwork()
    layer = net.AddDepthwiseConvolution2dLayer(convolution2dDescriptor=ann.DepthwiseConvolution2dDescriptor(),
                                               weights=ann.ConstTensor(),
                                               biases=ann.ConstTensor(),
                                               name='NAME1')

    assert layer
    assert 'NAME1' == layer.GetName()


def test_Convolution2d_layer_optional_none():
    net = ann.INetwork()
    layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor(),
                                      weights=ann.ConstTensor())

    assert layer


def test_Convolution2d_layer_optional_provided():
    net = ann.INetwork()
    layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor(),
                                      weights=ann.ConstTensor(),
                                      biases=ann.ConstTensor())

    assert layer


def test_Convolution2d_layer_all_args():
    net = ann.INetwork()
    layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor(),
                                      weights=ann.ConstTensor(),
                                      biases=ann.ConstTensor(),
                                      name='NAME1')

    assert layer
    assert 'NAME1' == layer.GetName()
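
# A default-constructed ann.ConstTensor() stands in for an omitted optional
# argument here: the bindings appear to treat an empty ConstTensor as "no
# biases" (and, for FullyConnected, "no weights") rather than requiring None.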
|
||||
|
||||
|
||||
def test_add_constant_layer_to_fully_connected():
|
||||
|
||||
inputWidth = 1
|
||||
inputHeight = 1
|
||||
inputChannels = 5
|
||||
inputNum = 2
|
||||
|
||||
outputChannels = 3
|
||||
outputNum = 2
|
||||
|
||||
inputShape = ( inputNum, inputChannels, inputHeight, inputWidth )
|
||||
outputShape = ( outputNum, outputChannels )
|
||||
weightsShape = ( inputChannels, outputChannels )
|
||||
biasShape = ( outputChannels, )
|
||||
|
||||
input = np.array([
|
||||
[1.0, 2.0, 3.0, 4.0, 5.0],
|
||||
[5.0, 4.0, 3.0, 2.0, 1.0]
|
||||
], dtype=np.float32)
|
||||
|
||||
weights = np.array([
|
||||
[.5, 2., .5],
|
||||
[.5, 2., 1.],
|
||||
[.5, 2., 2.],
|
||||
[.5, 2., 3.],
|
||||
[.5, 2., 4.]
|
||||
], dtype=np.float32)
|
||||
|
||||
biasValues = np.array([10, 20, 30], dtype=np.float32)
|
||||
|
||||
expectedOutput = np.array([
|
||||
[0.5 + 1.0 + 1.5 + 2.0 + 2.5 + biasValues[0],
|
||||
2.0 + 4.0 + 6.0 + 8.0 + 10. + biasValues[1],
|
||||
0.5 + 2.0 + 6.0 + 12. + 20. + biasValues[2]],
|
||||
[2.5 + 2.0 + 1.5 + 1.0 + 0.5 + biasValues[0],
|
||||
10.0 + 8.0 + 6.0 + 4.0 + 2. + biasValues[1],
|
||||
2.5 + 4.0 + 6.0 + 6. + 4. + biasValues[2]]
|
||||
], dtype=np.float32)
|
||||
|
||||
network = ann.INetwork()
|
||||
|
||||
input_info = ann.TensorInfo(ann.TensorShape(inputShape), ann.DataType_Float32, 0, 0, True)
|
||||
input_tensor = ann.ConstTensor(input_info, input)
|
||||
input_layer = network.AddInputLayer(0, "input")
|
||||
|
||||
w_info = ann.TensorInfo(ann.TensorShape(weightsShape), ann.DataType_Float32, 0, 0, True)
|
||||
w_tensor = ann.ConstTensor(w_info, weights)
|
||||
w_layer = network.AddConstantLayer(w_tensor, "weights")
|
||||
|
||||
b_info = ann.TensorInfo(ann.TensorShape(biasShape), ann.DataType_Float32, 0, 0, True)
|
||||
b_tensor = ann.ConstTensor(b_info, biasValues)
|
||||
b_layer = network.AddConstantLayer(b_tensor, "bias")
|
||||
|
||||
fc_descriptor = ann.FullyConnectedDescriptor()
|
||||
fc_descriptor.m_BiasEnabled = True
|
||||
fc_descriptor.m_ConstantWeights = True
|
||||
fully_connected = network.AddFullyConnectedLayer(fc_descriptor, "fc")
|
||||
|
||||
output_info = ann.TensorInfo(ann.TensorShape(outputShape), ann.DataType_Float32)
|
||||
output_tensor = ann.Tensor(output_info, np.zeros([1, 1], dtype=np.float32))
|
||||
output = network.AddOutputLayer(0, "output")
|
||||
|
||||
input_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(0))
|
||||
w_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(1))
|
||||
b_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(2))
|
||||
fully_connected.GetOutputSlot(0).Connect(output.GetInputSlot(0))
|
||||
|
||||
input_layer.GetOutputSlot(0).SetTensorInfo(input_info)
|
||||
w_layer.GetOutputSlot(0).SetTensorInfo(w_info)
|
||||
b_layer.GetOutputSlot(0).SetTensorInfo(b_info)
|
||||
fully_connected.GetOutputSlot(0).SetTensorInfo(output_info)
|
||||
|
||||
preferred_backends = [ann.BackendId('CpuRef')]
|
||||
options = ann.CreationOptions()
|
||||
runtime = ann.IRuntime(options)
|
||||
opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
|
||||
net_id, messages = runtime.LoadNetwork(opt_network)
|
||||
|
||||
input_tensors = [(0, input_tensor)]
|
||||
output_tensors = [(0, output_tensor)]
|
||||
runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
|
||||
|
||||
output_vectors = ann.workload_tensors_to_ndarray(output_tensors)
|
||||
|
||||
assert (output_vectors==expectedOutput).all()
|
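For reference, the expectedOutput above is just the matrix product of input and weights plus the bias row. A quick numpy sanity check of that arithmetic (illustrative only, not part of the test file):

# Illustrative only: verify the expected output of the fully connected layer
# above is input (2x5) @ weights (5x3) plus the bias, broadcast over both rows.
import numpy as np

input = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
                  [5.0, 4.0, 3.0, 2.0, 1.0]], dtype=np.float32)
weights = np.array([[.5, 2., .5],
                    [.5, 2., 1.],
                    [.5, 2., 2.],
                    [.5, 2., 3.],
                    [.5, 2., 4.]], dtype=np.float32)
bias = np.array([10, 20, 30], dtype=np.float32)

expected = input @ weights + bias
# expected == [[17.5, 50.0, 70.5], [17.5, 50.0, 52.5]], the same values the
# hand-expanded sums in expectedOutput evaluate to.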
110 arch/arm/ARMnn/python/pyarmnn/test/test_onnx_parser.py Normal file
@@ -0,0 +1,110 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import os

import pytest
import pyarmnn as ann
import numpy as np


@pytest.fixture()
def parser(shared_data_folder):
    """
    Parse and setup the test network to be used for the tests below
    """

    # create onnx parser
    parser = ann.IOnnxParser()

    # path to model
    path_to_model = os.path.join(shared_data_folder, 'mock_model.onnx')

    # parse onnx binary & create network
    parser.CreateNetworkFromBinaryFile(path_to_model)

    yield parser


def test_onnx_parser_swig_destroy():
    assert ann.IOnnxParser.__swig_destroy__, "There is a swig python destructor defined"
    assert ann.IOnnxParser.__swig_destroy__.__name__ == "delete_IOnnxParser"


def test_check_onnx_parser_swig_ownership(parser):
    # Check to see that SWIG has ownership for parser. This instructs SWIG to take
    # ownership of the return value. This allows the value to be automatically
    # garbage-collected when it is no longer in use
    assert parser.thisown


def test_onnx_parser_get_network_input_binding_info(parser):
    input_binding_info = parser.GetNetworkInputBindingInfo("input")

    tensor = input_binding_info[1]
    assert tensor.GetDataType() == 1
    assert tensor.GetNumDimensions() == 4
    assert tensor.GetNumElements() == 784
    assert tensor.GetQuantizationOffset() == 0
    assert tensor.GetQuantizationScale() == 0


def test_onnx_parser_get_network_output_binding_info(parser):
    output_binding_info = parser.GetNetworkOutputBindingInfo("output")

    tensor = output_binding_info[1]
    assert tensor.GetDataType() == 1
    assert tensor.GetNumDimensions() == 4
    assert tensor.GetNumElements() == 10
    assert tensor.GetQuantizationOffset() == 0
    assert tensor.GetQuantizationScale() == 0


def test_onnx_filenotfound_exception(shared_data_folder):
    parser = ann.IOnnxParser()

    # path to a model that does not exist
    path_to_model = os.path.join(shared_data_folder, 'some_unknown_model.onnx')

    # attempt to parse onnx binary & create network
    with pytest.raises(RuntimeError) as err:
        parser.CreateNetworkFromBinaryFile(path_to_model)

    # Only check for part of the exception since the exception returns
    # absolute path which will change on different machines.
    assert 'Invalid (null) filename' in str(err.value)


def test_onnx_parser_end_to_end(shared_data_folder):
    parser = ann.IOnnxParser()

    network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.onnx'))

    # load test image data stored in input_onnx.npy
    input_binding_info = parser.GetNetworkInputBindingInfo("input")
    input_tensor_data = np.load(os.path.join(shared_data_folder, 'onnx_parser/input_onnx.npy')).astype(np.float32)

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    assert 0 == len(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)

    assert "" == messages

    input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
    output_tensors = ann.make_output_tensors([parser.GetNetworkOutputBindingInfo("output")])

    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output = ann.workload_tensors_to_ndarray(output_tensors)

    # Load golden output file for result comparison.
    golden_output = np.load(os.path.join(shared_data_folder, 'onnx_parser/golden_output_onnx.npy'))

    # Check that output matches golden output to 4 decimal places (there are slight rounding differences after this)
    np.testing.assert_almost_equal(output[0], golden_output, decimal=4)
@@ -0,0 +1,68 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import os

import pytest

import pyarmnn as ann


class MockIProfiler:
    def __init__(self, json_string):
        self._profile_json = json_string

    def as_json(self):
        return self._profile_json


@pytest.fixture()
def mock_profiler(shared_data_folder):
    path_to_file = os.path.join(shared_data_folder, 'mock_profile_out.json')
    with open(path_to_file, 'r') as file:
        profiler_output = file.read()
    return MockIProfiler(profiler_output)


def test_inference_exec(mock_profiler):
    profiling_data_obj = ann.get_profiling_data(mock_profiler)

    assert (len(profiling_data_obj.inference_data) > 0)
    assert (len(profiling_data_obj.per_workload_execution_data) > 0)

    # Check each total execution time
    assert (profiling_data_obj.inference_data["execution_time"] == [1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
    assert (profiling_data_obj.inference_data["time_unit"] == "us")


@pytest.mark.parametrize("exec_times, unit, backend, workload", [([2, 2, 2, 2, 2, 2],
                                                                  'us',
                                                                  'CpuRef',
                                                                  'RefSomeMock1dWorkload_Execute_#5'),
                                                                 ([2, 2, 2, 2, 2, 2],
                                                                  'us',
                                                                  'CpuAcc',
                                                                  'NeonSomeMock2Workload_Execute_#6'),
                                                                 ([2, 2, 2, 2, 2, 2],
                                                                  'us',
                                                                  'GpuAcc',
                                                                  'ClSomeMock3dWorkload_Execute_#7'),
                                                                 ([2, 2, 2, 2, 2, 2],
                                                                  'us',
                                                                  'EthosNAcc',
                                                                  'EthosNSomeMock4dWorkload_Execute_#8')
                                                                 ])
def test_profiler_workloads(mock_profiler, exec_times, unit, backend, workload):
    profiling_data_obj = ann.get_profiling_data(mock_profiler)

    work_load_exec = profiling_data_obj.per_workload_execution_data[workload]
    assert work_load_exec["execution_time"] == exec_times
    assert work_load_exec["time_unit"] == unit
    assert work_load_exec["backend"] == backend
@@ -0,0 +1,91 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import pytest
import numpy as np

import pyarmnn as ann

# import generated so we can test for Dequantize_* and Quantize_*
# functions not available in the public API.
import pyarmnn._generated.pyarmnn as gen_ann


@pytest.mark.parametrize('method', ['Quantize_int8_t',
                                    'Quantize_uint8_t',
                                    'Quantize_int16_t',
                                    'Quantize_int32_t',
                                    'Dequantize_int8_t',
                                    'Dequantize_uint8_t',
                                    'Dequantize_int16_t',
                                    'Dequantize_int32_t'])
def test_quantize_exists(method):
    assert method in dir(gen_ann) and callable(getattr(gen_ann, method))


@pytest.mark.parametrize('dt, min, max', [('uint8', 0, 255),
                                          ('int8', -128, 127),
                                          ('int16', -32768, 32767),
                                          ('int32', -2147483648, 2147483647)])
def test_quantize_uint8_output(dt, min, max):
    result = ann.quantize(3.3274056911468506, 0.02620004490017891, 128, dt)
    assert type(result) is int and min <= result <= max


@pytest.mark.parametrize('dt', ['uint8', 'int8', 'int16', 'int32'])
def test_dequantize_uint8_output(dt):
    result = ann.dequantize(3, 0.02620004490017891, 128, dt)
    assert type(result) is float


def test_quantize_unsupported_dtype():
    with pytest.raises(ValueError) as err:
        ann.quantize(3.3274056911468506, 0.02620004490017891, 128, 'uint16')

    assert 'Unexpected target datatype uint16 given.' in str(err.value)


def test_dequantize_unsupported_dtype():
    with pytest.raises(ValueError) as err:
        ann.dequantize(3, 0.02620004490017891, 128, 'uint16')

    assert 'Unexpected value datatype uint16 given.' in str(err.value)


def test_dequantize_value_range():
    with pytest.raises(ValueError) as err:
        ann.dequantize(-1, 0.02620004490017891, 128, 'uint8')

    assert 'Value is not within range of the given datatype uint8' in str(err.value)


@pytest.mark.parametrize('dt, data', [('uint8', np.uint8(255)),
                                      ('int8', np.int8(127)),
                                      ('int16', np.int16(32767)),
                                      ('int32', np.int32(2147483647)),

                                      ('uint8', np.int8(127)),
                                      ('uint8', np.int16(255)),
                                      ('uint8', np.int32(255)),

                                      ('int8', np.uint8(127)),
                                      ('int8', np.int16(127)),
                                      ('int8', np.int32(127)),

                                      ('int16', np.int8(127)),
                                      ('int16', np.uint8(255)),
                                      ('int16', np.int32(32767)),

                                      ('int32', np.uint8(255)),
                                      ('int16', np.int8(127)),
                                      ('int32', np.int16(32767))
                                      ])
def test_dequantize_numpy_dt(dt, data):
    result = ann.dequantize(data, 1, 0, dt)

    assert type(result) is float

    assert np.float32(data) == result
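The quantize/dequantize helpers exercised above follow the usual affine scheme. A minimal sketch of that relationship, assuming the standard formula real = scale * (quantized - offset); this is not the pyarmnn implementation itself:

# A minimal sketch of affine (de)quantization, under the assumption that the
# helpers above use the standard real = scale * (q - offset) mapping.
def dequantize_sketch(q, scale, offset):
    return scale * (q - offset)

def quantize_sketch(value, scale, offset, qmin, qmax):
    q = round(value / scale) + offset
    return max(qmin, min(qmax, q))  # clamp into the target dtype's range

# quantize_sketch(3.3274056911468506, 0.02620004490017891, 128, 0, 255)
# gives 255, consistent with the range assertion in test_quantize_uint8_output.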
320 arch/arm/ARMnn/python/pyarmnn/test/test_runtime.py Normal file
@@ -0,0 +1,320 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import os

import pytest
import warnings
import numpy as np

import pyarmnn as ann


@pytest.fixture(scope="function")
def random_runtime(shared_data_folder):
    parser = ann.ITfLiteParser()
    network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite'))
    preferred_backends = [ann.BackendId('CpuRef')]
    options = ann.CreationOptions()

    runtime = ann.IRuntime(options)

    graphs_count = parser.GetSubgraphCount()

    graph_id = graphs_count - 1
    input_names = parser.GetSubgraphInputTensorNames(graph_id)

    input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])
    input_tensor_id = input_binding_info[0]

    input_tensor_info = input_binding_info[1]
    input_tensor_info.SetConstant()

    output_names = parser.GetSubgraphOutputTensorNames(graph_id)

    input_data = np.random.randint(255, size=input_tensor_info.GetNumElements(), dtype=np.uint8)

    const_tensor_pair = (input_tensor_id, ann.ConstTensor(input_tensor_info, input_data))

    input_tensors = [const_tensor_pair]

    output_tensors = []

    for index, output_name in enumerate(output_names):
        out_bind_info = parser.GetNetworkOutputBindingInfo(graph_id, output_name)

        out_tensor_info = out_bind_info[1]
        out_tensor_id = out_bind_info[0]

        output_tensors.append((out_tensor_id,
                               ann.Tensor(out_tensor_info)))

    yield preferred_backends, network, runtime, input_tensors, output_tensors


@pytest.fixture(scope='function')
def mock_model_runtime(shared_data_folder):
    parser = ann.ITfLiteParser()
    network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite'))
    graph_id = 0

    input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, "input_1")

    input_tensor_data = np.load(os.path.join(shared_data_folder, 'tflite_parser/input_lite.npy'))

    preferred_backends = [ann.BackendId('CpuRef')]

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    print(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)

    print(messages)

    input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])

    output_names = parser.GetSubgraphOutputTensorNames(graph_id)
    outputs_binding_info = []

    for output_name in output_names:
        outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(graph_id, output_name))

    output_tensors = ann.make_output_tensors(outputs_binding_info)

    yield runtime, net_id, input_tensors, output_tensors


def test_python_disowns_network(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]
    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    runtime.LoadNetwork(opt_network)

    assert not opt_network.thisown


def test_load_network(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    net_id, messages = runtime.LoadNetwork(opt_network)
    assert "" == messages
    assert net_id == 0


def test_create_runtime_with_external_profiling_enabled():

    options = ann.CreationOptions()

    options.m_ProfilingOptions.m_FileOnly = True
    options.m_ProfilingOptions.m_EnableProfiling = True
    options.m_ProfilingOptions.m_OutgoingCaptureFile = "/tmp/outgoing.txt"
    options.m_ProfilingOptions.m_IncomingCaptureFile = "/tmp/incoming.txt"
    options.m_ProfilingOptions.m_TimelineEnabled = True
    options.m_ProfilingOptions.m_CapturePeriod = 1000
    options.m_ProfilingOptions.m_FileFormat = "JSON"

    runtime = ann.IRuntime(options)

    assert runtime is not None


def test_create_runtime_with_external_profiling_enabled_invalid_options():

    options = ann.CreationOptions()

    options.m_ProfilingOptions.m_FileOnly = True
    options.m_ProfilingOptions.m_EnableProfiling = False
    options.m_ProfilingOptions.m_OutgoingCaptureFile = "/tmp/outgoing.txt"
    options.m_ProfilingOptions.m_IncomingCaptureFile = "/tmp/incoming.txt"
    options.m_ProfilingOptions.m_TimelineEnabled = True
    options.m_ProfilingOptions.m_CapturePeriod = 1000
    options.m_ProfilingOptions.m_FileFormat = "JSON"

    with pytest.raises(RuntimeError) as err:
        runtime = ann.IRuntime(options)

    expected_error_message = "It is not possible to enable timeline reporting without profiling being enabled"
    assert expected_error_message in str(err.value)


def test_load_network_properties_provided(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    inputSource = ann.MemorySource_Malloc
    outputSource = ann.MemorySource_Malloc
    properties = ann.INetworkProperties(False, inputSource, outputSource)
    net_id, messages = runtime.LoadNetwork(opt_network, properties)
    assert "" == messages
    assert net_id == 0


def test_network_properties_constructor(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    inputSource = ann.MemorySource_Undefined
    outputSource = ann.MemorySource_Undefined
    properties = ann.INetworkProperties(True, inputSource, outputSource)
    assert properties.m_AsyncEnabled == True
    assert properties.m_ProfilingEnabled == False
    assert properties.m_OutputNetworkDetailsMethod == ann.ProfilingDetailsMethod_Undefined
    assert properties.m_InputSource == ann.MemorySource_Undefined
    assert properties.m_OutputSource == ann.MemorySource_Undefined

    net_id, messages = runtime.LoadNetwork(opt_network, properties)
    assert "" == messages
    assert net_id == 0


def test_unload_network_fails_for_invalid_net_id(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]

    ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    with pytest.raises(RuntimeError) as err:
        runtime.UnloadNetwork(9)

    expected_error_message = "Failed to unload network."
    assert expected_error_message in str(err.value)


def test_enqueue_workload(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]
    input_tensors = random_runtime[3]
    output_tensors = random_runtime[4]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    net_id, _ = runtime.LoadNetwork(opt_network)
    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)


def test_enqueue_workload_fails_with_empty_input_tensors(random_runtime):
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]
    input_tensors = []
    output_tensors = random_runtime[4]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    net_id, _ = runtime.LoadNetwork(opt_network)
    with pytest.raises(RuntimeError) as err:
        runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    expected_error_message = "Number of inputs provided does not match network."
    assert expected_error_message in str(err.value)


@pytest.mark.x86_64
@pytest.mark.parametrize('count', [5])
def test_multiple_inference_runs_yield_same_result(count, mock_model_runtime):
    """
    Test that results remain consistent among multiple runs of the same inference.
    """
    runtime = mock_model_runtime[0]
    net_id = mock_model_runtime[1]
    input_tensors = mock_model_runtime[2]
    output_tensors = mock_model_runtime[3]

    expected_results = np.array([[4, 85, 108, 29, 8, 16, 0, 2, 5, 0]])

    for _ in range(count):
        runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

        output_vectors = ann.workload_tensors_to_ndarray(output_tensors)

        for i in range(len(expected_results)):
            # compare element-wise against the expected results
            assert (output_vectors[i] == expected_results[i]).all()


@pytest.mark.aarch64
def test_aarch64_inference_results(mock_model_runtime):

    runtime = mock_model_runtime[0]
    net_id = mock_model_runtime[1]
    input_tensors = mock_model_runtime[2]
    output_tensors = mock_model_runtime[3]

    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output_vectors = ann.workload_tensors_to_ndarray(output_tensors)

    expected_results = np.array([[4, 85, 108, 29, 8, 16, 0, 2, 5, 0]])

    for i in range(len(expected_results)):
        # compare element-wise against the expected results
        assert (output_vectors[i] == expected_results[i]).all()


def test_enqueue_workload_with_profiler(random_runtime):
    """
    Tests ArmNN's profiling extension
    """
    preferred_backends = random_runtime[0]
    network = random_runtime[1]
    runtime = random_runtime[2]
    input_tensors = random_runtime[3]
    output_tensors = random_runtime[4]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())
    net_id, _ = runtime.LoadNetwork(opt_network)

    profiler = runtime.GetProfiler(net_id)
    # By default profiling should be turned off:
    assert profiler.IsProfilingEnabled() is False

    # Enable profiling:
    profiler.EnableProfiling(True)
    assert profiler.IsProfilingEnabled() is True

    # Run the inference:
    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    # Get profile output as a string:
    str_profile = profiler.as_json()

    # Verify that certain markers are present:
    assert len(str_profile) != 0
    assert str_profile.find('"ArmNN": {') > 0

    # Get events analysis output as a string:
    str_events_analysis = profiler.event_log()

    assert "Event Sequence - Name | Duration (ms) | Start (ms) | Stop (ms) | Device" in str_events_analysis

    assert profiler.thisown == 0


def test_check_runtime_swig_ownership(random_runtime):
    # Check to see that SWIG has ownership for runtime. This instructs SWIG to take
    # ownership of the return value. This allows the value to be automatically
    # garbage-collected when it is no longer in use
    runtime = random_runtime[2]
    assert runtime.thisown
101 arch/arm/ARMnn/python/pyarmnn/test/test_setup.py Normal file
@@ -0,0 +1,101 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
import os
import sys
import shutil

import pytest

sys.path.append(os.path.abspath('..'))
from setup import find_armnn, find_includes, linux_gcc_lib_search, check_armnn_version


@pytest.fixture(autouse=True)
def _setup_armnn(tmpdir):
    includes = str(os.path.join(tmpdir, 'include'))
    libs = str(os.path.join(tmpdir, 'lib'))
    os.environ["TEST_ARMNN_INCLUDE"] = includes
    os.environ["TEST_ARMNN_LIB"] = libs
    os.environ["EMPTY_ARMNN_INCLUDE"] = ''

    os.mkdir(includes)
    os.mkdir(libs)

    with open(os.path.join(libs, "libarmnn.so"), "w"):
        pass

    with open(os.path.join(libs, "libarmnnSomeThing1.so"), "w"):
        pass
    with open(os.path.join(libs, "libarmnnSomeThing1.so.1"), "w"):
        pass
    with open(os.path.join(libs, "libarmnnSomeThing1.so.1.2"), "w"):
        pass

    with open(os.path.join(libs, "libarmnnSomeThing2.so"), "w"):
        pass

    with open(os.path.join(libs, "libSomeThing3.so"), "w"):
        pass

    yield

    del os.environ["TEST_ARMNN_INCLUDE"]
    del os.environ["TEST_ARMNN_LIB"]
    del os.environ["EMPTY_ARMNN_INCLUDE"]
    shutil.rmtree(includes)
    shutil.rmtree(libs)


def test_find_armnn(tmpdir):
    lib_names, lib_paths = find_armnn(lib_name='libarmnn*.so',
                                      armnn_libs_env="TEST_ARMNN_LIB",
                                      default_lib_search=("/lib",))
    armnn_includes = find_includes(armnn_include_env="TEST_ARMNN_INCLUDE")

    assert [':libarmnn.so', ':libarmnnSomeThing1.so', ':libarmnnSomeThing2.so'] == sorted(lib_names)
    assert [os.path.join(tmpdir, 'lib')] == lib_paths
    assert [os.path.join(tmpdir, 'include')] == armnn_includes


def test_find_armnn_default_path(tmpdir):
    lib_names, lib_paths = find_armnn(lib_name='libarmnn*.so',
                                      armnn_libs_env="RUBBISH_LIB",
                                      default_lib_search=(os.environ["TEST_ARMNN_LIB"],))
    armnn_includes = find_includes('TEST_ARMNN_INCLUDE')
    assert [':libarmnn.so', ':libarmnnSomeThing1.so', ':libarmnnSomeThing2.so'] == sorted(lib_names)
    assert [os.path.join(tmpdir, 'lib')] == lib_paths
    assert [os.path.join(tmpdir, 'include')] == armnn_includes


def test_not_find_armnn(tmpdir):
    with pytest.raises(RuntimeError) as err:
        find_armnn(lib_name='libarmnn*.so', armnn_libs_env="RUBBISH_LIB",
                   default_lib_search=("/lib",))

    assert 'ArmNN library libarmnn*.so was not found in (\'/lib\',)' in str(err.value)


@pytest.mark.parametrize("env", ["RUBBISH_INCLUDE", "EMPTY_ARMNN_INCLUDE"])
def test_rubbish_armnn_include(tmpdir, env):
    includes = find_includes(armnn_include_env=env)
    assert includes == ['/usr/local/include', '/usr/include']


def test_gcc_search_path():
    assert linux_gcc_lib_search()


def test_armnn_version():
    check_armnn_version('28.0.0', '28.0.0')


def test_incorrect_armnn_version():
    with pytest.raises(AssertionError) as err:
        check_armnn_version('28.0.0', '28.1.0')

    assert 'Expected ArmNN version is 28.1.0 but installed ArmNN version is 28.0.0' in str(err.value)


def test_armnn_version_patch_does_not_matter():
    check_armnn_version('28.0.0', '28.0.1')
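Taken together, the three version tests above pin down the behaviour of check_armnn_version: only major and minor are compared, the patch level is ignored. A hypothetical sketch of such a check (not the actual setup.py implementation):

# Hypothetical sketch of the behaviour the version tests above describe:
# compare only the major and minor components of the version strings.
def check_armnn_version_sketch(installed, expected):
    inst_major, inst_minor = installed.split('.')[:2]
    exp_major, exp_minor = expected.split('.')[:2]
    assert (inst_major, inst_minor) == (exp_major, exp_minor), \
        "Expected ArmNN version is {} but installed ArmNN version is {}".format(expected, installed)

# check_armnn_version_sketch('28.0.0', '28.0.1') passes (patch ignored),
# check_armnn_version_sketch('28.0.0', '28.1.0') raises AssertionError.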
@@ -0,0 +1,48 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import pytest
import pyarmnn as ann


@pytest.fixture()
def get_supported_backends_setup(shared_data_folder):
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    get_device_spec = runtime.GetDeviceSpec()
    supported_backends = get_device_spec.GetSupportedBackends()

    yield supported_backends


def test_ownership():
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    device_spec = runtime.GetDeviceSpec()

    assert not device_spec.thisown


def test_to_string():
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    device_spec = runtime.GetDeviceSpec()
    expected_str = "IDeviceSpec {{ supportedBackends: [" \
                   "{}" \
                   "]}}".format(', '.join(map(lambda b: str(b), device_spec.GetSupportedBackends())))

    assert expected_str == str(device_spec)


def test_get_supported_backends_cpu_ref(get_supported_backends_setup):
    assert "CpuRef" in map(lambda b: str(b), get_supported_backends_setup)


@pytest.mark.aarch64
class TestNoneCpuRefBackends:

    @pytest.mark.parametrize("backend", ["CpuAcc"])
    def test_get_supported_backends_cpu_acc(self, get_supported_backends_setup, backend):
        assert backend in map(lambda b: str(b), get_supported_backends_setup)
144 arch/arm/ARMnn/python/pyarmnn/test/test_tensor.py Normal file
@@ -0,0 +1,144 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
from copy import copy

import pytest
import numpy as np
import pyarmnn as ann


def __get_tensor_info(dt):
    tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), dt)

    return tensor_info


@pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16,
                                ann.DataType_QAsymmU8, ann.DataType_QSymmS8,
                                ann.DataType_QAsymmS8])
def test_create_tensor_with_info(dt):
    tensor_info = __get_tensor_info(dt)
    elements = tensor_info.GetNumElements()
    num_bytes = tensor_info.GetNumBytes()
    d_type = dt

    tensor = ann.Tensor(tensor_info)

    assert tensor_info != tensor.GetInfo(), "Different objects"
    assert elements == tensor.GetNumElements()
    assert num_bytes == tensor.GetNumBytes()
    assert d_type == tensor.GetDataType()


def test_create_tensor_undefined_datatype():
    tensor_info = ann.TensorInfo()
    tensor_info.SetDataType(99)

    with pytest.raises(ValueError) as err:
        ann.Tensor(tensor_info)

    assert 'The data type provided for this Tensor is not supported.' in str(err.value)


@pytest.mark.parametrize("dt", [ann.DataType_Float32])
def test_tensor_memory_output(dt):
    tensor_info = __get_tensor_info(dt)
    tensor = ann.Tensor(tensor_info)

    # uninitialised memory area because inference has not yet been run.
    assert tensor.get_memory_area().tolist()  # contains arbitrary values
    assert 4 == tensor.get_memory_area().itemsize, "it is float32"


@pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16,
                                ann.DataType_QAsymmU8, ann.DataType_QSymmS8,
                                ann.DataType_QAsymmS8])
def test_tensor__str__(dt):
    tensor_info = __get_tensor_info(dt)
    elements = tensor_info.GetNumElements()
    num_bytes = tensor_info.GetNumBytes()
    d_type = dt
    dimensions = tensor_info.GetNumDimensions()

    tensor = ann.Tensor(tensor_info)

    assert str(tensor) == "Tensor{{DataType: {}, NumBytes: {}, NumDimensions: " \
                          "{}, NumElements: {}}}".format(d_type, num_bytes, dimensions, elements)


def test_create_empty_tensor():
    tensor = ann.Tensor()

    assert 0 == tensor.GetNumElements()
    assert 0 == tensor.GetNumBytes()
    assert tensor.get_memory_area() is None


@pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16,
                                ann.DataType_QAsymmU8, ann.DataType_QSymmS8,
                                ann.DataType_QAsymmS8])
def test_create_tensor_from_tensor(dt):
    tensor_info = __get_tensor_info(dt)
    tensor = ann.Tensor(tensor_info)
    copied_tensor = ann.Tensor(tensor)

    assert copied_tensor != tensor, "Different objects"
    assert copied_tensor.GetInfo() != tensor.GetInfo(), "Different objects"
    assert copied_tensor.get_memory_area().ctypes.data == tensor.get_memory_area().ctypes.data, "Same memory area"
    assert copied_tensor.GetNumElements() == tensor.GetNumElements()
    assert copied_tensor.GetNumBytes() == tensor.GetNumBytes()
    assert copied_tensor.GetDataType() == tensor.GetDataType()


@pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16,
                                ann.DataType_QAsymmU8, ann.DataType_QSymmS8,
                                ann.DataType_QAsymmS8])
def test_copy_tensor(dt):
    tensor = ann.Tensor(__get_tensor_info(dt))
    copied_tensor = copy(tensor)

    assert copied_tensor != tensor, "Different objects"
    assert copied_tensor.GetInfo() != tensor.GetInfo(), "Different objects"
    assert copied_tensor.get_memory_area().ctypes.data == tensor.get_memory_area().ctypes.data, "Same memory area"
    assert copied_tensor.GetNumElements() == tensor.GetNumElements()
    assert copied_tensor.GetNumBytes() == tensor.GetNumBytes()
    assert copied_tensor.GetDataType() == tensor.GetDataType()


@pytest.mark.parametrize("dt", [ann.DataType_Float32, ann.DataType_Float16,
                                ann.DataType_QAsymmU8, ann.DataType_QSymmS8,
                                ann.DataType_QAsymmS8])
def test_copied_tensor_has_memory_area_access_after_deletion_of_original_tensor(dt):

    tensor = ann.Tensor(__get_tensor_info(dt))

    tensor.get_memory_area()[0] = 100

    initial_mem_copy = np.array(tensor.get_memory_area())

    assert 100 == initial_mem_copy[0]

    copied_tensor = ann.Tensor(tensor)

    del tensor
    np.testing.assert_array_equal(copied_tensor.get_memory_area(), initial_mem_copy)
    assert 100 == copied_tensor.get_memory_area()[0]


def test_create_const_tensor_incorrect_args():
    with pytest.raises(ValueError) as err:
        ann.Tensor('something', 'something')

    expected_error_message = "Incorrect number of arguments or type of arguments provided to create Tensor."
    assert expected_error_message in str(err.value)


@pytest.mark.parametrize("dt", [ann.DataType_Float16])
def test_tensor_memory_output_fp16(dt):
    # Check Tensor with float16
    tensor_info = __get_tensor_info(dt)
    tensor = ann.Tensor(tensor_info)

    assert tensor.GetNumElements() == 6
    assert tensor.GetNumBytes() == 12
    assert tensor.GetDataType() == ann.DataType_Float16
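The copy tests above all assert the same point: copying an ann.Tensor (by copy-construction or copy.copy) is shallow with respect to the underlying buffer, and the buffer outlives the original handle. A condensed illustration using the same calls as the tests (a sketch, not an additional test):

# Condensed illustration of the shallow-copy semantics asserted above:
# both Tensor handles wrap one memory area, which survives del of the original.
info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_Float32)
t1 = ann.Tensor(info)
t1.get_memory_area()[0] = 100   # write through the first handle
t2 = ann.Tensor(t1)             # copy-construct a second handle
del t1
assert t2.get_memory_area()[0] == 100  # still visible through the copy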
99 arch/arm/ARMnn/python/pyarmnn/test/test_tensor_conversion.py Normal file
@@ -0,0 +1,99 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import os

import pytest
import pyarmnn as ann
import numpy as np


@pytest.fixture(scope="function")
def get_tensor_info_input(shared_data_folder):
    """
    Sample input tensor information.
    """
    parser = ann.ITfLiteParser()
    parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite'))
    graph_id = 0

    input_binding_info = [parser.GetNetworkInputBindingInfo(graph_id, 'input_1')]

    yield input_binding_info


@pytest.fixture(scope="function")
def get_tensor_info_output(shared_data_folder):
    """
    Sample output tensor information.
    """
    parser = ann.ITfLiteParser()
    parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite'))
    graph_id = 0

    output_names = parser.GetSubgraphOutputTensorNames(graph_id)
    outputs_binding_info = []

    for output_name in output_names:
        outputs_binding_info.append(parser.GetNetworkOutputBindingInfo(graph_id, output_name))

    yield outputs_binding_info


def test_make_input_tensors(get_tensor_info_input):
    input_tensor_info = get_tensor_info_input
    input_data = []

    for tensor_id, tensor_info in input_tensor_info:
        input_data.append(np.random.randint(0, 255, size=(1, tensor_info.GetNumElements())).astype(np.uint8))

    input_tensors = ann.make_input_tensors(input_tensor_info, input_data)
    assert len(input_tensors) == 1

    for tensor, tensor_info in zip(input_tensors, input_tensor_info):
        # Because the ConstTensor is created inside the make_input_tensors helper,
        # we cannot check its type directly; compare the type name instead.
        assert type(tensor[1]).__name__ == 'ConstTensor'
        assert str(tensor[1].GetInfo()) == str(tensor_info[1])


def test_make_output_tensors(get_tensor_info_output):
    output_binding_info = get_tensor_info_output

    output_tensors = ann.make_output_tensors(output_binding_info)
    assert len(output_tensors) == 1

    for tensor, tensor_info in zip(output_tensors, output_binding_info):
        assert type(tensor[1]) == ann.Tensor
        assert str(tensor[1].GetInfo()) == str(tensor_info[1])


def test_workload_tensors_to_ndarray(get_tensor_info_output):
    # Check shape and size of output from workload_tensors_to_ndarray matches expected.
    output_binding_info = get_tensor_info_output
    output_tensors = ann.make_output_tensors(output_binding_info)

    data = ann.workload_tensors_to_ndarray(output_tensors)

    for i in range(0, len(output_tensors)):
        assert data[i].shape == tuple(output_tensors[i][1].GetShape())
        assert data[i].size == output_tensors[i][1].GetNumElements()


def test_make_input_tensors_fp16(get_tensor_info_input):
    # Check ConstTensor with float16
    input_tensor_info = get_tensor_info_input
    input_data = []

    for tensor_id, tensor_info in input_tensor_info:
        input_data.append(np.random.randint(0, 255, size=(1, tensor_info.GetNumElements())).astype(np.float16))
        tensor_info.SetDataType(ann.DataType_Float16)  # set datatype to float16

    input_tensors = ann.make_input_tensors(input_tensor_info, input_data)
    assert len(input_tensors) == 1

    for tensor, tensor_info in zip(input_tensors, input_tensor_info):
        # Because the ConstTensor is created inside the make_input_tensors helper,
        # we cannot check its type directly; compare the type name instead.
        assert type(tensor[1]).__name__ == 'ConstTensor'
        assert str(tensor[1].GetInfo()) == str(tensor_info[1])
        assert tensor[1].GetDataType() == ann.DataType_Float16
        assert tensor[1].GetNumElements() == 28*28*1
        assert tensor[1].GetNumBytes() == (28*28*1)*2  # check each element is two bytes
27 arch/arm/ARMnn/python/pyarmnn/test/test_tensor_info.py Normal file
@@ -0,0 +1,27 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import pyarmnn as ann


def test_tensor_info_ctor_shape():
    tensor_shape = ann.TensorShape((1, 1, 2))

    tensor_info = ann.TensorInfo(tensor_shape, ann.DataType_QAsymmU8, 0.5, 1)

    assert 2 == tensor_info.GetNumElements()
    assert 3 == tensor_info.GetNumDimensions()
    assert ann.DataType_QAsymmU8 == tensor_info.GetDataType()
    assert 0.5 == tensor_info.GetQuantizationScale()
    assert 1 == tensor_info.GetQuantizationOffset()

    shape = tensor_info.GetShape()

    assert 2 == shape.GetNumElements()
    assert 3 == shape.GetNumDimensions()


def test_tensor_info__str__():
    tensor_info = ann.TensorInfo(ann.TensorShape((2, 3)), ann.DataType_QAsymmU8, 0.5, 1, True)

    assert tensor_info.__str__() == "TensorInfo{DataType: 2, IsQuantized: 1, QuantizationScale: 0.500000, " \
                                    "QuantizationOffset: 1, IsConstant: 1, NumDimensions: 2, NumElements: 6}"
78 arch/arm/ARMnn/python/pyarmnn/test/test_tensor_shape.py Normal file
@@ -0,0 +1,78 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import pytest
import pyarmnn as ann


def test_tensor_shape_tuple():
    tensor_shape = ann.TensorShape((1, 2, 3))

    assert 3 == tensor_shape.GetNumDimensions()
    assert 6 == tensor_shape.GetNumElements()


def test_tensor_shape_one():
    tensor_shape = ann.TensorShape((10,))
    assert 1 == tensor_shape.GetNumDimensions()
    assert 10 == tensor_shape.GetNumElements()


def test_tensor_shape_empty():
    with pytest.raises(RuntimeError) as err:
        ann.TensorShape(())

    assert "Tensor numDimensions must be greater than 0" in str(err.value)


def test_tensor_shape_tuple_mess():
    tensor_shape = ann.TensorShape((1, "2", 3.0))

    assert 3 == tensor_shape.GetNumDimensions()
    assert 6 == tensor_shape.GetNumElements()


def test_tensor_shape_list():

    with pytest.raises(TypeError) as err:
        ann.TensorShape([1, 2, 3])

    assert "Argument is not a tuple" in str(err.value)


def test_tensor_shape_tuple_mess_fail():

    with pytest.raises(TypeError) as err:
        ann.TensorShape((1, "two", 3.0))

    assert "All elements must be numbers" in str(err.value)


def test_tensor_shape_varargs():
    with pytest.raises(TypeError) as err:
        ann.TensorShape(1, 2, 3)

    assert "__init__() takes 2 positional arguments but 4 were given" in str(err.value)


def test_tensor_shape__get_item_out_of_bounds():
    tensor_shape = ann.TensorShape((1, 2, 3))
    with pytest.raises(ValueError) as err:
        for i in range(4):
            tensor_shape[i]

    assert "Invalid dimension index: 3 (number of dimensions is 3)" in str(err.value)


def test_tensor_shape__set_item_out_of_bounds():
    tensor_shape = ann.TensorShape((1, 2, 3))
    with pytest.raises(ValueError) as err:
        for i in range(4):
            tensor_shape[i] = 1

    assert "Invalid dimension index: 3 (number of dimensions is 3)" in str(err.value)


def test_tensor_shape___str__():
    tensor_shape = ann.TensorShape((1, 2, 3))

    assert str(tensor_shape) == "TensorShape{Shape(1, 2, 3), NumDimensions: 3, NumElements: 6}"
190 arch/arm/ARMnn/python/pyarmnn/test/test_tflite_parser.py Normal file
@@ -0,0 +1,190 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import os

import pytest
import pyarmnn as ann
import numpy as np


def test_TfLiteParserOptions_default_values():
    parserOptions = ann.TfLiteParserOptions()
    assert parserOptions.m_InferAndValidate == False
    assert parserOptions.m_StandInLayerForUnsupported == False


@pytest.fixture()
def parser(shared_data_folder):
    """
    Parse and setup the test network to be used for the tests below
    """
    parser = ann.ITfLiteParser()
    parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite'))

    yield parser


def test_tflite_parser_swig_destroy():
    assert ann.ITfLiteParser.__swig_destroy__, "There is a swig python destructor defined"
    assert ann.ITfLiteParser.__swig_destroy__.__name__ == "delete_ITfLiteParser"


def test_check_tflite_parser_swig_ownership(parser):
    # Check to see that SWIG has ownership for parser. This instructs SWIG to take
    # ownership of the return value. This allows the value to be automatically
    # garbage-collected when it is no longer in use
    assert parser.thisown


def test_tflite_parser_with_optional_options():
    parserOptions = ann.TfLiteParserOptions()
    parserOptions.m_InferAndValidate = True
    parser = ann.ITfLiteParser(parserOptions)
    assert parser.thisown


def create_with_opt():
    parserOptions = ann.TfLiteParserOptions()
    parserOptions.m_InferAndValidate = True
    return ann.ITfLiteParser(parserOptions)


def test_tflite_parser_with_optional_options_out_of_scope(shared_data_folder):
    parser = create_with_opt()
    network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, "mock_model.tflite"))

    graphs_count = parser.GetSubgraphCount()
    graph_id = graphs_count - 1

    input_names = parser.GetSubgraphInputTensorNames(graph_id)
    input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])

    output_names = parser.GetSubgraphOutputTensorNames(graph_id)

    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    assert 0 == len(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)
    assert "" == messages


def test_tflite_get_sub_graph_count(parser):
    graphs_count = parser.GetSubgraphCount()
    assert graphs_count == 1


def test_tflite_get_network_input_binding_info(parser):
    graphs_count = parser.GetSubgraphCount()
    graph_id = graphs_count - 1

    input_names = parser.GetSubgraphInputTensorNames(graph_id)

    input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])

    tensor = input_binding_info[1]
    assert tensor.GetDataType() == 2
    assert tensor.GetNumDimensions() == 4
    assert tensor.GetNumElements() == 784
    assert tensor.GetQuantizationOffset() == 128
    assert tensor.GetQuantizationScale() == 0.007843137718737125


def test_tflite_get_network_output_binding_info(parser):
    graphs_count = parser.GetSubgraphCount()
    graph_id = graphs_count - 1

    output_names = parser.GetSubgraphOutputTensorNames(graph_id)

    output_binding_info1 = parser.GetNetworkOutputBindingInfo(graph_id, output_names[0])

    # Check the tensor info retrieved from GetNetworkOutputBindingInfo
    tensor1 = output_binding_info1[1]

    assert tensor1.GetDataType() == 2
    assert tensor1.GetNumDimensions() == 2
    assert tensor1.GetNumElements() == 10
    assert tensor1.GetQuantizationOffset() == 0
    assert tensor1.GetQuantizationScale() == 0.00390625


def test_tflite_get_subgraph_input_tensor_names(parser):
    graphs_count = parser.GetSubgraphCount()
    graph_id = graphs_count - 1

    input_names = parser.GetSubgraphInputTensorNames(graph_id)

    assert input_names == ('input_1',)


def test_tflite_get_subgraph_output_tensor_names(parser):
    graphs_count = parser.GetSubgraphCount()
    graph_id = graphs_count - 1

    output_names = parser.GetSubgraphOutputTensorNames(graph_id)

    assert output_names[0] == 'dense/Softmax'


def test_tflite_filenotfound_exception(shared_data_folder):
    parser = ann.ITfLiteParser()

    with pytest.raises(RuntimeError) as err:
        parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'some_unknown_network.tflite'))

    # Only check for part of the exception since the exception returns
    # absolute path which will change on different machines.
    assert 'Cannot find the file' in str(err.value)


def test_tflite_parser_end_to_end(shared_data_folder):
    parser = ann.ITfLiteParser()

    network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, "mock_model.tflite"))

    graphs_count = parser.GetSubgraphCount()
    graph_id = graphs_count - 1

    input_names = parser.GetSubgraphInputTensorNames(graph_id)
    input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, input_names[0])

    output_names = parser.GetSubgraphOutputTensorNames(graph_id)

    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]

    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    assert 0 == len(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)
    assert "" == messages

    # Load test image data stored in input_lite.npy
    input_tensor_data = np.load(os.path.join(shared_data_folder, 'tflite_parser/input_lite.npy'))
    input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])

    output_tensors = []
    for index, output_name in enumerate(output_names):
        out_bind_info = parser.GetNetworkOutputBindingInfo(graph_id, output_name)
        out_tensor_info = out_bind_info[1]
        out_tensor_id = out_bind_info[0]
        output_tensors.append((out_tensor_id,
                               ann.Tensor(out_tensor_info)))

    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output_vectors = []
    for index, out_tensor in enumerate(output_tensors):
        output_vectors.append(out_tensor[1].get_memory_area())

    # Load golden output file for result comparison.
    expected_outputs = np.load(os.path.join(shared_data_folder, 'tflite_parser/golden_output_lite.npy'))

    # Check that output matches golden output
    assert (expected_outputs == output_vectors[0]).all()
34 arch/arm/ARMnn/python/pyarmnn/test/test_types.py Normal file
@@ -0,0 +1,34 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# SPDX-License-Identifier: MIT
import pytest
import pyarmnn as ann


def test_activation_function():
    assert 0 == ann.ActivationFunction_Sigmoid
    assert 1 == ann.ActivationFunction_TanH
    assert 2 == ann.ActivationFunction_Linear
    assert 3 == ann.ActivationFunction_ReLu
    assert 4 == ann.ActivationFunction_BoundedReLu
    assert 5 == ann.ActivationFunction_SoftReLu
    assert 6 == ann.ActivationFunction_LeakyReLu
    assert 7 == ann.ActivationFunction_Abs
    assert 8 == ann.ActivationFunction_Sqrt
    assert 9 == ann.ActivationFunction_Square


def test_permutation_vector():
    pv = ann.PermutationVector((0, 2, 3, 1))
    assert pv[0] == 0
    assert pv[2] == 3

    pv2 = ann.PermutationVector((0, 2, 3, 1))
    assert pv == pv2

    pv4 = ann.PermutationVector((0, 3, 1, 2))
    assert pv.IsInverse(pv4)

    with pytest.raises(ValueError) as err:
        pv4[4]

    assert err.type is ValueError
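The IsInverse assertion above reflects that (0, 2, 3, 1) and (0, 3, 1, 2) undo each other; these are the mappings commonly used for NCHW-to-NHWC dimension reordering and its inverse. A small numpy check of that composition (illustrative only, treating each entry as the destination of the corresponding source dimension):

# Illustrative check that the two permutations above compose to the identity.
import numpy as np

p = np.array([0, 2, 3, 1])  # assumed meaning: entry i is where source dim i goes
q = np.array([0, 3, 1, 2])
assert (q[p] == np.arange(4)).all()  # applying q after p maps every dim to itself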
36 arch/arm/ARMnn/python/pyarmnn/test/test_version.py Normal file
@@ -0,0 +1,36 @@
# Copyright © 2020 Arm Ltd. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
import os
import importlib


def test_rel_version():
    import pyarmnn._version as v
    importlib.reload(v)
    assert "dev" not in v.__version__
    del v


def test_dev_version():
    import pyarmnn._version as v
    os.environ["PYARMNN_DEV_VER"] = "1"

    importlib.reload(v)

    assert "28.0.0.dev1" == v.__version__

    del os.environ["PYARMNN_DEV_VER"]
    del v


def test_arm_version_not_affected():
    import pyarmnn._version as v
    os.environ["PYARMNN_DEV_VER"] = "1"

    importlib.reload(v)

    assert "28.0.0" == v.__arm_ml_version__

    del os.environ["PYARMNN_DEV_VER"]
    del v
146 arch/arm/ARMnn/samples/AsyncExecutionSample.cpp Normal file
@@ -0,0 +1,146 @@
|
||||
//
|
||||
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
//
|
||||
#include <armnn/INetwork.hpp>
|
||||
#include <armnn/IRuntime.hpp>
|
||||
#include <armnn/Utils.hpp>
|
||||
#include <armnn/Descriptors.hpp>
|
||||
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
|
||||
/// A simple example of using the ArmNN SDK API to run a network multiple times with different inputs in an asynchronous
|
||||
/// manner.
|
||||
///
|
||||
/// Background info: The usual runtime->EnqueueWorkload, which is used to trigger the execution of a network, is not
|
||||
/// thread safe. Each workload has memory assigned to it which would be overwritten by each thread.
|
||||
/// Before we added support for this you had to load a network multiple times to execute it at the
|
||||
/// same time. Every time a network is loaded, it takes up memory on your device. Making the
|
||||
/// execution thread safe helps to reduce the memory footprint for concurrent executions significantly.
|
||||
/// This example shows you how to execute a model concurrently (multiple threads) while still only
|
||||
/// loading it once.
|
||||
///
|
||||
/// As in most of our simple samples, the network in this example will ask the user for a single input number for each
|
||||
/// execution of the network.
|
||||
/// The network consists of a single fully connected layer with a single neuron. The neurons weight is set to 1.0f
|
||||
/// to produce an output number that is the same as the input.
|
||||
int main()
|
||||
{
|
||||
using namespace armnn;
|
||||
|
||||
// The first part of this code is very similar to the SimpleSample.cpp you should check it out for comparison
|
||||
// The interesting part starts when the graph is loaded into the runtime
|
||||
|
||||
std::vector<float> inputs;
|
||||
float number1;
|
||||
std::cout << "Please enter a number for the first iteration: " << std::endl;
|
||||
std::cin >> number1;
|
||||
    float number2;
    std::cout << "Please enter a number for the second iteration: " << std::endl;
    std::cin >> number2;

    // Turn on logging to standard output
    // This is useful in this sample so that users can learn more about what is going on
    ConfigureLogging(true, false, LogSeverity::Warning);

    // Construct ArmNN network
    NetworkId networkIdentifier;
    INetworkPtr myNetwork = INetwork::Create();

    float weightsData[] = {1.0f}; // Identity
    TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true);
    weightsInfo.SetConstant();
    ConstTensor weights(weightsInfo, weightsData);

    // Constant layer that now holds weights data for FullyConnected
    IConnectableLayer* const constantWeightsLayer = myNetwork->AddConstantLayer(weights, "const weights");

    FullyConnectedDescriptor fullyConnectedDesc;
    IConnectableLayer* const fullyConnectedLayer = myNetwork->AddFullyConnectedLayer(fullyConnectedDesc,
                                                                                     "fully connected");
    IConnectableLayer* InputLayer = myNetwork->AddInputLayer(0);
    IConnectableLayer* OutputLayer = myNetwork->AddOutputLayer(0);

    InputLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(0));
    constantWeightsLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(1));
    fullyConnectedLayer->GetOutputSlot(0).Connect(OutputLayer->GetInputSlot(0));

    // Create ArmNN runtime
    IRuntime::CreationOptions options; // default options
    IRuntimePtr run = IRuntime::Create(options);

    // Set the tensors in the network.
    TensorInfo inputTensorInfo(TensorShape({1, 1}), DataType::Float32);
    InputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);

    TensorInfo outputTensorInfo(TensorShape({1, 1}), DataType::Float32);
    fullyConnectedLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
    constantWeightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);

    // Optimise ArmNN network
    IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {Compute::CpuRef}, run->GetDeviceSpec());
    if (!optNet)
    {
        // This shouldn't happen for this simple sample, with the reference backend.
        // But in general usage Optimize could fail if the hardware at runtime cannot
        // support the model that has been provided.
        std::cerr << "Error: Failed to optimise the input network." << std::endl;
        return 1;
    }

    // Load graph into runtime.
    std::string errmsg; // To hold an eventual error message if loading the network fails
    // Add network properties to enable async execution. The MemorySource::Undefined variables indicate
    // that neither inputs nor outputs will be imported. Importing will be covered in another example.
    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
    run->LoadNetwork(networkIdentifier,
                     std::move(optNet),
                     errmsg,
                     networkProperties);

    // Creates structures for inputs and outputs. A vector of float for each execution.
    std::vector<std::vector<float>> inputData{{number1}, {number2}};
    std::vector<std::vector<float>> outputData;
    outputData.resize(2, std::vector<float>(1));

    inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
    inputTensorInfo.SetConstant(true);
    std::vector<InputTensors> inputTensors
    {
        {{0, armnn::ConstTensor(inputTensorInfo, inputData[0].data())}},
        {{0, armnn::ConstTensor(inputTensorInfo, inputData[1].data())}}
    };
    std::vector<OutputTensors> outputTensors
    {
        {{0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData[0].data())}},
        {{0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData[1].data())}}
    };

    // Lambda function to execute the network. We use it as thread function.
    auto execute = [&](unsigned int executionIndex)
    {
        auto memHandle = run->CreateWorkingMemHandle(networkIdentifier);
        run->Execute(*memHandle, inputTensors[executionIndex], outputTensors[executionIndex]);
    };

    // Prepare some threads and let each execute the network with a different input
    std::vector<std::thread> threads;
    for (unsigned int i = 0; i < inputTensors.size(); ++i)
    {
        threads.emplace_back(std::thread(execute, i));
    }

    // Wait for the threads to finish
    for (std::thread& t : threads)
    {
        if (t.joinable())
        {
            t.join();
        }
    }

    std::cout << "Your numbers were " << outputData[0][0] << " and " << outputData[1][0] << std::endl;
    return 0;
}
23
arch/arm/ARMnn/samples/CMakeLists.txt
Normal file
@ -0,0 +1,23 @@
if(BUILD_SAMPLE_APP AND ARMNNREF)
    add_executable(SimpleSample SimpleSample.cpp)
    target_link_libraries(SimpleSample armnn ${CMAKE_THREAD_LIBS_INIT})

    add_executable(AsyncExecutionSample AsyncExecutionSample.cpp)
    target_link_libraries(AsyncExecutionSample armnn ${CMAKE_THREAD_LIBS_INIT})
endif()

if(BUILD_SAMPLE_APP AND SAMPLE_DYNAMIC_BACKEND)
    add_executable(DynamicSample DynamicSample.cpp)
    target_link_libraries(DynamicSample armnn ${CMAKE_THREAD_LIBS_INIT})
endif()

if(BUILD_SAMPLE_APP AND ARMCOMPUTECL)
    add_executable(CustomMemoryAllocatorSample CustomMemoryAllocatorSample.cpp)
    target_link_libraries(CustomMemoryAllocatorSample armnn ${CMAKE_THREAD_LIBS_INIT})
endif()

if(BUILD_SAMPLE_APP AND ARMNNREF)
    add_executable(PreImportMemorySample PreImportMemorySample.cpp)
    target_link_libraries(PreImportMemorySample armnn ${CMAKE_THREAD_LIBS_INIT})
endif()
176
arch/arm/ARMnn/samples/CustomMemoryAllocatorSample.cpp
Normal file
@ -0,0 +1,176 @@
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/ArmNN.hpp>
#include <armnn/backends/ICustomAllocator.hpp>

#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLScheduler.h>

#include <iostream>

/** Sample implementation of ICustomAllocator for use with the ClBackend.
 *  Note: any memory allocated must be host addressable with write access
 *  in order for ArmNN to be able to properly use it. */
class SampleClBackendCustomAllocator : public armnn::ICustomAllocator
{
public:
    SampleClBackendCustomAllocator() = default;

    void* allocate(size_t size, size_t alignment) override
    {
        // If alignment is 0 just use the CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE for alignment
        if (alignment == 0)
        {
            alignment = arm_compute::CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();
        }
        size_t space = size + alignment + alignment;
        auto allocatedMemPtr = std::malloc(space * sizeof(size_t));

        if (std::align(alignment, size, allocatedMemPtr, space) == nullptr)
        {
            throw armnn::Exception("SampleClBackendCustomAllocator::Alignment failed");
        }
        return allocatedMemPtr;
    }

    void free(void* ptr) override
    {
        std::free(ptr);
    }

    armnn::MemorySource GetMemorySourceType() override
    {
        return armnn::MemorySource::Malloc;
    }
};

// A simple example application to show the usage of a custom memory allocator. In this sample, the user's single
// input number is multiplied by 1.0f using a fully connected layer with a single neuron to produce an output
// number that is the same as the input. All memory required to execute this mini network is allocated with
// the provided custom allocator.
//
// Using a Custom Allocator is required for use with Protected Mode and Protected Memory.
// This example is provided using only unprotected malloc as Protected Memory is platform
// and implementation specific.
//
// Note: This example is similar to the SimpleSample application that can also be found in armnn/samples.
// The differences are in the use of a custom allocator, the backend is GpuAcc, and the inputs/outputs
// are being imported instead of copied. (Import must be enabled when using a Custom Allocator)
// You might find this useful for comparison.
int main()
{
    using namespace armnn;

    float number;
    std::cout << "Please enter a number: " << std::endl;
    std::cin >> number;

    // Turn on logging to standard output
    // This is useful in this sample so that users can learn more about what is going on
    armnn::ConfigureLogging(true, false, LogSeverity::Info);

    // Construct ArmNN network
    armnn::NetworkId networkIdentifier;
    INetworkPtr myNetwork = INetwork::Create();
    armnn::FullyConnectedDescriptor fullyConnectedDesc;
    float weightsData[] = {1.0f}; // Identity
    TensorInfo weightsInfo(TensorShape({1, 1}), DataType::Float32, 0.0f, 0, true);
    weightsInfo.SetConstant(true);
    armnn::ConstTensor weights(weightsInfo, weightsData);
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    IConnectableLayer* fullyConnected = myNetwork->AddFullyConnectedLayer(fullyConnectedDesc,
                                                                          weights,
                                                                          EmptyOptional(),
                                                                          "fully connected");
    ARMNN_NO_DEPRECATE_WARN_END
    IConnectableLayer* InputLayer = myNetwork->AddInputLayer(0);
    IConnectableLayer* OutputLayer = myNetwork->AddOutputLayer(0);
    InputLayer->GetOutputSlot(0).Connect(fullyConnected->GetInputSlot(0));
    fullyConnected->GetOutputSlot(0).Connect(OutputLayer->GetInputSlot(0));

    // Create ArmNN runtime:
    //
    // This is the interesting bit when executing a model with a custom allocator.
    // You can have different allocators for different backends. To support this
    // the runtime creation option has a map that takes a BackendId and the corresponding
    // allocator that should be used for that backend.
    // Only GpuAcc supports a Custom Allocator for now.
    //
    // Note: This is not covered in this example but if you want to run a model on
    // protected memory a custom allocator needs to be provided that supports
    // protected memory allocations and the MemorySource of that allocator is
    // set to MemorySource::DmaBufProtected
    IRuntime::CreationOptions options;
    auto customAllocator = std::make_shared<SampleClBackendCustomAllocator>();
    options.m_CustomAllocatorMap = {{"GpuAcc", std::move(customAllocator)}};
    IRuntimePtr runtime = IRuntime::Create(options);

    // Set the tensors in the network.
    TensorInfo inputTensorInfo(TensorShape({1, 1}), DataType::Float32);
    InputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);

    unsigned int numElements = inputTensorInfo.GetNumElements();
    size_t totalBytes = numElements * sizeof(float);

    TensorInfo outputTensorInfo(TensorShape({1, 1}), DataType::Float32);
    fullyConnected->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Optimise ArmNN network
    OptimizerOptions optOptions;
    optOptions.m_ImportEnabled = true;
    armnn::IOptimizedNetworkPtr optNet =
        Optimize(*myNetwork, {"GpuAcc"}, runtime->GetDeviceSpec(), optOptions);
    if (!optNet)
    {
        // This shouldn't happen for this simple sample, with the GpuAcc backend.
        // But in general usage Optimize could fail if the backend at runtime cannot
        // support the model that has been provided.
        std::cerr << "Error: Failed to optimise the input network." << std::endl;
        return 1;
    }

    // Load graph into runtime
    std::string ignoredErrorMessage;
    INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
    runtime->LoadNetwork(networkIdentifier, std::move(optNet), ignoredErrorMessage, networkProperties);

    // Creates structures for input & output
    const size_t alignment =
        arm_compute::CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();

    void* alignedInputPtr = options.m_CustomAllocatorMap["GpuAcc"]->allocate(totalBytes, alignment);

    // Fill the input buffer with the user-supplied number
    auto* inputPtr = reinterpret_cast<float*>(alignedInputPtr);
    std::fill_n(inputPtr, numElements, number);

    // Pre-fill the output buffer with a sentinel value so a successful run is visible
    void* alignedOutputPtr = options.m_CustomAllocatorMap["GpuAcc"]->allocate(totalBytes, alignment);
    auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr);
    std::fill_n(outputPtr, numElements, -10.0f);

    inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, 0);
    inputTensorInfo.SetConstant(true);
    armnn::InputTensors inputTensors
    {
        {0, armnn::ConstTensor(inputTensorInfo, alignedInputPtr)},
    };
    armnn::OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(networkIdentifier, 0), alignedOutputPtr)}
    };

    // Execute network
    runtime->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);

    // Tell the CLBackend to sync memory so we can read the output.
    arm_compute::CLScheduler::get().sync();
    auto* outputResult = reinterpret_cast<float*>(alignedOutputPtr);
    std::cout << "Your number was " << outputResult[0] << std::endl;
    runtime->UnloadNetwork(networkIdentifier);
    return 0;
}
82
arch/arm/ARMnn/samples/DynamicSample.cpp
Normal file
@ -0,0 +1,82 @@
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <armnn/INetwork.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/Utils.hpp>
#include <armnn/Descriptors.hpp>

#include <iostream>

/// A simple example of using the ArmNN SDK API with the standalone sample dynamic backend.
/// In this example, an addition layer is used to add 2 input tensors to produce a result output tensor.
int main()
{
    using namespace armnn;

    // Construct ArmNN network
    armnn::NetworkId networkIdentifier;
    INetworkPtr myNetwork = INetwork::Create();

    IConnectableLayer* input0 = myNetwork->AddInputLayer(0);
    IConnectableLayer* input1 = myNetwork->AddInputLayer(1);
    IConnectableLayer* add = myNetwork->AddAdditionLayer();
    IConnectableLayer* output = myNetwork->AddOutputLayer(0);

    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    TensorInfo tensorInfo(TensorShape({2, 1}), DataType::Float32);
    input0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    add->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // Create ArmNN runtime
    IRuntime::CreationOptions options; // default options
    armnn::IRuntimePtr run(armnn::IRuntime::Create(options));

    // Optimise ArmNN network
    armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"SampleDynamic"}, run->GetDeviceSpec());
    if (!optNet)
    {
        // This shouldn't happen for this simple sample, with the reference backend.
        // But in general usage Optimize could fail if the hardware at runtime cannot
        // support the model that has been provided.
        std::cerr << "Error: Failed to optimise the input network." << std::endl;
        return 1;
    }

    // Load graph into runtime
    run->LoadNetwork(networkIdentifier, std::move(optNet));

    // Input data
    std::vector<float> input0Data
    {
        5.0f, 3.0f
    };
    std::vector<float> input1Data
    {
        10.0f, 8.0f
    };
    std::vector<float> outputData(2);

    TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
    inputTensorInfo.SetConstant(true);
    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(inputTensorInfo, input0Data.data())},
        {1, armnn::ConstTensor(inputTensorInfo, input1Data.data())}
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
    };

    // Execute network
    run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);

    std::cout << "Addition operator result is {" << outputData[0] << "," << outputData[1] << "}" << std::endl;
    return 0;
}
138
arch/arm/ARMnn/samples/ImageClassification/README.md
Normal file
@ -0,0 +1,138 @@
# Image Classification with the Arm NN Tensorflow Lite Delegate

This application demonstrates the use of the Arm NN Tensorflow Lite Delegate.
In this application we integrate the Arm NN Tensorflow Lite Delegate into the
TensorFlow Lite Python package.

## Before You Begin

This repository assumes you have built, or have downloaded, the
`libarmnnDelegate.so` and `libarmnn.so` from the GitHub releases page. You will
also need to have built the TensorFlow Lite library from source if you plan on building
these ArmNN library files yourself.

If you have not already installed these, please follow our guides in the ArmNN
repository. The guide to build the delegate can be found
[here](../../delegate/BuildGuideNative.md) and the guide to integrate the
delegate into Python can be found
[here](../../delegate/DelegateQuickStartGuide.md).

This guide will assume you have retrieved the binaries
from the ArmNN Github page, so there is no need to build Tensorflow from source.

## Getting Started

Before running the application, we will first need to:

- Install the required Python packages
- Download this example
- Download a model and corresponding label mapping
- Download an example image

1. Install required packages and Git Large File Storage (to download models
   from the Arm ML-Zoo).

   ```bash
   sudo apt-get install -y python3 python3-pip wget git git-lfs unzip
   git lfs install
   ```

2. Clone the Arm NN repository and change directory to this example.

   ```bash
   git clone https://github.com/arm-software/armnn.git
   cd armnn/samples/ImageClassification
   ```

3. Download your model and label mappings.

   For this example we use the `MobileNetV2` model. This model can be found in
   the Arm ML-Zoo as well as scripts to download the labels for the model.

   ```bash
   export BASEDIR=$(pwd)
   #clone the model zoo
   git clone https://github.com/arm-software/ml-zoo.git
   #go to the mobilenetv2 uint8 folder
   cd ml-zoo/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8
   #generate the labelmapping
   ./get_class_labels.sh
   #cd back to this project folder
   cd $BASEDIR
   #copy your model and label mapping
   cp ml-zoo/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8/mobilenet_v2_1.0_224_quantized_1_default_1.tflite .
   cp ml-zoo/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8/labelmappings.txt .
   ```

4. Download a test image.

   ```bash
   wget -O cat.png "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
   ```

5. Install the required Python packages.

   ```bash
   pip3 install -r requirements.txt
   ```

6. Copy the `libarmnnDelegate.so` and `libarmnn.so` library files you
   built or downloaded earlier into this application folder. For example:

   ```bash
   cp /path/to/armnn/binaries/libarmnnDelegate.so .
   cp /path/to/armnn/binaries/libarmnn.so .
   ```

## Folder Structure

You should now have the following folder structure:

```
.
├── README.md
├── run_classifier.py # script for the demo
├── libarmnnDelegate.so
├── libarmnn.so
├── cat.png # downloaded example image
├── mobilenet_v2_1.0_224_quantized_1_default_1.tflite # tflite model from ml-zoo
└── labelmappings.txt # model label mappings for output processing
```

## Run the model

```bash
python3 run_classifier.py \
  --input_image cat.png \
  --model_file mobilenet_v2_1.0_224_quantized_1_default_1.tflite \
  --label_file labelmappings.txt \
  --delegate_path /path/to/armnn/binaries/libarmnnDelegate.so \
  --preferred_backends GpuAcc CpuAcc CpuRef
```

The output prediction will be printed. In this example we get:

```bash
'tabby, tabby cat'
```

## Running an inference with the Arm NN TensorFlow Lite Delegate

Compared to your usual TensorFlow Lite projects, using the Arm NN TensorFlow
Lite Delegate requires one extra step when loading in your model:

```python
import tflite_runtime.interpreter as tflite

armnn_delegate = tflite.load_delegate("/path/to/armnn/binaries/libarmnnDelegate.so",
    options={
        "backends": "GpuAcc,CpuAcc,CpuRef",
        "logging-severity": "info"
    }
)
interpreter = tflite.Interpreter(
    model_path="mobilenet_v2_1.0_224_quantized_1_default_1.tflite",
    experimental_delegates=[armnn_delegate]
)
```
@ -0,0 +1,3 @@
numpy==1.20.2
Pillow==8.2.0
pybind11==2.6.2
237
arch/arm/ARMnn/samples/ImageClassification/run_classifier.py
Normal file
@ -0,0 +1,237 @@
import argparse
from pathlib import Path
from typing import Union

import tflite_runtime.interpreter as tflite
from PIL import Image
import numpy as np


def check_args(args: argparse.Namespace):
    """Check the values used in the command-line have acceptable values

    args:
      - args: argparse.Namespace

    returns:
      - None

    raises:
      - FileNotFoundError: if passed files do not exist.
      - IOError: if files are of incorrect format.
    """
    input_image_p = args.input_image
    if input_image_p.suffix not in (".png", ".jpg", ".jpeg"):
        raise IOError(
            "--input_image option should point to an image file of the "
            "format .jpg, .jpeg, .png"
        )
    if not input_image_p.exists():
        raise FileNotFoundError("Cannot find ", input_image_p.name)
    model_p = args.model_file
    if not model_p.suffix == ".tflite":
        raise IOError("--model_file should point to a tflite file.")
    if not model_p.exists():
        raise FileNotFoundError("Cannot find ", model_p.name)
    label_mapping_p = args.label_file
    if not label_mapping_p.suffix == ".txt":
        raise IOError("--label_file expects a .txt file.")
    if not label_mapping_p.exists():
        raise FileNotFoundError("Cannot find ", label_mapping_p.name)

    # check all args given in preferred backends make sense
    supported_backends = ["GpuAcc", "CpuAcc", "CpuRef"]
    if not all([backend in supported_backends for backend in args.preferred_backends]):
        raise ValueError("Incorrect backends given. Please choose from "
                         "'GpuAcc', 'CpuAcc', 'CpuRef'.")

    return None


def load_image(image_path: Path, model_input_dims: Union[tuple, list], grayscale: bool):
    """load an image and put into correct format for the tensorflow lite model

    args:
      - image_path: pathlib.Path
      - model_input_dims: tuple (or array-like). (height,width)
      - grayscale: bool, convert the image to grayscale if True

    returns:
      - image: np.array
    """
    height, width = model_input_dims
    # load and resize image
    image = Image.open(image_path).resize((width, height))
    # convert to grayscale if expected
    if grayscale:
        image = image.convert("LA")

    image = np.expand_dims(image, axis=0)

    return image


def load_delegate(delegate_path: Path, backends: list):
    """load the armnn delegate.

    args:
      - delegate_path: pathlib.Path -> location of your libarmnnDelegate.so
      - backends: list -> list of backends you want to use in string format

    returns:
      - armnn_delegate: tflite.delegate
    """
    # create a comma separated string of backends
    backend_string = ",".join(backends)
    # load delegate
    armnn_delegate = tflite.load_delegate(
        library=delegate_path,
        options={"backends": backend_string, "logging-severity": "info"},
    )

    return armnn_delegate


def load_tf_model(model_path: Path, armnn_delegate: tflite.Delegate):
    """load a tflite model for use with the armnn delegate.

    args:
      - model_path: pathlib.Path
      - armnn_delegate: tflite.TfLiteDelegate

    returns:
      - interpreter: tflite.Interpreter
    """
    interpreter = tflite.Interpreter(
        model_path=model_path.as_posix(), experimental_delegates=[armnn_delegate]
    )
    interpreter.allocate_tensors()

    return interpreter


def run_inference(interpreter, input_image):
    """Run inference on a processed input image and return the output from
    inference.

    args:
      - interpreter: tflite_runtime.interpreter.Interpreter
      - input_image: np.array

    returns:
      - output_data: np.array
    """
    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Run inference on the input image.
    interpreter.set_tensor(input_details[0]["index"], input_image)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]["index"])

    return output_data


def create_mapping(label_mapping_p):
    """Creates a Python dictionary mapping an index to a label.

    label_mapping[idx] = label

    args:
      - label_mapping_p: pathlib.Path

    returns:
      - label_mapping: dict
    """
    idx = 0
    label_mapping = {}
    with open(label_mapping_p) as label_mapping_raw:
        for line in label_mapping_raw:
            label_mapping[idx] = line
            idx += 1

    return label_mapping


def process_output(output_data, label_mapping):
    """Process the output tensor into a label from the labelmapping file. Takes
    the index of the maximum value from the output array.

    args:
      - output_data: np.array
      - label_mapping: dict

    returns:
      - str: labelmapping for max index.
    """
    idx = np.argmax(output_data[0])

    return label_mapping[idx]


def main(args):
    """Run the inference for options passed in the command line.

    args:
      - args: argparse.Namespace

    returns:
      - None
    """
    # sanity check on args
    check_args(args)
    # load in the armnn delegate
    armnn_delegate = load_delegate(args.delegate_path, args.preferred_backends)
    # load tflite model
    interpreter = load_tf_model(args.model_file, armnn_delegate)
    # get input shape for image resizing
    input_shape = interpreter.get_input_details()[0]["shape"]
    height, width = input_shape[1], input_shape[2]
    input_shape = (height, width)
    # load input image
    input_image = load_image(args.input_image, input_shape, False)
    # get label mapping
    labelmapping = create_mapping(args.label_file)
    output_tensor = run_inference(interpreter, input_image)
    output_prediction = process_output(output_tensor, labelmapping)

    print("Prediction: ", output_prediction)

    return None


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--input_image", help="File path of image file", type=Path, required=True
    )
    parser.add_argument(
        "--model_file",
        help="File path of the model tflite file",
        type=Path,
        required=True,
    )
    parser.add_argument(
        "--label_file",
        help="File path of model labelmapping file",
        type=Path,
        required=True,
    )
    parser.add_argument(
        "--delegate_path",
        help="File path of ArmNN delegate file",
        type=Path,
        required=True,
    )
    parser.add_argument(
        "--preferred_backends",
        help="list of backends in order of preference",
        type=str,
        nargs="+",
        required=False,
        default=["CpuAcc", "CpuRef"],
    )
    args = parser.parse_args()

    main(args)
64
arch/arm/ARMnn/samples/KeywordSpotting/CMakeLists.txt
Normal file
@ -0,0 +1,64 @@
# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

cmake_minimum_required(VERSION 3.0.2)

set(CMAKE_C_STANDARD 99)
set(CMAKE_CXX_STANDARD 14)

# Make the standard a requirement => prevent fallback to previous
# supported standard
set(CMAKE_C_STANDARD_REQUIRED ON)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# We want to pass standard C/C++ flags, without gnu extensions
set(CMAKE_C_EXTENSIONS OFF)
set(CMAKE_CXX_EXTENSIONS OFF)

project (keyword-spotting-example)

set(CMAKE_C_FLAGS_DEBUG "-DDEBUG -O0 -g -fPIC -pthread")
set(CMAKE_C_FLAGS_RELEASE "-DNDEBUG -O3 -fPIC -pthread")

set(CMAKE_CXX_FLAGS_DEBUG "-DDEBUG -O0 -g -fPIC -pthread")
set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG -O3 -fPIC -pthread")

include(ExternalProject)

# Build in release mode by default
if (NOT CMAKE_BUILD_TYPE STREQUAL Debug)
    set(CMAKE_BUILD_TYPE Release CACHE INTERNAL "")
endif()

set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

if (NOT DEFINED DEPENDENCIES_DIR)
    set(DEPENDENCIES_DIR ${CMAKE_BINARY_DIR}/dependencies)
endif()

include(../common/cmake/find_armnn.cmake)

include_directories(include)
include_directories(../common/include/ArmnnUtils)
include_directories(../common/include/Utils)
include_directories(../common/include/Audio)

file(GLOB SOURCES "src/*.cpp")
file(GLOB COMMON_UTILS_SOURCES "../common/src/Utils/*.cpp")
file(GLOB COMMON_AUDIO_SOURCES "../common/src/Audio/*.cpp")
list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/Main.cpp)
file(GLOB TEST_SOURCES "test/*.cpp")
file(GLOB APP_MAIN "src/Main.cpp")

if(BUILD_UNIT_TESTS)
    include(cmake/unit_tests.cmake)
endif()

set(APP_TARGET_NAME "${CMAKE_PROJECT_NAME}")

add_executable("${APP_TARGET_NAME}" ${COMMON_UTILS_SOURCES} ${COMMON_AUDIO_SOURCES} ${SOURCES} ${APP_MAIN})

target_link_libraries("${APP_TARGET_NAME}" PUBLIC ${ARMNN_LIBS} -lsndfile -lsamplerate)
target_include_directories("${APP_TARGET_NAME}" PUBLIC ${ARMNN_INCLUDE_DIR})
283
arch/arm/ARMnn/samples/KeywordSpotting/Readme.md
Normal file
@ -0,0 +1,283 @@
# Keyword Spotting Example

## Introduction

This is sample code showing keyword spotting using the Arm NN public C++ API. The compiled application can take

* an audio file

as input and produce

* the recognised keyword in the audio file

as output. The application works with the [fully quantised DS CNN Large model](https://github.com/ARM-software/ML-zoo/raw/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8/) which is trained to recognize 12 keywords, including an unknown word.

## Dependencies

This example utilises the `libsndfile`, `libasound` and `libsamplerate` libraries to capture the raw audio data from file, and to re-sample to the expected sample rate. The top level inference API is provided by the Arm NN library.

### Arm NN

The keyword spotting example build system does not trigger Arm NN compilation. Thus, before building the application,
please ensure that Arm NN libraries and header files are available on your build platform.
The application executable binary dynamically links with the following Arm NN libraries:

* libarmnn.so
* libarmnnTfLiteParser.so

The build script searches for available Arm NN libraries in the following order:

1. Inside the custom user directory specified by the ARMNN_LIB_DIR cmake option.
2. Inside the current Arm NN repository, assuming that Arm NN was built following [these instructions](../../BuildGuideCrossCompilation.md).
3. Inside default locations for system libraries, assuming Arm NN was installed from deb packages.

Arm NN header files will be searched for in the parent directory of the found library files, under an `include` directory, i.e.
libraries found in `/usr/lib` or `/usr/lib64` and header files in `/usr/include` (or `${ARMNN_LIB_DIR}/include`).

Please see [find_armnn.cmake](./cmake/find_armnn.cmake) for implementation details.

## Building

There is one flow for building this application:

* native build on a host platform

### Build Options

* ARMNN_LIB_DIR - point to the custom location of the Arm NN libs and headers.
* BUILD_UNIT_TESTS - set to `1` to build tests. In addition to the main application, a `keyword-spotting-example-tests`
unit test executable will be created.

### Native Build

To build this application on a host platform, first ensure that the required dependencies are installed.
For example, for a Raspberry Pi:

```commandline
sudo apt-get update
sudo apt-get -yq install libsndfile1-dev
sudo apt-get -yq install libasound2-dev
sudo apt-get -yq install libsamplerate-dev
```

To build the demo application, create a build directory:

```commandline
mkdir build
cd build
```

If you have already installed Arm NN and the required libraries, run the cmake and make commands inside the build directory:

```commandline
cmake ..
make
```

This will build the following in the bin directory:

* `keyword-spotting-example` - application executable

If you have a custom Arm NN location, use the `ARMNN_LIB_DIR` option:

```commandline
cmake -DARMNN_LIB_DIR=/path/to/armnn ..
make
```

## Executing

Once the application executable is built, it can be executed with the following options:

* --audio-file-path: Path to the audio file to run keyword spotting on **[REQUIRED]**
* --model-file-path: Path to the Keyword Spotting model to use **[REQUIRED]**
* --preferred-backends: Takes the preferred backends in preference order, separated by comma.
For example: `CpuAcc,GpuAcc,CpuRef`. Accepted options: [`CpuAcc`, `CpuRef`, `GpuAcc`].
Defaults to `CpuRef` **[OPTIONAL]**

### Keyword Spotting on a supplied audio file

A small selection of suitable wav files containing keywords can be found [here](https://git.mlplatform.org/ml/ethos-u/ml-embedded-evaluation-kit.git/plain/resources/kws/samples/).
To run keyword spotting on a supplied audio file and output the result to console:

```commandline
./keyword-spotting-example --audio-file-path /path/to/audio/file --model-file-path /path/to/model/file
```

# Application Overview

This section provides a walkthrough of the application, explaining in detail the steps:

1. Initialisation
   1. Reading from Audio Source
2. Creating a Network
   1. Creating Parser and Importing Graph
   2. Optimizing Graph for Compute Device
   3. Creating Input and Output Binding Information
3. Keyword spotting pipeline
   1. Pre-processing the Captured Audio
   2. Making Input and Output Tensors
   3. Executing Inference
   4. Postprocessing
   5. Decoding and Processing Inference Output

### Initialisation

##### Reading from Audio Source

After parsing user arguments, the chosen audio file is loaded into an AudioCapture object.
We use [`AudioCapture`](./include/AudioCapture.hpp) in our main function to capture appropriately sized audio blocks from the source using the
`Next()` function, as shown in the sketch below.

The `AudioCapture` object also re-samples the audio input to a desired sample rate, and sets the number of channels used to one channel (i.e. `mono`).
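As a rough illustration (a minimal sketch only: the constructor and method names are assumed to match this example's [`AudioCapture`](./include/AudioCapture.hpp) as used in [`Main.cpp`](./src/Main.cpp), and `samplesPerInference` is a placeholder for the block size obtained from the pipeline):

```c++
// Sketch: load a file, then iterate over model-sized blocks of samples.
// Method names assume this example's AudioCapture.hpp; treat as illustrative.
audio::AudioCapture capture;
std::vector<float> audioData = capture.LoadAudioFile("/path/to/audio/file");
capture.InitSlidingWindow(audioData.data(), audioData.size(),
                          samplesPerInference, samplesPerInference / 2);

while (capture.HasNext())
{
    std::vector<float> audioBlock = capture.Next();
    // ... pre-process audioBlock and run inference ...
}
```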
### Creating a Network

All operations with Arm NN and networks are encapsulated in the [`ArmnnNetworkExecutor`](./include/ArmnnNetworkExecutor.hpp)
class.

##### Creating Parser and Importing Graph

The first step with Arm NN SDK is to import a graph from file by using the appropriate parser.

The Arm NN SDK provides parsers for reading graphs from a variety of model formats. In our application we specifically
focus on `.tflite, .pb, .onnx` models.

Based on the extension of the provided model file, the corresponding parser is created and the network file loaded with
the `CreateNetworkFromBinaryFile()` method. The parser will handle the creation of the underlying Arm NN graph.

Currently this example only supports tflite format model files and uses `ITfLiteParser`:

```c++
#include "armnnTfLiteParser/ITfLiteParser.hpp"

armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
```

##### Optimizing Graph for Compute Device

Arm NN supports optimized execution on multiple CPU and GPU devices. Prior to executing a graph, we must select the
appropriate device context. We do this by creating a runtime context with default options via `IRuntime()`.

For example:

```c++
#include "armnn/ArmNN.hpp"

auto runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
```

We can optimize the imported graph by specifying a list of backends in order of preference and implement
backend-specific optimizations. The backends are identified by a string unique to the backend,
for example `CpuAcc, GpuAcc, CpuRef`.

For example:

```c++
std::vector<armnn::BackendId> backends{"CpuAcc", "GpuAcc", "CpuRef"};
```

Internally and transparently, Arm NN splits the graph into subgraphs based on the backends, calls an optimize-subgraphs
function on each of them and, if possible, substitutes the corresponding subgraph in the original graph with
its optimized version.

Using the `Optimize()` function we optimize the graph for inference and load the optimized network onto the compute
device with `LoadNetwork()`. This function creates the backend-specific workloads
for the layers and a backend specific workload factory which is called to create the workloads.

For example:

```c++
armnn::IOptimizedNetworkPtr optNet = Optimize(*network,
                                              backends,
                                              runtime->GetDeviceSpec(),
                                              armnn::OptimizerOptions());
std::string errorMessage;
runtime->LoadNetwork(0, std::move(optNet), errorMessage);
std::cerr << errorMessage << std::endl;
```

##### Creating Input and Output Binding Information

Parsers can also be used to extract the input information for the network. By calling `GetSubgraphInputTensorNames`
we extract all the input names and, with `GetNetworkInputBindingInfo`, bind the input points of the graph.
For example:

```c++
std::vector<std::string> inputNames = parser->GetSubgraphInputTensorNames(0);
auto inputBindingInfo = parser->GetNetworkInputBindingInfo(0, inputNames[0]);
```

The input binding information contains all the essential information about the input. It is a tuple consisting of
integer identifiers for bindable layers (inputs, outputs) and the tensor info (data type, quantization information,
number of dimensions, total number of elements).

Similarly, we can get the output binding information for an output layer by using the parser to retrieve output
tensor names and calling `GetNetworkOutputBindingInfo()`.
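A minimal sketch of the output side, mirroring the input binding snippet above (`GetSubgraphOutputTensorNames` is the output-side counterpart on `ITfLiteParser`):

```c++
std::vector<std::string> outputNames = parser->GetSubgraphOutputTensorNames(0);
auto outputBindingInfo = parser->GetNetworkOutputBindingInfo(0, outputNames[0]);
```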
### Keyword Spotting pipeline

The keyword spotting pipeline has 3 steps to perform: data pre-processing, inference execution and decoding of the inference results.

See [`KeywordSpottingPipeline`](include/KeywordSpottingPipeline.hpp) for more details; a sketch of the full flow follows.
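As a sketch of how the three steps chain together (assembled from the code snippets in the sections below; `capture`, `kwsPipeline` and `labels` are assumed to be set up as in [`Main.cpp`](./src/Main.cpp)):

```c++
while (capture.HasNext())
{
    // Step 1: extract quantised MFCC features from the next audio block
    std::vector<float> audioBlock = capture.Next();
    std::vector<int8_t> preprocessedData = kwsPipeline->PreProcessing(audioBlock);

    // Step 2: run the network on the features
    common::InferenceResults<int8_t> results;
    kwsPipeline->Inference(preprocessedData, results);

    // Step 3: decode the result and report the most probable keyword
    kwsPipeline->PostProcessing(results, labels,
                                [](int index, std::string& label, float prob) -> void {
                                    printf("Keyword \"%s\", probability %f\n", label.c_str(), prob);
                                });
}
```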
#### Pre-processing the Audio Input

Each frame captured from source is read and stored by the AudioCapture object.
Its `Next()` function provides us with the correctly positioned window of data, sized appropriately for the given model, to pre-process before inference.

```c++
std::vector<float> audioBlock = capture.Next();
...
std::vector<int8_t> preprocessedData = kwsPipeline->PreProcessing(audioBlock);
```

The `MFCC` class is then used to extract the Mel-frequency Cepstral Coefficients (MFCCs, [see Wikipedia](https://en.wikipedia.org/wiki/Mel-frequency_cepstrum)) from each stored audio frame in the provided window of audio, to be used as features for the network. MFCCs are the result of computing the dot product of the Discrete Cosine Transform (DCT) matrix and the log of the Mel energy.
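In symbols, restating the sentence above (with $\mathbf{D}$ the DCT matrix and $\mathbf{m}$ the vector of Mel filterbank energies):

$$\mathrm{MFCC} = \mathbf{D}\,\log(\mathbf{m})$$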
After all the MFCCs needed for an inference have been extracted from the audio data, they are concatenated to make the input tensor for the model.

#### Executing Inference

```c++
common::InferenceResults<int8_t> results;
...
kwsPipeline->Inference(preprocessedData, results);
```

The inference step calls the `ArmnnNetworkExecutor::Run` method, which prepares the input tensors and executes the inference.
A compute device performs inference for the loaded network using the `EnqueueWorkload()` function of the runtime context.
For example:

```c++
//const void* inputData = ...;
//outputTensors were pre-allocated before

armnn::InputTensors inputTensors = {{ inputBindingInfo.first, armnn::ConstTensor(inputBindingInfo.second, inputData)}};
runtime->EnqueueWorkload(0, inputTensors, outputTensors);
```

We allocate memory for output data once and map it to output tensor objects. After successful inference, we read data
from the pre-allocated output data buffer. See [`ArmnnNetworkExecutor::ArmnnNetworkExecutor`](./src/ArmnnNetworkExecutor.cpp)
and [`ArmnnNetworkExecutor::Run`](./src/ArmnnNetworkExecutor.cpp) for more details.

#### Postprocessing

##### Decoding

The output from the inference is decoded to obtain the spotted keyword: the word with the highest probability is printed to the console.

```c++
kwsPipeline->PostProcessing(results, labels,
                            [](int index, std::string& label, float prob) -> void {
                                printf("Keyword \"%s\", index %d, probability %f\n",
                                       label.c_str(),
                                       index,
                                       prob);
                            });
```

The produced string is displayed on the console.
@ -0,0 +1,65 @@
# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

# Function to download a file from the Arm Model Zoo
function(download_file_from_modelzoo model_zoo_version file_sub_path download_path)
    set(MODEL_ZOO_REPO "https://github.com/ARM-software/ML-zoo/raw")
    string(JOIN "/" FILE_URL
        ${MODEL_ZOO_REPO} ${model_zoo_version} ${file_sub_path})
    message(STATUS "Downloading ${FILE_URL} to ${download_path}...")
    file(DOWNLOAD ${FILE_URL} ${download_path}
        STATUS DOWNLOAD_STATE)
    list(GET DOWNLOAD_STATE 0 RET_VAL)
    if(${RET_VAL})
        list(GET DOWNLOAD_STATE 1 RET_MSG)
        message(FATAL_ERROR "Download failed with error code: ${RET_VAL}; "
                            "Error message: ${RET_MSG}")
    endif()
endfunction()

set(TEST_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/test/resources)
file(MAKE_DIRECTORY ${TEST_RESOURCES_DIR})
add_definitions (-DTEST_RESOURCE_DIR="${TEST_RESOURCES_DIR}")
set(TEST_TARGET_NAME "${CMAKE_PROJECT_NAME}-tests")

file(GLOB TEST_SOURCES "test/*")
file(GLOB TESTS_AUDIO_COMMON "../common/test/Audio/*")

file(MAKE_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/test/resources)
include(../common/cmake/find_catch.cmake)

add_executable("${TEST_TARGET_NAME}" ${COMMON_UTILS_SOURCES} ${COMMON_AUDIO_SOURCES} ${SOURCES} ${TEST_SOURCES} ${TESTS_AUDIO_COMMON})

ExternalProject_Add(passport
    URL https://raw.githubusercontent.com/Azure-Samples/cognitive-services-speech-sdk/master/sampledata/audiofiles/myVoiceIsMyPassportVerifyMe04.wav
    DOWNLOAD_NO_EXTRACT 1
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ${CMAKE_COMMAND} -E copy <DOWNLOAD_DIR>/myVoiceIsMyPassportVerifyMe04.wav ${CMAKE_CURRENT_SOURCE_DIR}/test/resources
    INSTALL_COMMAND ""
)

add_dependencies(
    "${TEST_TARGET_NAME}"
    "passport"
    "catch2-headers"
)

set(MODEL_FILENAME ds_cnn_clustered_int8.tflite)
set(MODEL_RESOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/test/resources)
file(MAKE_DIRECTORY ${MODEL_RESOURCES_DIR})
set(DEFAULT_MODEL_PATH ${CMAKE_CURRENT_SOURCE_DIR}/test/resources/${MODEL_FILENAME})

# Download the default model
set(ZOO_COMMON_SUBPATH "models/keyword_spotting/ds_cnn_large/tflite_clustered_int8")
set(ZOO_MODEL_SUBPATH "${ZOO_COMMON_SUBPATH}/${MODEL_FILENAME}")
set(ZOO_MODEL_VERSION "68b5fbc77ed28e67b2efc915997ea4477c1d9d5b")

download_file_from_modelzoo(${ZOO_MODEL_VERSION} ${ZOO_MODEL_SUBPATH} ${DEFAULT_MODEL_PATH})

target_include_directories("${TEST_TARGET_NAME}" PUBLIC ${TEST_TPIP_INCLUDE}
    ${ARMNN_INCLUDE_DIR}
    ${DEPENDENCIES_DIR} ${TEST_RESOURCES_DIR} ${COMMON_INCLUDE_DIR})

target_link_libraries("${TEST_TARGET_NAME}" PUBLIC ${ARMNN_LIBS} -lsndfile -lsamplerate)
32
arch/arm/ARMnn/samples/KeywordSpotting/include/Decoder.hpp
Normal file
@ -0,0 +1,32 @@
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <string>
#include <map>
#include <utility>
#include <vector>
#include "ArmnnNetworkExecutor.hpp"

namespace kws
{

/**
 * @brief Decodes the quantised last layer of the model output
 *
 */
class Decoder
{
private:
    int quantisationOffset;
    float quantisationScale;

public:

    Decoder(int quantisationOffset, float quantisationScale) : quantisationOffset(quantisationOffset),
                                                               quantisationScale(quantisationScale) {}

    std::pair<int, float> decodeOutput(std::vector<int8_t>& modelOutput);

};
} // namespace kws
@ -0,0 +1,39 @@
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#ifndef KEYWORD_SPOTTING_EXAMPLE_DSCNNPREPROCESSOR_HPP
#define KEYWORD_SPOTTING_EXAMPLE_DSCNNPREPROCESSOR_HPP

#include <numeric>
#include "DsCnnMfcc.hpp"

namespace kws
{
class DsCNNPreprocessor
{
public:
    DsCNNPreprocessor(uint32_t windowLen, uint32_t windowStride,
                      std::unique_ptr<DsCnnMFCC> mfccInst);

    /**
     * @brief Calculates the features required from audio data. This
     *        includes MFCC, first and second order deltas,
     *        normalisation and finally, quantisation. The output is
     *        populated with the features from a given window, placed
     *        in a single row.
     * @param[in] audioData pointer to the first element of audio data
     * @param[in] dataSize number of elements in the audio data
     * @param[in] quantOffset quantisation offset of the model input
     * @param[in] quantScale quantisation scale of the model input
     * @return vector of quantised features
     */
    std::vector<int8_t> Invoke(const float* audioData,
                               size_t dataSize,
                               int quantOffset,
                               float quantScale);

    uint32_t m_windowLen;       // Window length for MFCC
    uint32_t m_windowStride;    // Window stride len for MFCC
    std::unique_ptr<MFCC> m_mfcc;
};
} // namespace kws
#endif // KEYWORD_SPOTTING_EXAMPLE_DSCNNPREPROCESSOR_HPP
20
arch/arm/ARMnn/samples/KeywordSpotting/include/DsCnnMfcc.hpp
Normal file
@ -0,0 +1,20 @@
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "MFCC.hpp"

/* Class to provide DS-CNN specific MFCC calculation requirements. */
class DsCnnMFCC : public MFCC
{

public:

    explicit DsCnnMFCC(MfccParams& params)
        : MFCC(params)
    {}
    DsCnnMFCC() = delete;
    ~DsCnnMFCC() = default;
};
@ -0,0 +1,91 @@
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "ArmnnNetworkExecutor.hpp"
#include "Decoder.hpp"
#include "MFCC.hpp"
#include "DsCNNPreprocessor.hpp"

namespace kws
{
/**
 * Generic Keyword Spotting pipeline with 3 steps: data pre-processing, inference execution and inference
 * result post-processing.
 *
 */
class KWSPipeline
{
public:

    /**
     * Creates a keyword spotting pipeline with the given network executor, decoder and pre-processor.
     * @param executor - unique pointer to inference runner
     * @param decoder - unique pointer to inference results decoder
     * @param preProcessor - unique pointer to audio pre-processor
     */
    KWSPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<int8_t>> executor,
                std::unique_ptr<Decoder> decoder,
                std::unique_ptr<DsCNNPreprocessor> preProcessor);

    /**
     * @brief Standard audio pre-processing implementation.
     *
     * Preprocesses and prepares the data for inference by
     * extracting the MFCC features.
     *
     * @param[in] audio - the raw audio data
     */
    std::vector<int8_t> PreProcessing(std::vector<float>& audio);

    /**
     * @brief Executes inference
     *
     * Calls inference runner provided during instance construction.
     *
     * @param[in] preprocessedData - input inference data. Data type should be aligned with input tensor.
     * @param[out] result - raw inference results.
     */
    void Inference(const std::vector<int8_t>& preprocessedData, common::InferenceResults<int8_t>& result);

    /**
     * @brief Standard inference results post-processing implementation.
     *
     * Decodes inference results using decoder provided during construction.
     *
     * @param[in] inferenceResults - inference results to be decoded.
     * @param[in] labels - the words we use for the model
     * @param[in] callback - function called with the decoded keyword index, label and probability
     */
    void PostProcessing(common::InferenceResults<int8_t>& inferenceResults,
                        std::map<int, std::string>& labels,
                        const std::function<void (int, std::string&, float)>& callback);

    /**
     * @brief Get the number of samples for the pipeline input
     *
     * @return - number of samples for the pipeline
     */
    int getInputSamplesSize();

protected:
    std::unique_ptr<common::ArmnnNetworkExecutor<int8_t>> m_executor;
    std::unique_ptr<Decoder> m_decoder;
    std::unique_ptr<DsCNNPreprocessor> m_preProcessor;
};

using IPipelinePtr = std::unique_ptr<kws::KWSPipeline>;

/**
 * Constructs a keyword spotting pipeline based on the configuration provided.
 *
 * @param[in] config - keyword spotting pipeline configuration.
 *
 * @return unique pointer to a keyword spotting pipeline.
 */
IPipelinePtr CreatePipeline(common::PipelineOptions& config);

};// namespace kws
35
arch/arm/ARMnn/samples/KeywordSpotting/src/Decoder.cpp
Normal file
@ -0,0 +1,35 @@
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <algorithm>
#include <cmath>

#include "Decoder.hpp"

std::pair<int, float> kws::Decoder::decodeOutput(std::vector<int8_t>& modelOutput)
{
    std::vector<float> dequantisedOutput;
    // Normalise vector values into new vector
    for (auto& value : modelOutput)
    {
        float normalisedModelOutput = this->quantisationScale * (static_cast<float>(value) -
                                                                 static_cast<float>(this->quantisationOffset));
        dequantisedOutput.push_back(normalisedModelOutput);
    }

    // Get the largest value in modelOutput
    const std::vector<float>::iterator& maxElementIterator = std::max_element(dequantisedOutput.begin(),
                                                                              dequantisedOutput.end());
    // Find the labelMapIndex of the largest value, which corresponds to a key in the label map
    int labelMapIndex = static_cast<int>(std::distance(dequantisedOutput.begin(), maxElementIterator));

    // Round to two decimal places
    float maxModelOutputProbability = std::roundf((*maxElementIterator) * 100) / 100;

    return std::make_pair(labelMapIndex, maxModelOutputProbability);
}
@ -0,0 +1,40 @@
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <cmath>
#include <numeric>
#include <algorithm>
#include <memory>
#include "MathUtils.hpp"
#include "SlidingWindow.hpp"
#include "DsCNNPreprocessor.hpp"

std::vector<int8_t> kws::DsCNNPreprocessor::Invoke(const float* audioData, size_t dataSize,
                                                   int quantOffset, float quantScale)
{
    auto window = SlidingWindow<const float>(
        audioData, dataSize,
        this->m_windowLen, this->m_windowStride);

    uint32_t mfccBufIdx = 0;
    std::vector<int8_t> outputBuffer;
    // While we can slide over the window
    while (window.HasNext())
    {
        const float* mfccWindow = window.Next();
        auto mfccAudioData = std::vector<float>(mfccWindow, mfccWindow + this->m_windowLen);

        auto mfcc = this->m_mfcc->MfccComputeQuant<int8_t>(mfccAudioData, quantScale, quantOffset);

        std::copy(mfcc.begin(), mfcc.end(), std::back_inserter(outputBuffer));

        ++mfccBufIdx;
    }

    return outputBuffer;
}

kws::DsCNNPreprocessor::DsCNNPreprocessor(const uint32_t windowLen, const uint32_t windowStride,
                                          std::unique_ptr<DsCnnMFCC> mfccInst) :
    m_windowLen{windowLen}, m_windowStride{windowStride}, m_mfcc{std::move(mfccInst)} {}
@ -0,0 +1,94 @@
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "KeywordSpottingPipeline.hpp"
#include "ArmnnNetworkExecutor.hpp"
#include "DsCNNPreprocessor.hpp"

namespace kws
{
KWSPipeline::KWSPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<int8_t>> executor,
                         std::unique_ptr<Decoder> decoder,
                         std::unique_ptr<DsCNNPreprocessor> preProcessor
                         ) :
        m_executor(std::move(executor)),
        m_decoder(std::move(decoder)),
        m_preProcessor(std::move(preProcessor)) {}


std::vector<int8_t> KWSPipeline::PreProcessing(std::vector<float>& audio)
{
    return m_preProcessor->Invoke(audio.data(), audio.size(), m_executor->GetQuantizationOffset(),
                                  m_executor->GetQuantizationScale());
}

void KWSPipeline::Inference(const std::vector<int8_t>& preprocessedData,
                            common::InferenceResults<int8_t>& result)
{
    m_executor->Run(preprocessedData.data(), preprocessedData.size(), result);
}

void KWSPipeline::PostProcessing(common::InferenceResults<int8_t>& inferenceResults,
                                 std::map<int, std::string>& labels,
                                 const std::function<void (int, std::string&, float)>& callback)
{
    std::pair<int, float> outputDecoder = this->m_decoder->decodeOutput(inferenceResults[0]);
    int keywordIndex = std::get<0>(outputDecoder);
    std::string output = labels[keywordIndex];
    callback(keywordIndex, output, std::get<1>(outputDecoder));
}

int KWSPipeline::getInputSamplesSize()
{
    // One full analysis window plus one stride for each remaining MFCC vector,
    // e.g. 640 + (49 - 1) * 320 = 16000 samples (one second at 16 kHz).
    return this->m_preProcessor->m_windowLen +
           ((this->m_preProcessor->m_mfcc->m_params.m_numMfccVectors - 1) *
             this->m_preProcessor->m_windowStride);
}

IPipelinePtr CreatePipeline(common::PipelineOptions& config)
{
    if (config.m_ModelName == "DS_CNN_CLUSTERED_INT8")
    {
        //DS-CNN model settings
        float SAMP_FREQ = 16000;
        int MFCC_WINDOW_LEN = 640;
        int MFCC_WINDOW_STRIDE = 320;
        int NUM_MFCC_FEATS = 10;
        int NUM_MFCC_VECTORS = 49;
        //todo: calc in pipeline and use in main
        int SAMPLES_PER_INFERENCE = NUM_MFCC_VECTORS * MFCC_WINDOW_STRIDE +
                                    MFCC_WINDOW_LEN - MFCC_WINDOW_STRIDE; //16000
        float MEL_LO_FREQ = 20;
        float MEL_HI_FREQ = 4000;
        int NUM_FBANK_BIN = 40;

        MfccParams mfccParams(SAMP_FREQ,
                              NUM_FBANK_BIN,
                              MEL_LO_FREQ,
                              MEL_HI_FREQ,
                              NUM_MFCC_FEATS,
                              MFCC_WINDOW_LEN, false,
                              NUM_MFCC_VECTORS);

        std::unique_ptr<DsCnnMFCC> mfccInst = std::make_unique<DsCnnMFCC>(mfccParams);
        auto preprocessor = std::make_unique<kws::DsCNNPreprocessor>(
                MFCC_WINDOW_LEN, MFCC_WINDOW_STRIDE, std::move(mfccInst));

        auto executor = std::make_unique<common::ArmnnNetworkExecutor<int8_t>>(
                config.m_ModelFilePath, config.m_backends);

        auto decoder = std::make_unique<kws::Decoder>(executor->GetOutputQuantizationOffset(0),
                                                      executor->GetOutputQuantizationScale(0));

        return std::make_unique<kws::KWSPipeline>(std::move(executor),
                                                  std::move(decoder), std::move(preprocessor));
    }
    else
    {
        throw std::invalid_argument("Unknown Model name: " + config.m_ModelName + ".");
    }
}

}// namespace kws
arch/arm/ARMnn/samples/KeywordSpotting/src/Main.cpp
@ -0,0 +1,128 @@
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <iostream>
#include <map>
#include <sstream>
#include <vector>
#include <algorithm>
#include <cmath>
#include "KeywordSpottingPipeline.hpp"
#include "CmdArgsParser.hpp"
#include "ArmnnNetworkExecutor.hpp"
#include "AudioCapture.hpp"

const std::string AUDIO_FILE_PATH = "--audio-file-path";
const std::string MODEL_FILE_PATH = "--model-file-path";
const std::string LABEL_PATH = "--label-path";
const std::string PREFERRED_BACKENDS = "--preferred-backends";
const std::string HELP = "--help";

/*
 * The accepted options for this Keyword Spotting executable
 */
static std::map<std::string, std::string> CMD_OPTIONS =
{
        {AUDIO_FILE_PATH, "[REQUIRED] Path to the Audio file to run keyword spotting on"},
        {MODEL_FILE_PATH, "[REQUIRED] Path to the Keyword Spotting model to use"},
        {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
                             " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
                             " Defaults to CpuAcc,CpuRef"}
};

/*
 * Reads the user supplied backend preference, splits it by comma, and returns an ordered vector
 */
std::vector<armnn::BackendId> GetPreferredBackendList(const std::string& preferredBackends)
{
    std::vector<armnn::BackendId> backends;
    std::stringstream ss(preferredBackends);

    while (ss.good())
    {
        std::string backend;
        std::getline(ss, backend, ',');
        backends.emplace_back(backend);
    }
    return backends;
}

//Labels for this model
std::map<int, std::string> labels =
{
        {0, "silence"},
        {1, "unknown"},
        {2, "yes"},
        {3, "no"},
        {4, "up"},
        {5, "down"},
        {6, "left"},
        {7, "right"},
        {8, "on"},
        {9, "off"},
        {10, "stop"},
        {11, "go"}
};


int main(int argc, char* argv[])
{
    printf("ArmNN major version: %d\n", ARMNN_MAJOR_VERSION);
    std::map<std::string, std::string> options;

    //Read command line args
    int result = ParseOptions(options, CMD_OPTIONS, argv, argc);
    if (result != 0)
    {
        return result;
    }

    // Create the ArmNN inference runner
    common::PipelineOptions pipelineOptions;
    pipelineOptions.m_ModelName = "DS_CNN_CLUSTERED_INT8";
    pipelineOptions.m_ModelFilePath = GetSpecifiedOption(options, MODEL_FILE_PATH);
    if (CheckOptionSpecified(options, PREFERRED_BACKENDS))
    {
        pipelineOptions.m_backends = GetPreferredBackendList(
                (GetSpecifiedOption(options, PREFERRED_BACKENDS)));
    }
    else
    {
        pipelineOptions.m_backends = {"CpuAcc", "CpuRef"};
    }

    kws::IPipelinePtr kwsPipeline = kws::CreatePipeline(pipelineOptions);

    //Extract audio data from sound file
    auto filePath = GetSpecifiedOption(options, AUDIO_FILE_PATH);
    std::vector<float> audioData = audio::AudioCapture::LoadAudioFile(filePath);

    audio::AudioCapture capture;
    //todo: read samples and stride from pipeline
    capture.InitSlidingWindow(audioData.data(),
                              audioData.size(),
                              kwsPipeline->getInputSamplesSize(),
                              kwsPipeline->getInputSamplesSize()/2);

    //Loop through audio data buffer
    while (capture.HasNext())
    {
        std::vector<float> audioBlock = capture.Next();
        common::InferenceResults<int8_t> results;

        //Prepare input tensors
        std::vector<int8_t> preprocessedData = kwsPipeline->PreProcessing(audioBlock);
        //Run inference
        kwsPipeline->Inference(preprocessedData, results);
        //Decode output
        kwsPipeline->PostProcessing(results, labels,
                                    [](int index, std::string& label, float prob) -> void {
                                        printf("Keyword \"%s\", index %d, probability %f\n",
                                               label.c_str(),
                                               index,
                                               prob);
                                    });
    }

    return 0;
}
arch/arm/ARMnn/samples/KeywordSpotting/test/DecoderTest.cpp
@ -0,0 +1,28 @@
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <catch.hpp>
#include <map>
#include <vector>
#include "Decoder.hpp"


TEST_CASE("Test KWS decoder")
{
    // Actual output probability: [0.0, 0.06, 0.02, 0.03, 0.0, 0.0, 0.05, 0.0, 0.83, 0.0, 0.1, 0.0]
    // int8 quantised Model output [1, 4, 2, 3, 1, 1, 3, 1, 43, 1, 6, 1]
    // Reconstructed dequantised probability [0.0, 0.06, 0.02, 0.04, 0.0, 0.0, 0.04, 0.0, 0.84, 0.0, 0.1, 0.0]
    // Dequantisation follows probability = scale * (quantised - offset),
    // so the strongest output is 0.02 * (43 - 1) = 0.84 at index 8.

    int quantisationOffset = 1;
    float quantisationScale = 0.02;

    std::vector<int8_t> modelOutput = {1, 4, 2, 3, 1, 1, 3, 1, 43, 1, 6, 1};

    kws::Decoder decoder(quantisationOffset, quantisationScale);

    std::pair<int, float> result = decoder.decodeOutput(modelOutput);


    CHECK(result == std::pair<int, float>(8, 0.84));
}
@ -0,0 +1,230 @@
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <catch.hpp>
#include <algorithm>
#include <map>
#include <cinttypes>
#include "KeywordSpottingPipeline.hpp"
#include "DsCNNPreprocessor.hpp"

static std::string GetResourceFilePath(const std::string& filename)
{
    std::string testResources = TEST_RESOURCE_DIR;
    if (testResources.empty())
    {
        throw std::invalid_argument("Invalid test resources directory provided");
    }
    else
    {
        if (testResources.back() != '/')
        {
            return testResources + "/" + filename;
        }
        else
        {
            return testResources + filename;
        }
    }
}

TEST_CASE("Test Keyword spotting pipeline")
{
    const int8_t ifm0_kws [] =
    {
        -0x1b, 0x4f, 0x7a, -0x55, 0x6, -0x11, 0x6e, -0x6, 0x67, -0x7e, -0xd, 0x6, 0x49, 0x79, -0x1e, 0xe,
        0x1d, 0x6e, 0x6f, 0x6f, -0x2e, -0x4b, 0x2, -0x3e, 0x40, -0x4b, -0x7, 0x31, -0x38, -0x64, -0x28,
        0xc, -0x1d, 0xf, 0x1c, 0x5a, -0x4b, 0x56, 0x7e, 0x9, -0x29, 0x13, -0x65, -0xa, 0x34, -0x59, 0x41,
        -0x6f, 0x75, 0x67, -0x5f, 0x17, 0x4a, -0x76, -0x7a, 0x49, -0x19, -0x41, 0x78, 0x40, 0x44, 0xe,
        -0x51, -0x5c, 0x3d, 0x24, 0x76, -0x66, -0x11, 0x5e, 0x7b, -0x4, 0x7a, 0x9, 0x13, 0x8, -0x21, -0x11,
        0x13, 0x7a, 0x25, 0x6, -0x68, 0x6a, -0x30, -0x16, -0x43, -0x27, 0x4c, 0x6b, -0x14, -0x12, -0x5f,
        0x49, -0x2a, 0x44, 0x57, -0x78, -0x72, 0x62, -0x8, -0x38, -0x73, -0x2, -0x80, 0x79, -0x3f, 0x57,
        0x9, -0x7e, -0x34, -0x59, 0x19, -0x66, 0x58, -0x3b, -0x69, -0x1a, 0x13, -0x2f, -0x2f, 0x13, 0x35,
        -0x30, 0x1e, 0x3b, -0x71, 0x67, 0x7d, -0x5d, 0x1a, 0x69, -0x53, -0x38, -0xf, 0x76, 0x2, 0x7e, 0x45,
        -0xa, 0x59, -0x6b, -0x28, -0x5d, -0x63, -0x7d, -0x3, 0x48, 0x74, -0x75, -0x7a, 0x1f, -0x53, 0x5b,
        0x4d, -0x18, -0x4a, 0x39, -0x52, 0x5a, -0x6b, -0x41, -0x3e, -0x61, -0x80, -0x52, 0x67, 0x71, -0x47,
        0x79, -0x41, 0x3a, -0x8, -0x1f, 0x4d, -0x7, 0x5b, 0x6b, -0x1b, -0x8, -0x20, -0x21, 0x7c, -0x74,
        0x25, -0x68, -0xe, -0x7e, -0x45, -0x28, 0x45, -0x1a, -0x39, 0x78, 0x11, 0x48, -0x6b, -0x7b, -0x43,
        -0x21, 0x38, 0x46, 0x7c, -0x5d, 0x59, 0x53, -0x3f, -0x15, 0x59, -0x17, 0x75, 0x2f, 0x7c, 0x68, 0x6a,
        0x0, -0x10, 0x5b, 0x61, 0x36, -0x41, 0x33, 0x23, -0x80, -0x1d, -0xb, -0x56, 0x2d, 0x68, -0x68,
        0x2f, 0x48, -0x5d, -0x44, 0x64, -0x27, 0x68, -0x13, 0x39, -0x3f, 0x18, 0x31, 0x15, -0x78, -0x2,
        0x72, 0x60, 0x59, -0x30, -0x22, 0x73, 0x61, 0x76, -0x4, -0x62, -0x64, -0x80, -0x32, -0x16, 0x51,
        -0x2, -0x70, 0x71, 0x3f, -0x5f, -0x35, -0x3c, 0x79, 0x48, 0x61, 0x5b, -0x20, -0x1e, -0x68, -0x1c,
        0x6c, 0x3a, 0x28, -0x36, -0x3e, 0x5f, -0x75, -0x73, 0x1e, 0x75, -0x66, -0x22, 0x20, -0x64, 0x67,
        0x36, 0x14, 0x37, -0xa, -0xe, 0x8, -0x37, -0x43, 0x21, -0x8, 0x54, 0x1, 0x34, -0x2c, -0x73, -0x11,
        -0x48, -0x1c, -0x40, 0x14, 0x4e, -0x53, 0x25, 0x5e, 0x14, 0x4f, 0x7c, 0x6d, -0x61, -0x38, 0x35,
        -0x5a, -0x44, 0x12, 0x52, -0x60, 0x22, -0x1c, -0x8, -0x4, -0x6b, -0x71, 0x43, 0xb, 0x7b, -0x7,
        -0x3c, -0x3b, -0x40, -0xd, 0x44, 0x6, 0x30, 0x38, 0x57, 0x1f, -0x7, 0x2, 0x4f, 0x64, 0x7c, -0x3,
        -0x13, -0x71, -0x45, -0x53, -0x52, 0x2b, -0x11, -0x1d, -0x2, -0x29, -0x37, 0x3d, 0x19, 0x76, 0x18,
        0x1d, 0x12, -0x29, -0x5e, -0x54, -0x48, 0x5d, -0x41, -0x3f, 0x7e, -0x2a, 0x41, 0x57, -0x65, -0x15,
        0x12, 0x1f, -0x57, 0x79, -0x64, 0x3a, -0x2f, 0x7f, -0x6c, 0xa, 0x52, -0x1f, -0x41, 0x6e, -0x4b,
        0x3d, -0x1b, -0x42, 0x22, -0x3c, -0x35, -0xf, 0xc, 0x32, -0x15, -0x68, -0x21, 0x0, -0x16, 0x14,
        -0x10, -0x5b, 0x2f, 0x21, 0x41, -0x8, -0x12, -0xa, 0x10, 0xf, 0x7e, -0x76, -0x1d, 0x2b, -0x49,
        0x42, -0x25, -0x78, -0x69, -0x2c, 0x3f, 0xc, 0x52, 0x6d, 0x2e, -0x13, 0x76, 0x37, -0x36, -0x51,
        -0x5, -0x63, -0x4f, 0x1c, 0x6b, -0x4b, 0x71, -0x12, 0x72, -0x3f, -0x4a, 0xf, 0x3a, -0xd, 0x38, 0x3b,
        -0x5d, 0x75, -0x43, -0x10, -0xa, -0x7a, 0x1a, -0x44, 0x1c, 0x6a, 0x43, -0x1b, -0x35, 0x7d, -0x2c,
        -0x10, 0x5b, -0x42, -0x4f, 0x69, 0x1f, 0x1b, -0x64, -0x21, 0x19, -0x5d, 0x2e, -0x2a, -0x65, -0x13,
        -0x70, -0x6e
    };

    const int8_t ofm0_kws [] =
    {
        -0x80, 0x7f, -0x80, -0x80, -0x80, -0x80, -0x80, -0x80, -0x80, -0x80, -0x80, -0x80
    };

    // First 640 samples from yes.wav.
    std::vector<int16_t> testWav = std::vector<int16_t>
    {
        139, 143, 164, 163, 157, 156, 151, 148, 172, 171,
        165, 169, 149, 142, 145, 147, 166, 146, 112, 132,
        132, 136, 165, 176, 176, 152, 138, 158, 179, 185,
        183, 148, 121, 130, 167, 204, 163, 132, 165, 184,
        193, 205, 210, 204, 195, 178, 168, 197, 207, 201,
        197, 177, 185, 196, 191, 198, 196, 183, 193, 181,
        157, 170, 167, 159, 164, 152, 146, 167, 180, 171,
        194, 232, 204, 173, 171, 172, 184, 169, 175, 199,
        200, 195, 185, 214, 214, 193, 196, 191, 204, 191,
        172, 187, 183, 192, 203, 172, 182, 228, 232, 205,
        177, 174, 191, 210, 210, 211, 197, 177, 198, 217,
        233, 236, 203, 191, 169, 145, 149, 161, 198, 206,
        176, 137, 142, 181, 200, 215, 201, 188, 166, 162,
        184, 155, 135, 132, 126, 142, 169, 184, 172, 156,
        132, 119, 150, 147, 154, 160, 125, 130, 137, 154,
        161, 168, 195, 182, 160, 134, 138, 146, 130, 120,
        101, 122, 137, 118, 117, 131, 145, 140, 146, 148,
        148, 168, 159, 134, 114, 114, 130, 147, 147, 134,
        125, 98, 107, 127, 99, 79, 84, 107, 117, 114,
        93, 92, 127, 112, 109, 110, 96, 118, 97, 87,
        110, 95, 128, 153, 147, 165, 146, 106, 101, 137,
        139, 96, 73, 90, 91, 51, 69, 102, 100, 103,
        96, 101, 123, 107, 82, 89, 118, 127, 99, 100,
        111, 97, 111, 123, 106, 121, 133, 103, 100, 88,
        85, 111, 114, 125, 102, 91, 97, 84, 139, 157,
        109, 66, 72, 129, 111, 90, 127, 126, 101, 109,
        142, 138, 129, 159, 140, 80, 74, 78, 76, 98,
        68, 42, 106, 143, 112, 102, 115, 114, 82, 75,
        92, 80, 110, 114, 66, 86, 119, 101, 101, 103,
        118, 145, 85, 40, 62, 88, 95, 87, 73, 64,
        86, 71, 71, 105, 80, 73, 96, 92, 85, 90,
        81, 86, 105, 100, 89, 78, 102, 114, 95, 98,
        69, 70, 108, 112, 111, 90, 104, 137, 143, 160,
        145, 121, 98, 86, 91, 87, 115, 123, 109, 99,
        85, 120, 131, 116, 125, 144, 153, 111, 98, 110,
        93, 89, 101, 137, 155, 142, 108, 94, 136, 145,
        129, 129, 122, 109, 90, 76, 81, 110, 119, 96,
        95, 102, 105, 111, 90, 89, 111, 115, 86, 51,
        107, 140, 105, 105, 110, 142, 125, 76, 75, 69,
        65, 52, 61, 69, 55, 42, 47, 58, 37, 35,
        24, 20, 44, 22, 16, 26, 6, 3, 4, 23,
        60, 51, 30, 12, 24, 31, -9, -16, -13, 13,
        19, 9, 37, 55, 70, 36, 23, 57, 45, 33,
        50, 59, 18, 11, 62, 74, 52, 8, -3, 26,
        51, 48, -5, -9, 12, -7, -12, -5, 28, 41,
        -2, -30, -13, 31, 33, -12, -22, -8, -15, -17,
        2, -6, -25, -27, -24, -8, 4, -9, -52, -47,
        -9, -32, -45, -5, 41, 15, -32, -14, 2, -1,
        -10, -30, -32, -25, -21, -17, -14, 8, -4, -13,
        34, 18, -36, -38, -18, -19, -28, -17, -14, -16,
        -2, -20, -27, 12, 11, -17, -33, -12, -22, -64,
        -42, -26, -23, -22, -37, -51, -53, -30, -18, -48,
        -69, -38, -54, -96, -72, -49, -50, -57, -41, -22,
        -43, -64, -54, -23, -49, -69, -41, -44, -42, -49,
        -40, -26, -54, -50, -38, -49, -70, -94, -89, -69,
        -56, -65, -71, -47, -39, -49, -79, -91, -56, -46,
        -62, -86, -64, -32, -47, -50, -71, -77, -65, -68,
        -52, -51, -61, -67, -61, -81, -93, -52, -59, -62,
        -51, -75, -76, -50, -32, -54, -68, -70, -43, 1,
        -42, -92, -80, -41, -38, -79, -69, -49, -82, -122,
        -93, -21, -24, -61, -70, -73, -62, -74, -69, -43,
        -25, -15, -43, -23, -26, -69, -44, -12, 1, -51,
        -78, -13, 3, -53, -105, -72, -24, -62, -66, -31,
        -40, -65, -86, -64, -44, -55, -63, -61, -37, -41,
    };

    // Golden audio ops mfcc output for the above wav.
    const std::vector<float> testWavMfcc
    {
        -22.67135, -0.61615, 2.07233, 0.58137, 1.01655, 0.85816, 0.46039, 0.03393, 1.16511, 0.0072,
    };

    std::vector<float> testWavFloat(640);
    constexpr float normaliser = 1.0/(1u<<15u);
    // Scale the int16 samples to floats (std::bind1st was removed in C++17,
    // so a lambda is used here in place of std::bind1st(std::multiplies<float>(), normaliser)).
    std::transform(testWav.begin(), testWav.end(), testWavFloat.begin(),
                   [normaliser](int16_t sample) { return sample * normaliser; });

    const float DsCNNInputQuantizationScale = 1.107164;
    const int DsCNNInputQuantizationOffset = 95;

    std::map<int, std::string> labels =
    {
        {0, "silence"},
        {1, "unknown"},
        {2, "yes"},
        {3, "no"},
        {4, "up"},
        {5, "down"},
        {6, "left"},
        {7, "right"},
        {8, "on"},
        {9, "off"},
        {10, "stop"},
        {11, "go"}
    };
    common::PipelineOptions options;
    options.m_ModelFilePath = GetResourceFilePath("ds_cnn_clustered_int8.tflite");
    options.m_ModelName = "DS_CNN_CLUSTERED_INT8";
    options.m_backends = {"CpuAcc", "CpuRef"};
    kws::IPipelinePtr kwsPipeline = kws::CreatePipeline(options);

    CHECK(kwsPipeline->getInputSamplesSize() == 16000);
    std::vector<int8_t> expectedWavMfcc;
    // Quantise the golden MFCC values: quantised = value / scale + offset.
    for (auto& i : testWavMfcc)
    {
        expectedWavMfcc.push_back(
            (i + DsCNNInputQuantizationScale * DsCNNInputQuantizationOffset) / DsCNNInputQuantizationScale);
    }

    SECTION("Pre-processing")
    {
        testWavFloat.resize(16000);
        expectedWavMfcc.resize(49 * 10);
        std::vector<int8_t> preprocessedData = kwsPipeline->PreProcessing(testWavFloat);
        CHECK(preprocessedData.size() == expectedWavMfcc.size());
        for (int i = 0; i < 10; ++i)
        {
            CHECK(expectedWavMfcc[i] == Approx(preprocessedData[i]).margin(1));
        }
    }

    SECTION("Execute inference")
    {
        common::InferenceResults<int8_t> result;
        std::vector<int8_t> IFM(std::begin(ifm0_kws), std::end(ifm0_kws));
        kwsPipeline->Inference(IFM, result);
        std::vector<int8_t> OFM(std::begin(ofm0_kws), std::end(ofm0_kws));

        CHECK(1 == result.size());
        CHECK(OFM.size() == result[0].size());

        int count = 0;
        for (auto& i : result)
        {
            for (signed char& j : i)
            {
                CHECK(j == OFM[count++]);
            }
        }
    }

    SECTION("Convert inference result to keyword")
    {
        std::vector<std::vector<int8_t>> modelOutput = {{1, 4, 2, 3, 1, 1, 3, 1, 43, 1, 6, 1}};
        kwsPipeline->PostProcessing(modelOutput, labels,
                                    [](int index, std::string& label, float prob) -> void {
                                        CHECK(index == 8);
                                        CHECK(label == "on");
                                    });
    }
}
arch/arm/ARMnn/samples/ObjectDetection/CMakeLists.txt
@ -0,0 +1,71 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

cmake_minimum_required(VERSION 3.0.2)

set(CMAKE_C_STANDARD 99)
set(CMAKE_CXX_STANDARD 14)

# Make the standard a requirement => prevent fallback to previous
# supported standard
set(CMAKE_C_STANDARD_REQUIRED ON)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# We want to pass standard C/C++ flags, without gnu extensions
set(CMAKE_C_EXTENSIONS OFF)
set(CMAKE_CXX_EXTENSIONS OFF)

project (object_detection_example)

set(CMAKE_C_FLAGS_DEBUG "-DDEBUG -O0 -g -fPIC")
set(CMAKE_C_FLAGS_RELEASE "-DNDEBUG -O3 -fPIC")

set(CMAKE_CXX_FLAGS_DEBUG "-DDEBUG -O0 -g -fPIC")
set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG -O3 -fPIC")

include(ExternalProject)

# Build in release mode by default
if (NOT CMAKE_BUILD_TYPE STREQUAL Debug)
    set(CMAKE_BUILD_TYPE Release CACHE INTERNAL "")
endif()

set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

if (NOT DEFINED DEPENDENCIES_DIR)
    set(DEPENDENCIES_DIR ${CMAKE_BINARY_DIR}/dependencies)
endif()

include(../common/cmake/find_opencv.cmake)
include(../common/cmake/find_armnn.cmake)

include_directories(include)
include_directories(../common/include/ArmnnUtils)
include_directories(../common/include/Utils)
include_directories(../common/include/CVUtils)

file(GLOB SOURCES "src/*.cpp")
file(GLOB CVUTILS_SOURCES "../common/src/CVUtils**/*.cpp")
file(GLOB UTILS_SOURCES "../common/src/Utils**/*.cpp")
list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/Main.cpp)
file(GLOB TEST_SOURCES "test/*.cpp")
file(GLOB APP_MAIN "src/Main.cpp")

if(BUILD_UNIT_TESTS)
    include(cmake/unit_tests.cmake)
endif()


set(APP_TARGET_NAME "${CMAKE_PROJECT_NAME}")

add_executable("${APP_TARGET_NAME}" ${SOURCES} ${CVUTILS_SOURCES} ${UTILS_SOURCES} ${APP_MAIN})

if (NOT OPENCV_LIBS_FOUND)
    message("Building OpenCV libs")
    add_dependencies("${APP_TARGET_NAME}" "${OPENCV_LIB}")
endif()

target_link_libraries("${APP_TARGET_NAME}" PUBLIC ${ARMNN_LIBS} ${OPENCV_LIBS})
target_include_directories("${APP_TARGET_NAME}" PUBLIC ${ARMNN_INCLUDE_DIR} ${OPENCV_INCLUDE_DIR})
arch/arm/ARMnn/samples/ObjectDetection/Readme.md
@ -0,0 +1,453 @@
# Object Detection Example

## Introduction
This sample shows object detection using the Arm NN public C++ API. The compiled application can take

* a video file

as input and
* save a video file
* or output the video stream to a window

with detections shown as bounding boxes, class labels and confidence scores.

## Dependencies

This example utilises OpenCV functions to capture and output video data. The top-level inference API is provided by the
Arm NN library.

### Arm NN

The object detection example build system does not trigger Arm NN compilation. Thus, before building the application,
please ensure that the Arm NN libraries and header files are available on your build platform.
The application executable binary dynamically links with the following Arm NN libraries:
* libarmnn.so
* libarmnnTfLiteParser.so

The build script searches for available Arm NN libraries in the following order:
1. Inside the custom user directory specified by the ARMNN_LIB_DIR cmake option.
2. Inside the current Arm NN repository, assuming that Arm NN was built following [these instructions](../../BuildGuideCrossCompilation.md).
3. Inside default locations for system libraries, assuming Arm NN was installed from deb packages.

Arm NN header files are searched for in the `include` directory next to the found libraries, i.e. for
libraries found in `/usr/lib` or `/usr/lib64`, header files are expected in `/usr/include` (or `${ARMNN_LIB_DIR}/include`).

Please see [find_armnn.cmake](./cmake/find_armnn.cmake) for implementation details.

### OpenCV

This application uses [OpenCV (Open Source Computer Vision Library)](https://opencv.org/) for video stream processing.
Your host platform may have OpenCV available through its Linux package manager; if this is the case, please install it the
standard way. If not, our build system has a script to download and cross-compile the required OpenCV modules
as well as the [FFMPEG](https://ffmpeg.org/) and [x264 encoder](https://www.videolan.org/developers/x264.html) libraries.
The latter builds limited OpenCV functionality, and the application will then support only video file input and video file
output. Displaying video frames in a window requires building OpenCV with GTK and OpenGL support.

The application executable binary dynamically links with the following OpenCV libraries:
* libopencv_core.so.4.0.0
* libopencv_imgproc.so.4.0.0
* libopencv_imgcodecs.so.4.0.0
* libopencv_videoio.so.4.0.0
* libopencv_video.so.4.0.0
* libopencv_highgui.so.4.0.0

and transitively depends on:
* libavcodec.so (FFMPEG)
* libavformat.so (FFMPEG)
* libavutil.so (FFMPEG)
* libswscale.so (FFMPEG)
* libx264.so (x264)

The application searches for the above libraries in the following order:
1. Inside the custom user directory specified by the OPENCV_LIB_DIR cmake option.
2. Inside default locations for system libraries.

If no OpenCV libraries are found, the cross-compilation build is extended with x264, ffmpeg and OpenCV compilation steps.

Note: the native build does not add third-party libraries to the compilation.

Please see [find_opencv.cmake](./cmake/find_opencv.cmake) for implementation details.

## Building
There are two flows for building this application:
* native build on a host platform,
* cross-compilation for an Arm-based host platform.

### Build Options

* CMAKE_TOOLCHAIN_FILE - choose one of the available cross-compilation toolchain files:
    * `cmake/aarch64-toolchain.cmake`
    * `cmake/arm-linux-gnueabihf-toolchain.cmake`
* ARMNN_LIB_DIR - point to the custom location of the Arm NN libs and headers.
* OPENCV_LIB_DIR - point to the custom location of the OpenCV libs and headers.
* BUILD_UNIT_TESTS - set to `1` to build tests. In addition to the main application, an `object_detection_example-tests`
unit test executable will be created.

### Native Build
To build this application on a host platform, first ensure that the required dependencies are installed.
For example, on a Raspberry Pi:
```commandline
sudo apt-get update
sudo apt-get -yq install pkg-config
sudo apt-get -yq install libgtk2.0-dev zlib1g-dev libjpeg-dev libpng-dev libxvidcore-dev libx264-dev
sudo apt-get -yq install libavcodec-dev libavformat-dev libswscale-dev
```

To build the demo application, create a build directory:
```commandline
mkdir build
cd build
```
If you have already installed Arm NN and OpenCV:

Inside the build directory, run the cmake and make commands:
```commandline
cmake ..
make
```
This will build the following in the bin directory:
* object_detection_example - application executable

If Arm NN and OpenCV are installed in custom locations, use the `OPENCV_LIB_DIR` and `ARMNN_LIB_DIR` options:
```commandline
cmake -DARMNN_LIB_DIR=/path/to/armnn -DOPENCV_LIB_DIR=/path/to/opencv ..
make
```

### Cross-compilation

This section explains how to cross-compile the application and its dependencies on a Linux x86 machine
for Arm host platforms.

You will require a working cross-compilation toolchain supported by your host platform. For Raspberry Pi 3 and 4 with glibc
runtime version 2.28, the following toolchains were used successfully:
* https://releases.linaro.org/components/toolchain/binaries/latest-7/aarch64-linux-gnu/
* https://releases.linaro.org/components/toolchain/binaries/latest-7/arm-linux-gnueabihf/

Choose aarch64-linux-gnu if the `lscpu` command shows the architecture as aarch64, or arm-linux-gnueabihf if the detected
architecture is armv7l.

You can check the glibc runtime version on your host platform by running:
```
ldd --version
```
On the **build machine**, install the C and C++ cross-compiler toolchains and add them to the PATH variable.

Install package dependencies:
```commandline
sudo apt-get update
sudo apt-get -yq install pkg-config
```
pkg-config is required by the OpenCV build to discover the FFMPEG libs.

To build the demo application, create a build directory:
```commandline
mkdir build
cd build
```
Inside the build directory, run the cmake and make commands:

**Arm 32bit**
```commandline
cmake -DARMNN_LIB_DIR=<path-to-armnn-libs> -DCMAKE_TOOLCHAIN_FILE=cmake/arm-linux-gnueabihf-toolchain.cmake ..
make
```
**Arm 64bit**
```commandline
cmake -DARMNN_LIB_DIR=<path-to-armnn-libs> -DCMAKE_TOOLCHAIN_FILE=cmake/aarch64-toolchain.cmake ..
make
```

Add the `-j` flag to the make command to run compilation in multiple threads.

From the build directory, copy the following to the host platform:
* bin directory - contains the object_detection_example executable,
* lib directory - contains the cross-compiled OpenCV, ffmpeg and x264 libraries,
* your Arm NN libs used during compilation.

The full list of libs to copy to your board after cross-compilation:
```
libarmnn.so
libarmnn.so.28
libarmnn.so.28.0
libarmnnTfLiteParser.so
libarmnnTfLiteParser.so.24.4
libavcodec.so
libavcodec.so.58
libavcodec.so.58.54.100
libavdevice.so
libavdevice.so.58
libavdevice.so.58.8.100
libavfilter.so
libavfilter.so.7
libavfilter.so.7.57.100
libavformat.so
libavformat.so.58
libavformat.so.58.29.100
libavutil.so
libavutil.so.56
libavutil.so.56.31.100
libopencv_core.so
libopencv_core.so.4.0
libopencv_core.so.4.0.0
libopencv_highgui.so
libopencv_highgui.so.4.0
libopencv_highgui.so.4.0.0
libopencv_imgcodecs.so
libopencv_imgcodecs.so.4.0
libopencv_imgcodecs.so.4.0.0
libopencv_imgproc.so
libopencv_imgproc.so.4.0
libopencv_imgproc.so.4.0.0
libopencv_video.so
libopencv_video.so.4.0
libopencv_video.so.4.0.0
libopencv_videoio.so
libopencv_videoio.so.4.0
libopencv_videoio.so.4.0.0
libpostproc.so
libpostproc.so.55
libpostproc.so.55.5.100
libswresample.a
libswresample.so
libswresample.so.3
libswresample.so.3.5.100
libswscale.so
libswscale.so.5
libswscale.so.5.5.100
libx264.so
libx264.so.160
```
## Executing

Once the application executable is built, it can be executed with the following options:
* --video-file-path: Path to the video file to run object detection on **[REQUIRED]**
* --model-file-path: Path to the Object Detection model to use **[REQUIRED]**
* --label-path: Path to the label set for the provided model file **[REQUIRED]**
* --model-name: The name of the model being used. Accepted options: SSD_MOBILE | YOLO_V3_TINY **[REQUIRED]**
* --output-video-file-path: Path to the output video file with detections added in. Defaults to /tmp/output.avi
**[OPTIONAL]**
* --preferred-backends: Takes the preferred backends in preference order, separated by comma.
For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc].
Defaults to CpuRef **[OPTIONAL]**

### Object Detection on a supplied video file

To run object detection on a supplied video file and output the result to a video file:
```commandline
LD_LIBRARY_PATH=/path/to/armnn/libs:/path/to/opencv/libs ./object_detection_example --label-path /path/to/labels/file
--video-file-path /path/to/video/file --model-file-path /path/to/model/file
--model-name [YOLO_V3_TINY | SSD_MOBILE] --output-video-file-path /path/to/output/file
```

To run object detection on a supplied video file and output the result to a GUI window:
```commandline
LD_LIBRARY_PATH=/path/to/armnn/libs:/path/to/opencv/libs ./object_detection_example --label-path /path/to/labels/file
--video-file-path /path/to/video/file --model-file-path /path/to/model/file
--model-name [YOLO_V3_TINY | SSD_MOBILE]
```

This application has been verified to work against the MobileNet SSD and the YOLO V3 tiny models, which can be downloaded along with their label sets from the Arm Model Zoo:
* https://github.com/ARM-software/ML-zoo/tree/master/models/object_detection/ssd_mobilenet_v1
* https://github.com/ARM-software/ML-zoo/tree/master/models/object_detection/yolo_v3_tiny

---

# Application Overview
This section provides a walkthrough of the application, explaining in detail the steps:
1. Initialisation
    1. Reading from Video Source
    2. Preparing Labels and Model Specific Functions
2. Creating a Network
    1. Creating Parser and Importing Graph
    2. Optimizing Graph for Compute Device
    3. Creating Input and Output Binding Information
3. Object detection pipeline
    1. Pre-processing the Captured Frame
    2. Making Input and Output Tensors
    3. Executing Inference
    4. Postprocessing
    5. Decoding and Processing Inference Output
    6. Drawing Bounding Boxes


### Initialisation

##### Reading from Video Source
After parsing user arguments, the chosen video file or stream is loaded into an OpenCV `cv::VideoCapture` object.
We use the [`IFrameReader`](./include/IFrameReader.hpp) interface and the OpenCV-specific implementation
[`CvVideoFrameReader`](./include/CvVideoFrameReader.hpp) in our main function to capture frames from the source using the
`ReadFrame()` function.

The `CvVideoFrameReader` object also provides information about the input video. Using this information and the application
arguments, we create one of the implementations of the [`IFrameOutput`](./include/IFrameOutput.hpp) interface:
[`CvVideoFileWriter`](./include/CvVideoFileWriter.hpp) or [`CvWindowOutput`](./include/CvWindowOutput.hpp).
This object is used at the end of every loop to write the processed frame to an output video file or GUI
window.
`CvVideoFileWriter` uses `cv::VideoWriter` with the ffmpeg backend. `CvWindowOutput` makes use of the `cv::imshow()` function.

See the `GetFrameSourceAndSink` function in [Main.cpp](./src/Main.cpp) for more details.

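The capture/output plumbing reduces to a simple read-process-write loop. The sketch below shows that loop with plain OpenCV types instead of the sample's `IFrameReader`/`IFrameOutput` wrappers; the file names and writer settings are illustrative only:

```c++
#include <opencv2/opencv.hpp>

// Minimal read-process-write loop with raw OpenCV objects (illustrative;
// the sample wraps these in CvVideoFrameReader and CvVideoFileWriter).
int main()
{
    cv::VideoCapture source("input.avi");
    cv::VideoWriter sink("output.avi",
                         cv::VideoWriter::fourcc('M', 'J', 'P', 'G'),
                         source.get(cv::CAP_PROP_FPS),
                         cv::Size(static_cast<int>(source.get(cv::CAP_PROP_FRAME_WIDTH)),
                                  static_cast<int>(source.get(cv::CAP_PROP_FRAME_HEIGHT))));
    cv::Mat frame;
    while (source.read(frame))        // ReadFrame() equivalent; false at end of stream
    {
        // ... run the object detection pipeline on 'frame' here ...
        sink.write(frame);            // WriteFrame() equivalent
    }
    return 0;
}
```
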
##### Preparing Labels and Model Specific Functions
In order to interpret the result of running inference on the loaded network, it is required to load the labels
associated with the model. In the provided example code, the `AssignColourToLabel` function creates a vector of
label/colour pairs that is ordered according to the object class index at the output node of the model. Each label is
assigned a randomly generated RGB colour. This ensures that each class has a unique colour, which proves helpful when
plotting the bounding boxes of the various detected objects in a frame.

Depending on the model being used, the `CreatePipeline` function returns a specific implementation of the object detection
pipeline.

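A minimal sketch of building such a label/colour table is shown below. The one-label-per-line file format and the function shape are assumptions for illustration, not the sample's exact code:

```c++
#include <array>
#include <fstream>
#include <random>
#include <string>
#include <tuple>
#include <vector>

// Hypothetical sketch: read one label per line and pair each label with a
// randomly generated RGB colour, ordered by class index (line number).
std::vector<std::tuple<std::string, std::array<int, 3>>>
AssignColourToLabel(const std::string& labelsPath)
{
    std::vector<std::tuple<std::string, std::array<int, 3>>> labels;
    std::ifstream file(labelsPath);
    std::mt19937 gen(std::random_device{}());
    std::uniform_int_distribution<int> channel(0, 255);
    std::string line;
    while (std::getline(file, line))
    {
        labels.emplace_back(line, std::array<int, 3>{channel(gen), channel(gen), channel(gen)});
    }
    return labels;
}
```
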
### Creating a Network

All operations with Arm NN and networks are encapsulated in the [`ArmnnNetworkExecutor`](./include/ArmnnNetworkExecutor.hpp)
class.

##### Creating Parser and Importing Graph
The first step with the Arm NN SDK is to import a graph from file by using the appropriate parser.

The Arm NN SDK provides parsers for reading graphs from a variety of model formats. In our application we specifically
focus on `.tflite, .pb, .onnx` models.

Based on the extension of the provided model file, the corresponding parser is created and the network file loaded with
the `CreateNetworkFromBinaryFile()` method. The parser handles the creation of the underlying Arm NN graph.

The current example accepts tflite format model files; we use `ITfLiteParser`:
```c++
#include "armnnTfLiteParser/ITfLiteParser.hpp"

armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
```

##### Optimizing Graph for Compute Device
Arm NN supports optimized execution on multiple CPU and GPU devices. Prior to executing a graph, we must select the
appropriate device context. We do this by creating a runtime context with default options with `IRuntime()`.

For example:
```c++
#include "armnn/ArmNN.hpp"

auto runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
```

We can optimize the imported graph by specifying a list of backends in order of preference and implement
backend-specific optimizations. The backends are identified by a string unique to the backend,
for example `CpuAcc, GpuAcc, CpuRef`.

For example:
```c++
std::vector<armnn::BackendId> backends{"CpuAcc", "GpuAcc", "CpuRef"};
```

Internally and transparently, Arm NN splits the graph into subgraphs based on the backends, calls an optimize-subgraphs
function on each of them and, if possible, substitutes the corresponding subgraph in the original graph with
its optimized version.

Using the `Optimize()` function we optimize the graph for inference and load the optimized network onto the compute
device with `LoadNetwork()`. This function creates the backend-specific workloads
for the layers and a backend-specific workload factory which is called to create the workloads.

For example:
```c++
armnn::IOptimizedNetworkPtr optNet = Optimize(*network,
                                              backends,
                                              runtime->GetDeviceSpec(),
                                              armnn::OptimizerOptions());
std::string errorMessage;
runtime->LoadNetwork(0, std::move(optNet), errorMessage);
std::cerr << errorMessage << std::endl;
```

##### Creating Input and Output Binding Information
Parsers can also be used to extract the input information for the network. By calling `GetSubgraphInputTensorNames`
we extract all the input names and, with `GetNetworkInputBindingInfo`, bind the input points of the graph.
For example:
```c++
std::vector<std::string> inputNames = parser->GetSubgraphInputTensorNames(0);
auto inputBindingInfo = parser->GetNetworkInputBindingInfo(0, inputNames[0]);
```
The input binding information contains all the essential information about the input. It is a tuple consisting of
integer identifiers for bindable layers (inputs, outputs) and the tensor info (data type, quantization information,
number of dimensions, total number of elements).

Similarly, we can get the output binding information for an output layer by using the parser to retrieve output
tensor names and calling `GetNetworkOutputBindingInfo()`.

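For example, mirroring the input case with the corresponding output-side parser calls:

```c++
std::vector<std::string> outputNames = parser->GetSubgraphOutputTensorNames(0);
auto outputBindingInfo = parser->GetNetworkOutputBindingInfo(0, outputNames[0]);
```
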
### Object detection pipeline

The generic object detection pipeline has 3 steps: perform data pre-processing, run inference and decode the inference
results in the post-processing step.

See [`ObjDetectionPipeline`](include/ObjectDetectionPipeline.hpp) and the implementations [`MobileNetSSDv1`](include/ObjectDetectionPipeline.hpp)
and [`YoloV3Tiny`](include/ObjectDetectionPipeline.hpp) for more details.

#### Pre-processing the Captured Frame
Each frame captured from the source is read as a `cv::Mat` in BGR format, but the channels are swapped to RGB in the frame
reader code.

```c++
cv::Mat processed;
...
objectDetectionPipeline->PreProcessing(frame, processed);
```

The pre-processing step consists of resizing the frame to the required resolution, padding, and data type conversion
to match the model input layer.
For example, the SSD MobileNet V1 used in our example takes as input a tensor with shape `[1, 300, 300, 3]` and
data type `uint8`.

The pre-processing step returns a `cv::Mat` object containing the data ready for inference.

#### Executing Inference
```c++
od::InferenceResults results;
...
objectDetectionPipeline->Inference(processed, results);
```
The inference step calls the `ArmnnNetworkExecutor::Run` method, which prepares the input tensors and executes inference.
A compute device performs inference for the loaded network using the `EnqueueWorkload()` function of the runtime context.
For example:
```c++
//const void* inputData = ...;
//outputTensors were pre-allocated before

armnn::InputTensors inputTensors = {{inputBindingInfo.first, armnn::ConstTensor(inputBindingInfo.second, inputData)}};
runtime->EnqueueWorkload(0, inputTensors, outputTensors);
```
We allocate memory for the output data once and map it to the output tensor objects. After successful inference, we read data
from the pre-allocated output data buffer. See [`ArmnnNetworkExecutor::ArmnnNetworkExecutor`](./src/ArmnnNetworkExecutor.cpp)
and [`ArmnnNetworkExecutor::Run`](./src/ArmnnNetworkExecutor.cpp) for more details.

#### Postprocessing

##### Decoding and Processing Inference Output
The output from inference must be decoded to obtain information about detected objects in the frame. The examples
contain implementations for two networks, but you may also implement your own network decoding solution here.

For SSD MobileNet V1 models, we decode the results to obtain the bounding box positions, classification index,
confidence and number of detections in the input frame.
See [`SSDResultDecoder`](./include/SSDResultDecoder.hpp) for more details.

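As an illustration, a hand-rolled decode of the four SSD output tensors could look like the sketch below. The tensor ordering, the normalised `[y_min, x_min, y_max, x_max]` box layout and the simplified `Detection` struct are assumptions for this sketch, not the actual `SSDResultDecoder` code:

```c++
#include <vector>

struct Detection                       // simplified stand-in for od::DetectedObject
{
    int   classIndex;
    float score;
    float box[4];                      // [x_min, y_min, x_max, y_max], normalised
};

// Sketch of SSD output decoding: the outputs are assumed to be laid out as
// [boxes, classes, scores, numDetections]; keep detections above a threshold.
std::vector<Detection> DecodeSsd(const std::vector<std::vector<float>>& outputs,
                                 float scoreThreshold)
{
    const std::vector<float>& boxes   = outputs[0];
    const std::vector<float>& classes = outputs[1];
    const std::vector<float>& scores  = outputs[2];
    const int numDetections = static_cast<int>(outputs[3][0]);

    std::vector<Detection> detections;
    for (int i = 0; i < numDetections; ++i)
    {
        if (scores[i] <= scoreThreshold)
        {
            continue;
        }
        Detection d;
        d.classIndex = static_cast<int>(classes[i]);
        d.score      = scores[i];
        // Reorder [y_min, x_min, y_max, x_max] to [x_min, y_min, x_max, y_max].
        d.box[0] = boxes[4 * i + 1];
        d.box[1] = boxes[4 * i + 0];
        d.box[2] = boxes[4 * i + 3];
        d.box[3] = boxes[4 * i + 2];
        detections.push_back(d);
    }
    return detections;
}
```
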
For YOLO V3 Tiny models, we decode the output and perform non-maximum suppression to filter out any weak detections
below a confidence threshold and any redundant bounding boxes above an intersection-over-union (IoU) threshold.
See [`YoloResultDecoder`](./include/YoloResultDecoder.hpp) for more details.
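
The IoU test at the heart of that suppression step is standard; a bare-bones version is sketched below (illustrative only, not the sample's implementation):

```c++
#include <algorithm>

// Intersection-over-union of two boxes given as [x_min, y_min, x_max, y_max].
// During non-maximum suppression, a weaker detection is discarded when its
// IoU with a stronger detection of the same class exceeds a chosen threshold.
float Iou(const float a[4], const float b[4])
{
    const float areaA = (a[2] - a[0]) * (a[3] - a[1]);
    const float areaB = (b[2] - b[0]) * (b[3] - b[1]);
    if (areaA <= 0.0f || areaB <= 0.0f)
    {
        return 0.0f;
    }
    const float interW = std::min(a[2], b[2]) - std::max(a[0], b[0]);
    const float interH = std::min(a[3], b[3]) - std::max(a[1], b[1]);
    const float inter  = std::max(0.0f, interW) * std::max(0.0f, interH);
    return inter / (areaA + areaB - inter);
}
```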

You are encouraged to experiment with the threshold values for confidence and intersection-over-union (IoU)
to achieve the best visual results.

The detection results are always returned as a vector of [`DetectedObject`](./include/DetectedObject.hpp),
with the box positions list containing bounding box coordinates in the form `[x_min, y_min, x_max, y_max]`.

#### Drawing Bounding Boxes
The post-processing step accepts a callback function which is invoked when decoding is finished. We use it
to draw detections on the initial frame.
With the obtained detections, the [`AddInferenceOutputToFrame`](./src/ImageUtils.cpp) function draws bounding boxes around
the detected objects and adds the associated label and confidence score.
```c++
//results - inference output
objectDetectionPipeline->PostProcessing(results, [&frame, &labels](od::DetectedObjects detects) -> void {
        AddInferenceOutputToFrame(detects, *frame, labels);
    });
```
The processed frames are written to a file or displayed in a separate window.
@ -0,0 +1,64 @@
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT

set(TEST_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/test/resources)
file(MAKE_DIRECTORY ${TEST_RESOURCES_DIR})
add_definitions (-DTEST_RESOURCE_DIR="${TEST_RESOURCES_DIR}")
set(TEST_TARGET_NAME "${CMAKE_PROJECT_NAME}-tests")

file(GLOB TEST_SOURCES "test/*")

include(../common/cmake/find_catch.cmake)

ExternalProject_Add(basketball-image
        URL https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/basketball1.png
        DOWNLOAD_NO_EXTRACT 1
        CONFIGURE_COMMAND ""
        BUILD_COMMAND ${CMAKE_COMMAND} -E copy <DOWNLOAD_DIR>/basketball1.png ${CMAKE_CURRENT_SOURCE_DIR}/test/resources
        INSTALL_COMMAND ""
        )

ExternalProject_Add(messi
        URL https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/messi5.jpg
        DOWNLOAD_NO_EXTRACT 1
        CONFIGURE_COMMAND ""
        BUILD_COMMAND ${CMAKE_COMMAND} -E copy <DOWNLOAD_DIR>/messi5.jpg ${CMAKE_CURRENT_SOURCE_DIR}/test/resources
        INSTALL_COMMAND ""
        )

ExternalProject_Add(vtest
        URL https://raw.githubusercontent.com/opencv/opencv/4.0.0/samples/data/Megamind.avi
        DOWNLOAD_NO_EXTRACT 1
        CONFIGURE_COMMAND ""
        BUILD_COMMAND ${CMAKE_COMMAND} -E copy <DOWNLOAD_DIR>/Megamind.avi ${CMAKE_CURRENT_SOURCE_DIR}/test/resources
        INSTALL_COMMAND ""
        )

ExternalProject_Add(ssd_mobile
        URL https://github.com/ARM-software/ML-zoo/raw/master/models/object_detection/ssd_mobilenet_v1/tflite_uint8/ssd_mobilenet_v1.tflite
        DOWNLOAD_NO_EXTRACT 1
        CONFIGURE_COMMAND ""
        BUILD_COMMAND ${CMAKE_COMMAND} -E copy <DOWNLOAD_DIR>/ssd_mobilenet_v1.tflite ${CMAKE_CURRENT_SOURCE_DIR}/test/resources
        INSTALL_COMMAND ""
        )

add_executable("${TEST_TARGET_NAME}" ${SOURCES} ${TEST_SOURCES} ${CVUTILS_SOURCES} ${UTILS_SOURCES})

add_dependencies(
        "${TEST_TARGET_NAME}"
        "catch2-headers"
        "vtest"
        "messi"
        "basketball-image"
        )

if (NOT OPENCV_LIBS_FOUND)
    message("Building OpenCV libs")
    add_dependencies("${TEST_TARGET_NAME}" "${OPENCV_LIB}")
endif()

target_include_directories("${TEST_TARGET_NAME}" PUBLIC ${TEST_TPIP_INCLUDE}
        ${ARMNN_INCLUDE_DIR}
        ${OPENCV_INCLUDE_DIR} ${DEPENDENCIES_DIR} ${TEST_RESOURCES_DIR} ${COMMON_INCLUDE_DIR})

target_link_libraries("${TEST_TARGET_NAME}" PUBLIC ${ARMNN_LIBS} ${OPENCV_LIBS} ${FFMPEG_LIBS})
arch/arm/ARMnn/samples/ObjectDetection/include/BoundingBox.hpp
@ -0,0 +1,108 @@
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

namespace od
{
/**
 * @brief Class used to store and receive bounding box location and size information
 *
 */
class BoundingBox
{
public:
    /**
     * @brief Default constructor
     */
    BoundingBox();

    /**
     * @brief Constructor with parameters to configure the bounding box dimensions
     * @param[in]  x      int value representing the x coordinate.
     * @param[in]  y      int value representing the y coordinate.
     * @param[in]  width  unsigned int value representing the width value.
     * @param[in]  height unsigned int value representing the height value.
     */
    BoundingBox(int x, int y, unsigned int width, unsigned int height);

    /**
     * @brief Constructor with a BoundingBox type parameter to copy from.
     * @param[in]  other Bounding box to copy.
     */
    BoundingBox(const BoundingBox& other);

    ~BoundingBox() = default;

    /**
     * @brief Function to retrieve the X coordinate.
     */
    int GetX() const;

    /**
     * @brief Function to retrieve the Y coordinate.
     */
    int GetY() const;

    /**
     * @brief Function to retrieve the width.
     */
    unsigned int GetWidth() const;

    /**
     * @brief Function to retrieve the height.
     */
    unsigned int GetHeight() const;

    /**
     * @brief Function to set the X coordinate.
     * @param[in]  x  int value representing x coordinate
     */
    void SetX(int x);

    /**
     * @brief Function to set the Y coordinate.
     * @param[in]  y  int value representing y coordinate
     */
    void SetY(int y);

    /**
     * @brief Function to set the width of the BoundingBox.
     * @param[in]  width  unsigned int value representing the width
     */
    void SetWidth(unsigned int width);

    /**
     * @brief Function to set the height of the BoundingBox.
     * @param[in]  height  unsigned int value representing the height
     */
    void SetHeight(unsigned int height);

    /**
     * @brief Assignment operator: copies the dimensions of another BoundingBox
     * @param[in]  other  BoundingBox to copy from
     */
    BoundingBox& operator=(const BoundingBox& other);

private:
    int m_X;
    int m_Y;
    unsigned int m_Width;
    unsigned int m_Height;
};

/*
 * @brief: Get a bounding box within the limits of another bounding box
 *
 * @param[in]   boxIn     Input bounding box
 * @param[out]  boxOut    Output bounding box
 * @param[in]   boxLimits Bounding box defining the limits which the output
 *                        needs to conform to.
 * @return none
 */
void GetValidBoundingBox(const BoundingBox& boxIn, BoundingBox& boxOut,
                         const BoundingBox& boxLimits);

}// namespace od
@ -0,0 +1,96 @@
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "BoundingBox.hpp"

#include <string>
#include <vector>

namespace od
{
/**
 * An object detection network inference result decoded data representation.
 */
class DetectedObject
{

public:
    DetectedObject();

    /**
     * Creates detection with given parameters.
     *
     * @param id - class id
     * @param label - human readable text class label
     * @param boundingBox - rectangular detection coordinates
     * @param score - detection score/probability
     */
    DetectedObject(unsigned int id,
                   std::string label,
                   const BoundingBox& boundingBox,
                   float score);

    ~DetectedObject() = default;

    /**
     * Get class id
     * @return id
     */
    unsigned int GetId() const;

    /**
     * Get human readable text class label
     * @return label
     */
    const std::string& GetLabel() const;

    /**
     * Get rectangular detection coordinates
     * @return detection coordinates
     */
    const BoundingBox& GetBoundingBox() const;

    /**
     * Get detection score
     * @return score
     */
    float GetScore() const;

    /**
     * Set class id
     * @param[in] id - class id
     */
    void SetId(unsigned int id);

    /**
     * Set class label
     * @param[in] label - human readable text class label
     */
    void SetLabel(const std::string& label);

    /**
     * Set detection coordinates
     * @param[in] boundingBox detection coordinates
     */
    void SetBoundingBox(const BoundingBox& boundingBox);

    /**
     * Set detection score
     * @param[in] score - detection score
     */
    void SetScore(float score);

private:
    unsigned int m_Id;
    std::string m_Label;
    BoundingBox m_BoundingBox;
    float m_Score;
};

using DetectedObjects = std::vector<DetectedObject>;

}// namespace od
@ -0,0 +1,39 @@
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DetectedObject.hpp"
#include "Types.hpp"

#include <vector>

namespace od
{

class IDetectionResultDecoder
{
public:
    /**
     * @brief Returns decoded detected objects from a network model.
     * @desc  Outputs 4 vectors: bounding boxes, label, probabilities & number of detections.
     *        This function decodes network model output and converts it to expected format.
     *
     * @param[in]  results          Vector of outputs from a model.
     * @param[in]  outputFrameSize  Struct containing height & width of output frame that is displayed.
     * @param[in]  resizedFrameSize Struct containing height & width of resized input frame before padding
     *                              and inference.
     * @param[in]  labels           Vector of network labels.
     *
     * @return Vector of decoded detected objects.
     */
    virtual DetectedObjects Decode(const common::InferenceResults<float>& results,
                                   const common::Size& outputFrameSize,
                                   const common::Size& resizedFrameSize,
                                   const std::vector<std::string>& labels) = 0;

};
}// namespace od