Amazing-Python-Scripts
166 lines · 6.4 KB
#!/usr/bin/env python3
"""
Copyright (c) 2018 Intel Corporation.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
24
import os
import sys
import logging as log

from openvino.inference_engine import IENetwork, IECore
29
30
class Network:
    """
    Load and configure inference plugins for the specified target device,
    and perform synchronous and asynchronous inference for the specified
    infer requests.
    """

    def __init__(self):
        # All state is populated by load_model(); everything is None until then.
        self.net = None                   # network read from the IR files
        self.plugin = None                # IECore instance (or caller-supplied plugin)
        self.input_blob = None            # name of the first input layer
        self.out_blob = None              # name of the first output layer
        self.net_plugin = None            # executable network loaded on the device
        self.infer_request_handle = None  # handle of the last async infer request

    def load_model(self, model, device, input_size, output_size, num_requests,
                   cpu_extension=None, plugin=None):
        """
        Loads a network and an image to the Inference Engine plugin.

        :param model: .xml file of pre-trained model
        :param device: Target device (e.g. "CPU")
        :param input_size: Expected number of input layers
        :param output_size: Expected number of output layers
        :param num_requests: Number of infer requests (0 = device default).
                             Limited to device capabilities.
        :param cpu_extension: Extension library for the CPU device
        :param plugin: Optional pre-initialized plugin to reuse for this device
        :return: Tuple of (plugin, shape of the input layer)
        """
        model_xml = model
        # The weights file sits next to the .xml with a .bin extension.
        model_bin = os.path.splitext(model_xml)[0] + ".bin"

        # Plugin initialization for the specified device,
        # and load extensions library if specified.
        if not plugin:
            log.info("Initializing plugin for {} device...".format(device))
            self.plugin = IECore()
        else:
            self.plugin = plugin

        if cpu_extension and 'CPU' in device:
            self.plugin.add_extension(cpu_extension, "CPU")

        # Read the Intermediate Representation (IR).
        log.info("Reading IR...")
        self.net = self.plugin.read_network(model=model_xml, weights=model_bin)
        log.info("Loading IR to the plugin...")

        if "CPU" in device:
            # Abort early (with a hint about -l/--cpu_extension) if the device
            # cannot execute every layer of this network.
            supported_layers = self.plugin.query_network(self.net, "CPU")
            not_supported_layers = [
                l for l in self.net.layers.keys() if l not in supported_layers
            ]
            if len(not_supported_layers) != 0:
                log.error("Following layers are not supported by "
                          "the plugin for specified device {}:\n {}".
                          format(device,
                                 ', '.join(not_supported_layers)))
                log.error("Please try to specify cpu extensions library path"
                          " in command line parameters using -l "
                          "or --cpu_extension command line argument")
                sys.exit(1)

        if num_requests == 0:
            # Load the network with the device-default number of requests.
            self.net_plugin = self.plugin.load_network(
                network=self.net, device_name=device)
        else:
            self.net_plugin = self.plugin.load_network(
                network=self.net, num_requests=num_requests, device_name=device)

        self.input_blob = next(iter(self.net.inputs))
        self.out_blob = next(iter(self.net.outputs))
        # Bug fix: the failure messages previously formatted the *actual* layer
        # counts, which always equal the compared value and so never described
        # what the caller expected. Report the expected sizes instead.
        assert len(self.net.inputs.keys()) == input_size, \
            "Supports only {} input topologies".format(input_size)
        assert len(self.net.outputs) == output_size, \
            "Supports only {} output topologies".format(output_size)

        return self.plugin, self.get_input_shape()

    def get_input_shape(self):
        """
        Gives the shape of the input layer of the network.

        :return: Shape of the input layer (list of dimensions)
        """
        return self.net.inputs[self.input_blob].shape

    def performance_counter(self, request_id):
        """
        Queries performance measures per layer to get feedback on what is the
        most time-consuming layer.

        :param request_id: Index of Infer request value. Limited to device capabilities.
        :return: Per-layer performance counters for that request
        """
        perf_count = self.net_plugin.requests[request_id].get_perf_counts()
        return perf_count

    def exec_net(self, request_id, frame):
        """
        Starts asynchronous inference for the specified request.

        :param request_id: Index of Infer request value. Limited to device capabilities.
        :param frame: Input image fed to the network's input blob
        :return: Instance of the Executable Network class
        """
        self.infer_request_handle = self.net_plugin.start_async(
            request_id=request_id, inputs={self.input_blob: frame})
        return self.net_plugin

    def wait(self, request_id):
        """
        Waits for the result of the given request to become available.

        :param request_id: Index of Infer request value. Limited to device capabilities.
        :return: Status code from the underlying wait() call (-1 = block until done)
        """
        wait_process = self.net_plugin.requests[request_id].wait(-1)
        return wait_process

    def get_output(self, request_id, output=None):
        """
        Gives a list of results for the output layer of the network.

        :param request_id: Index of Infer request value. Limited to device capabilities.
        :param output: Optional name of a specific output layer; when given, the
                       result is read from the last async request handle instead.
        :return: Results for the specified request
        """
        if output:
            res = self.infer_request_handle.outputs[output]
        else:
            res = self.net_plugin.requests[request_id].outputs[self.out_blob]
        return res

    def clean(self):
        """
        Deletes this wrapper's references to the network, plugin, and
        executable network so they can be released.

        :return: None
        """
        del self.net_plugin
        del self.plugin
        del self.net
167