I am trying to detect cars in a specific area of a live video stream. For this I used the Tensorflow Object Detection API. Detection now works reasonably well: almost every car in the live stream is detected as a "car", with a bounding box around it and a detection confidence in percent.
My question: how do I check for only the bounding boxes I actually need?
For example, since both the required area and the camera used for detection are fixed in place, I used OpenCV's cv2.rectangle() function and passed it the coordinates (x1,y1) and (x2,y2) of the required area. So I now have a constant rectangular box drawn around that area. My task is to somehow find out that a car has arrived in the marked rectangle, and to print a "detected" message in the Ubuntu terminal.
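Concretely, marking the area is nothing more than the following (the coordinates are the ones from the full code below, and the black test frame is just a stand-in for a captured image):

import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a captured frame
# My fixed region of interest: top-left (x1, y1) to bottom-right (x2, y2).
cv2.rectangle(frame, (201, 267), (355, 476), (0, 255, 0), 2)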
I am struggling with how to compare the coordinates of the detection bounding boxes with the coordinates of my rectangle. So the questions are:
- How do I capture only the required bounding boxes (and with them only the detected cars)?
- How do I determine whether those bounding boxes are inside the rectangle / marked area? (a sketch of what I mean follows this list)
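To make the second question concrete, here is a minimal sketch of the geometric test I think I need, assuming the detected box has already been converted to pixel coordinates (the helper name boxes_intersect and the choice of overlap rather than full containment are my own, not something from the API):

def boxes_intersect(box_a, box_b):
    """Return True if two pixel-space boxes (x1, y1, x2, y2) overlap."""
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    return ax1 < bx2 and bx1 < ax2 and ay1 < by2 and by1 < ay2

# Example: a hypothetical car box against my fixed area1 rectangle.
car_box = (220, 300, 340, 450)    # made-up detection, in pixels
area1_box = (201, 267, 355, 476)  # the rectangle I already draw
print(boxes_intersect(car_box, area1_box))  # -> True

If full containment were required instead of any overlap, the comparison would become ax1 >= bx1 and ay1 >= by1 and ax2 <= bx2 and ay2 <= by2.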
Here is the code I am using:
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from PIL import Image
import cv2
cap = cv2.VideoCapture(0)
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops

if tf.__version__ != '1.10.1':
    raise ImportError('Please upgrade your tensorflow installation to v1.10.1* or later!')
# ## Env setup
# ## Object detection imports
# Here are the imports from the object detection module.
from utils import label_map_util
from utils import visualization_utils as vis_util
# # Model preparation
# ## Variables
# Any model exported using the `export_inference_graph.py` tool can be loaded
# here simply by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the
# [detection model zoo]
# What model to download.
MODEL_NAME = 'car_inference_graph'
# Path to frozen detection graph. This is the actual model that is used
# for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('training', 'object-detection.pbtxt')
NUM_CLASSES = 1
# ## Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `airplane`. Here
# we use internal utility functions, but anything that returns a dictionary
# mapping integers to appropriate string labels would be fine.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# ## Helper code
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)
# # Detection
def run_inference_for_single_image(image, graph):
    with graph.as_default():
        with tf.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for a single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to
                # image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0],
                                           [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                           [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0], image.shape[1])
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # Run inference
            output_dict = sess.run(tensor_dict,
                                   feed_dict={image_tensor: np.expand_dims(image, 0)})
            # All outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict[
                'detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
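# NOTE (relevant to my question): as far as I understand the API, the
# 'detection_boxes' returned above are normalized to [0, 1] and ordered
# [ymin, xmin, ymax, xmax], so before comparing them with my pixel-coordinate
# rectangles they would need to be scaled back to pixels, along these lines
# (frame_width/frame_height being whatever the capture delivers):
#   ymin, xmin, ymax, xmax = output_dict['detection_boxes'][0]
#   (x1, y1) = (int(xmin * frame_width), int(ymin * frame_height))
#   (x2, y2) = (int(xmax * frame_width), int(ymax * frame_height))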
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        while True:
            ret, image_np = cap.read()
            # Expand dimensions since the model expects images to have
            # shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular
            # object was detected.
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the
            # objects. The score is shown on the result image, together with
            # the class label.
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            # My two fixed regions of interest, drawn in pixel coordinates.
            area1 = cv2.rectangle(image_np, (201, 267), (355, 476),
                                  (0, 255, 0), 2)
            area2 = cv2.rectangle(image_np, (354, 271), (562, 454),
                                  (255, 0, 0), 2)
            cv2.imshow("object detection", image_np)
            # This is my (non-working) attempt at checking whether a detected
            # box has entered one of the marked areas:
            if 'detection_boxes:0' == 1 in area1[(201, 267), (353, 468)]:
                print("area1 occupied!")
            else:
                print("area1 free!")
            if 'detection_boxes:1' == 1 in area2[(354, 271), (562, 454)]:
                print("area2 occupied!")
            else:
                print("area2 free!")
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                cap.release()
                break
I am finding it hard to work out a solution. Please help.
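For what it is worth, the direction I have been experimenting with for the end of the while loop is the sketch below; a minimal sketch, assuming a 0.5 score threshold, assuming class id 1 is "car" in my single-class label map, and reusing the boxes_intersect() helper from the sketch near the top of the question (none of these choices come from the API itself):

# Inside the while loop, after sess.run(...):
h, w = image_np.shape[:2]
area1_box = (201, 267, 355, 476)
area2_box = (354, 271, 562, 454)
area1_occupied = False
area2_occupied = False
for box, score, cls in zip(np.squeeze(boxes),
                           np.squeeze(scores),
                           np.squeeze(classes).astype(np.int32)):
    # Keep only confident detections of my single 'car' class.
    if score < 0.5 or cls != 1:
        continue
    ymin, xmin, ymax, xmax = box  # normalized [ymin, xmin, ymax, xmax]
    car_box = (int(xmin * w), int(ymin * h), int(xmax * w), int(ymax * h))
    if boxes_intersect(car_box, area1_box):
        area1_occupied = True
    if boxes_intersect(car_box, area2_box):
        area2_occupied = True
print("area1 occupied!" if area1_occupied else "area1 free!")
print("area2 occupied!" if area2_occupied else "area2 free!")

Is this the right way to go about it, or is there a cleaner way to get only the required boxes out of the API?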
Technical details:
Tensorflow 1.10
OS: Ubuntu 18.04
Python 3.6
OpenCV 3.4.2
Thanks :)