I couldn't find a way to get rid of this NoneType error: the 'NoneType' object is not subscriptable exception is causing a problem
0 votes / 23 March 2020
I am working with the TensorFlow object_detection API. I have followed the method in https://towardsdatascience.com/creating-your-own-object-detector-ad69dda69c85, but I still couldn't find a solution to this "'NoneType' object is not subscriptable" error:


#!/usr/bin/env python
# coding: utf-8

# # Object Detection API Demo
# 
# <table align="left"><td>
#   <a target="_blank" href="https://colab.sandbox.google.com/github/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb">Run in Google Colab</a>
# </td><td>
#   <a target="_blank" href="https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb">View source on GitHub</a>
# </td></table>

# Welcome to the [Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection).
# This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image.

# > **Important**: This tutorial is to help you through the first step towards using the
# [Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection) to build models.
# If you just need an off-the-shelf model that does the job, see the
# [TFHub object detection example](https://colab.sandbox.google.com/github/tensorflow/hub/blob/master/examples/colab/object_detection.ipynb).

# # Setup

# Important: If you're running on a local machine, be sure to follow the
# [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md).


# ### Install

# In[1]:


#pip install tensorflow-object-detection-api


# In[2]:


import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile

from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image


sys.path.append("..")
from object_detection.utils import ops as utils_ops

if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
    raise ImportError('Please upgrade your Tensorflow installation to  v1.9.* or later!')


# In[3]:


#get_ipython().run_line_magic('matplotlib', 'inline')


# In[4]:


#from object_detection.utils import ops as utils_ops
from utils import label_map_util

from utils import visualization_utils as vis_util


# In[5]:
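# Paths to the exported inference graph (frozen_inference_graph.pb) and the label map used for training.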


MODEL_NAME = "inference_graph"
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_LABELS = "Training/labelmap.pbtxt" 


# In[6]:
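# Load the frozen detection graph exported from the trained model.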


detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')


# In[7]:
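# Build a category index that maps class IDs to display names from the label map.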


category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)


# In[8]:


def run_inference_for_single_image(image,graph):
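    # Intended to run the detection graph on a single image and return a dict
    # of detection outputs (boxes, classes, scores and, if present, masks).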

    if 'detection_masks' in tensor_dict:

        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])

        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0,0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0,0,0], [real_num_detection, -1,-1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)

        tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)

        image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

        output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})

        output_dict['num_detections'] = int(output_dict['num_detections'][0])
        output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
        output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
        output_dict['detection_scores'] = output_dict['detection_scores'][0]
        if 'detection_masks' in output_dict:
            output_dict['detection_masks'] = output_dict['detection_masks'][0]

        return output_dict




# In[9]:


import cv2
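# Open the default webcam (device 0) and run detection on each captured frame.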
cap = cv2.VideoCapture(0)
try:
    #print("hai1")
    with detection_graph.as_default():
        with tf.compat.v1.Session() as sess:
            #print('hai2')
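            # Collect the output tensors that actually exist in this graph into tensor_dict.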
            ops = tf.compat.v1.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                #print("Hai3")
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
                   # print("hai4")

            while True:
                #print("Hai5")
                ret, image_np = cap.read()
                image_np_expanded = np.expand_dims(image_np, axis=0)

                output_dict = run_inference_for_single_image(image_np, detection_graph)
                # print("hai6")

                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    output_dict['detection_boxes'],
                    output_dict['detection_classes'],
                    output_dict['detection_scores'],
                    category_index=category_index,
                    instance_masks = output_dict.get('detection_masks'),
                    use_normalized_coordinates = True,
                    line_thickness = 8)
                print("Hai7")
                cv2.imshow('object_detection', cv2.resize(image_np, (800,600)))
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cap.release()
                    cv2.destroyAllWindows()
                    break

except Exception as e:
    print(e)
    cap.release()
...
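
For reference, below is a minimal sketch of how the inference helper is structured in the original object_detection_tutorial notebook that this script is based on. It is not the exact tutorial code, and it assumes sess and tensor_dict are passed in explicitly instead of being picked up from the enclosing scope. The key point is that the sess.run call and the return statement sit outside the `if 'detection_masks' in tensor_dict:` block, so the function still returns an output_dict for graphs that have no detection_masks output. In the version posted above, everything (including the return) is inside that if block, so for a graph without masks the function falls through and returns None, which then triggers "'NoneType' object is not subscriptable" when output_dict['detection_boxes'] is accessed.

    def run_inference_for_single_image(image, sess, tensor_dict):
        # Only graphs with mask outputs need the mask-reframing step.
        if 'detection_masks' in tensor_dict:
            detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
            detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
            real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
            detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
            detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
            detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                detection_masks, detection_boxes, image.shape[0], image.shape[1])
            detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
            tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)

        # Run inference and strip the batch dimension for every graph,
        # not only for graphs that expose 'detection_masks'.
        image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')
        output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})

        output_dict['num_detections'] = int(output_dict['num_detections'][0])
        output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
        output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
        output_dict['detection_scores'] = output_dict['detection_scores'][0]
        if 'detection_masks' in output_dict:
            output_dict['detection_masks'] = output_dict['detection_masks'][0]
        return output_dict

With this signature, the call inside the webcam loop becomes output_dict = run_inference_for_single_image(image_np, sess, tensor_dict) instead of passing the graph.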