Есть ли способ отобразить изображение в виде прокрутки при добавлении, если папка python? - PullRequest
0 голосов
/ 30 марта 2020

У меня есть два отдельных кода, один отображает вновь добавленное изображение из папки во время выполнения кода. А другой показывает все изображения, представленные в папке, в виде прокрутки, созданной с помощью pyqt5. Проблема здесь в том, что представление прокрутки не обновляется недавно добавленным изображением, так как я работаю над проектом обнаружения объектов, поэтому мой код обрезает каждый обнаруженный объект и продолжает сохранять в папке (путь = "C: / Users / AISHA / Обнаружение / модель / исследование / tensorflow_object_counting_api / detected_objects "). Ниже мой основной GUI код.

import sys
import numpy as np

import cv2
import os
from PyQt5 import QtCore
from PyQt5.QtCore  import pyqtSlot
from PyQt5.QtGui import QImage , QPixmap
from PyQt5.QtWidgets import QDialog , QApplication
from PyQt5.uic import loadUi

from PyQt5 import QtCore, QtGui, QtWidgets

from PyQt5.QtCore import QDir, Qt, QUrl
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import (QApplication, QFileDialog, QHBoxLayout, QLabel,
        QPushButton, QSizePolicy, QSlider, QStyle, QVBoxLayout, QWidget)
from PyQt5.QtWidgets import QMainWindow,QWidget, QPushButton, QAction
from PyQt5.QtGui import QIcon
import sys


import tensorflow as tf
import cv2

import os, sys, time
from utils import backbone
from api import object_counting_api


import csv

import numpy as np
from utils import visualization_utils as vis_util

class mycode(QMainWindow):
        def __init__(self):
                super(mycode,self).__init__()
                #loadUi("student3.ui",self)
                loadUi("untitled5.ui",self)
                highlight_dir = "C:/Users/AISHA/Detection/models/research/tensorflow_object_counting_api/detected_objects"
                content_widget = QtWidgets.QWidget()
                self.scrollArea.setWidget(content_widget)
                self._lay = QtWidgets.QVBoxLayout(content_widget)

                self.files_it = iter([os.path.join(highlight_dir, file) for file in os.listdir(highlight_dir)])

                self._timer = QtCore.QTimer(self, interval=1)
                self._timer.timeout.connect(self.on_timeout)
                self._timer.start()

                self.logic = 0
                self.value = 1
                #self.SHOW.clicked.connect(self.onClicked)
                self.TEXT.setText("Kindly 'Select' a file to start counting.")
                #self.CAPTURE.clicked.connect(self.CaptureClicked)
                self.actionOpen.setStatusTip('Open movie')
                self.actionOpen.triggered.connect(self.onClicked)

        @pyqtSlot()
        def onClicked(self):
                self.TEXT.setText('Dislaying Vehicle Detection and Counting')
                fileName, _ = QFileDialog.getOpenFileName(self, "Open Movie", QDir.homePath())
                detection_graph, category_index = backbone.set_model('inference_graph', 'labelmap1.pbtxt')
                is_color_recognition_enabled = 1
                roi = 385
                deviation = 5
                cap =cv2.VideoCapture(fileName)
                total_passed_vehicle = 0
                total_passed_vehicle1 = 0
                total_passed_vehicle2 = 0
                total_passed_vehicle3 = 0
                total_passed_vehicle4 = 0
                width_heigh_taken = True
                with detection_graph.as_default():
                        with tf.Session(graph=detection_graph) as sess:
                                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                                detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                                detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
                                detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
                                num_detections = detection_graph.get_tensor_by_name('num_detections:0')

                                #while (True):
                                #print(cap.read())
                                while(cap.isOpened()):
                                        ret, frame=cap.read()

                                        if ret==True:
                                                print('here')
                                                #self.displayImage(frame,1)
                                                image_np_expanded = np.expand_dims(frame, axis=0)
                                                (boxes, scores, classes, num) = sess.run(
                                                        [detection_boxes, detection_scores, detection_classes, num_detections],
                                                        feed_dict={image_tensor: image_np_expanded})
                                                font = cv2.FONT_HERSHEY_SIMPLEX
                                                counter, csv_line, counting_mode,counter1, counter2, counter3, counter4 = vis_util.visualize_boxes_and_labels_on_image_array_y_axis(cap.get(1),
                                                                                                                             frame,
                                                                                                                             2,
                                                                                                                             is_color_recognition_enabled,
                                                                                                                             np.squeeze(boxes),
                                                                                                                             np.squeeze(classes).astype(np.int32),
                                                                                                                             np.squeeze(scores),
                                                                                                                             category_index,
                                                                                                                             y_reference = roi,
                                                                                                                             deviation = deviation,
                                                                                                                             use_normalized_coordinates=True,
                                                                                                                             line_thickness=4)
                                                if counter == 1:
                                                       cv2.line(frame, (0, roi), (800, roi), (0, 0xFF, 0), 3)

                                                #cv2.imshow('object counting',frame)
                                                self.displayImage(frame,1)

                                                if cv2.waitKey(1) & 0xFF == ord('q'):
                                                        break

                                cap.release()
                                cv2.destroyAllWindows()
                ##      def CaptureClicked(self):
##              self.logic=2
        def displayImage(self,img,window=1):
                qformat=QImage.Format_Indexed8
                if len(img.shape)==3:
                        if(img.shape[2])==4:
                                qformat=QImage.Format_RGBA888
                        else:
                                qformat=QImage.Format_RGB888
                img = QImage(img,img.shape[1],img.shape[0],qformat)
                img = img.rgbSwapped()
                self.imgLabel.setPixmap(QPixmap.fromImage(img))
                self.imgLabel.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)


        def on_timeout(self):
                try:
                   file = next(self.files_it)
                   pixmap = QtGui.QPixmap(self.files_it)
                   self.add_pixmap(pixmap)
                except StopIteration:
                        self._timer.stop()
        def add_pixmap(self, pixmap):
                if not pixmap.isNull():
                        label = QtWidgets.QLabel(pixmap=pixmap)
                        self._lay.addWidget(label)


app =  QApplication(sys.argv)
window=mycode()
window.show()
sys.exit(app.exec_())
##try:
##      sys.exit(app.exec_())
##except:
##      print('excitng')
'''

Код для обработки в реальном времени выглядит следующим образом

  path_to_watch = "C:/Users/AISHA/Detection/models/research/tensorflow_object_counting_api/detected_objects"
  files = iter([os.path.join(path_to_watch, f) for f in os.listdir(path_to_watch)])
  before = dict ([(f, None) for f in os.listdir (path_to_watch)])
  while 1:
   time.sleep (10)
   after = dict ([(f, None) for f in os.listdir (path_to_watch)])
   added = [f for f in after if not f in before]
   if added: print ("Added: ", ", ".join (added))
   before = after
...