RTSP video stream delay in a PyQt label


My minimal working code snippet is below. When I read from an RTSP IP camera that is on the same network as my computer, I get a delay of around 1 second.

Is this because I am using Python? Because of this camera? Or do you have a suggestion in case I am doing something wrong in the code?

from PyQt5 import QtCore
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QSizePolicy, QLabel
from CalibrationGUI.qtgui.CameraThread import CaptureIpCameraFramesWorker

class VideoLabel(QLabel):

    def __init__(self, camera_unit, ui_state_obj, parentGiven=None):
        super(VideoLabel, self).__init__(parent=parentGiven)
        self.ui_state_obj = ui_state_obj
        self.camera_unit = camera_unit
        self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
        self.setScaledContents(True)
        self.installEventFilter(self)
        self.setMaximumSize(1265536, 1265536)
        self.setupUI()


    def setupUI(self):
        self.Camworker = CaptureIpCameraFramesWorker(self.camera_unit, self.ui_state_obj)
        self.Camworker.ImageUpdated.connect(lambda image: self.ShowCamera(image))

    @QtCore.pyqtSlot(QImage)
    def ShowCamera(self, frame: QImage) -> None:
        self.frame = frame
        self.setPixmap(QPixmap.fromImage(frame))
    
    def startStream(self):
        self.Camworker.start()
    
    def stopStream(self):
        if self.Camworker.isRunning():
            self.Camworker.quit()
            
    def get_frame(self):
        return self.Camworker.get_frame()
        
import cv2
from PyQt5.QtCore import pyqtSignal, QThread, Qt
from PyQt5.QtGui import QImage


class CaptureIpCameraFramesWorker(QThread):
    # Signal emitted when a new image or a new frame is ready.
    ImageUpdated = pyqtSignal(QImage)

    def __init__(self, camera_unit, UI_state_obj) -> None:
        super(CaptureIpCameraFramesWorker, self).__init__()
        # Declare and initialize instance variables
        self.camera_unit = camera_unit
        self.name = camera_unit.get_name()
        self.__thread_active = True
        self.fps = 0
        self.__thread_pause = False
        self.readframe = None

    def get_frame(self):
        return self.readframe

    def run(self) -> None:
        # While the thread is active.
        while self.__thread_active:
            if not self.__thread_pause:
                # Grabs, decodes and returns the next video frame.
                frame = self.camera_unit.get_current_image()
                # camera_unit.get_current_image() returns the image as a numpy array;
                # camera_unit fetches frames from the stream URL actively in the back end
                # (a sketch of such a grabber is given after the post).
                ret = frame is not None
                if ret:
                    self.readframe = frame
                    # Get the frame height, width and channels.
                    height, width, channels = frame.shape
                    # Calculate the number of bytes per line.
                    bytes_per_line = width * channels
                    # If frame is read correctly.
                    # Convert image from BGR (cv2 default color format) to RGB (Qt default color format).
                    cv_rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    # Convert the image to Qt format.
                    qt_rgb_image = QImage(cv_rgb_image.data, width, height, bytes_per_line, QImage.Format_RGB888)
                    # Scale the image.
                    qt_rgb_image_scaled = qt_rgb_image.scaled(1280, 720, Qt.KeepAspectRatio)  # 720p
                    self.ImageUpdated.emit(qt_rgb_image_scaled)
        # When everything done, release the video capture object.
        # cap.release()
        # Tells the thread's event loop to exit with return code 0 (success).
        self.quit()

I have modified the code from https://github.com/god233012yamil/Streaming-IP-Cameras-Using-PyQt-and-OpenCV/blob/main/Streaming_IP_Camera_Using_PyQt_OpenCV.py#L150
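
camera_unit itself is not shown in the snippet, so the grabber below is only a minimal sketch of what the comments describe: an object whose get_current_image() returns the latest frame as a numpy array while a background thread keeps reading from the stream. The class name LatestFrameGrabber, the rtsp_url parameter, and the stop() method are illustrative assumptions and not part of the original code. Keeping only the newest decoded frame (and requesting a small capture buffer) is a common way to avoid the extra latency that a deep decode buffer adds.

import threading

import cv2


class LatestFrameGrabber:
    """Hypothetical stand-in for camera_unit: a background thread drains the
    RTSP stream and only the most recent decoded frame is kept around."""

    def __init__(self, rtsp_url, name="camera"):
        self._name = name
        self._capture = cv2.VideoCapture(rtsp_url)
        # Ask the backend to keep its internal buffer small; this is
        # best-effort, as not every OpenCV backend honours the property.
        self._capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        self._latest = None
        self._lock = threading.Lock()
        self._running = True
        self._thread = threading.Thread(target=self._reader, daemon=True)
        self._thread.start()

    def _reader(self):
        # Read as fast as frames arrive; older frames are simply overwritten.
        while self._running:
            ok, frame = self._capture.read()
            if ok:
                with self._lock:
                    self._latest = frame

    def get_name(self):
        return self._name

    def get_current_image(self):
        # Newest BGR frame as a numpy array, or None if nothing has arrived yet.
        with self._lock:
            return self._latest

    def stop(self):
        self._running = False
        self._thread.join()
        self._capture.release()

Because cv2.CAP_PROP_BUFFERSIZE is only honoured by some backends, the keep-only-the-latest-frame loop is the part doing most of the work in this sketch.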
