如何通过cv2.videocapture()获取Kinect v2帧?
我想使用 Kinect v2 作为网络摄像头来运行 YOLO。通过阅读 YOLO 源代码,我发现视频流是通过 cv2.VideoCapture() 捕获的。
我查阅了 OpenCV 文档,找到了用于 Kinect 的 API:cv2.VideoCapture(cv2.CAP_OPENNI2),
但我无法从此函数获取任何流。不过,我可以通过 dev.create_color_stream() 获取彩色帧和深度帧,只是将其处理成适合 YOLO 的输入并不容易。不胜感激!
环境:
Ubuntu:20.04
Openni:2.3.0
Opencv-python:4.5.5.64
Libfreenect
from openni import openni2
import numpy as np
import cv2
# --- Device setup (runs at import time) ---
openni2.initialize() # can also accept the path of the OpenNI redistribution
dev = openni2.Device.open_any()  # opens the first OpenNI-compatible device found
print(dev.get_device_info())
# NOTE(review): the stream creation below is commented out, but
# get_last_depth()/get_last_rgb() read the module-level names
# depth_stream/color_stream — uncomment these (or create/start the
# streams elsewhere) before calling those functions, or they raise NameError.
# depth_stream = dev.create_depth_stream()
# color_stream = dev.create_color_stream()
# depth_stream.start()
# color_stream.start()
# get depth img — NOTE(review): "424x512" is the raw Kinect v2 depth size,
# but the code below reshapes to 480x640; confirm what the stream delivers.
def get_last_depth():
    """Read one depth frame from the module-level ``depth_stream``.

    Returns:
        np.ndarray: float32 array of shape (480, 640), depth in millimetres.

    NOTE(review): ``depth_stream`` must have been created with
    ``dev.create_depth_stream()`` and started before calling this,
    otherwise a NameError is raised.
    """
    frame = depth_stream.read_frame()
    # Each pixel arrives as a little-endian uint16 split into two bytes.
    dframe_data = np.array(frame.get_buffer_as_triplet()).reshape([480, 640, 2])
    low = np.asarray(dframe_data[:, :, 0], dtype='float32')
    high = np.asarray(dframe_data[:, :, 1], dtype='float32')
    # BUGFIX: the high byte of a 16-bit value is worth 256, not 255.
    # (Also removed the unreachable depth_stream.stop() that followed the
    # return statement in the original.)
    return low + high * 256
#get rgb img, 1080x1920x3 (reshaped as 3-channel below)
def get_last_rgb():
    """Read one color frame from the module-level ``color_stream``.

    Returns:
        np.ndarray: uint8 array of shape (1080, 1920, 3) in BGR channel
        order, ready for cv2.imshow / OpenCV-based pipelines.

    NOTE(review): ``color_stream`` must have been created with
    ``dev.create_color_stream()`` and started before calling this,
    otherwise a NameError is raised.
    """
    cframe = color_stream.read_frame()
    cframe_data = np.array(cframe.get_buffer_as_triplet()).reshape([1080, 1920, 3])
    # OpenNI delivers RGB; OpenCV expects BGR — reverse the channel axis.
    # .copy() keeps the result contiguous, matching the original transpose
    # construction. (Removed the unreachable color_stream.stop() that
    # followed the return statement in the original.)
    return cframe_data[:, :, ::-1].copy()
if __name__ == "__main__":
    # BUGFIX: open the OpenNI2 capture ONCE, outside the loop — the
    # original re-created cv2.VideoCapture on every iteration, which
    # leaks device handles and re-initializes the driver per frame.
    capture = cv2.VideoCapture(cv2.CAP_OPENNI2)
    if not capture.isOpened():
        raise RuntimeError("cv2.VideoCapture(cv2.CAP_OPENNI2) failed to open "
                           "the device — check OpenNI2/libfreenect install")
    try:
        for i in range(1000):
            if not capture.grab():
                continue  # no frame ready yet
            # BUGFIX: pass the channel as the `flag` keyword — positionally
            # it lands in retrieve()'s `image` parameter. Also retrieve the
            # BGR image channel (CAP_OPENNI_BGR_IMAGE); the original
            # CAP_OPENNI_IMAGE_GENERATOR is a generator offset, not a
            # retrievable channel, which is why no stream came out.
            ret_d, depth_map = capture.retrieve(flag=cv2.CAP_OPENNI_DEPTH_MAP)
            ret_f, frame = capture.retrieve(flag=cv2.CAP_OPENNI_BGR_IMAGE)
            if ret_f:
                cv2.imshow('kinect', frame)
            # (Removed scrape artifact "[enter image description here][1]"
            # that made this line a syntax error.)
            print(ret_f, frame)
            # rgb = get_last_rgb()
            # depth = get_last_depth()
            cv2.waitKey(1)
    finally:
        capture.release()
        # close the device
        dev.close()
I'd like to use the Kinect v2 as a webcam to run YOLO. Reading the YOLO source code, I found that the stream is captured with cv2.VideoCapture().
I searched the OpenCV documentation and found the Kinect API cv2.VideoCapture(cv2.CAP_OPENNI2),
but I can't get any stream from this function. However, I can get color and depth frames via dev.create_color_stream(),
but it's not easy to process them to fit YOLO. Any help is appreciated!
Environment:
Ubuntu:20.04
Openni:2.3.0
Opencv-python:4.5.5.64
Libfreenect
from openni import openni2
import numpy as np
import cv2
# --- Device setup (runs at import time) ---
openni2.initialize() # can also accept the path of the OpenNI redistribution
dev = openni2.Device.open_any()  # opens the first OpenNI-compatible device found
print(dev.get_device_info())
# NOTE(review): the stream creation below is commented out, but
# get_last_depth()/get_last_rgb() read the module-level names
# depth_stream/color_stream — uncomment these (or create/start the
# streams elsewhere) before calling those functions, or they raise NameError.
# depth_stream = dev.create_depth_stream()
# color_stream = dev.create_color_stream()
# depth_stream.start()
# color_stream.start()
# get depth img — NOTE(review): "424x512" is the raw Kinect v2 depth size,
# but the code below reshapes to 480x640; confirm what the stream delivers.
def get_last_depth():
    """Read one depth frame from the module-level ``depth_stream``.

    Returns:
        np.ndarray: float32 array of shape (480, 640), depth in millimetres.

    NOTE(review): ``depth_stream`` must have been created with
    ``dev.create_depth_stream()`` and started before calling this,
    otherwise a NameError is raised.
    """
    frame = depth_stream.read_frame()
    # Each pixel arrives as a little-endian uint16 split into two bytes.
    dframe_data = np.array(frame.get_buffer_as_triplet()).reshape([480, 640, 2])
    low = np.asarray(dframe_data[:, :, 0], dtype='float32')
    high = np.asarray(dframe_data[:, :, 1], dtype='float32')
    # BUGFIX: the high byte of a 16-bit value is worth 256, not 255.
    # (Also removed the unreachable depth_stream.stop() that followed the
    # return statement in the original.)
    return low + high * 256
#get rgb img, 1080x1920x3 (reshaped as 3-channel below)
def get_last_rgb():
    """Read one color frame from the module-level ``color_stream``.

    Returns:
        np.ndarray: uint8 array of shape (1080, 1920, 3) in BGR channel
        order, ready for cv2.imshow / OpenCV-based pipelines.

    NOTE(review): ``color_stream`` must have been created with
    ``dev.create_color_stream()`` and started before calling this,
    otherwise a NameError is raised.
    """
    cframe = color_stream.read_frame()
    cframe_data = np.array(cframe.get_buffer_as_triplet()).reshape([1080, 1920, 3])
    # OpenNI delivers RGB; OpenCV expects BGR — reverse the channel axis.
    # .copy() keeps the result contiguous, matching the original transpose
    # construction. (Removed the unreachable color_stream.stop() that
    # followed the return statement in the original.)
    return cframe_data[:, :, ::-1].copy()
if __name__ == "__main__":
    # BUGFIX: open the OpenNI2 capture ONCE, outside the loop — the
    # original re-created cv2.VideoCapture on every iteration, which
    # leaks device handles and re-initializes the driver per frame.
    capture = cv2.VideoCapture(cv2.CAP_OPENNI2)
    if not capture.isOpened():
        raise RuntimeError("cv2.VideoCapture(cv2.CAP_OPENNI2) failed to open "
                           "the device — check OpenNI2/libfreenect install")
    try:
        for i in range(1000):
            if not capture.grab():
                continue  # no frame ready yet
            # BUGFIX: pass the channel as the `flag` keyword — positionally
            # it lands in retrieve()'s `image` parameter. Also retrieve the
            # BGR image channel (CAP_OPENNI_BGR_IMAGE); the original
            # CAP_OPENNI_IMAGE_GENERATOR is a generator offset, not a
            # retrievable channel, which is why no stream came out.
            ret_d, depth_map = capture.retrieve(flag=cv2.CAP_OPENNI_DEPTH_MAP)
            ret_f, frame = capture.retrieve(flag=cv2.CAP_OPENNI_BGR_IMAGE)
            if ret_f:
                cv2.imshow('kinect', frame)
            # (Removed scrape artifact "[enter image description here][1]"
            # that made this line a syntax error.)
            print(ret_f, frame)
            # rgb = get_last_rgb()
            # depth = get_last_depth()
            cv2.waitKey(1)
    finally:
        capture.release()
        # close the device
        dev.close()
如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。
绑定邮箱获取回复消息
由于您还没有绑定你的真实邮箱,如果其他用户或者作者回复了您的评论,将不能在第一时间通知您!
发布评论