I have to stitch images captured from many (9) cameras. Initially, I tried to capture frames from 2 cameras at 15 FPS. Then I connected 4 cameras (I also used an externally powered USB hub to provide enough power), but I could only see one stream.
For testing, I used the following script:
import numpy as np
import cv2
import imutils

index = 0
arr = []
while True:
    cap = cv2.VideoCapture(index)
    if not cap.read()[0]:
        break
    else:
        arr.append(index)
    cap.release()
    index += 1

video_captures = [cv2.VideoCapture(idx) for idx in arr]

while True:
    # Capture frame-by-frame
    frames = []
    frames_preview = []
    for i in arr:
        # skip webcam capture
        if i == 1: continue
        ret, frame = video_captures[i].read()
        if ret:
            frames.append(frame)
            small = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            frames_preview.append(small)
    for i, frame in enumerate(frames_preview):
        cv2.imshow('Cam {}'.format(i), frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the captures
for video_capture in video_captures:
    video_capture.release()
cv2.destroyAllWindows()
Is there a limit on the number of cameras? Does anyone know the right way to capture frames from multiple cameras?
To capture multiple streams with OpenCV, I recommend using threading, which can improve performance by offloading the heavy I/O operations to a separate thread. Since accessing the webcam/IP/RTSP stream using cv2.VideoCapture().read() is a blocking operation, the main program is stuck until a frame is read from the camera device. If you have multiple streams, this latency is definitely visible. To remedy the problem, we can spawn another thread that retrieves frames into a deque in parallel, instead of relying on a single thread to obtain frames in sequential order. Threading allows frames to be read continuously without impacting the performance of the main program. The idea of capturing a single stream using threading and OpenCV comes from a previous answer in Python OpenCV multithreading streaming from camera.
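As a minimal sketch of that idea (the ThreadedStream name and structure here are illustrative, not the exact class from the linked answer), the background thread does the blocking read() calls while the main thread only ever grabs the most recent frame from the deque:

import cv2
from threading import Thread
from collections import deque

class ThreadedStream:
    """Illustrative sketch: read frames on a background thread, keep only the latest"""
    def __init__(self, src=0):
        self.capture = cv2.VideoCapture(src)
        self.frames = deque(maxlen=1)  # only the newest frame is kept
        thread = Thread(target=self.read_frames, args=())
        thread.daemon = True
        thread.start()

    def read_frames(self):
        # The blocking read() happens here, so the main thread never waits on camera I/O
        while self.capture.isOpened():
            status, frame = self.capture.read()
            if status:
                self.frames.append(frame)

    def latest_frame(self):
        return self.frames[-1] if self.frames else None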
But if you want to capture multiple streams, OpenCV alone is not enough. You can use OpenCV in combination with a GUI framework to stitch each image onto a nice display. I will use PyQt4 as the framework, qdarkstyle for GUI CSS, and imutils for OpenCV convenience functions.
Here is a very stripped down version of the camera GUI I currently use, without the placeholder images, credential admin login page, and camera switching ability. I've kept the automatic camera reconnect feature in case the internet dies or a camera connection is lost. This example uses only 8 cameras, but it is very simple to add another camera, and doing so should not impact performance. The camera GUI currently runs at roughly 60 FPS, so it is real-time. You can easily rearrange the layout using PyQt layouts, so feel free to modify the code. Remember to change the stream links!
from PyQt4 import QtCore, QtGui
import qdarkstyle
from threading import Thread
from collections import deque
from datetime import datetime
import time
import sys
import cv2
import imutils

class CameraWidget(QtGui.QWidget):
    """Independent camera feed
    Uses threading to grab IP camera frames in the background

    @param width - Width of the video frame
    @param height - Height of the video frame
    @param stream_link - IP/RTSP/Webcam link
    @param aspect_ratio - Whether to maintain frame aspect ratio or force into frame
    """
    def __init__(self, width, height, stream_link=0, aspect_ratio=False, parent=None, deque_size=1):
        super(CameraWidget, self).__init__(parent)

        # Initialize deque used to store frames read from the stream
        self.deque = deque(maxlen=deque_size)

        # Slight offset is needed since PyQt layouts have a built-in padding,
        # so add offset to counter the padding
        self.offset = 16
        self.screen_width = width - self.offset
        self.screen_height = height - self.offset
        self.maintain_aspect_ratio = aspect_ratio

        self.camera_stream_link = stream_link

        # Flag to check if camera is valid/working
        self.online = False
        self.capture = None
        self.video_frame = QtGui.QLabel()

        self.load_network_stream()

        # Start background frame grabbing
        self.get_frame_thread = Thread(target=self.get_frame, args=())
        self.get_frame_thread.daemon = True
        self.get_frame_thread.start()

        # Periodically set video frame to display
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.set_frame)
        self.timer.start(.5)

        print('Started camera: {}'.format(self.camera_stream_link))

    def load_network_stream(self):
        """Verifies stream link and opens new stream if valid"""
        def load_network_stream_thread():
            if self.verify_network_stream(self.camera_stream_link):
                self.capture = cv2.VideoCapture(self.camera_stream_link)
                self.online = True
        self.load_stream_thread = Thread(target=load_network_stream_thread, args=())
        self.load_stream_thread.daemon = True
        self.load_stream_thread.start()

    def verify_network_stream(self, link):
        """Attempts to receive a frame from given link"""
        cap = cv2.VideoCapture(link)
        if not cap.isOpened():
            return False
        cap.release()
        return True

    def get_frame(self):
        """Reads frame, resizes, and converts image to pixmap"""
        while True:
            try:
                if self.capture.isOpened() and self.online:
                    # Read next frame from stream and insert into deque
                    status, frame = self.capture.read()
                    if status:
                        self.deque.append(frame)
                    else:
                        self.capture.release()
                        self.online = False
                else:
                    # Attempt to reconnect
                    print('attempting to reconnect', self.camera_stream_link)
                    self.load_network_stream()
                    self.spin(2)
                self.spin(.001)
            except AttributeError:
                pass

    def spin(self, seconds):
        """Pause for set amount of seconds, replaces time.sleep so program doesn't stall"""
        time_end = time.time() + seconds
        while time.time() < time_end:
            QtGui.QApplication.processEvents()

    def set_frame(self):
        """Sets pixmap image to video frame"""
        if not self.online:
            self.spin(1)
            return

        if self.deque and self.online:
            # Grab latest frame
            frame = self.deque[-1]

            # Keep frame aspect ratio
            if self.maintain_aspect_ratio:
                self.frame = imutils.resize(frame, width=self.screen_width)
            # Force resize
            else:
                self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))

            # Add timestamp to cameras
            cv2.rectangle(self.frame, (self.screen_width-190, 0), (self.screen_width, 50), color=(0,0,0), thickness=-1)
            cv2.putText(self.frame, datetime.now().strftime('%H:%M:%S'), (self.screen_width-185, 37), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), lineType=cv2.LINE_AA)

            # Convert to pixmap and set to video frame
            self.img = QtGui.QImage(self.frame, self.frame.shape[1], self.frame.shape[0], QtGui.QImage.Format_RGB888).rgbSwapped()
            self.pix = QtGui.QPixmap.fromImage(self.img)
            self.video_frame.setPixmap(self.pix)

    def get_video_frame(self):
        return self.video_frame

def exit_application():
    """Exit program event handler"""
    sys.exit(1)

if __name__ == '__main__':
    # Create main application window
    app = QtGui.QApplication([])
    app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt())
    app.setStyle(QtGui.QStyleFactory.create("Cleanlooks"))
    mw = QtGui.QMainWindow()
    mw.setWindowTitle('Camera GUI')
    mw.setWindowFlags(QtCore.Qt.FramelessWindowHint)

    cw = QtGui.QWidget()
    ml = QtGui.QGridLayout()
    cw.setLayout(ml)
    mw.setCentralWidget(cw)
    mw.showMaximized()

    # Dynamically determine screen width/height
    screen_width = QtGui.QApplication.desktop().screenGeometry().width()
    screen_height = QtGui.QApplication.desktop().screenGeometry().height()

    # Create Camera Widgets
    username = 'Your camera username!'
    password = 'Your camera password!'

    # Stream links
    camera0 = 'rtsp://{}:{}@192.168.1.43:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera1 = 'rtsp://{}:{}@192.168.1.45/axis-media/media.amp'.format(username, password)
    camera2 = 'rtsp://{}:{}@192.168.1.47:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera3 = 'rtsp://{}:{}@192.168.1.40:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera4 = 'rtsp://{}:{}@192.168.1.44:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera5 = 'rtsp://{}:{}@192.168.1.42:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera6 = 'rtsp://{}:{}@192.168.1.46:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera7 = 'rtsp://{}:{}@192.168.1.41:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)

    # Create camera widgets
    print('Creating Camera Widgets...')
    zero = CameraWidget(screen_width//3, screen_height//3, camera0)
    one = CameraWidget(screen_width//3, screen_height//3, camera1)
    two = CameraWidget(screen_width//3, screen_height//3, camera2)
    three = CameraWidget(screen_width//3, screen_height//3, camera3)
    four = CameraWidget(screen_width//3, screen_height//3, camera4)
    five = CameraWidget(screen_width//3, screen_height//3, camera5)
    six = CameraWidget(screen_width//3, screen_height//3, camera6)
    seven = CameraWidget(screen_width//3, screen_height//3, camera7)

    # Add widgets to layout
    print('Adding widgets to layout...')
    ml.addWidget(zero.get_video_frame(), 0, 0, 1, 1)
    ml.addWidget(one.get_video_frame(), 0, 1, 1, 1)
    ml.addWidget(two.get_video_frame(), 0, 2, 1, 1)
    ml.addWidget(three.get_video_frame(), 1, 0, 1, 1)
    ml.addWidget(four.get_video_frame(), 1, 1, 1, 1)
    ml.addWidget(five.get_video_frame(), 1, 2, 1, 1)
    ml.addWidget(six.get_video_frame(), 2, 0, 1, 1)
    ml.addWidget(seven.get_video_frame(), 2, 1, 1, 1)

    print('Verifying camera credentials...')

    mw.show()

    QtGui.QShortcut(QtGui.QKeySequence('Ctrl+Q'), mw, exit_application)
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
Related camera/IP/RTSP, FPS, video, threading, and multiprocessing posts:
Python OpenCV streaming from camera - multithreading, timestamps
Video Streaming from IP Camera in Python Using OpenCV cv2.VideoCapture
How to capture multiple camera streams with OpenCV?
OpenCV real time streaming video capture is slow. How to drop frames or get synced with real time?
Storing RTSP stream as video file with OpenCV VideoWriter
OpenCV video saving
Python OpenCV multiprocessing cv2.VideoCapture mp4
I have written the following web app to perform pose detection on two videos. The idea is to, say, give a benchmark video in the first and a user video (either a pre-recorded one or their webcam feed) in the second, and compare the movements of the two.
import dash, cv2
import dash_core_components as dcc
import dash_html_components as html
import mediapipe as mp
from flask import Flask, Response

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

class VideoCamera(object):
    def __init__(self, video_path):
        self.video = cv2.VideoCapture(video_path)

    def __del__(self):
        self.video.release()

    def get_frame(self):
        with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
            success, image = self.video.read()
            # Recolor image to RGB
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image.flags.writeable = False
            # Make detection
            results = pose.process(image)
            # Recolor back to BGR
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            # Render detections
            mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2),
                mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2)
            )
            _, jpeg = cv2.imencode('.jpg', image)
            return jpeg.tobytes()

def gen(camera):
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

server = Flask(__name__)
app = dash.Dash(__name__, server=server)

@server.route('/video_feed_1')
def video_feed_1():
    return Response(gen(VideoCamera(0)), mimetype='multipart/x-mixed-replace; boundary=frame')

@server.route('/video_feed_2')
def video_feed_2():
    return Response(gen(VideoCamera(0)), mimetype='multipart/x-mixed-replace; boundary=frame')

app.layout = html.Div([
    html.Img(src="/video_feed_1", style={'width': '40%', 'padding': 10}),
    html.Img(src="/video_feed_2", style={'width': '40%', 'padding': 10})
])

if __name__ == '__main__':
    app.run_server(debug=True)
However, when I run this code, the fans on my laptop kick in and nothing renders in the browser. It works fine with any single video, but it seems able to handle only one video at a time. You can remove either of the two functions video_feed_1() or video_feed_2(), and you can also replace the video path 0 (which is the webcam) with the path to any other video (like /path/to/video.mp4), and it works fine.
Also, when I simply display two videos in the browser, that works fine too. You can try this yourself by replacing the get_frame() function in the class above with the following:
def get_frame(self):
    success, image = self.video.read()
    ret, jpeg = cv2.imencode('.jpg', image)
    return jpeg.tobytes()
So, how do I reduce the load on the browser when rendering the pose estimation of two videos simultaneously? And why is the load so high in the browser anyway, when it works perfectly fine when the pose estimations render by default in two pop-up windows (i.e., with cv2.imshow(image))?
For a task that requires real-time updates like your pose estimation, I would recommend using websockets for communication. Here is a small example where a Quart server streams the data via websockets to a Dash frontend:
import asyncio
import base64
import dash, cv2
import dash_html_components as html
import mediapipe as mp
import threading
from dash.dependencies import Output, Input
from quart import Quart, websocket
from dash_extensions import WebSocket

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

class VideoCamera(object):
    def __init__(self, video_path):
        self.video = cv2.VideoCapture(video_path)

    def __del__(self):
        self.video.release()

    def get_frame(self):
        with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
            success, image = self.video.read()
            # Recolor image to RGB
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image.flags.writeable = False
            # Make detection
            results = pose.process(image)
            # Recolor back to BGR
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            # Render detections
            mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),
                mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)
            )
            _, jpeg = cv2.imencode('.jpg', image)
            return jpeg.tobytes()

# Setup small Quart server for streaming via websocket, one for each stream.
server = Quart(__name__)
n_streams = 2

async def stream(camera, delay=None):
    while True:
        if delay is not None:
            await asyncio.sleep(delay)  # add delay if CPU usage is too high
        frame = camera.get_frame()
        await websocket.send(f"data:image/jpeg;base64, {base64.b64encode(frame).decode()}")

@server.websocket("/stream0")
async def stream0():
    camera = VideoCamera("./kangaroo.mp4")
    await stream(camera)

@server.websocket("/stream1")
async def stream1():
    camera = VideoCamera("./yoga.mp4")
    await stream(camera)

# Create small Dash application for UI.
app = dash.Dash(__name__)
app.layout = html.Div(
    [html.Img(style={'width': '40%', 'padding': 10}, id=f"v{i}") for i in range(n_streams)] +
    [WebSocket(url=f"ws://127.0.0.1:5000/stream{i}", id=f"ws{i}") for i in range(n_streams)]
)

# Copy data from websockets to Img elements.
for i in range(n_streams):
    app.clientside_callback("function(m){return m? m.data : '';}", Output(f"v{i}", "src"), Input(f"ws{i}", "message"))

if __name__ == '__main__':
    threading.Thread(target=app.run_server).start()
    server.run()
While this solution performs significantly better (on my laptop, at least), resource usage is still high, so I added a delay parameter that makes it possible to lower resource usage at the expense of a reduced frame rate.
I'm trying to build a script to manage multiple threads that are supposed to run in parallel and exchange data between them.
As a starting point, I have two threads: the first should read frames from a USB camera and send them to a queue, while the second should read them and display them.
I tried:
import json
import queue, threading
from queue import Queue
import cv2

class system_manager():
    def __init__(self, source):
        ## camera reader
        self.camera_queue = queue.Queue()
        self.camera_reader = threading.Thread(target=camera_reader, args=(source, self.camera_queue))
        self.camera_reader.daemon = True
        self.camera_reader.run()
        self.camera_display = threading.Thread(target=camera_display, args=(self.camera_queue))
        self.camera_display.daemon = True
        self.camera_display.run()

def camera_reader(source, camera_queue):
    print("Cam Loading...")
    cap = cv2.VideoCapture(source)
    print("Cam Loaded...")
    while(True):
        ret, frame = cap.read()
        camera_queue.put(frame)

def camera_display(camera_queue):
    print("doing something")
    while(True):
        frame = camera_queue.get()
        key = cv2.waitKey(1)
        if (key == ord('q')):
            break
        cv2.imshow("frame", frame)

if __name__ == "__main__":
    SM = system_manager(source=0)
but it's not really working. The first thread, the one supposed to read frames, is actually doing that, but the second one is not displaying anything (there's a print statement at its beginning, and it's never shown). Also, after running for a few minutes, it got my computer completely stuck, so I assume I'm accidentally filling up memory.
I'm fairly new to multiprocessing/multithreading, so I'm probably making some very basic mistake somewhere.
EDIT
OK, I fixed the memory problem by using:
self.camera_queue = queue.Queue(maxsize=5)
but the second thread is still not working.
Use thread.start() instead of thread.run(), fix the thread target method, and add a comma after the argument so that args is a tuple. This works:
import json
import queue, threading
from queue import Queue
import cv2

class system_manager():
    def __init__(self, source):
        ## camera reader
        self.camera_queue = queue.Queue(maxsize=5)
        self.camera_reader = threading.Thread(target=camera_reader, args=(source, self.camera_queue))
        self.camera_reader.daemon = True
        self.camera_reader.start()
        self.camera_display = threading.Thread(target=camera_display, args=(self.camera_queue,))
        self.camera_display.daemon = True
        self.camera_display.start()

def camera_reader(source, camera_queue):
    print("Cam Loading...")
    cap = cv2.VideoCapture(source)
    print("Cam Loaded...")
    while(True):
        ret, frame = cap.read()
        camera_queue.put(frame)

def camera_display(camera_queue):
    print("doing something")
    while(True):
        frame = camera_queue.get()
        key = cv2.waitKey(1)
        if (key == ord('q')):
            break
        cv2.imshow("frame", frame)

if __name__ == "__main__":
    SM = system_manager(source=0)
I'm currently working on an auto-aiming turret, and my cameras have a noticeable fisheye effect, which is totally fine. I'm using OpenCV's undistort() function to handle this, with data from a checkerboard camera calibration.
I will most likely run the vision system on a Raspberry Pi 4. Currently, my undistort function takes 80-90% of my CPU (i5-8600k OC 5GHz) when processing both of my cameras at 1280x720, ideally at this resolution since it's the largest available and will provide the best accuracy. Also note I'm aiming for a 15 Hz update rate.
Any ideas on how to make this more lightweight? Here's the code I'm currently running as a test:
from cv2 import cv2
import numpy as np
import yaml
import time

cam1 = cv2.VideoCapture(0)
cam2 = cv2.VideoCapture(1)
cam1.set(3, 1280)
cam1.set(4, 720)
cam2.set(3, 1280)
cam2.set(4, 720)

#load calibration matrix
with open("calibration_matrix.yaml") as file:
    documents = yaml.full_load(file)

x = 0
for item, doc in documents.items():
    if x == 0:
        mtx = np.matrix(doc)
        x = 1
    else:
        dist = np.matrix(doc)

def camera(ID, asd):
    if asd == -1:
        ID.release()
    ret, frame = ID.read()
    if ret:
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
        undistortedFrame = cv2.undistort(frame, mtx, dist, None, newcameramtx)
        undistortedFrame = undistortedFrame[y:y+h, x:x+w]
        return undistortedFrame

while True:
    frame1 = camera(cam1, 0)
    frame2 = camera(cam2, 0)
    cv2.imshow('Frame 1', frame1)
    cv2.imshow('Frame 2', frame2)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        camera(cam1, -1)
        camera(cam2, -1)
        cv2.destroyAllWindows()
Comments above resolved; here's the solution.
As @Micka said:
use cv2.initUndistortRectifyMap() (once) and cv2.remap() (for each image)
initUndistortRectifyMap() basically takes the heavy load off of the undistort function (Micka). In practice, you run cv2.initUndistortRectifyMap() once at the start of the program with the camera calibration matrix and distortion coefficients, and it returns two maps, map1 and map2.
These maps can be passed into the cv2.remap() function to remap your image, which is a significantly lighter operation than undistort(). In my particular case, I have a fisheye camera, and OpenCV has fisheye modules that are optimized to undistort fisheye cameras with ease.
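A rough sketch of that once/per-frame split (assuming mtx, dist, and the 1280x720 frame size from the calibration code in the question):

import cv2

# Run once at startup: precompute the undistortion lookup maps.
# mtx and dist are assumed to be the calibration matrix and distortion
# coefficients loaded from calibration_matrix.yaml, as in the question.
w, h = 1280, 720
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
map1, map2 = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), cv2.CV_16SC2)

# Run per frame: remap() with precomputed maps is much cheaper than undistort()
def undistort_frame(frame):
    return cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR)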
I am building a motion detector application. For the motion detection algorithm to work, I converted the frames to grayscale, and the application is now able to detect motion. But when I try to put text on the frame to post a message like "MOVING", even the text turns gray and is hardly visible. How do I draw colored text on a video frame?
Below is my motion detection application code:
import cv2
import numpy as np
from skimage.measure import compare_ssim
from twilio.rest import Client

#we can compare two images using Structural Similarity
#so a small change in pixel value won't prompt this method to term both images as dissimilar
#the closer the value is to 1, the more similar two images are
def ssim(A, B):
    return compare_ssim(A, B, data_range=A.max() - A.min())

#capture a video either from a file or a live video stream
cap = cv2.VideoCapture(0)
first_frame = True
prev_frame = None
current_frame = None

#we keep a count of the frames
frame_counter = 0

while True:
    if frame_counter == 0:
        #prev_frame will always trail behind the current_frame
        prev_frame = current_frame

    #get a frame from the video
    ret, current_frame = cap.read()

    #if we reach the end of the video in case of a video file, stop reading
    if current_frame is None:
        break

    #convert the image to grayscale
    current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)

    if first_frame:
        #for the first time prev_frame and current_frame will be the same
        prev_frame = current_frame
        first_frame = False

    if frame_counter == 9:
        #compare two images based on SSIM
        ssim_val = ssim(current_frame, prev_frame)
        print(ssim_val)
        #if there is a major drop in the SSIM value ie it has detected an object
        if ssim_val < 0.8:
            # Here I want to put a colored text to the screen
            cv2.putText(current_frame, "MOVING", (100, 300),
                        cv2.FONT_HERSHEY_TRIPLEX, 4, (255, 0, 0))
        frame_counter = -1

    #show the video as a series of frames
    cv2.imshow("Motion Detection", current_frame)  #(name of the window, image file)

    frame_counter += 1
    key = cv2.waitKey(1) & 0xFF  #cv2.waitKey(1) returns a value of -1 which is masked using & 0xFF to get char value
    if key == ord('q'):  #gives ASCII value of 'q'
        break

#release the resources allocated to the video file or video stream
cap.release()
#destroy all the windows
cv2.destroyAllWindows()
I searched online and found this piece of code, which basically suggests converting the grayscale frame back to BGR:
backtorgb = cv2.cvtColor(current_frame, cv2.COLOR_GRAY2RGB)
But this didn't work. I even took a copy of the current frame before it was converted to grayscale and tried to write on the copied color frame, but the text still comes out gray, not colored. What should I do?
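For reference, a minimal sketch of how that conversion is usually applied (my assumption about the intended fix, not code from the post): putText only renders color on a 3-channel image, so the grayscale frame has to be converted back to BGR, and the converted frame is the one that gets drawn on and displayed.

import cv2

# Hypothetical helper: convert the single-channel frame to 3 channels,
# draw on the converted frame, and display that frame instead
def annotate_moving(gray_frame):
    color_frame = cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2BGR)
    cv2.putText(color_frame, "MOVING", (100, 300),
                cv2.FONT_HERSHEY_TRIPLEX, 4, (255, 0, 0))  # blue in BGR
    return color_frame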
I am using OpenCV (cv2) in Python to record video (video only, no audio required) from multiple webcams simultaneously. Though they are not synchronized, they record at a constant framerate. The problems are:
They record at 4 fps when the resolution is set to 1080p, while the desired rate is 30 fps, which the cameras I am using support. While previewing, though, the framerate is 30 fps, which leads me to believe I may be doing something wrong while recording. I am using threading as in the imutils library to get the videos, as suggested in this blog post.
Is there any way to synchronize the different camera outputs (videos)?
PC specs:
Intel i5 7th gen,
8 GB DDR3 RAM,
SSD hard drive.
The webcams I'm using are Genius 32200312100 WideCam F100 USB 2.0 WebCams.
I do not think these specs are the limitation, as I've been monitoring CPU and memory usage while recording.
Any help is appreciated and if any further information is required please feel free to ask.
I'm open to using any encodings that will not compromise the quality of the picture.
Edit: I'm posting the code below.
import argparse
import datetime
import os
import threading
import time
import cv2

class VideoRecorder():
    #Function that runs once when the VideoRecorder class is called
    def __init__(self, cam_id=0):
        #Initialize the camera and set its properties
        self.cam_id = cam_id
        self.framerate = 30
        self.video_height = 720
        self.video_width = 1280
        self.cap = cv2.VideoCapture(self.cam_id)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.video_height)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.video_width)
        self.cap.set(cv2.CAP_PROP_FPS, self.framerate)

        #Grab an initial frame to warm up the camera.
        #This frame will not be used
        temp1, temp2 = self.cap.read()

        #Set the place where the file is to be stored
        storage_path = "{Output_folder}"
        file_name = datetime.datetime.now().strftime("%m%d%Y_%H%M%S")
        file_name = "cam_" + str(self.cam_id) + "_" + file_name + ".avi"
        self.file = os.path.join(storage_path, file_name)

        #Initialize a VideoWriter object to save the recorded frames to a file
        self.fourcc = cv2.VideoWriter_fourcc(*'H264')
        self.out = cv2.VideoWriter(self.file, self.fourcc, self.framerate,
                                   (self.video_width, self.video_height))

    def record(self, timer=10):
        #Start a timer for the recording to see how long the recording should be
        self.start_time = time.time()
        #Start a frame counter to calculate the framerate at the end
        self.frame_count = 0
        #Run a loop for the given time to get frames from the camera and store them
        while(self.cap.isOpened()):
            tora1 = time.time()
            ret, frame = self.cap.read()
            print("Time for reading the frame", time.time() - tora1)
            if ret == True:
                tora2 = time.time()
                self.out.write(frame)
                print("Time for write", tora2 - time.time())
                self.frame_count += 1
            else:
                break
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            if(self.elapsed_time > timer):
                self.stop()
                break

    #Start the recording in a thread
    def start(self, timer):
        video_thread = threading.Thread(target=self.record, args=(timer,))
        #video_thread.daemon = True
        video_thread.start()

    #Print the fps and release all the objects
    def stop(self):
        print("Frame count: %d" % self.frame_count)
        self.fps = self.frame_count / self.elapsed_time
        print(self.elapsed_time)
        print("fps: %f" % self.fps)
        self.out.release()
        self.cap.release()
        print("Done")

if __name__ == "__main__":
    #Define which arguments can be passed to the script from the command line
    #and collect them in an args structure
    ap = argparse.ArgumentParser()
    ap.add_argument("-t", "--runtime", type=int, default=10,
                    help="Time for which the video recorder runs")
    ap.add_argument("-id", "--cam_id", type=int, default=0,
                    help="give camera id")
    args = vars(ap.parse_args())
    required_time = args["runtime"]

    video_thread = []
    for i in range(args["cam_id"] + 1):
        t = VideoRecorder(i)
        video_thread.append(t)
        t.start(required_time)