Opencv two camera source - python

I am working with OpenCV and have two video sources. I am using the following code. It works sometimes and sometimes it does not. Is there a problem with my code? How can I fix it?
import cv2

Channel0 = cv2.VideoCapture(0)
IsOpen0, Image0 = Channel0.read()
Channel1 = cv2.VideoCapture(1)
IsOpen1, Image1 = Channel1.read()

while IsOpen0 and IsOpen1:
    IsOpen0, Image0 = Channel0.read()
    IsOpen1, Image1 = Channel1.read()
    cv2.imshow("Webcamera", Image0)
    cv2.imshow("Panasonic", Image1)
    cv2.waitKey(10)
PS It always works when I use only one video source.

I think I figured out my error. For some reason the following code works. It must have been a problem with threading...
import thread
import time
import cv2

def Webcamera():
    Channel0 = cv2.VideoCapture(0)
    IsOpen0, Image0 = Channel0.read()
    while IsOpen0:
        IsOpen0, Image0 = Channel0.read()
        cv2.imshow("Webcamera", Image0)
        cv2.waitKey(10)
    if not IsOpen0:
        time.sleep(0.5)
        print "Error opening Web camera"

def Panasonic():
    Channel1 = cv2.VideoCapture(1)
    IsOpen1, Image1 = Channel1.read()
    while IsOpen1:
        IsOpen1, Image1 = Channel1.read()
        cv2.imshow("Panasonic", Image1)
        cv2.waitKey(10)
    if not IsOpen1:
        time.sleep(0.5)
        print "Error opening Panasonic"

try:
    thread.start_new_thread(Webcamera, ())
    thread.start_new_thread(Panasonic, ())
except:
    print "Error: unable to start thread"

while 1:
    pass
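For anyone on Python 3, the thread module is gone; below is a minimal sketch of the same idea using the threading module, with the reads done in worker threads and the imshow calls kept on the main thread, since OpenCV's GUI functions are not reliable from worker threads on every platform. Camera indices 0 and 1 are assumptions.

import threading
import cv2

frames = {}
lock = threading.Lock()

def grab(index, name):
    # Continuously read frames from one camera into the shared dict.
    cap = cv2.VideoCapture(index)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        with lock:
            frames[name] = frame
    cap.release()

threading.Thread(target=grab, args=(0, "Webcamera"), daemon=True).start()
threading.Thread(target=grab, args=(1, "Panasonic"), daemon=True).start()

while True:
    with lock:
        snapshot = dict(frames)   # copy so imshow runs outside the lock
    for name, frame in snapshot.items():
        cv2.imshow(name, frame)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()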

Mediapipe process() first 'self' argument

I am trying to use mediapipe to track hands. I am using Python 3.7.9 on Windows 10, my code is below:
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands

cap = cv2.VideoCapture(0)
while True:
    success, img = cap.read()
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = mp_hands.Hands.process(imgRGB)
    print(results)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
I'm getting this error:
Traceback (most recent call last):
File "C:/Users/Tomáš/PycharmProjects/pythonProject/hand_detect.py", line 11, in <module>
results = mp_hands.Hands.process(imgRGB)
TypeError: process() missing 1 required positional argument: 'image'
[ WARN:1] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
The error says that I need to pass one more argument, 'self', before the 'image' argument. I've been browsing a lot and none of the related code I found passes a first argument to process(). Could anyone help me solve this error?
The problem is that you do not create an object of mp_hands.Hands before calling process on it. The following code solves it and prints some results. By the way, this is well documented in the documentation link I commented before:
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # i had problems before reading webcam feeds, so i added cv2.CAP_DSHOW here
while True:
    success, img = cap.read()
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # you have to create an object of mp_hands.Hands to get results
    # alternatively you could do: results = mp_hands.Hands().process(imgRGB)
    with mp_hands.Hands() as hands:
        results = hands.process(imgRGB)
    # continue loop if no results were found
    if not results.multi_hand_landmarks:
        continue
    # print some results
    for hand_landmarks in results.multi_hand_landmarks:
        print(
            f'Index finger tip coordinates: (',
            f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x}, '
            f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y})'
        )
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
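As a side note on why the original call failed: process() was called on the class mp_hands.Hands rather than on an instance, so the image argument was consumed as self. A tiny sketch with a hypothetical Greeter class reproduces the same TypeError:

class Greeter:
    def greet(self, name):
        return "hello " + name

print(Greeter().greet("world"))   # works: the instance is bound to self automatically

try:
    Greeter.greet("world")        # the string binds to self, so 'name' is missing
except TypeError as err:
    print(err)                    # greet() missing 1 required positional argument: 'name'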
Edit:
This is more or less the same code from here:
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands

# initialize webcam
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

with mp_hands.Hands(model_complexity=0,
                    min_detection_confidence=0.5,
                    min_tracking_confidence=0.5) as hands:
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            # If loading a video, use 'break' instead of 'continue'.
            continue
        # To improve performance, optionally mark the image as not writeable to
        # pass by reference.
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = hands.process(image)
        # Draw the hand annotations on the image.
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    image,
                    hand_landmarks,
                    mp_hands.HAND_CONNECTIONS,
                    mp_drawing_styles.get_default_hand_landmarks_style(),
                    mp_drawing_styles.get_default_hand_connections_style())
        # Flip the image horizontally for a selfie-view display.
        cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))
        if cv2.waitKey(5) & 0xFF == 27:
            break

cap.release()

Opencv giving a C++ exception error from my python code

While making a barcode scanner I am facing an issue with OpenCV.
My code is:
import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar

video = cv2.VideoCapture(1)
while True:
    cap, frame = video.read()
    rot = cv2.rotate(frame, cv2.ROTATE_180)
    decodedObjects = pyzbar.decode(rot)
    for obj in decodedObjects:
        print("Product", obj.data)
    cv2.imshow("Frame", rot)
    key = cv2.waitKey(1)
    if key == 27:
        break
cv2.destroyAllWindows()
When I execute this code, it gives me an error.
cap, frame = video.read()
cv2.error: Unknown C++ exception from OpenCV code
Any help will be appreciated.
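As a hedged aside rather than a confirmed fix: the exception is raised by video.read(), so it helps to verify that camera index 1 actually opened and that each read succeeded before decoding, and on Windows to try the cv2.CAP_DSHOW backend that the MediaPipe answer above also used. A minimal sketch of that hardened loop (index 1 and the DirectShow backend are assumptions):

import cv2
import pyzbar.pyzbar as pyzbar

video = cv2.VideoCapture(1, cv2.CAP_DSHOW)
if not video.isOpened():
    raise RuntimeError("Could not open camera index 1")

while True:
    ok, frame = video.read()
    if not ok:          # skip decoding and display when the grab failed
        print("Frame grab failed")
        break
    rot = cv2.rotate(frame, cv2.ROTATE_180)
    for obj in pyzbar.decode(rot):
        print("Product", obj.data)
    cv2.imshow("Frame", rot)
    if cv2.waitKey(1) == 27:
        break

video.release()
cv2.destroyAllWindows()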

Boto3 Kinesis Video GetMedia and OpenCV

I'm trying to use Boto3 to get a video stream from Kinesis and then use OpenCV to display the feed and save it to a file at the same time.
Getting the signed URL and then making the GetMedia request seems to work perfectly; it's just that when I try to render the stream with OpenCV it doesn't work.
Data is definitely going to the stream.
import boto3
import numpy as np
import cv2

kinesis_client = boto3.client('kinesisvideo',
                              region_name='eu-west-1',
                              aws_access_key_id='ACC',
                              aws_secret_access_key='KEY'
                              )

response = kinesis_client.get_data_endpoint(
    StreamARN='ARN',
    APIName='GET_MEDIA'
)

video_client = boto3.client('kinesis-video-media',
                            endpoint_url=response['DataEndpoint']
                            )

stream = video_client.get_media(
    StreamARN='ARN',
    StartSelector={'StartSelectorType': 'NOW'}
)

# print(stream)
datafeed = stream['Payload'].read()

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

while(True):
    ret, frame = stream['Payload'].read()
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
To finally answer this, I found a basic solution using the HLS output available from Kinesis Video Streams, which became available in July 2018.
Blog post: https://aws.amazon.com/blogs/aws/amazon-kinesis-video-streams-adds-support-for-hls-output-streams/?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+AmazonWebServicesBlog+%28Amazon+Web+Services+Blog%29
I've pasted the working version of my code below.
I'm using AWS environment variables for the Boto3 authentication.
import boto3
import cv2

STREAM_NAME = "test-stream"

kvs = boto3.client("kinesisvideo")

# Grab the endpoint from GetDataEndpoint
endpoint = kvs.get_data_endpoint(
    APIName="GET_HLS_STREAMING_SESSION_URL",
    StreamName=STREAM_NAME
)['DataEndpoint']

print(endpoint)

# Grab the HLS Stream URL from the endpoint
kvam = boto3.client("kinesis-video-archived-media", endpoint_url=endpoint)
url = kvam.get_hls_streaming_session_url(
    StreamName=STREAM_NAME,
    PlaybackMode="LIVE"
)['HLSStreamingSessionURL']

vcap = cv2.VideoCapture(url)

while True:
    # Capture frame-by-frame
    ret, frame = vcap.read()
    if frame is not None:
        # Display the resulting frame
        cv2.imshow('frame', frame)
        # Press q to close the video windows before it ends if you want
        if cv2.waitKey(22) & 0xFF == ord('q'):
            break
    else:
        print("Frame is None")
        break

# When everything done, release the capture
vcap.release()
cv2.destroyAllWindows()
print("Video stop")
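The original question also asked about saving the feed to a file at the same time; a minimal sketch of how the display loop above could additionally write frames with cv2.VideoWriter (the XVID codec, 20 fps, and probing the frame size from the capture are assumptions):

import cv2

url = "HLS_STREAMING_SESSION_URL"   # placeholder; obtain it as in the answer above
vcap = cv2.VideoCapture(url)

# Probe the incoming frame size so the writer matches the stream.
width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (width, height))

while True:
    ret, frame = vcap.read()
    if not ret or frame is None:
        break
    out.write(frame)                 # save the frame
    cv2.imshow('frame', frame)       # and display it
    if cv2.waitKey(22) & 0xFF == ord('q'):
        break

vcap.release()
out.release()
cv2.destroyAllWindows()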

Taking snapshot with a webcam (either external or internal) with python using opencv

I am new here and also new to OpenCV.
I have this project at hand: designing an application that can interface with my computer's webcam, take snapshots, and also record videos.
So far so good; this is as far as I have gotten:
import cv2 as cv
import numpy

cv.namedWindow("camera", 1)
capture = cv.VideoCapture(0)

while True:
    ret, frame = capture.read()
    img = cv.cvtColor(frame, cv.COLOR_BGR2BGRA)
    cv.imshow("camera", img)
    if cv.waitKey(10) & 0xFF == ord("q"):
        break

capture.release()
cv.destroyAllWindows()
Now I think I am supposed to use cv.VideoCapture.grab() and cv.VideoCapture.retrieve(), but honestly I don't know how I am going to use them. Please, I need your help.
There's a great example of how to do that here:
http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_video_display/py_video_display.html
The example below will take a snapshot every 30 seconds and save it to a file with a unique timestamp:
import cv2 as cv
import time
import datetime

cv.namedWindow("camera", 1)
capture = cv.VideoCapture(0)

while True:
    ret, frame = capture.read()
    frame = cv.cvtColor(frame, cv.COLOR_BGR2BGRA)
    file = "C:\Python34\CpV\%s.png" % datetime.datetime.now().strftime("%d-%m-%y--%H-%M-%S")
    cv.imwrite(file, frame)
    #cv.imshow("camera", frame)
    time.sleep(30)

capture.release()
cv.destroyAllWindows()
Use the code below to record the webcam capture to a file:
import cv2
import cv

cap = cv2.VideoCapture(0)
ret, img = cap.read()
height, width, layers = img.shape
fps = 20
video = cv2.VideoWriter("rec_out.avi", cv.CV_FOURCC(*'DIVX'), fps, (img.shape[1], img.shape[0]))

while True:
    ret, img = cap.read()
    height, width, layers = img.shape
    video.write(img)
    cv2.imshow('Video', img)
    #video.write(img)
    if cv2.waitKey(10) & 0xFF == ord('b'):
        break
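On newer OpenCV builds the standalone cv module and cv.CV_FOURCC no longer exist; here is a minimal sketch of the same recorder using only cv2 (the DIVX codec and 20 fps are assumptions carried over from the snippet above):

import cv2

cap = cv2.VideoCapture(0)
ret, img = cap.read()
if not ret:
    raise RuntimeError("Could not read from the webcam")

fps = 20
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
video = cv2.VideoWriter("rec_out.avi", fourcc, fps, (img.shape[1], img.shape[0]))

while True:
    ret, img = cap.read()
    if not ret:
        break
    video.write(img)             # append the frame to the output file
    cv2.imshow('Video', img)
    if cv2.waitKey(10) & 0xFF == ord('b'):
        break

cap.release()
video.release()
cv2.destroyAllWindows()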
Okay, I got to edit my code and it worked. Check it out:
import cv2 as cv
import time

cv.namedWindow("camera", 1)
capture = cv.VideoCapture(0)

while True:
    ret, frame = capture.read()
    frame = cv.cvtColor(frame, cv.COLOR_BGR2BGRA)
    file = "C:\Python34\CpV\test.png"
    cv.imwrite(file, frame)
    cv.imshow("camera", frame)
    # it takes a snapshot when "q" is pressed and closes the window
    if cv.waitKey(10) & 0xFF == ord('q'):
        break

capture.release()
cv.destroyAllWindows()
This is as far as I have gotten, I need to edit a little to make it short and concise.
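On the original question about cv.VideoCapture.grab() and cv.VideoCapture.retrieve(): read() is simply grab() followed by retrieve(), so splitting the two is mainly useful when you want to grab frames from several cameras as close together in time as possible and decode them afterwards. A minimal single-camera sketch, functionally equivalent to read():

import cv2 as cv

capture = cv.VideoCapture(0)

while True:
    grabbed = capture.grab()            # fetch the next frame without decoding it
    if not grabbed:
        break
    ret, frame = capture.retrieve()     # decode the grabbed frame
    if not ret:
        break
    cv.imshow("camera", frame)
    if cv.waitKey(10) & 0xFF == ord("q"):
        break

capture.release()
cv.destroyAllWindows()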

How can I find the optical flow of a live video stream (Android IP Webcam) in OpenCV Python?

I have written code that works fine for a recorded video or the video from my laptop's webcam, but I need it to work with video coming directly from my phone. I have been able to get video from the phone and show it through Python, but I need frames of this video to apply the optical flow function (calcOpticalFlowFarneback) on. Here are my two pieces of code; I'll be thankful for your help.
This is the code for getting video from the Android phone's camera into Python:
import cv2
import urllib2
import numpy as np
import sys

host = "192.168.1.2:8080"
if len(sys.argv) > 1:
    host = sys.argv[1]

hoststr = 'http://' + host + '/video'
print 'Streaming ' + hoststr

stream = urllib2.urlopen(hoststr)

bytes = ''
while True:
    bytes += stream.read(1024)
    a = bytes.find('\xff\xd8')
    b = bytes.find('\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b+2]
        bytes = bytes[b+2:]
        i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow(hoststr, i)
        if cv2.waitKey(1) == 27:
            exit(0)
And this is the relevant portion for the motion flow; notice that I need frames from the video:
vid = cv2.VideoCapture('vidaflv.flv')
ret, frame = vid.read()
imgrayp = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

while True:
    ret, frame = vid.read()
    if ret:
        imgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(imgrayp, imgray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        cv2.imshow('Optical flow', draw_flow(frame, flow))
        imgrayp = imgray
    if cv2.waitKey(1) == ord('e'):
        break
What I can't figure out is how to get a frame from the live-stream code to put into my optical flow function.
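As a side note, an OpenCV build with FFmpeg support can often open the IP Webcam MJPEG URL directly, which gives you frames without parsing JPEG boundaries by hand; whether this works depends on the build, so treat this as a sketch (the URL is the same /video endpoint as above):

import cv2

cap = cv2.VideoCapture('http://192.168.1.2:8080/video')
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('phone', frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()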
Without being able to test this, I'd try something like:
import cv2
import urllib2
import numpy as np
import sys

host = "192.168.1.2:8080"
if len(sys.argv) > 1:
    host = sys.argv[1]

hoststr = 'http://' + host + '/video'
print 'Streaming ' + hoststr

stream = urllib2.urlopen(hoststr)

bytes = ''
FirstTime = True
while True:
    bytes += stream.read(1024)
    a = bytes.find('\xff\xd8')
    b = bytes.find('\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b+2]
        bytes = bytes[b+2:]
        i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        if FirstTime:
            FirstTime = False
            imgrayp = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
        imgray = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(imgrayp, imgray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        cv2.imshow('Optical flow', draw_flow(i, flow))
        imgrayp = imgray  # keep the previous frame for the next flow computation
        if cv2.waitKey(1) == 27:
            exit(0)
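Both snippets call a draw_flow helper that isn't shown in the thread; here is a minimal sketch of one possible implementation (grid-sampled flow vectors drawn as short green lines, similar in spirit to the helper in OpenCV's optical-flow sample):

import numpy as np
import cv2

def draw_flow(img, flow, step=16):
    # Sample the dense flow field on a regular grid and draw one short line per sample.
    h, w = img.shape[:2]
    y, x = np.mgrid[step // 2:h:step, step // 2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = img.copy()
    cv2.polylines(vis, lines, False, (0, 255, 0))
    for (x1, y1), _ in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis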
