How to integrate a barcode scanner with a Tkinter GUI application in Python?

When I run the code below, the camera opens and we can read the barcode. What I need is for the camera window to stay at the side of my Tkinter GUI application rather than popping up as a separate window. Here's the code:
from imutils.video import VideoStream
from pyzbar import pyzbar
import argparse
from datetime import datetime
import imutils
import time
import cv2
import winsound

frequency = 600  # beep frequency in Hz
duration = 800   # beep duration in ms

ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", type=str, default="barcodesData.csv",
    help="path to output CSV file")
args = vars(ap.parse_args())

print("Starting webcam")
vs = VideoStream(src=0).start()
time.sleep(2.0)

csvWrite = open(args["output"], "w")
found = set()

while True:
    frameData = vs.read()
    frameData = imutils.resize(frameData, width=600)
    barcodes = pyzbar.decode(frameData)
    for barcode in barcodes:
        (x, y, width, height) = barcode.rect
        cv2.rectangle(frameData, (x, y), (x + width, y + height), (0, 0, 255), 2)
        barcodeData = barcode.data.decode("utf-8")
        barcodeType = barcode.type
        textData = "{} ({})".format(barcodeData, barcodeType)
        cv2.putText(frameData, textData, (x, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        if barcodeData not in found:
            csvWrite.write("{},{}\n".format(datetime.today().strftime('%Y-%m-%d'),
                barcodeData))
            csvWrite.flush()
            found.add(barcodeData)
            winsound.Beep(frequency, duration)
    cv2.imshow("Barcode Scanner", frameData)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("e"):
        break

# close the output CSV file and do a bit of cleanup
print("\nWait while we calculate cost...")
csvWrite.close()
cv2.destroyAllWindows()
vs.stop()
time.sleep(1.0)
To be specific, I'm making billing software where I can read the barcodes of products and generate a bill. The separate camera window is annoying; if the camera feed were on one side of the canvas all the time, scanning would be much quicker.
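One common way to keep the camera preview inside the Tkinter window, instead of a separate cv2.imshow window, is to render each OpenCV frame into a Tkinter Label via Pillow's ImageTk. A minimal sketch of that idea (my own, assuming Pillow is installed; the widget and variable names are placeholders, not part of the question's code):

# Hedged sketch: embed the OpenCV preview inside the Tkinter window.
# Assumes Pillow is installed; names here are illustrative placeholders.
import tkinter as tk
import cv2
from PIL import Image, ImageTk
from imutils.video import VideoStream

root = tk.Tk()
root.title("Billing software")
video_label = tk.Label(root)       # the camera feed lives in this Label
video_label.pack(side="right")     # keep it at one side of the main canvas

vs = VideoStream(src=0).start()

def update_frame():
    frame = vs.read()
    if frame is not None:
        # BGR (OpenCV) -> RGB (PIL), then into a Tk-compatible image
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        photo = ImageTk.PhotoImage(Image.fromarray(rgb))
        video_label.configure(image=photo)
        video_label.image = photo  # keep a reference so it is not garbage collected
    # poll roughly every 30 ms; pyzbar.decode(frame) could run here as well
    root.after(30, update_frame)

update_frame()
root.mainloop()
vs.stop()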

I encoded the ID of each product/item in my database in a QR code. When that particular item is being looked up, I use cv2 to detect and decode the QR code.
Here is the code:
def encode_qr():
    import qrcode
    import random as r
    item_code = r.randint(0, 99999999)  # random item code, up to 8 digits
    data = str(item_code)
    qrfile = "qr_image_name_for_specified_item.png"
    # generate qrcode
    qrimage = qrcode.make(data)
    # save image
    qrimage.save(qrfile)
    print("Done generating qrcode")

def decode_qr():
    import cv2
    import sqlite3 as sql3
    filename = "qr_image_name_for_specified_item.png"
    # alternatively read from the webcam with cv2.VideoCapture(0)
    # read image
    image = cv2.imread(filename)
    # initialize qrcode detector
    detector = cv2.QRCodeDetector()
    # detect and decode
    info, v_array, binary_qrcode = detector.detectAndDecode(image)
    # nothing detected?
    if v_array is None:
        print("No qrcode detected or probably some technical issues occurred")
    else:
        print("QRCODE data")
        print(info)
        # look the decoded ID up in the database
        sqldb = sql3.connect("your_database.db")
        cur = sqldb.cursor()
        cur.execute("select * from your_table where ID=?", (info,))
        rows = cur.fetchall()
        for row in rows:
            print(row)     # all the item details for this item code
            print(row[1])  # a specific detail for the item_code
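Since the comment above mentions the webcam alternative, here is a short sketch of that variant (my own adaptation, not the answerer's code), decoding directly from the camera with the same cv2.QRCodeDetector:

# Hedged sketch: decode the QR code straight from the webcam instead of a saved image.
import cv2

detector = cv2.QRCodeDetector()
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    info, points, _ = detector.detectAndDecode(frame)
    if points is not None and info:
        print("QRCODE data:", info)  # `info` is the item ID to look up in sqlite3
        break
    cv2.imshow("QR scan", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()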

Related

ROS and OpenCV: How can I convert a subscribed compressed image directly to OpenCV, and get new frame updates without using the video capture function

Context:
I am trying to process an image stream with a gesture recognition algorithm using ROS, OpenCV and Python. All the examples I've seen use OpenCV's capture directly from a webcam to process video, whereas I am inserting an image stream with a known image that updates at around 40 fps.
Using a typical ROS2 image subscriber node, I convert a compressed image message that is being sent to the device from a camera.
Then I pass the decompressed image as an argument into my gesture recognition script. Once I do this, my code seems to get 'stuck' and the image data goes dark. I suspect it's because I've modified the while loop within the gesture recognition to try to accommodate new frames, but I know of no way of reading new frames into the gesture recognizer.
Could anyone please suggest a means of achieving my goal (passing the compressed_img_msg straight into OpenCV video processing and getting frame-by-frame updates without using the built-in video capture functionality)?
See the code for the ROS2 image node and the gesture recognition main below.
My ROS2 compressed_image node is below:
////////////////////////////////////////////////////////////////////////////////////////////
compressed_image node
////////////////////////////////////////////////////////////////////////////////////////////
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge
import cv2
import gesture_app  # module containing the gesture recognition main() shown further down

class CompressedImageSubscriber(Node):
    def __init__(self, topic):
        super().__init__("image_view_sub_node")
        qos_policy = rclpy.qos.QoSProfile(reliability=rclpy.qos.ReliabilityPolicy.BEST_EFFORT,
                                          history=rclpy.qos.HistoryPolicy.KEEP_LAST,
                                          depth=1)
        self.sub = self.create_subscription(CompressedImage, topic,
                                            self.subscriber_callback, qos_profile=qos_policy)
        self.received_msg = False

    def subscriber_callback(self, compressed_image_msg):
        if self.received_msg == False:
            print("Image Data Received... Displaying Image in CV2 window")
            self.received_msg = True
        # this is the unprocessed image decoded from Unity
        subscribed_image = CvBridge().compressed_imgmsg_to_cv2(compressed_image_msg, desired_encoding="bgr8")
        cv2.imshow("CompressedImage", subscribed_image)
        cv2.waitKey(1)
        # call the gesture script, passing the unprocessed image through as an argument
        gesture_app.main(subscribed_image)
        cv2.waitKey(1)

# main code
def main():
    rclpy.init()
    topic_name = input("what is the topic name?: ")
    my_sub = CompressedImageSubscriber(topic_name)
    print("Waiting for data to be published over the topic...")
    try:
        rclpy.spin(my_sub)
    except KeyboardInterrupt:
        my_sub.destroy_node()
        rclpy.shutdown()

if __name__ == '__main__':
    main()
//////////////////////////////////////////////////////////////////////////////////////////
As you can see, once the image is decoded I pass it through to the gesture algorithm as an argument. The main script for this can be seen below:
/////////////////////////////////////////////////////////////////////////////////////////
GESTURE RECOGNITION MAIN FUNCTION
(source code: https://github.com/kinivi/hand-gesture-recognition-mediapipe)
/////////////////////////////////////////////////////////////////////////////////////////
def main(subscribed_image):
    # Argument parsing #################################################################
    args = get_args()

    cap_device = args.device
    cap_width = args.width
    cap_height = args.height

    use_static_image_mode = args.use_static_image_mode
    min_detection_confidence = args.min_detection_confidence
    min_tracking_confidence = args.min_tracking_confidence

    use_brect = True

    # Model load #############################################################
    mp_hands = mp.solutions.hands
    hands = mp_hands.Hands(
        static_image_mode=use_static_image_mode,
        max_num_hands=1,
        min_detection_confidence=min_detection_confidence,
        min_tracking_confidence=min_tracking_confidence,
    )

    keypoint_classifier = KeyPointClassifier()
    point_history_classifier = PointHistoryClassifier()

    # Read labels ###########################################################
    with open('model/keypoint_classifier/keypoint_classifier_label.csv',
              encoding='utf-8-sig') as f:
        keypoint_classifier_labels = csv.reader(f)
        keypoint_classifier_labels = [
            row[0] for row in keypoint_classifier_labels
        ]
    with open(
            'model/point_history_classifier/point_history_classifier_label.csv',
            encoding='utf-8-sig') as f:
        point_history_classifier_labels = csv.reader(f)
        point_history_classifier_labels = [
            row[0] for row in point_history_classifier_labels
        ]

    # FPS Measurement ########################################################
    cvFpsCalc = CvFpsCalc(buffer_len=10)

    # Coordinate history #################################################################
    history_length = 16
    point_history = deque(maxlen=history_length)

    # Finger gesture history ################################################
    finger_gesture_history = deque(maxlen=history_length)

    # ########################################################################
    mode = 0

    # Camera preparation ###############################################################
    cap = subscribed_image
    # cap = cv.VideoCapture()

    while True:
        fps = cvFpsCalc.get()

        # Process Key (ESC: end) #################################################
        key = cv.waitKey(10)
        if key == 27:  # ESC
            break
        number, mode = select_mode(key, mode)

        # Camera capture #####################################################
        image = cap
        if image is None:
            break
        image = cv.flip(image, 1)  # Mirror display
        debug_image = copy.deepcopy(image)

        # Detection implementation #############################################################
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)

        image.flags.writeable = False
        results = hands.process(image)
        image.flags.writeable = True

        # ####################################################################
        if results.multi_hand_landmarks is not None:
            for hand_landmarks, handedness in zip(results.multi_hand_landmarks,
                                                  results.multi_handedness):
                # Bounding box calculation
                brect = calc_bounding_rect(debug_image, hand_landmarks)
                # Landmark calculation
                landmark_list = calc_landmark_list(debug_image, hand_landmarks)

                # Conversion to relative coordinates / normalized coordinates
                pre_processed_landmark_list = pre_process_landmark(
                    landmark_list)
                pre_processed_point_history_list = pre_process_point_history(
                    debug_image, point_history)
                # Write to the dataset file
                logging_csv(number, mode, pre_processed_landmark_list,
                            pre_processed_point_history_list)

                # Hand sign classification
                hand_sign_id = keypoint_classifier(pre_processed_landmark_list)
                if hand_sign_id == 2:  # Point gesture
                    point_history.append(landmark_list[8])
                else:
                    point_history.append([0, 0])

                # Finger gesture classification
                finger_gesture_id = 0
                point_history_len = len(pre_processed_point_history_list)
                if point_history_len == (history_length * 2):
                    finger_gesture_id = point_history_classifier(
                        pre_processed_point_history_list)

                # Calculates the gesture IDs in the latest detection
                finger_gesture_history.append(finger_gesture_id)
                most_common_fg_id = Counter(
                    finger_gesture_history).most_common()

                # Drawing part
                debug_image = draw_bounding_rect(use_brect, debug_image, brect)
                debug_image = draw_landmarks(debug_image, landmark_list)
                debug_image = draw_info_text(
                    debug_image,
                    brect,
                    handedness,
                    keypoint_classifier_labels[hand_sign_id],
                    point_history_classifier_labels[most_common_fg_id[0][0]],
                )
        else:
            point_history.append([0, 0])

        debug_image = draw_point_history(debug_image, point_history)
        debug_image = draw_info(debug_image, fps, mode, number)

        # Screen reflection #############################################################
        cv.imshow('Hand Gesture Recognition', debug_image)

        t = 1
        blank_image = np.zeros(shape=[512, 512, 3], dtype=np.uint8)
        image = blank_image

    cv.destroyAllWindows()
Could anyone please advise, or suggest another means of achieving this?
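One direction that might help (a sketch of my own, not a confirmed fix): instead of calling gesture_app.main() from every callback, which re-runs all the setup and then loops forever over a single frame, push each decoded frame onto a queue from the callback and run the recognition loop in its own thread that pulls fresh frames. The per-frame part of main() would need to be factored out into a function; gesture_app.process_frame() below is that hypothetical refactoring.

# Hedged sketch: decouple the ROS2 callback from the recognition loop with a queue
# so the gesture code always sees the newest frame. process_frame() is hypothetical.
import queue
import threading

frame_queue = queue.Queue(maxsize=1)  # keep only the latest frame

def subscriber_callback(self, compressed_image_msg):
    img = CvBridge().compressed_imgmsg_to_cv2(compressed_image_msg, desired_encoding="bgr8")
    if frame_queue.full():
        try:
            frame_queue.get_nowait()  # drop the stale frame
        except queue.Empty:
            pass
    frame_queue.put(img)

def recognition_loop():
    # do the one-time setup (model load, classifiers, label files) once here,
    # then process frames as they arrive instead of looping over a single image
    while True:
        img = frame_queue.get()          # blocks until a new frame is available
        gesture_app.process_frame(img)   # hypothetical per-frame function

threading.Thread(target=recognition_loop, daemon=True).start()
# rclpy.spin(node) keeps running in the main thread so the callback keeps firing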

method DESCRIBE failed: 453 Not Enough Bandwidth - OpenCV Python

I'm trying to open two RTSP streams using OpenCV in Python. When I open only one stream I don't get this error, but when I try to open two or more streams the error occurs. My server is an NVR.
import os
from imutils.video import VideoStream, FileVideoStream, FPS
import cv2
import numpy as np

data1 = "20201229T171900z"
data2 = "20201229T172200z"

rtsp1 = "rtsp://user:pass#192.168.0.4:554/streaming/tracks/101?"
rtsp1 = rtsp1 + "starttime=" + data1 + "&endtime=" + data2
rtsp2 = "rtsp://user:pass#192.168.0.4:554/streaming/tracks/201?"
rtsp2 = rtsp2 + "starttime=" + data1 + "&endtime=" + data2

vs1 = VideoStream(rtsp1).start()
vs2 = VideoStream(rtsp2).start()

while True:
    frame1 = vs1.read()
    frame2 = vs2.read()
    frame1 = cv2.resize(frame1, (400, 400))
    frame2 = cv2.resize(frame2, (400, 400))
    esquerda = np.concatenate((frame1, frame2), axis=0)
    cv2.imshow('img', esquerda)
    key = cv2.waitKey(1)
    if key == ord("q"):
        break

vs1.stop()
vs2.stop()
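No answer is recorded for this one; one thing sometimes worth trying (my own suggestion, untested against this NVR) is forcing RTSP over TCP for OpenCV's FFmpeg backend before opening the captures, since UDP transport negotiation is a common trigger for 453 responses:

# Hedged suggestion (not from the thread): force RTSP-over-TCP for the FFmpeg backend.
# The environment variable below is read by OpenCV's FFmpeg capture; whether it
# resolves a 453 from this particular NVR is untested.
import os
os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;tcp"

import cv2
cap1 = cv2.VideoCapture(rtsp1, cv2.CAP_FFMPEG)  # rtsp1/rtsp2 as built above
cap2 = cv2.VideoCapture(rtsp2, cv2.CAP_FFMPEG)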

OpenCV(4.1.1) can't find starting number (in the name of file): (camera_stream_Url) in function 'cv::icvExtractPattern'

My .py script runs fine, but when I convert the .py to an .exe, the error below is generated:
OpenCV(4.1.1) C:\projects\opencv-python\opencv\modules\videoio\src\cap_images.cpp:253: error: (-5:Bad argument) CAP_IMAGES: can't find starting number (in the name of file): http://admin:admin#192.168.1.86/axis-cgi/mjpg/video.cgi in function 'cv::icvExtractPattern'
My Python script that generates this is below:
import face_recognition
import cv2
import time
import requests
import json
from http.client import responses
from datetime import datetime
from datetime import timedelta
import sys
from truepy import License
from getmac import get_mac_address as gma
import easygui

# global variables
ChangeLayoutTime = time.localtime()
IsApiCall = False
currentTime = datetime.now()
afterTime = datetime.now()
layout_duration = 0

# import json file parameters here
with open('config.json', 'r') as f:
    distros_dict = json.load(f)
XiboClient_id = distros_dict['client_id']
XiboClient_secret = distros_dict["client_secret"]
XiboUrl = distros_dict["url"]
XiboDisplaygroup = distros_dict["displaygroup"]
XiboLayoutId = distros_dict["layoutId"]
IpCamStreamUrl = distros_dict["ipCamStreamUrl"]
playerIpAddress = distros_dict["playerIpAddress"]
print(IpCamStreamUrl)
# print(xiboclient_id)
# print(xiboclient_secret)
# print(xibourl)
# print(xibodisplaygroup)
# print(xibolayoutid)
# sys.exit()
# url = "'"+XiboClient_id+"'"
# print(url)

# check license
def validateLicense():
    # Load the certificate
    with open('certificate.pem', 'rb') as f:
        certificate = f.read()
    # Load the license
    with open('license.lic', 'rb') as f:
        license = License.load(f, b'stech_lic_78324')
    # Verify the license; this will raise License.InvalidSignatureException if
    # the signature is incorrect
    license.verify(certificate)
    if license.data.extra != gma(ip=playerIpAddress):
        print(license.data.extra)
        print(gma(ip=playerIpAddress))
        # shut down the application and notify the user about the invalid license
        easygui.msgbox("Your camera module license is invalid please contact to S-Tech team", title="Invalid License!")
        sys.exit()

def __draw_label(img, text, pos, bg_color):
    font_face = cv2.FONT_HERSHEY_SIMPLEX
    scale = .4
    color = (0, 0, 0)
    thickness = cv2.FILLED
    margin = 2
    txt_size = cv2.getTextSize(text, font_face, scale, thickness)
    end_x = pos[0] + txt_size[0][0] + margin
    end_y = pos[1] - txt_size[0][1] - margin
    cv2.rectangle(img, pos, (end_x, end_y), bg_color, thickness)
    cv2.putText(img, text, pos, font_face, scale, color, 1, cv2.LINE_AA)

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(IpCamStreamUrl)
# video_capture = cv2.VideoCapture("http://192.168.1.20:8080/video")

# Initialize some variables
face_locations = []

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    if ret == False:
        break
    # Resize frame of video to 1/4 size for faster face detection processing
    small_frame = cv2.resize(frame, (0, 0), fx=1, fy=1, interpolation=cv2.INTER_AREA)
    # Find all the faces and face encodings in the current frame of video
    face_locations = face_recognition.face_locations(small_frame, model="hog")
    print(face_locations)
    if face_locations is ():
        break
    if datetime.now().strftime("%S") == afterTime.strftime("%S"):
        IsApiCall = False
    # Display the results
    i = 0
    genders = []
    ages = ()
    for top, right, bottom, left in face_locations:
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Extract the region of the image that contains the face
        face_image = frame[top:bottom, left:right]
        height, width = face_image.shape[:2]
        if width > 144:
            image = 'face{}.jpg'.format(i)
            cv2.imwrite(image, face_image)
            if datetime.now().strftime("%S") == afterTime.strftime("%S"):
                IsApiCall = False
            # customization by basit for layout change digisign player
            if IsApiCall is False:
                # check license
                validateLicense()
                # Access Token api
                url = "http://" + XiboUrl + "/api/authorize/access_token"
                data = {
                    'client_id': XiboClient_id,
                    'client_secret': XiboClient_secret,
                    'grant_type': 'client_credentials'
                }
                response = requests.request("POST", url, data=data)
                obj = json.loads(response.text)
                temp = obj["access_token"]
                # Change layout Api
                url = "http://" + XiboUrl + "/api/displaygroup/" + XiboDisplaygroup + "/action/changeLayout?envelope=1"
                data = {
                    'layoutId': XiboLayoutId,
                    'changeMode': 'replace'
                }
                headers = {
                    'Authorization': 'Bearer ' + temp
                }
                response = requests.request("POST", url, headers=headers, data=data)
                print("Layout change = Success")
                IsApiCall = True
                # Get layout duration api
                url = "http://" + XiboUrl + "/api/layout/status/" + XiboLayoutId + "?envelope=1"
                headers = {
                    'Authorization': 'Bearer ' + temp
                }
                response = requests.request("GET", url, headers=headers)
                objj = json.loads(response.text)
                temp1 = objj["data"]
                layout_duration = temp1["duration"]
                print("Current layout duration is " + str(layout_duration) + " second")
                currentTime = datetime.now()
                afterTime = (datetime.now() + timedelta(seconds=layout_duration + 10))
                print("Next layout change after " + afterTime.strftime("%H") + ":" + afterTime.strftime("%M") + ":" + afterTime.strftime("%S"))
            else:
                currentTime = (datetime.now())
                if (currentTime.strftime("%S") == afterTime.strftime("%S")):
                    IsApiCall = False
    # For video display window
    cv2.namedWindow('Video', cv2.WINDOW_NORMAL)
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
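No answer is recorded here either. The "CAP_IMAGES ... icvExtractPattern" message comes from OpenCV's image-sequence backend, which it falls back to when no video backend accepts the URL; in frozen .exe builds this is often because the FFmpeg DLL did not make it into the bundle. Two hedged things worth trying (my own notes, untested against this exact setup):

# Hedged notes, not from the thread:
import cv2

# 1) ask for the FFmpeg backend explicitly instead of letting OpenCV guess
video_capture = cv2.VideoCapture(IpCamStreamUrl, cv2.CAP_FFMPEG)

# 2) when packaging with PyInstaller, make sure OpenCV's FFmpeg DLL
#    (opencv_videoio_ffmpeg*.dll, found next to the cv2 package in site-packages)
#    ends up in the bundle, e.g. via --add-binary, so the frozen .exe can still
#    open network streams.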

Python: upload picture without disturbing the normal flow of a program

In my code I am constantly grabbing frames from a camera to check whether a human body is present. Whenever there is a human, I crop the body, upload it to a server, and keep doing this.
PROBLEM: Whenever I start a thread to upload a photo to the server, my program execution stops and waits for the upload thread to finish. I don't want my program execution to stop and wait; I want it to keep running. I want to start a separate upload thread that runs in parallel, does its job without disturbing the normal flow, and finishes on its own. It should do this every time a body is detected.
# USAGE
# python detect.py --images images

# import the necessary packages
from __future__ import print_function
from imutils.object_detection import non_max_suppression
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2
import time
import threading
import Queue
import multiprocessing
import requests
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import urllib2
from urllib2 import Request, urlopen, URLError
import urllib
import traceback

size = 2
i = 0

# Queues to store data
queue_FACES = multiprocessing.Queue()
(im_width, im_height) = (112, 112)

# initialize the HOG descriptor/person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# Capture Camera Stream
#webcam = cv2.VideoCapture('/home/irum/Desktop/WIN_20170529_09_53_13_Pro.mp4')
webcam = cv2.VideoCapture(0)
#h=4.27 w=4.29 AVG = 4.28

# Upload to server
def upload_internet(filename2, sampleFile, check_path1):
    #print("uploading....")
    filename2 = filename2 + '.jpg'
    #print (filename2)
    register_openers()
    datagen, headers = multipart_encode({"sampleFile": open(sampleFile), "name": filename2})
    #request = urllib2.Request("http://videoupload.hopto.org:5000/api/Sync_log", datagen, headers)
    request = urllib2.Request("http://videoupload.hopto.org:5002/api/Synclog", datagen, headers)
    try:
        #print ("***UPLOAD SERVER RESPONSE***")
        response = urllib2.urlopen(request)
        html = response.read()
        print ("html ", html)
        #resp = json.loads(html)
        # with open('output_file.txt', "wb") as code: #CHANGE PATH
        #     code.write(curr_time+"\n"+html +"\n")
    except URLError, e:
        if hasattr(e, 'reason'):
            #print ('We failed to reach a server.')
            print ('Reason: ', e.reason)
        elif hasattr(e, 'code'):
            #print ('The server couldn\'t fulfill the request.')
            print ('Error code: ', e.code)
    except Exception:
        print ('generic exception: ' + traceback.format_exc())

while True:
    # read each frame
    ret, frame = webcam.read()
    # resize it
    image = imutils.resize(frame, width=min(300, frame.shape[1]))
    orig = image.copy()
    # detect people in the frame
    (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
        padding=(8, 8), scale=1.05)
    # draw the original bounding boxes
    for i in range(len(rects)):
        body_i = rects[i]
        (x, y, w, h) = [v * 1 for v in body_i]
        cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
    # apply non-maxima suppression
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
    # draw the final bounding boxes
    for i in range(len(rects)):
        body_i = rects[i]
        (xA, yA, xB, yB) = [int(v * 1) for v in body_i]
        # rect on scaled image
        cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
        # rects to map on original frame
        (x1, y1, w1, h1) = [int(v * 4.28) for v in body_i]
        cv2.rectangle(frame, (x1, y1), (w1, h1), (0, 45, 255), 2)
        # Crop body from original frame
        body_big = frame[y1:y1 + h1, x1:x1 + w1]
        # Save body
        save_body_path = '/home/irum/Desktop/pedestrian-detection/BIG_BODY'
        cur_date = (time.strftime("%Y-%m-%d"))
        cur_time = (time.strftime("%H:%M:%S"))
        new_pin = cur_date + "-" + cur_time
        filename1 = 'BIG'
        filename2 = str(filename1) + "-" + str(new_pin)
        print ("filename2", filename2)
        sampleFile = ('%s/%s.jpg' % (save_body_path, filename2))
        print ("sampleFile", sampleFile)
        cv2.imwrite('%s/%s.jpg' % (save_body_path, filename2), body_big)
        # upload body
        upload_process = threading.Thread(target=upload_internet(filename2, sampleFile, save_body_path))
        upload_process.start()
    # show the output images
    cv2.imshow("Before NMS", orig)
    cv2.imshow("After NMS", image)
    cv2.imshow("BIG BODY", frame)
    # cv2.imshow("FACE", body_big2)
    key = cv2.waitKey(10)
    if key == 27:
        break
Correction:
Use cThread = threading.Thread(target=upload_internet, args=(...)) to define a new thread instance: pass the function itself plus its arguments, instead of calling the function inside target= (calling it there runs the upload synchronously, which is why your main loop blocks).
Use cThread.start() to launch it; of course you don't join(), since your process is continuous.
Here is simplified code so I could test-run it at my end:
import time
import threading
import multiprocessing
from time import sleep

def upload_internet(filename, sampleFile, check_path):
    print ("//// WAITING FOR SERVER RESPONSE")
    time.sleep(3)
    print ("RECEIVED SERVER RESPONSE \\\\\\")

filename = "filename"
sampleFile = "sampleFile"
save_body_path = "save_body_path"
key = 1

while True:
    rects = range(0, 10)
    # draw the original bounding boxes
    range_len_rects = range(len(rects))
    for i in range_len_rects:
        print("Main starts")
        rects = range(0, 10)
        thread_list = []
        for i in range_len_rects:
            # upload body
            thread_list.append(threading.Thread(target=upload_internet, args=(filename + "-" + str(i), sampleFile, save_body_path)))
            thread_list[i].start()
            print ("Exiting Launch Thread loop :" + str(i) + "/" + str(range_len_rects[i]))
        print("Main sleep for 10 seconds")
        time.sleep(10)
    if key == 27:
        break
PS: Remember that the threads are not destroyed automatically, and you must make sure upload_internet() doesn't get stuck in memory for any reason. Alternatively, you can control how many instances exist, set a cap, and manage zombie threads to avoid a process crash and bad memory management; see the sketch below.
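A minimal sketch of that capped approach (my own addition, not part of the original answer; assumes Python 3, where the module is queue rather than Queue): a fixed pool of worker threads draining a bounded queue, so the detection loop never blocks on uploads and the number of live threads stays constant.

# Hedged sketch: cap upload concurrency with a fixed worker pool and a bounded queue.
import queue
import threading

upload_queue = queue.Queue(maxsize=100)  # bounded so memory cannot grow without limit
NUM_WORKERS = 4                          # hard cap on simultaneous uploads

def upload_worker():
    while True:
        job = upload_queue.get()         # blocks until a job is available
        if job is None:                  # sentinel: shut this worker down
            break
        filename2, sampleFile, save_body_path = job
        try:
            upload_internet(filename2, sampleFile, save_body_path)
        finally:
            upload_queue.task_done()

workers = [threading.Thread(target=upload_worker, daemon=True) for _ in range(NUM_WORKERS)]
for w in workers:
    w.start()

# In the detection loop, instead of spawning a new thread per body:
#     upload_queue.put((filename2, sampleFile, save_body_path))
# On shutdown:
#     upload_queue.join()
#     for _ in workers: upload_queue.put(None)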

How can I find the optical flow of a live video stream (Android IP Webcam) in OpenCV Python?

I have written code which works fine for a recorded video or video from my laptop's webcam, but I need it to work with video coming directly from my phone. I have been able to get video from the phone and show it through Python, but I need the frames of this video so I can apply the optical flow function (calcOpticalFlowFarneback) to them. Here are my two pieces of code; I'll be thankful for your help.
This is the code for getting video from the Android phone's camera into Python:
import cv2
import urllib2
import numpy as np
import sys

host = "192.168.1.2:8080"
if len(sys.argv) > 1:
    host = sys.argv[1]

hoststr = 'http://' + host + '/video'
print 'Streaming ' + hoststr

stream = urllib2.urlopen(hoststr)
bytes = ''
while True:
    bytes += stream.read(1024)
    a = bytes.find('\xff\xd8')
    b = bytes.find('\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b + 2]
        bytes = bytes[b + 2:]
        i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow(hoststr, i)
        if cv2.waitKey(1) == 27:
            exit(0)
And this is the relevant portion for motion flow; notice I need frames from the video:
vid = cv2.VideoCapture('vidaflv.flv')
ret, frame = vid.read()
imgrayp = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
while True:
    ret, frame = vid.read()
    if ret:
        imgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(imgrayp, imgray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        cv2.imshow('Optical flow', draw_flow(frame, flow))
        imgrayp = imgray
    if cv2.waitKey(1) == ord('e'):
        break
What I can't figure out is how to get a frame from the live-stream code to feed into my optical flow function.
Without being able to test this, I'd try something like:
import cv2
import urllib2
import numpy as np
import sys

host = "192.168.1.2:8080"
if len(sys.argv) > 1:
    host = sys.argv[1]

hoststr = 'http://' + host + '/video'
print 'Streaming ' + hoststr

stream = urllib2.urlopen(hoststr)
bytes = ''
FirstTime = True
while True:
    bytes += stream.read(1024)
    a = bytes.find('\xff\xd8')
    b = bytes.find('\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b + 2]
        bytes = bytes[b + 2:]
        i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        if FirstTime == True:
            FirstTime = False
            imgrayp = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
        imgray = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(imgrayp, imgray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        cv2.imshow('Optical flow', draw_flow(i, flow))
        imgrayp = imgray  # update the previous frame for the next flow computation
        if cv2.waitKey(1) == 27:
            exit(0)
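Note that draw_flow() is used in both snippets but never defined; it comes from OpenCV's optical-flow sample. A re-implementation along the same lines (my own version, for completeness) that draws the flow field as short line segments on a grid:

# Hedged helper: draw_flow in the spirit of OpenCV's opt_flow.py sample.
import numpy as np
import cv2

def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    # sample the flow field on a regular grid
    y, x = np.mgrid[step // 2:h:step, step // 2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    # build line segments from each grid point to its displaced position
    lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = img.copy()
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), _ in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis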
