Python pyparrot image processing question

I'm trying to write code that flies a camera drone using demoMamboVisionGUI.py (the second listing below). When the code is executed, the camera screen comes up, and pressing the button starts the flight. The first listing below displays four camera windows and detects straight lines while masking a specified color, blue (BGR2HSV). Using these two pieces of code, the drone recognizes the blue line, flies forward little by little, and turns left and right at a certain angle; when it recognizes a floor area of a specified color (red), it lands, and another button starts the flight again. Now I want to make the code recognize green and land. I would appreciate a simple hint.
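What I have in mind for the green part is a second mask next to the existing blue one, roughly like the sketch below (the hue bounds are only a guess I still need to tune, and the 5% threshold is arbitrary):

```python
# hypothetical green mask, built the same way as the blue one in my code below
lower_g = np.array([40, 80, 100])
upper_g = np.array([80, 255, 255])
green = cv2.inRange(img_HSV, lower_g, upper_g)

# land once a noticeable portion of the frame is green
if cv2.countNonZero(green) > 0.05 * green.size:
    mambo.safe_land(5)
```

My current code is below.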
import cv2
import numpy as np

def im_trim(img):
    x = 160
    y = 50
    w = 280
    h = 180
    img_trim = img[y:y + h, x:x + w]
    return img_trim

def go():
    minimum = 9999
    min_theta = 0
    try:
        cap = cv2.VideoCapture(0)
    except:
        return
    while True:
        ret, P = cap.read()
        img1 = P
        cv2.imshow('asdf', img1)
        img_HSV = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
        img_h, img_s, img_v = cv2.split(img_HSV)
        cv2.imshow("HSV", img_HSV)
        lower_b = np.array([100, 80, 100])
        upper_b = np.array([120, 255, 255])
        blue = cv2.inRange(img_HSV, lower_b, upper_b)
        cv2.imshow('root', blue)
        edges = cv2.Canny(blue, 50, 150, apertureSize=3)
        lines = cv2.HoughLines(edges, 1, np.pi / 180, threshold=100)
        if lines is not None:
            for line in lines:
                r, theta = line[0]
                #if (r < minimum and r > 0) and (np.rad2deg(theta) > -90 and np.rad2deg(theta) < 90):
                #    minimum = r
                #    min_theta = theta
                #if (r > 0 and r < 250) and (np.rad2deg(theta) > 170 or np.rad2deg(theta) < 10):
                #    self.drone_object.fly_direct(pitch=0, roll=-7, yaw=0, vertical_movement=0,
                #                                 duration=1)
                #    print("right")
                #elif (r > 400 and r < 650) and (np.rad2deg(theta) > 170 or np.rad2deg(theta) < 10):
                #    self.drone_object.fly_direct(pitch=0, roll=7, yaw=0, vertical_movement=0,
                #                                 duration=1)
                print(r, np.rad2deg(theta))
                # Repeat the if-statement below in the while loop until the path becomes a
                # straight-ahead course, then proceed.
                #if np.rad2deg(min_theta) >= some angle (or <= some angle):
                #    below -> turn left, above -> turn right, in between -> go straight
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a * r
                y0 = b * r
                x1 = int(x0 + 1000 * (-b))
                y1 = int(y0 + 1000 * a)
                x2 = int(x0 - 1000 * (-b))
                y2 = int(y0 - 1000 * a)
                cv2.line(img1, (x1, y1), (x2, y2), (0, 255, 0), 3)
        cv2.imshow('hough', img1)
        k = cv2.waitKey(1)
        if k == 27:
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    go()
    print("??")
================================================================================================
"""
Demo of the Bebop vision using DroneVisionGUI that relies on libVLC. It is a different
multi-threaded approach than DroneVision
Author: Amy McGovern
"""
from pyparrot.Minidrone import Mambo
from pyparrot.DroneVisionGUI import DroneVisionGUI
import cv2

# set this to true if you want to fly for the demo
testFlying = True

class UserVision:
    def __init__(self, vision):
        self.index = 0
        self.vision = vision

    def save_pictures(self, args):
        # print("in save pictures on image %d " % self.index)
        img = self.vision.get_latest_valid_picture()
        if (img is not None):
            filename = "test_image_%06d.png" % self.index
            # uncomment this if you want to write out images every time you get a new one
            #cv2.imwrite(filename, img)
            self.index += 1
            #print(self.index)

def demo_mambo_user_vision_function(mamboVision, args):
    """
    Demo the user code to run with the run button for a mambo
    :param args:
    :return:
    """
    mambo = args[0]

    if (testFlying):
        print("taking off!")
        mambo.safe_takeoff(5)

        if (mambo.sensors.flying_state != "emergency"):
            print("flying state is %s" % mambo.sensors.flying_state)
            print("Flying direct: going up")
            mambo.fly_direct(roll=0, pitch=0, yaw=0, vertical_movement=15, duration=2)

            print("flip left")
            print("flying state is %s" % mambo.sensors.flying_state)
            success = mambo.flip(direction="left")
            print("mambo flip result %s" % success)
            mambo.smart_sleep(5)

        print("landing")
        print("flying state is %s" % mambo.sensors.flying_state)
        mambo.safe_land(5)
    else:
        print("Sleeeping for 15 seconds - move the mambo around")
        mambo.smart_sleep(15)

    # done doing vision demo
    print("Ending the sleep and vision")
    mamboVision.close_video()

    mambo.smart_sleep(5)

    print("disconnecting")
    mambo.disconnect()

if __name__ == "__main__":
    # you will need to change this to the address of YOUR mambo
    mamboAddr = "B0:FC:36:F4:37:F9"

    # make my mambo object
    # remember to set True/False for the wifi depending on if you are using the wifi or the BLE to connect
    mambo = Mambo(mamboAddr, use_wifi=True)

    print("trying to connect to mambo now")
    success = mambo.connect(num_retries=3)
    print("connected: %s" % success)

    if (success):
        # get the state information
        print("sleeping")
        mambo.smart_sleep(1)
        mambo.ask_for_state_update()
        mambo.smart_sleep(1)

        print("Preparing to open vision")
        mamboVision = DroneVisionGUI(mambo, is_bebop=False, buffer_size=200,
                                     user_code_to_run=demo_mambo_user_vision_function, user_args=(mambo, ))
        userVision = UserVision(mamboVision)
        mamboVision.set_user_callback_function(userVision.save_pictures, user_callback_args=None)
        mamboVision.open_video()
==========================================================================
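What I'm not sure about is where to hook the green check in. My current idea is to do it inside the user vision callback, since it already receives each new frame; roughly like this (untested sketch, the hue range is assumed, and UserVision would then be constructed as UserVision(mamboVision, mambo)):

```python
import cv2
import numpy as np

class UserVision:
    def __init__(self, vision, drone):
        self.index = 0
        self.vision = vision
        self.drone = drone  # the Mambo object, passed in alongside the vision object

    def check_for_green(self, args):
        img = self.vision.get_latest_valid_picture()
        if img is None:
            return
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # assumed hue range for green; needs tuning against the drone camera
        mask = cv2.inRange(hsv, np.array([40, 80, 100]), np.array([80, 255, 255]))
        if cv2.countNonZero(mask) > 0.05 * mask.size:  # 5% of the frame is green
            print("green detected - landing")
            self.drone.safe_land(5)
        self.index += 1
```

I would then register it with mamboVision.set_user_callback_function(userVision.check_for_green, user_callback_args=None) instead of save_pictures. Is that the right approach?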

Related

How to crop video depending on the bounding box after face detection?

I faced an issue when trying to crop a video to the bounding box after detecting the face. Below is the relevant part of my code; please take a look, and if there is existing code similar to my problem, please point me to it.
for entry in words_data:
# Extract speech to text data
print('entry:', type(entry), entry)
s_sec, s_millisec = divmod(float(entry['start']), 1)
e_sec, e_millisec = divmod(float(entry['end']), 1)
s_min = 0
e_min = 0
s_millisec = s_millisec * 1000
e_millisec = e_millisec * 1000
print('s_sec, s_millisec:', s_sec, s_millisec)
if s_sec >= 60:
s_min = math.floor(s_sec / 60.0)
s_sec = s_sec % 60
if e_sec >= 60:
e_min = math.floor(e_sec / 60.0)
e_sec = e_sec % 60
# Determine video frames involved in stt entry
min_frame = s_min*fps*60 + (s_sec*fps)
max_frame = e_min*fps*60 + (e_sec*fps)
# go to min_frame
cap.set(cv2.CAP_PROP_POS_FRAMES, min_frame)
frame_count = min_frame
# read frames from min_frame to max_frame
num_people = 0
valid_video = True
bbx = []
bby = []
bbh = []
bbw = []
bbx1 = []
bby1 = []
bbx2 = []
bby2 = []
landmarks = []
angles = []
x = []
y = []
w = []
h = []
consecutive_frames_no_people = 0
while frame_count < max_frame:
if count == 0:
t = cv2.getTickCount()
# capture next frame
ret, frame = cap.read()
if not ret:
continue
#frame = cv2.resize(frame,(0, 0), fx=scale, fy=scale,interpolation=cv2.INTER_LINEAR)
#frame = cv2.resize(frame, (480, 640),interpolation=cv2.INTER_LINEAR)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
num_people = hog_face_detector(gray,1)
# if it detects less than or more than 1 person
# go to next subtitle
if len(num_people) != 1:
consecutive_frames_no_people += 1
if consecutive_frames_no_people >= max_bad_frames:
print(consecutive_frames_no_people,
' frames without 1 person. Skiping to next subtitle')
valid_video = False
break
## if only one person in the scene
if len(num_people) == 1:
consecutive_frames_no_people = 0
rects = hog_face_detector(gray,1)
for (i, rect) in enumerate(rects):
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
shape = dlib_facelandmark(gray, rect)
shape = face_utils.shape_to_np(shape)
#bb = bounding_boxes[0]
#x1, y1 = int(rect.left()), int(rect.top())
#x2, y2 = int(rect.right()), int(rect.bottom())
#area = (x2 - x1) * (y2 - y1)
#if area < min_area:
# valid_video = False
# break
#save bounding box coordinates for final crop
#bbx1.append(bb[0])
#bby1.append(bb[1])
#bbx2.append(bb[2])
#bby2.append(bb[3])
# convert dlib's rectangle to a OpenCV-style bounding box
# [i.e., (x, y, w, h)], then draw the face bounding box
bb = face_utils.rect_to_bb(rect)
#(x, y, w, h) = face_utils.rect_to_bb(rect)
cv2.rectangle(frame, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]), (0, 255, 0), 2)
#save bounding box coordinates for final crop
bbx1.append(bb[0])
bby1.append(bb[1])
bbx2.append(bb[2])
bby2.append(bb[3])
# Put fps at which we are processing camera feed on frame
cv2.putText(frame, "{0:.2f}-fps".format(fps_processing),
(50, height-50), cv2.FONT_HERSHEY_COMPLEX,
1, (0, 0, 255), 2)
# Display the image
cv2.imshow('Vid',frame)
# Read keyboard and exit if ESC was pressed
k = cv2.waitKey(1) & 0xFF
if k ==27:
exit()
elif k == ord('q'):
stop_videos = True
# increment frame counter
count = count + 1
# calculate fps at an interval of 100 frames
if (count == 30):
t = (cv2.getTickCount() - t)/cv2.getTickFrequency()
fps_processing = 30.0/t
count = 0
# if this was a valid video
#if valid_video and len(landmarks) > 0:
# num_output_video += 1
#entry['mouth3d'] = landmarks
#entry['angle'] = angles
if valid_video and len(bb) > 0:
num_output_video += 1
bbx1 = np.amin(bbx1)
bbx2 = np.amax(bbx2)
bby1 = np.amin(bby1)
bby2 = np.amax(bby2)
bbw = bbx2 - bbx1
bbh = bby2 - bby1
entry['bounding_box'] = [bb[0], bb[1], bb[2], bb[3]]
entry['landmark'] = bb
print('entry:', type(entry), entry)
if save_videos:
s_hr = 0
e_hr = 0
if s_min >= 60:
s_hr = math.floor(s_min / 60)
s_min = s_min % 60
if e_min >= 60:
e_hr = math.floor(e_min / 60)
e_min = e_min % 60
# cut and crop video
# ffmpeg -i input.mp4 -ss hh:mm:ss -filter:v crop=w:h:x:y -c:a copy -to hh:mm:ss output.mp4
ss = "{0:02d}:{1:02d}:{2:02d}.{3:03d}".format(
s_hr, s_min, int(s_sec), math.ceil(s_millisec))
es = "{0:02d}:{1:02d}:{2:02d}.{3:03d}".format(
e_hr, e_min, int(e_sec), math.ceil(e_millisec))
crop = "crop={0:1d}:{1:1d}:{2:1d}:{3:1d}".format(
bbw, bbh, bbx1, bby1)
out_name = os.path.join(output_dir, str(num_output_video))
subprocess.call(['ffmpeg', #'-hide_banner', '-loglevel', 'panic',
'-i', os.path.join(
videos_directory, vids_name, video_name),
'-ss', ss,
'-filter:v', crop, '-c:a', 'copy',
'-to', es, out_name +'.mp4'])
# save recognized speech
text_file = open(out_name +'.txt', "w")
text_file.write(entry['text'] + '\n')
text_file.write(str(entry['conf']))
text_file.close()
And this is the output. So how can I write the final bounding box for the video?
found 4 files
Processing video: health_news_1.mp4
video resolution: 608 x 1080
video framerate: 25.0
entry: <class 'dict'> {'link': 'build_Dataset', 'text': 'شانغهاي', 'conf': 0.58, 'start': 1.6, 'end': 2.24, 'bounding_box': []}
s_sec, s_millisec: 1.0 600.0000000000001
10 frames without 1 person. Skiping to next subtitle
entry: <class 'dict'> {'link': 'build_Dataset', 'text': 'تواجه', 'conf': 0.65, 'start': 2.24, 'end': 2.72, 'bounding_box': []}
s_sec, s_millisec: 2.0 240.00000000000023
Traceback (most recent call last):
File "extract_subvideos.py", line 467, in <module>
main(args)
File "extract_subvideos.py", line 315, in main
bbx1 = np.amin(bbx1)
File "<__array_function__ internals>", line 5, in amin
File "/Users/shaimaa/.local/lib/python3.8/site-packages/numpy/core/fromnumeric.py", line 2879, in amin
return _wrapreduction(a, np.minimum, 'min', axis, None, out,
File "/Users/shaimaa/.local/lib/python3.8/site-packages/numpy/core/fromnumeric.py", line 86, in _wrapreduction
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
ValueError: zero-size array to reduction operation minimum which has no identity
The error shows that you are calling amin() on the empty list bbx1.
I can't test it, but I think the problem is that you reset bbx1 = [] without also resetting bb = []. Later you can end up with a non-empty bb left over from the previous loop and an empty bbx1 from the current loop (when it finds more than one person it never appends values to bbx1), so the code inside if ... len(bb) > 0: runs and tries to reduce the empty bbx1.
You should also check len(bbx1):
if valid_video and len(bb) > 0 and len(bbx1) > 0:
Or use a boolean variable such as found_one_person: reset it to False before the while loop, set it to True when exactly one person is found (if len(num_people) == 1:), and use it to decide whether to write the data:
if valid_video and found_one_person:
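A minimal sketch of that second suggestion (found_one_person is a new name added for illustration; everything else refers to your existing loop):

```python
# before the frame-reading loop for this subtitle entry
found_one_person = False

while frame_count < max_frame:
    # ... existing frame reading and face detection ...
    if len(num_people) == 1:
        found_one_person = True
        consecutive_frames_no_people = 0
        # ... existing code that appends to bbx1, bby1, bbx2, bby2 ...

# only reduce the collected coordinates when at least one detection was stored
if valid_video and found_one_person:
    bbx1 = np.amin(bbx1)
    bbx2 = np.amax(bbx2)
    bby1 = np.amin(bby1)
    bby2 = np.amax(bby2)
```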

ModuleNotFoundError: No module named 'gtk'

I've tried to follow several solutions on the internet, including this one, but no luck. I'm in the process of implementing an object detection system. I am on Windows 10, using PyCharm with Python 3.8.
I am getting errors for the packages; I've tried to add them through the package installer and through the terminal, with no luck. Here's the error:
Traceback (most recent call last):
File "D:/CabaleGame/runner.py", line 2, in <module>
import processCards
File "D:\CabaleGame\processCards.py", line 1, in <module>
import gtk.gdk
ModuleNotFoundError: No module named 'gtk'
Process finished with exit code 1
I've downloaded the package from here and here, which one is correct?
My program file:
import gtk.gdk
import cv
import time
import os
import string
def takeScreenCapture(screenShotNum = ""):
time.sleep(1)
w = gtk.gdk.get_default_root_window()
sz = w.get_size()
#print "The size of the window is %d x %d" % sz
pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,False,8,sz[0],sz[1])
pb = pb.get_from_drawable(w,w.get_colormap(),0,0,0,0,sz[0],sz[1])
# Convert gtk.PixelBuf to a NumPy array
array = pb.get_pixels_array()
# Convert NumPy array to CvMat
mat = cv.fromarray(array)
# Convert RGB to BGR
cv.CvtColor(mat, mat, cv.CV_RGB2BGR)
#cv.ShowImage("win",mat)
#cv.WaitKey(0)
return mat
def getMeaningFromCards(cards):
"""
This takes a dictionary of the form:
(x, y) : Card image
and returns a dictionary of the form:
(x, y) : (number, suit)
(x, y) are the coordinates of the top left of the card
"""
imgdir = "LibraryImages"
templatesNums = os.listdir(os.path.join(imgdir,"Numbers"))
templatesSuits = os.listdir(os.path.join(imgdir,"Suits"))
#templates = filter(lambda s: s[-4:] == ".png", templates)
templatesNums = map(lambda s: os.path.join(imgdir,"Numbers", s), templatesNums)
templatesSuits = map(lambda s: os.path.join(imgdir, "Suits", s), templatesSuits)
for k in cards.keys():
card = cards[k]
cardImg = cv.CreateImageHeader((card.width, card.height), 8, 3)
cv.SetData(cardImg, card.tostring())
numAndSuit3 = cv.GetSubRect(cardImg, (0,0,30,80))
numAndSuit1 = cv.CreateImage((numAndSuit3.width, numAndSuit3.height), 8, 1)
cv.CvtColor(numAndSuit3, numAndSuit1, cv.CV_RGB2GRAY)
# Convert the 1 channel grayscale to 3 channel grayscale
# (GRAY2RGB doesn't actually introduce color)
cv.CvtColor(numAndSuit1, numAndSuit3, cv.CV_GRAY2RGB)
num = findBestTemplateMatch(templatesNums, numAndSuit3)
suit = findBestTemplateMatch(templatesSuits, numAndSuit3)
#print num, suit
# If this image was recognized as a card, but didn't match
# any template, it shouldn't be in the list in the first place
if num == None or suit == None:
del cards[k]
continue
num = string.split(os.path.basename(num), '.')[0]
suit = string.split(os.path.basename(suit), '.')[0]
# The alternate file names have underscores
# after their names
if num[-1] == '_':
num = num[:-1]
if suit[-1] == '_':
suit = suit[:-1]
cards[k] = (num, suit)
#cv.ShowImage("NumandSuit", numAndSuit)
#cv.WaitKey(0)
print cards
return cards
def findBestTemplateMatch(tplList, img):
"""
Compares img against a list of templates.
tplList is a list of string filenames of template images
Returns a tuple (num, suit) if a template is suitably matched
or None if not
"""
minTpl = 200 # arbitrarily large number
tString = None
for t in tplList:
tpl = cv.LoadImage(t)
w = img.width - tpl.width + 1
h = img.height - tpl.height + 1
result = cv.CreateImage((w,h), 32, 1)
cv.MatchTemplate(img, tpl, result, cv.CV_TM_SQDIFF_NORMED)
(minVal, maxVal, minLoc, maxLoc) = cv.MinMaxLoc(result)
#print t
#print (minVal, maxVal, minLoc, maxLoc)
# 0.2 found by experiment (the non-card images end up being around
# 0.25 - 0.28, and all the card images were much around 0.08 and less
if minVal < minTpl and minVal < 0.2:
minTpl = minVal
tString = t
#print minTpl, tString
#cv.ShowImage("win", img)
#cv.ShowImage("win2", result)
#cv.WaitKey(0)
return tString
def extractCards(fileName = None):
"""
Given an image, this will extract the cards from it.
This takes a filename as an optional argument
This filename should be the name of an image file.
This returns a dictionary of the form:
(x, y) : Card image
It is likely that the output from this will go to the
getMeaningFromCards() function.
"""
if fileName == None:
mat = takeScreenCapture()
else:
mat = cv.LoadImage(fileName)
# First crop the image: but only crop out the bottom.
# It is useful to have all dimensions accurate to the screen
# because otherwise they will throw off the mouse moving and clicking.
# Cropping out the bottom does not change anything in terms of the mouse.
unnec_top_distance = 130
unnec_bottom_distance = 40
margin = 50
submat = cv.GetSubRect(mat, (0,0,mat.width, mat.height - unnec_bottom_distance))
subImg = cv.CreateImageHeader((submat.width, submat.height), 8, 3)
cv.SetData(subImg, submat.tostring())
gray = cv.CreateImage((submat.width, submat.height), 8, 1)
cv.CvtColor(submat, gray, cv.CV_RGB2GRAY)
thresh = 250
max_value = 255
cv.Threshold(gray, gray, thresh, max_value, cv.CV_THRESH_BINARY)
cv.Not(gray,gray)
#cv.ShowImage("sub", submat)
#cv.WaitKey(0)
storage = cv.CreateMemStorage (0)
cpy = cv.CloneImage(gray)
contours = cv.FindContours( cpy, storage, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, (0,0) );
#contours = cv.ApproxPoly(contours, cv.CreateMemStorage(), cv.CV_POLY_APPROX_DP, 3, 1)
bboxes = []
if contours:
while(contours):
area = cv.ContourArea(contours)
# It turns out that all the cards are about 44000 in area...
# It would definitely be nice to have a better way to do this:
# ie, find the size of the card programmatically and use it then
if(area > 44000 and area < submat.width*submat.height*2/3):
bb = cv.BoundingRect(contours)
bboxes.append(bb)
contours = contours.h_next()
#drawBoundingBoxes(bboxes, submat)
# cards is a dictionary of the form:
# (x, y) : card
cards = {}
for box in bboxes:
card = cv.GetSubRect(subImg, box)
#cv.ShowImage("card", card)
#cv.WaitKey(0)
cards[(box[0], box[1])] = card
return cards
def drawBoundingBoxes(bb, img):
for b in bb:
x = b[0]
y = b[1]
width = b[2]
height = b[3]
cv.Rectangle(img, (x,y), (x+width, y+height), (0,255,0,0))
cv.ShowImage("bb", img)
cv.WaitKey(0)
def drawSquares(listWithPoints,img):
for l in listWithPoints:
for p in range(len(l)-1):
cv.Line(img, l[p], l[p+1], (0,0,255,0),2)
cv.Line(img, l[-1], l[0], (0,0,255,0),2)
#cv.ShowImage("sub", img)
#cv.WaitKey(0)
def contourToPointList(contour):
plist = []
for (x,y) in contour:
plist.append((x,y))
return plist
if __name__ == '__main__':
cards = extractCards('CardImages/4_heart.jpg')
print cards
#c = cards[cards.keys()[0]]
#print c
Is it possible to add the package manually through the folders?
Would it work if I put the package here:
C:\Users\User1\AppData\Local\Programs\Python\Python38\include
From the Python documentation:
Replace import gtk.gdk with:
import gi
gi.require_version("Gtk", "insert your gtk version")
from gi.repository import Gtk
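That is the PyGObject (GTK 3) API, so the screenshot code has to change too: the old gtk.gdk calls have no direct equivalent. A rough, untested sketch of the screen-capture part using Gdk (the "3.0" version string and the row-slicing logic are assumptions you may need to adjust):

```python
import gi
gi.require_version("Gdk", "3.0")
from gi.repository import Gdk
import numpy as np

def take_screen_capture():
    window = Gdk.get_default_root_window()
    w, h = window.get_width(), window.get_height()
    pixbuf = Gdk.pixbuf_get_from_window(window, 0, 0, w, h)
    channels = pixbuf.get_n_channels()
    stride = pixbuf.get_rowstride()
    data = np.frombuffer(pixbuf.get_pixels(), dtype=np.uint8)
    # rows can be padded, so slice each row to its real width before stacking
    rows = [data[i * stride:i * stride + w * channels] for i in range(h)]
    img = np.stack(rows).reshape(h, w, channels)[:, :, :3]
    return img[:, :, ::-1]  # RGB -> BGR for OpenCV
```

Also note that the rest of the old script uses the legacy cv module, which does not exist on Python 3 either; those calls would need to be ported to cv2.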

Creating trail from YOLO v2 + deep_sort object tracking with tensorflow

I'm trying to create trails and a map of the trails of all IDs, like in this video: https://www.youtube.com/watch?v=tq0BgncuMhs
So far I haven't been able to. I'm currently using this repo from bendidi, https://github.com/bendidi/Tracking-with-darkflow, which I modified to also show the trails.
I did try using cv2.line, extracting the points from track.to_tlbr(), but right now the result looks like this:
Here's the code that I modified to get the current result:
darkflow/darkflow/net/yolov2/predict.py
from collections import deque
import numpy as np
import math
import cv2
import os
import json
#from scipy.special import expit
#from utils.box import BoundBox, box_iou, prob_compare
#from utils.box import prob_compare2, box_intersection
from ...utils.box import BoundBox
from ...cython_utils.cy_yolo2_findboxes import box_constructor
ds = True
try :
from deep_sort.application_util import preprocessing as prep
from deep_sort.application_util import visualization
from deep_sort.deep_sort.detection import Detection
except :
ds = False
def expit(x):
return 1. / (1. + np.exp(-x))
def _softmax(x):
e_x = np.exp(x - np.max(x))
out = e_x / e_x.sum()
return out
def findboxes(self, net_out):
# meta
meta = self.meta
boxes = list()
boxes=box_constructor(meta,net_out)
return boxes
def extract_boxes(self,new_im):
cont = []
new_im=new_im.astype(np.uint8)
ret, thresh=cv2.threshold(new_im, 127, 255, 0)
p, contours, hierarchy=cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
cnt=contours[i]
x, y, w, h=cv2.boundingRect(cnt)
if w*h > 30**2 and ((w < new_im.shape[0] and h <= new_im.shape[1]) or (w <= new_im.shape[0] and h < new_im.shape[1])):
if self.FLAGS.tracker == "sort":
cont.append([x, y, x+w, y+h])
else : cont.append([x, y, w, h])
return cont
def postprocess(self,net_out, im,frame_id = 0,csv_file=None,csv=None,mask = None,encoder=None,tracker=None):
"""
Takes net output, draw net_out, save to disk
"""
boxes = self.findboxes(net_out)
# meta
meta = self.meta
nms_max_overlap = 0.1
threshold = meta['thresh']
colors = meta['colors']
labels = meta['labels']
if type(im) is not np.ndarray:
imgcv = cv2.imread(im)
else: imgcv = im
h, w, _ = imgcv.shape
thick = int((h + w) // 300)
resultsForJSON = []
if not self.FLAGS.track :
for b in boxes:
boxResults = self.process_box(b, h, w, threshold)
if boxResults is None:
continue
left, right, top, bot, mess, max_indx, confidence = boxResults
if self.FLAGS.json:
resultsForJSON.append({"label": mess, "confidence": float('%.2f' % confidence), "topleft": {"x": left, "y": top}, "bottomright": {"x": right, "y": bot}})
continue
if self.FLAGS.display or self.FLAGS.saveVideo:
cv2.rectangle(imgcv,
(left, top), (right, bot),
colors[max_indx], thick)
cv2.putText(imgcv, mess, (left, top - 12),
0, 1e-3 * h, colors[max_indx],thick//3)
else :
if not ds :
print("ERROR : deep sort or sort submodules not found for tracking please run :")
print("\tgit submodule update --init --recursive")
print("ENDING")
exit(1)
detections = []
scores = []
lines = deque(maxlen=64)
for b in boxes:
boxResults = self.process_box(b, h, w, threshold)
if boxResults is None:
continue
left, right, top, bot, mess, max_indx, confidence = boxResults
if mess not in self.FLAGS.trackObj :
continue
if self.FLAGS.tracker == "deep_sort":
detections.append(np.array([left,top,right-left,bot-top]).astype(np.float64))
scores.append(confidence)
elif self.FLAGS.tracker == "sort":
detections.append(np.array([left,top,right,bot]).astype(np.float64))
if len(detections) < 3 and self.FLAGS.BK_MOG:
detections = detections + extract_boxes(self,mask)
detections = np.array(detections)
if detections.shape[0] == 0 :
return imgcv
if self.FLAGS.tracker == "deep_sort":
scores = np.array(scores)
features = encoder(imgcv, detections.copy())
detections = [
Detection(bbox, score, feature) for bbox,score, feature in
zip(detections,scores, features)]
# Run non-maxima suppression.
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
indices = prep.non_max_suppression(boxes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
tracker.predict()
tracker.update(detections)
trackers = tracker.tracks
elif self.FLAGS.tracker == "sort":
trackers = tracker.update(detections)
for track in trackers:
if self.FLAGS.tracker == "deep_sort":
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
center = (int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) // 2)), (int(bbox[1]) + ((int(bbox[3]) - int(bbox[1])) // 2)) # X + Width / 2, Y + Height / 2
lines.appendleft(center)
id_num = str(track.track_id)
elif self.FLAGS.tracker == "sort":
bbox = [int(track[0]),int(track[1]),int(track[2]),int(track[3])]
id_num = str(int(track[4]))
if self.FLAGS.csv:
csv.writerow([frame_id,id_num,int(bbox[0]),int(bbox[1]),int(bbox[2])-int(bbox[0]),int(bbox[3])-int(bbox[1])])
csv_file.flush()
if self.FLAGS.display or self.FLAGS.saveVideo:
cv2.rectangle(imgcv, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
(255,255,255), thick//3)
cv2.putText(imgcv, id_num,(int(bbox[0]), int(bbox[1]) - 12),0, 1e-3 * h, (255,255,255),thick//6)
for i in range(1, len(lines)):
cv2.line(imgcv, lines[i - 1], lines[i], (255,255,255), thick//3)
return imgcv
Or just the code that I added:
...
lines = deque(maxlen=64)
...
center = (int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) // 2)), (int(bbox[1]) + ((int(bbox[3]) - int(bbox[1])) // 2)) # X + Width / 2, Y + Height / 2
lines.appendleft(center)
...
for i in range(1, len(lines)):
cv2.line(imgcv, lines[i - 1], lines[i], (255,255,255), thick//3)
Could someone help me out with this? Or should I do something with the data first instead of plugging it straight into cv2.line? If you have any suggestions for external software rather than Python, that's also welcome (I have frame_id, track_id, x, y, w, h data).
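One thing that stands out in the snippet: lines = deque(maxlen=64) is created inside postprocess(), so it is rebuilt on every frame and shared by every track ID, which is why no per-ID trail ever accumulates. A hedged sketch of keeping one deque per track ID across frames (trails and update_trail are names added here for illustration, not part of darkflow):

```python
from collections import defaultdict, deque

import cv2

# one trail per track ID, kept across frames (module-level for illustration)
trails = defaultdict(lambda: deque(maxlen=64))

def update_trail(imgcv, id_num, bbox, thick):
    """Append this box's centre to its ID's trail and draw the polyline."""
    center = ((int(bbox[0]) + int(bbox[2])) // 2,
              (int(bbox[1]) + int(bbox[3])) // 2)
    trail = trails[id_num]
    trail.appendleft(center)
    for i in range(1, len(trail)):
        cv2.line(imgcv, trail[i - 1], trail[i], (255, 255, 255), max(thick // 3, 1))
```

Calling update_trail(imgcv, id_num, bbox, thick) inside the for track in trackers: loop, in place of the current lines logic, should give each ID its own persistent trail.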

Python Script for Art Museum Installation intermittently locks up, Removing Thermal Camera Sensor read function seems to work?

I have a Python script for an installation in an art museum that is meant to run continuously, playing sounds, driving an LED matrix, and sensing people via OpenCV and a thermal camera.
Each part of the script works, and they all work together, but the script randomly locks up and I need to restart it. I want the script to not lock up so no one has to reset it during the exhibition.
I have the code running on a spare Raspberry Pi and a spare LED matrix and it continues to cycle through fine. The only changes that I made were commenting out the start of a thread to check the IR sensor and a call to a function to get the max temp from the sensor.
To be clear, if I leave these bits of code in, the script runs fine 1-3 or sometimes 10 times, but it seems to lock up in the first "state", when IRcount = 0.
I am stuck. Any help is greatly appreciated.
```
#!/usr/bin/python
import glob
import queue
import sys
import pygame
import cv2
import random
import math
import colorsys
import time
from rpi_ws281x import *
from PIL import Image
import numpy as np
import threading
global thresh
sys.path.insert(0, "/home/pi/irpython/build/lib.linux-armv7l-3.5")
import MLX90640 as mlx
currentTime = int(round(time.time() * 1000))
InflateWait = int(round(time.time() * 1000))
minTime = 6000
maxTime = 12000
lineHeight1 = 0
lineHue1 = float(random.randrange(1,360))/255
# IR Functions
# Function to just grab the Max Temp detected. If over threshold then start
# the sequence, if not stay in state 0
def maxTemp():
mlx.setup(8) #set frame rate of MLX90640
f = mlx.get_frame()
mlx.cleanup()
# get max and min temps from sensor
# v_min = min(f)
v_max = int(max(f))
return v_max
# Function to detect individual people's heat blob group of pixels
# run in a thread only at the end of the script
def irCounter():
img = Image.new( 'L', (24,32), "black") # make IR image
mlx.setup(8) #set frame rate of MLX90640
f = mlx.get_frame()
mlx.cleanup()
for x in range(24):
row = []
for y in range(32):
val = f[32 * (23-x) + y]
row.append(val)
img.putpixel((x, y), (int(val)))
# convert raw temp data to numpy array
imgIR = np.array(img)
# increase the 24x32 px image to 240x320px for ease of seeing
bigIR = cv2.resize(depth_uint8, dsize=(240,320), interpolation=cv2.INTER_CUBIC)
# Use a bilateral filter to blur while hopefully retaining edges
brightBlurIR = cv2.bilateralFilter(bigIR,9,150,150)
# Threshold the image to black and white
retval, threshIR = cv2.threshold(brightBlurIR, 26, 255, cv2.THRESH_BINARY)
# Define kernal for erosion and dilation and closing operations
kernel = np.ones((5,5),np.uint8)
erosionIR = cv2.erode(threshIR,kernel,iterations = 1)
dilationIR = cv2.dilate(erosionIR,kernel,iterations = 1)
closingIR = cv2.morphologyEx(dilationIR, cv2.MORPH_CLOSE, kernel)
# Detect countours
contours, hierarchy = cv2.findContours(closingIR, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# Get the number of contours ( contours count when touching edge of image while blobs don't)
ncontours = str(len(contours))
# Show images in window during testing
cv2.imshow("Combined", closingIR)
cv2.waitKey(1)
#initialize pygame
pygame.init()
pygame.mixer.init()
pygame.mixer.set_num_channels(30)
print("pygame initialized")
# assign sound chennels for pygame
channel0 = pygame.mixer.Channel(0)
channel1 = pygame.mixer.Channel(1)
channel2 = pygame.mixer.Channel(2)
channel3 = pygame.mixer.Channel(3)
channel4 = pygame.mixer.Channel(4)
channel5 = pygame.mixer.Channel(5)
channel6 = pygame.mixer.Channel(6)
channel7 = pygame.mixer.Channel(7)
channel8 = pygame.mixer.Channel(8)
channel9 = pygame.mixer.Channel(9)
channel10 = pygame.mixer.Channel(10)
channel11 = pygame.mixer.Channel(11)
channel12 = pygame.mixer.Channel(12)
channel13 = pygame.mixer.Channel(13)
channel14 = pygame.mixer.Channel(14)
channel15 = pygame.mixer.Channel(15)
channel16 = pygame.mixer.Channel(16)
channel17 = pygame.mixer.Channel(17)
channel18 = pygame.mixer.Channel(18)
channel19 = pygame.mixer.Channel(19)
channel20 = pygame.mixer.Channel(20)
channel21 = pygame.mixer.Channel(21)
channel22 = pygame.mixer.Channel(22)
channel23 = pygame.mixer.Channel(23)
channel24 = pygame.mixer.Channel(24)
channel25 = pygame.mixer.Channel(25)
channel26 = pygame.mixer.Channel(26)
channel27 = pygame.mixer.Channel(27)
channel28 = pygame.mixer.Channel(28)
# load soundfiles
echoballs = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/echo balls FIX.ogg")
organbounce = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/ORGAN BOUNCE fix.ogg")
jar = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/jar whoop fix.ogg")
garland = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/GARLAND_fix.ogg")
dribble= pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/dribble.ogg")
cowbell = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/cowbell fix.ogg")
clackyballs = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/clacky balls boucne.ogg")
burpees = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/burpees_fix.ogg")
brokensynth = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/broken synth bounce.ogg")
woolballs = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/wool balls in jar FIX.ogg")
wiimoye = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/wiimoye_fix.ogg")
warpyorgan = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/warpy organ bounce#.2.ogg")
vibrate = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/vibrate fix.ogg")
turtlesbounce = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/turtles fix.ogg")
timer = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/timer.ogg")
tape = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/tape fix.ogg")
tambourine = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/TAMBOURINE.ogg")
springybounce = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/springy bounce.ogg")
smash3 = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/smash fix.ogg")
bristle2 = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/BRISTLE FIX.ogg")
blackkeys = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/black keys FIX.ogg")
zipper = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/zipper.ogg")
presatisfactionsweep = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/pre-satisfaction sweep .ogg")
satisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/SATISFACTION.ogg")
altsatisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/alt_satisfaction_trimmed.ogg")
solosatisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/SOLO_SATISFACTION.ogg")
print("sound files loaded")
# initializing sounds list
soundsList = [echoballs, organbounce, zipper, jar, garland, dribble, cowbell, clackyballs, burpees, brokensynth, woolballs,
wiimoye, warpyorgan, vibrate, turtlesbounce, timer, tambourine, springybounce, smash3, bristle2, blackkeys, zipper ]
IRcount = 0 # define initial state for main loop
pygame.display.set_mode((32, 8))
print("pygame dispaly open")
# LED strip configuration:
LED_COUNT = 256 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 100 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
# Define functions which animate LEDs in various ways.
# PNG to LED function used to shuffle througfh folders of numbered PNGs exported
# from animations created
def pngToLED (strip, pngfile):
RGBimage = Image.open(pngfile).convert('RGB')
np_image = np.array(RGBimage)
colours = [Color(x[0],x[1],x[2]) for rows in np_image for x in rows]
colours2d = np.reshape(colours, (32, 8), order='F')
colours2d[1::2, :] = colours2d[1::2, ::-1]
pic = colours2d.flatten('C')
for i in range( 0, strip.numPixels(), 1 ):# iterate over all LEDs - range(start_value, end_value, step)
strip.setPixelColor(i, int(pic[ i ]))
strip.show()
def colorWipe(strip, color,wait_ms=10):
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(1)
def theaterChase(strip, color, wait_ms, iterations=10):
"""Movie theater light style chaser animation."""
for j in range(iterations):
for q in range(3):
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, color)
strip.show()
time.sleep(wait_ms/1000.0)
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, 0)
def wheel(pos):
"""Generate rainbow colors across 0-255 positions."""
if pos < 85:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return Color(0, pos * 3, 255 - pos * 3)
def rainbow(strip, wait_ms=20, iterations=1):
"""Draw rainbow that fades across all pixels at once."""
for j in range(256*iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.show()
time.sleep(wait_ms/1000.0)
def rainbowCycle(strip, wait_ms=20, iterations=5):
"""Draw rainbow that uniformly distributes itself across all pixels."""
for j in range(256*iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))
strip.show()
time.sleep(wait_ms/1000.0)
def theaterChaseRainbow(strip, wait_ms=90):
"""Rainbow movie theater light style chaser animation."""
for j in range(256):
for q in range(3):
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, wheel((i+j) % 255))
strip.show()
time.sleep(wait_ms/1000.0)
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, 0)
# Plasma LED Function from Root 42
def plasmaLED (plasmaTime):
h = 8
w = 32
out = [ Color( 0, 0, 0 ) for x in range( h * w ) ]
plasmaBright = 100.0
for x in range( h ):
for y in range( w ):
hue = (4.0 + math.sin( plasmaTime + x ) + math.sin( plasmaTime + y / 4.5 ) \
+ math.sin( x + y + plasmaTime ) + math.sin( math.sqrt( ( x + plasmaTime ) ** 2.0 + ( y + 1.5 * plasmaTime ) ** 2.0 ) / 4.0 ))/8
hsv = colorsys.hsv_to_rgb( hue , 1, 1 )
if y % 2 == 0: #even
out[ x + (h * y)] = Color( *[ int( round( c * plasmaBright ) ) for c in hsv ] )
else: #odd
out[ (y * h) + (h -1 -x) ] = Color( *[ int( round( c * plasmaBright ) ) for c in hsv ] )
for i in range( 0, strip.numPixels(), 1 ):# iterate over all LEDs - range(start_value, end_value, step)
strip.setPixelColor(i, out[ i ]) # set pixel to color in picture
strip.show()
# variables for plasma
plasmaTime = 5.0 # time
plasmaSpeed = 0.05 # speed of time
# thread for IRcounter function
class TempTask:
def __init__(self):
self.ir_temp = 0
self.lock = threading.Lock() #control concurrent access for safe multi thread access
self.thread = threading.Thread(target=self.update_temp)
def update_temp(self):
while True:
with self.lock:
self.ir_temp = irCounter()
time.sleep(0.1)
def start(self):
self.thread.start()
# Millis timer count function
def CheckTime( lastTime, wait):
if currentTime - lastTime >= wait:
lastTime += wait
return True
return False
# Main program logic follows:
if __name__ == '__main__':
# not currently starting the thread because program is locking up without it
# want to figure out initial problem first
#start thread
#task = TempTask()
#task.start()
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
# Intialize the library (must be called once before other functions).
strip.begin()
print ('Press Ctrl-C to quit.')
try:
while True:
currentTime = int(round(time.time() * 1000))
if IRcount == 0:
#random solid color
colorWipe(strip, Color(random.randint(60,255), random.randint(60,255), random.randint(60,255)))
# use random.sample() to shuffle sounds list
shuffledSounds = random.sample(soundsList, len(soundsList))
if pygame.mixer.Channel(0).get_busy() == False: channel0.play(shuffledSounds[0],loops = -1)
thresh = 0
'''
# the threshold check below is the only thing I have taken out of
# Program on my test Raspberry Pi. It seems to not lock up without it
# not sure why this would be a problem.
thresh = int(maxTemp())
print (thresh)
if thresh >= 27:
InflateWait = int(round(time.time() * 1000))
print (thresh)
IRcount = 1
print("Threshold Temp Detected: Begin Sound Sequence")
else:
IRcount = 0
'''
if CheckTime(InflateWait,random.randint(minTime, maxTime)):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 1:
LEDimages = glob.glob("/home/pi/ruff-wavs/Crystal_Mirror/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(1).get_busy() == False:
channel1.play(shuffledSounds[1],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 2:
LEDimages = glob.glob("/home/pi/ruff-wavs/Mercury_Loop/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(2).get_busy() == False:
channel2.play(shuffledSounds[2],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 3:
LEDimages = glob.glob("/home/pi/ruff-wavs/Pink_Lava/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(3).get_busy() == False:
channel3.play(shuffledSounds[3],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 4:
LEDimages = glob.glob("/home/pi/ruff-wavs/Horiz_Mosaic/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(4).get_busy() == False:
channel4.play(shuffledSounds[4],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 5:
plasmaLED()
plasmaTime = plasmaTime + plasmaSpeed # increment plasma time
if pygame.mixer.Channel(5).get_busy() == False:
channel5.play(shuffledSounds[5],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 6:
LEDimages = glob.glob("/home/pi/ruff-wavs/Radio_Loop/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(6).get_busy() == False:
channel6.play(shuffledSounds[6],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 7:
LEDimages = glob.glob("/home/pi/ruff-wavs/Star_Loop/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(7).get_busy() == False:
channel7.play(shuffledSounds[7],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
elif IRcount == 14:
plasmaLED()
plasmaTime = plasmaTime + plasmaSpeed # increment plasma time
if pygame.mixer.Channel(14).get_busy() == False:
channel14.play(shuffledSounds[14],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
print (thresh)
elif IRcount == 15:
plasmaLED()
plasmaTime = plasmaTime + plasmaSpeed # increment plasma time
if pygame.mixer.Channel(15).get_busy() == False:
channel15.play(shuffledSounds[15],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 16:
# random color theater chase increment random ms to speed up with sounds
theaterChase(strip, Color(random.randint(1,255), random.randint(1,255), random.randint(1,255)), random.randint(40,50))
pygame.mixer.fadeout(45000)
if pygame.mixer.Channel(22).get_busy() == False:
channel22.play(presatisfactionsweep)
IRcount = 17
print(IRcount)
print("sweep end start")
elif IRcount == 18:
# random color theater chase increment random ms to speed up with sounds
theaterChase(strip, Color(random.randint(1,255), random.randint(1,255), random.randint(1,255)), random.randint(30,40))
if pygame.mixer.Channel(22).get_busy() == False:
pygame.mixer.stop()
channel23.play(satisfaction)
IRcount = 19
print(IRcount)
print("Play Satisfaction Sount")
elif IRcount == 19:
rainbowCycle(strip, 5)
if pygame.mixer.Channel(23).get_busy() == False: IRcount = 0
except KeyboardInterrupt:
colorWipe(strip, Color(0,0,0), 1)
pygame.mixer.stop()
pygame.quit()
```
Update 1 - Suspected Function(s)
When I left the script running overnight and came to the exhibit in the morning, it would be stuck in the first state, IRcount = 0. The only things that happen in that state are the maxTemp() call to get the max temperature and the LED color-wipe function to cycle colors.
When I came in in the morning it would be stuck, playing a single sound from pygame, as it should, but it would not be cycling colors. I removed the maxTemp() call from my test Pi and it has been working fine.
def maxTemp():
    mlx.setup(8)  # set frame rate of MLX90640
    f = mlx.get_frame()
    mlx.cleanup()
    # get max and min temps from sensor
    # v_min = min(f)
    v_max = int(max(f))
    return v_max
Update # 2
I thought that the thread might be the problem, so I commented out the thread start call. That is why I made the simpler maxTemp() function, to see if that would work better than the thread. So when I was using maxTemp(), the thread wasn't being started.
I don't understand threads very well. Is it possible to have the max temp variable update continuously and have the simple OpenCV numPy manipulations running continuously? That would be ideal. When I originally added the thread it seemed to stop after a few cycles.
I do not have a join on the thread. I know threads don't "restart" but do I need to call it again as the state machine starts again?
# not currently starting the thread because program is locking up without it
# want to figure out initial problem first
#start thread
#task = TempTask()
#task.start()
Update #3
I uploaded new code that eliminated the duplicate functions. Everything is handled in the TempTask thread now, and that seems to work fine. I also added the GitHub suggestion of re-polling the thermal sensor when a frame is a duplicate, but that case has not occurred.
I left the program running overnight, and when I came in in the morning it was locked up. The SD card is set to read-only mode. I SSH'd into the Pi. I have my auto-start Python script in /etc/profile.
It seems to start the script each time I log in over SSH. When I logged in this morning to see if the Pi was still up, it gave a "no space left on device" error.
```
Traceback (most recent call last):
File "/home/pi/ruff-wavs/shufflewavdemo.py", line 210, in <module>
altsatisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/alt_satisfaction_trimmed.ogg")
pygame.error: Unable to open file '/home/pi/ruff-wavs/sounds/alt_satisfaction_trimmed.ogg'
OSError: [Errno 28] No space left on device
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.5/dist-packages/virtualenvwrapper/hook_loader.py", line 223, in <module>
main()
File "/usr/local/lib/python3.5/dist-packages/virtualenvwrapper/hook_loader.py", line 145, in main
output.close()
OSError: [Errno 28] No space left on device
-bash: cannot create temp file for here-document: No space left on device
Could that be because it is in read-only mode?
I used this script to switch from writable to read-only and back:
[https://github.com/JasperE84/root-ro][1]
[1]: https://github.com/JasperE84/root-ro
I suspect the issue is that you're accessing the mlx device both in the main thread, via maxTemp(), and in the irCounter() thread. The fact that it works when you take out the maxTemp call, and that this call happens in the if IRcount == 0: state, supports this.
I would move the maxTemp functionality into the irCounter thread, so that the device is accessed from only a single thread, and update a global variable (protected by a lock) with the maxTemp result if you need to keep that functionality.
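A rough sketch of that idea, reusing the existing TempTask lock (latest_max_temp and get_max_temp are names added here for illustration, not in the original script):

```python
class TempTask:
    def __init__(self):
        self.latest_max_temp = 0              # shared value read by the main loop
        self.lock = threading.Lock()
        self.thread = threading.Thread(target=self.update_temp, daemon=True)

    def update_temp(self):
        while True:
            mlx.setup(8)                      # only this thread touches the sensor
            f = mlx.get_frame()
            mlx.cleanup()
            with self.lock:
                self.latest_max_temp = int(max(f))
            # the blob-counting / OpenCV work from irCounter() can run here too
            time.sleep(0.1)

    def start(self):
        self.thread.start()

    def get_max_temp(self):
        with self.lock:
            return self.latest_max_temp
```

The main loop would then call task.get_max_temp() instead of maxTemp(), so the sensor is never read from two threads at once.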

Save video from OpenCV

I created space-detection code using grayscale and Gaussian blur, but now I don't know where to put the code that saves my OpenCV video.
I already tried putting the code on random lines, but the output file can't be played and is only 5.6 KB, even when I record for a very long time.
My code runs fine without the save feature, but I want to add video saving:
fourcc = open_cv.VideoWriter_fourcc(*'DIVX')
out = open_cv.VideoWriter('output.avi',fourcc, 20.0, (640,480))
This is my code, to which I want to add the save-video code from above:
import cv2 as open_cv
import numpy as np
import logging
from drawing_utils import draw_contours
from colors import COLOR_GREEN, COLOR_WHITE, COLOR_BLUE
class MotionDetector:
LAPLACIAN = 1.4
DETECT_DELAY = 1
def __init__(self, video, coordinates, start_frame):
self.video = 0
self.coordinates_data = coordinates
self.start_frame = start_frame
self.contours = []
self.bounds = []
self.mask = []
def detect_motion(self):
capture = open_cv.VideoCapture(self.video)
capture.set(open_cv.CAP_PROP_POS_FRAMES, self.start_frame)
coordinates_data = self.coordinates_data
logging.debug("coordinates data: %s", coordinates_data)
for p in coordinates_data:
coordinates = self._coordinates(p)
logging.debug("coordinates: %s", coordinates)
rect = open_cv.boundingRect(coordinates)
logging.debug("rect: %s", rect)
new_coordinates = coordinates.copy()
new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
logging.debug("new_coordinates: %s", new_coordinates)
self.contours.append(coordinates)
self.bounds.append(rect)
mask = open_cv.drawContours(
np.zeros((rect[3], rect[2]), dtype=np.uint8),
[new_coordinates],
contourIdx=-1,
color=255,
thickness=-1,
lineType=open_cv.LINE_8)
mask = mask == 255
self.mask.append(mask)
logging.debug("mask: %s", self.mask)
statuses = [False] * len(coordinates_data)
times = [None] * len(coordinates_data)
while capture.isOpened():
result, frame = capture.read()
if frame is None:
break
if not result:
raise CaptureReadError("Error reading video capture on frame %s" % str(frame))
blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
new_frame = frame.copy()
logging.debug("new_frame: %s", new_frame)
position_in_seconds = capture.get(open_cv.CAP_PROP_POS_MSEC) / 1000.0
for index, c in enumerate(coordinates_data):
status = self.__apply(grayed, index, c)
if times[index] is not None and self.same_status(statuses, index, status):
times[index] = None
continue
if times[index] is not None and self.status_changed(statuses, index, status):
if position_in_seconds - times[index] >= MotionDetector.DETECT_DELAY:
statuses[index] = status
times[index] = None
continue
if times[index] is None and self.status_changed(statuses, index, status):
times[index] = position_in_seconds
for index, p in enumerate(coordinates_data):
coordinates = self._coordinates(p)
color = COLOR_GREEN if statuses[index] else COLOR_BLUE
draw_contours(new_frame, coordinates, str(p["id"] + 1), COLOR_WHITE, color)
open_cv.imshow(str(self.video), new_frame)
k = open_cv.waitKey(1)
if k == ord("q"):
break
capture.release()
open_cv.destroyAllWindows()
def __apply(self, grayed, index, p):
coordinates = self._coordinates(p)
logging.debug("points: %s", coordinates)
rect = self.bounds[index]
logging.debug("rect: %s", rect)
roi_gray = grayed[rect[1]:(rect[1] + rect[3]), rect[0]:(rect[0] + rect[2])]
laplacian = open_cv.Laplacian(roi_gray, open_cv.CV_64F)
logging.debug("laplacian: %s", laplacian)
coordinates[:, 0] = coordinates[:, 0] - rect[0]
coordinates[:, 1] = coordinates[:, 1] - rect[1]
status = np.mean(np.abs(laplacian * self.mask[index])) < MotionDetector.LAPLACIAN
logging.debug("status: %s", status)
return status
@staticmethod
def _coordinates(p):
return np.array(p["coordinates"])
@staticmethod
def same_status(coordinates_status, index, status):
return status == coordinates_status[index]
@staticmethod
def status_changed(coordinates_status, index, status):
return status != coordinates_status[index]
class CaptureReadError(Exception):
pass
Create the file for the video to go in. You can think of this file like a book, but a book with no pages. This code is used to create the file:
fourcc = open_cv.VideoWriter_fourcc(*'DIVX')
out = open_cv.VideoWriter('output.avi',fourcc, 20.0, (640,480))
Not all codecs work on all systems. I use Ubuntu, and this is the code I use to create a video file:
out = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 30,(w,h))
If you try to play this video, nothing will happen. There are no frames in the video, so there is nothing to play (like a book with no pages). After you process each frame, the frame needs to be written to the video (like putting a page in a book):
out.write(new_frame)
You should do this around .imshow():
out.write(new_frame)
open_cv.imshow(str(self.video), new_frame)
k = open_cv.waitKey(1)
if k == ord("q"):
break
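One more detail worth checking: the frame size passed to VideoWriter must match the size of the frames you actually write (new_frame here), otherwise OpenCV silently drops them and you end up with a tiny, unplayable file. And when the loop ends, release the writer so the file gets finalized:

```python
capture.release()
out.release()  # finalize the output file so it can be played
open_cv.destroyAllWindows()
```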
