How can I get bytes data using TCPSocket? - python

I want to use a TCP socket to send NV21 images from a smartphone to a server and convert them to RGB. I use Qt for Android to build the smartphone application and Python on the server. I send the data as a QByteArray; the picture is 1920*1080, so the data size is 3110400 bytes. However, I only receive 335620 bytes at the server.
I want to know how I can send the bytes correctly.
//QT client
void came::processFrame(const QVideoFrame& frame)
{
    QVideoFrame f(frame);
    QImage img;
    f.map(QAbstractVideoBuffer::ReadOnly);
    output.append((char*)f.bits(), f.mappedBytes());
    tcpSocket->write(output, output.size());
}
#Python server
def recvfromTcpSocket(sock, blockSize=4096, accept_addr=None):
    ''' a function to listen on a TCP socket
    and receive bytes until the buffer has no more. '''
    d = ''
    while True:
        print("Got tcp connection: " + str(tcpServerAddr))
        if accept_addr is None:
            break
        elif accept_addr == tcpServerAddr[0]:
            break
        else:
            sock.close()
            continue
    while True:
        block = sock.recv(blockSize)
        d += block
        if len(block) < blockSize and len(block) != 0:
            print("TCP recv done, all size: " + str(len(d)))
            break
    return d
if __name__ == '__main__':
    data = recvfromTcpSocket(tcpServerSocket)
    bin_y = data[0:rows * cols]
    num_y = np.fromstring(bin_y, np.uint8)
    img_y = np.reshape(num_y, (rows, cols))
    bin_u = data[rows * cols::2]
    num_u = np.fromstring(bin_u, np.uint8)
    img_u = np.reshape(num_u, (rows / 2, cols / 2))
    bin_v = data[rows * cols + 1::2]
    num_v = np.fromstring(bin_v, np.uint8)
    img_v = np.reshape(num_v, (rows / 2, cols / 2))
    enlarge_u = cv2.resize(img_u, dsize=(cols, rows), interpolation=cv2.INTER_CUBIC)
    enlarge_v = cv2.resize(img_v, dsize=(cols, rows), interpolation=cv2.INTER_CUBIC)
    dst = cv2.merge([img_y, enlarge_u, enlarge_v])
    bgr = cv2.cvtColor(dst, cv2.COLOR_YUV2BGR)
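The core problem here is that TCP is a stream protocol: a single recv() is not guaranteed to return a whole frame, and a loop that stops on the first short read (len(block) < blockSize) truncates the image as soon as two chunks arrive with a gap between them. A minimal sketch of a receiver that loops until it has the full, known frame size (assuming both sides agree on the 1920*1080 NV21 frame size of 3110400 bytes; the helper name is mine):

```python
FRAME_SIZE = 1920 * 1080 * 3 // 2  # NV21: Y plane + interleaved UV = 3110400 bytes

def recv_exact(sock, nbytes, blockSize=4096):
    """Keep calling recv() until exactly nbytes have arrived."""
    chunks = []
    remaining = nbytes
    while remaining > 0:
        block = sock.recv(min(blockSize, remaining))
        if not block:  # peer closed the connection early
            raise ConnectionError("socket closed with %d bytes still missing" % remaining)
        chunks.append(block)
        remaining -= len(block)
    return b''.join(chunks)

# usage sketch:
# data = recv_exact(conn, FRAME_SIZE)
```

On the Qt side it is also worth checking that `output` is cleared per frame: since it is appended to on every processFrame() call, it appears to grow across frames, which would also throw the sizes off.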

Related

How to read data Asynchronously in C# from python script

I am trying to read a Python script's output continuously from WPF C# (in Python I am tracking a colored ball and printing its x, y and radius).
I tried socket communication:
using (var requester = new ZSocket(ZSocketType.REQ))
{
    // Connect
    requester.Connect("tcp://127.0.0.1:5555");
    for (; ; )
    {
        string requestText = "Hello";
        Console.Write("Sending {0}...", requestText);
        // Send
        requester.Send(new ZFrame(requestText));
        // Receive
        using (ZFrame reply = requester.ReceiveFrame())
        {
            Console.WriteLine(" Received: {0} {1}!", requestText, reply.ReadString());
        }
    }
}
I tried calling Python from a C# Process:
private void Window_Loaded(object sender, RoutedEventArgs e)
{
    string python = @"C:\Desktop\test\venv\Scripts\python.exe";
    // python app to call
    string myPythonApp = @"C:\Users\Desktop\test\main.py";
    // Create new process start info
    ProcessStartInfo myProcessStartInfo = new ProcessStartInfo(python);
    // make sure we can read the output from stdout
    myProcessStartInfo.UseShellExecute = false;
    myProcessStartInfo.RedirectStandardOutput = true;
    myProcessStartInfo.Arguments = myPythonApp;
    Process myProcess = new Process();
    // assign start information to the process
    myProcess.StartInfo = myProcessStartInfo;
    // start the process
    myProcess.OutputDataReceived += MyProcess_OutputDataReceived;
    myProcess.Start();
    myProcess.BeginOutputReadLine();
    // Read the standard output of the app we called.
    // in order to avoid deadlock we will read output first
    // and then wait for process terminate:
    //StreamReader myStreamReader = myProcess.StandardOutput;
    //string myString = myStreamReader.ReadLine();
    //await myStreamReader.ReadAsync(result, 0, (int)myStreamReader.BaseStream.Length);
    // wait exit signal from the app we called and then close it.
    //myProcess.WaitForExit();
    //myProcess.Close();
    // write the output we got from python app
    //Console.WriteLine("Value received from script: " + myString);
}

private void MyProcess_OutputDataReceived(object sender, DataReceivedEventArgs e)
{
    Console.WriteLine("++++");
}
I also tried: Python writes data to a txt file, C# reads from that file:
Console.WriteLine("hi");
for (; ; )
{
if (File.Exists(textFile))
{
using (StreamReader file = new StreamReader(textFile))
{
int counter = 0;
string ln;
while ((ln = file.ReadLine()) != null)
{
Console.WriteLine(ln);
counter++;
}
file.Close();
}
}
In all the cases Python is holding the thread and not allowing C# to access it. Right now I am working on the 2nd method, i.e. getting the data from Process, but MyProcess_OutputDataReceived is not triggering.
import sys
from collections import deque
from PIL import Image, ImageOps, ImageDraw
import numpy as np
import argparse
import imutils
import cv2
import time
import pandas as pd
import matplotlib.pyplot as plt
#import zmq

#context = zmq.Context()
#socket = context.socket(zmq.REP)
#socket.bind("tcp://*:5555")

#object tracking def start
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
                help="max buffer size")
args = vars(ap.parse_args())
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])
Data_Features = ['x', 'y', 'time']
Data_Points = pd.DataFrame(data=None, columns=Data_Features, dtype=float)
start = time.time()
#object tracking def end

#file1 = open("myfile.txt","w")
#L = ["This is Delhi \n","This is Paris \n","This is London \n"]
#file1.write("Hello \n")
#file1.writelines(L)
#file1.close()

while True:
    # Wait for next request from client
    #message = socket.recv()
    #print("Received request: %s" % message)
    # Do some 'work'
    #time.sleep(1)
    # Send reply back to client
    #socket.send("World")

    #object track vids start
    (grabbed, frame) = camera.read()
    current_time = time.time() - start
    if args.get("video") and not grabbed:
        break
    frame = imutils.resize(frame, width=1600)
    # frame2 = imutils.resize
    #blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if (radius < 300) & (radius > 10):
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                       (0, 255, 255), 2)
            sys.stdout.write(str(x) + '\n')
            #print(str(x))
            #file1 = open("myfile.txt", "w")
            #file1.write(str(x))
            #file1.close()
            #time.sleep(1)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            # Save The Data Points
            Data_Points.loc[Data_Points.size / 3] = [x, y, current_time]
    pts.appendleft(center)
    for i in range(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
#object tracking vids end

h = 0.2
X0 = -3
Y0 = 20
time0 = 0
theta0 = 0.3
# Applying the correction terms to obtain actual experimental data
Data_Points['x'] = Data_Points['x'] - X0
Data_Points['y'] = Data_Points['y'] - Y0
Data_Points['time'] = Data_Points['time'] - time0
# Calculation of theta value
Data_Points['theta'] = 2 * np.arctan(
    Data_Points['y'] * 0.0000762 / h)  # the factor corresponds to pixel length in real life
Data_Points['theta'] = Data_Points['theta'] - theta0
# Creating the 'Theta' vs 'Time' plot
plt.plot(Data_Points['theta'], Data_Points['time'])
plt.xlabel('Theta')
plt.ylabel('Time')
# Export The Data Points As csv File and plot
Data_Points.to_csv('Data_Set.csv', sep=",")
plt.savefig('Time_vs_Theta_Graph.svg', transparent=True)
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
Please help me.
Thank you in advance.
Ranjith
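One thing worth checking (an assumption, since only fragments of the setup are shown): when Python's stdout is redirected to another process it is block-buffered rather than line-buffered, so OutputDataReceived may never fire until the buffer fills or the script exits. The script above writes with sys.stdout.write(str(x) + '\n') and never flushes. A minimal sketch of flushing each line so it reaches the C# reader immediately:

```python
import sys

x = 123.4  # illustrative value; in the real script this is the tracked ball's x
sys.stdout.write(str(x) + '\n')
sys.stdout.flush()  # force the line out of the block buffer to the redirected pipe
# equivalently: print(x, flush=True), or start the interpreter with "python -u"
```

With the flush in place, each tracked coordinate should arrive in MyProcess_OutputDataReceived as its own line.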

Array creation too slow

I am trying to create an image array from scratch.
I got the code running, but it takes around 30 seconds to run.
I feel it could be faster by using native NumPy functions.
How can I do this?
import cv2
import numpy as np
import time

volumes = np.random.randint(low=0, high=200, size=10000)
print(volumes)
image_heigh = 128
image_width = 256
image_channel = 3
show_img = False

def nomralized(data, data_min, data_max, maximum_value):
    nomamized_data = maximum_value * ((data - data_min) / (data_max - data_min))
    return nomamized_data

start_time = time.time()
for ii in range(len(volumes)-image_width):
    # ===================== part to optimize start
    final_image = np.zeros((image_heigh, image_width, image_channel))
    start = ii
    end = ii + image_width
    current_vols = volumes[start:end]
    # nomalize data
    vol_min = 0
    vol_max = np.max(current_vols)
    vol_norm = nomralized(data=current_vols,
                          data_min=vol_min,
                          data_max=vol_max,
                          maximum_value=image_heigh)
    for xxx in range(image_width):
        final_image[:int(vol_norm[xxx]), xxx, :] = 1
    # ===================== part to optimize end
    if show_img:
        image = np.float32(final_image)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        cv2.imshow("ok", image)
        cv2.waitKey(27)
print("total running time: ", (time.time() - start_time))
How can I make this image array creation faster?
I need to create the image at every timestep because I want to simulate a real live data stream that comes in with every new timestep.
This is why I would like to optimize only this part of the code:

for xxx in range(image_width):
    final_image[:int(vol_norm[xxx]), xxx, :] = 1

How can I do this?
The first and simplest optimizations are these:

1) Use comparison against np.arange(...) instead of the inner loop.
2) Use a gray image instead of 3-channel RGB: 3 times less data to process.
3) Use the np.uint8 type instead of np.float32, which is faster to process and doesn't need conversion to float32 for CV2 visualizing.

All these optimizations give a huge speedup (10x): my running time is 2.6 sec instead of the 27 sec before.
Another very useful optimization that I didn't do here: you don't need to recompute previous image pixels when the max/min of the whole data within the current window didn't change. You need to recompute previous image data only when the max/min changed. I expect that your real-life data changes gradually, like Forex or Bitcoin prices, hence a max/min change within a window is quite rare.
Optimizations 1)-3) mentioned above are implemented in the next code:
import cv2
import numpy as np
import time

volumes = np.random.randint(low=0, high=200, size=10000)
print(volumes)
image_heigh = 128
image_width = 256
image_channel = 3
show_img = False

def nomralized(data, data_min, data_max, maximum_value):
    nomamized_data = maximum_value * ((data - data_min) / (data_max - data_min))
    return nomamized_data

start_time = time.time()
aranges = np.arange(image_heigh, dtype = np.int32)[:, None]
for ii in range(len(volumes)-image_width):
    # ===================== part to optimize start
    #final_image = np.zeros((image_heigh, image_width, image_channel), dtype = np.float32)
    start = ii
    end = ii + image_width
    current_vols = volumes[start:end]
    # nomalize data
    vol_min = 0
    vol_max = np.max(current_vols)
    vol_norm = nomralized(data=current_vols,
                          data_min=vol_min,
                          data_max=vol_max,
                          maximum_value=image_heigh)
    final_image = (aranges < vol_norm[None, :].astype(np.int32)).astype(np.uint8) * 255
    # ===================== part to optimize end
    if show_img:
        cv2.imshow('ok', final_image)
        cv2.waitKey(27)
print("total running time: ", (time.time() - start_time))
For the code above I did just one more optimization of the inner loop, which sped the code up another 2x, giving a timing of 1.3 sec. But I also put back the 3 channels plus float32, which reduced speed, resulting in a final 2.8 sec; here is the code.
Another optimization is possible if re-computing old image data is not needed.
The main thing to optimize was that you were re-computing an almost identical whole image on every step, shifted by 1 pixel along the width. Instead you need to compute the whole image once, then shift right not by 1 pixel but by the whole image width.
After this optimization the running time is 0.08 sec.
Do the 1-pixel stepping only for showing the animation, not for computing image data; the image data should be computed just once if you need speed.
import cv2
import numpy as np
import time

volumes = np.random.randint(low=0, high=200, size=10000)
print(volumes)
image_heigh = 128
image_width = volumes.size #256
image_channel = 3
screen_width = 256
show_img = False

def nomralized(data, data_min, data_max, maximum_value):
    nomamized_data = maximum_value * ((data - data_min) / (data_max - data_min))
    return nomamized_data

start_time = time.time()
for ii in range(0, len(volumes), image_width):
    # ===================== part to optimize start
    final_image = np.zeros((image_heigh, image_width, image_channel))
    start = ii
    end = ii + image_width
    current_vols = volumes[start:end]
    # nomalize data
    vol_min = 0
    vol_max = np.max(current_vols)
    vol_norm = nomralized(data=current_vols,
                          data_min=vol_min,
                          data_max=vol_max,
                          maximum_value=image_heigh)
    for xxx in range(image_width):
        final_image[:int(vol_norm[xxx]), xxx, :] = 1
    # ===================== part to optimize end
    if show_img:
        for start in range(0, final_image.shape[1] - screen_width):
            image = np.float32(final_image[:, start : start + screen_width])
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            cv2.imshow("ok", image)
            cv2.waitKey(27)
print("total running time: ", (time.time() - start_time))
I also created an animation image out of your data.
If you want to create the same animation, just append the next piece of code to the end of the script above:
# Needs: python -m pip install pillow
import PIL.Image
imgs = [PIL.Image.fromarray(final_image[:, start : start + screen_width].astype(np.uint8) * 255) for start in range(0, final_image.shape[1] - screen_width, 6)]
imgs[0].save('result.png', append_images = imgs[1:], save_all = True, lossless = True, duration = 100)
I've also implemented a simulation of real-time live-stream data rendering/visualizing.
live_stream() is a generator that spits out a random amount of data at random points in time; this simulates the data generation process.
stream_fetcher() listens to the live stream and records all received data into the Python queue q0; this fetcher runs in its own thread.
renderer() takes the data recorded by the fetcher and renders it into an image through your mathematical formulas and normalization process; it renders as much data as is available, resulting in images of varying widths; rendered images are saved to another queue, q1.
visualizer() visualizes the rendered data by fetching as many rendered images as are available.
All functions run in separate threads so as not to block the whole process. Also, if any thread works too slowly, it skips some of the data to catch up with the current real-time data, so no queue overflows.
You may also notice that the visualized process is jumpy; that is not because the functions are slow, but because the live stream spits out a different amount of data in each time step, which is how real-time data usually behaves.
In the next code I also did the extra optimization mentioned before: not re-computing the image if min/max didn't change.
import cv2, numpy as np
import time, random, threading, queue

image_height = 256
image_width = 512

# Make results reproducible and deterministic
np.random.seed(0)
random.seed(0)

def live_stream():
    last = 0.
    while True:
        a = np.random.uniform(low = -1., high = 1., size = random.randint(1, 20)).astype(np.float64).cumsum() + last
        yield a
        last = a[-1]
        time.sleep(random.random() * 0.1)

q0 = queue.Queue()
def stream_fetcher():
    for e in live_stream():
        q0.put(e)
threading.Thread(target = stream_fetcher, daemon = True).start()

aranges = np.arange(image_height, dtype = np.int32)[:, None]

q1 = queue.Queue()
def renderer():
    def normalized(data, data_min, data_max, maximum_value):
        nomamized_data = maximum_value * ((data - data_min) / (data_max - data_min))
        return nomamized_data

    prev_image = np.zeros((image_height, 0), dtype = np.uint8)
    prev_vols = np.zeros((0,), dtype = np.float64)

    while True:
        data = []
        data.append(q0.get())
        try:
            while True:
                data.append(q0.get(block = False))
        except queue.Empty:
            pass
        vols = np.concatenate(data)[-image_width:]
        prev_vols = prev_vols[-(image_width - vols.size) or prev_vols.size:]
        concat_vols = np.concatenate((prev_vols, vols))[-image_width:]
        vols_min, vols_max = np.amin(concat_vols), np.amax(concat_vols)
        if prev_vols.size > 0 and (vols_min < np.amin(prev_vols) - 10 ** -8 or vols_max > np.amax(prev_vols) + 10 ** -8):
            vols = concat_vols
            prev_image = prev_image[:, :-prev_vols.size]
            prev_vols = prev_vols[:0]
        vols_norm = normalized(
            data = vols, data_min = vols_min,
            data_max = vols_max, maximum_value = image_height,
        )
        image = (aranges < vols_norm.astype(np.int32)[None, :]).astype(np.uint8) * 255
        whole_image = np.concatenate((prev_image, image), axis = 1)[:, -image_width:]
        q1.put(whole_image)
        prev_image = whole_image
        prev_vols = concat_vols
threading.Thread(target = renderer, daemon = True).start()

def visualizer():
    imgs = []
    while True:
        data = []
        data.append(q1.get())
        try:
            while True:
                data.append(q1.get(block = False))
        except queue.Empty:
            pass
        image = np.concatenate(data, axis = 1)[:, -image_width:]
        cv2.imshow('ok', image)
        cv2.waitKey(1)
        if imgs is not None:
            try:
                # Needs: python -m pip install pillow
                import PIL.Image
                has_pil = True
            except:
                has_pil = False
                imgs = None
            if has_pil:
                imgs.append(PIL.Image.fromarray(np.pad(image, ((0, 0), (image_width - image.shape[1], 0)), constant_values = 0)))
                if len(imgs) >= 1000:
                    print('saving...', flush = True)
                    imgs[0].save('result.png', append_images = imgs[1:], save_all = True, lossless = True, duration = 100)
                    imgs = None
                    print('saved!', flush = True)
threading.Thread(target = visualizer, daemon = True).start()

while True:
    time.sleep(0.1)
The above live-process simulation is rendered into result.png, which I show below:
I also decided to improve the visualization by using the more advanced matplotlib instead of cv2, to be able to show axes and do real-time plot drawing. The visualization image is below:
Next is the matplotlib-based code corresponding to the last image above:
import cv2, numpy as np
import time, random, threading, queue

image_height = 256
image_width = 512
save_nsec = 20
dpi, fps = 100, 15

# Make results reproducible and deterministic
np.random.seed(0)
random.seed(0)

def live_stream():
    last = 0.
    pos = 0
    while True:
        a = np.random.uniform(low = -1., high = 1., size = random.randint(1, 30)).astype(np.float64).cumsum() + last
        yield a, pos, pos + a.size - 1
        pos += a.size
        last = a[-1]
        time.sleep(random.random() * 2.2 / fps)

q0 = queue.Queue()
def stream_fetcher():
    for e in live_stream():
        q0.put(e)
threading.Thread(target = stream_fetcher, daemon = True).start()

aranges = np.arange(image_height, dtype = np.int32)[:, None]

q1 = queue.Queue()
def renderer():
    def normalized(data, data_min, data_max, maximum_value):
        nomamized_data = maximum_value * ((data - data_min) / (data_max - data_min))
        return nomamized_data

    prev_image = np.zeros((image_height, 0), dtype = np.uint8)
    prev_vols = np.zeros((0,), dtype = np.float64)

    while True:
        data = []
        data.append(q0.get())
        try:
            while True:
                data.append(q0.get(block = False))
        except queue.Empty:
            pass
        data_vols = [e[0] for e in data]
        data_minx, data_maxx = data[0][1], data[-1][2]
        vols = np.concatenate(data_vols)[-image_width:]
        prev_vols = prev_vols[-(image_width - vols.size) or prev_vols.size:]
        concat_vols = np.concatenate((prev_vols, vols))[-image_width:]
        vols_min, vols_max = np.amin(concat_vols), np.amax(concat_vols)
        if prev_vols.size > 0 and (vols_min < np.amin(prev_vols) - 10 ** -8 or vols_max > np.amax(prev_vols) + 10 ** -8):
            vols = concat_vols
            prev_image = prev_image[:, :-prev_vols.size]
            prev_vols = prev_vols[:0]
        vols_norm = normalized(
            data = vols, data_min = vols_min,
            data_max = vols_max, maximum_value = image_height,
        )
        image = (aranges < vols_norm.astype(np.int32)[None, :]).astype(np.uint8) * 255
        whole_image = np.concatenate((prev_image, image), axis = 1)[:, -image_width:]
        q1.put((whole_image, data_maxx - whole_image.shape[1] + 1, data_maxx, vols_min, vols_max))
        prev_image = whole_image
        prev_vols = concat_vols
threading.Thread(target = renderer, daemon = True).start()

def visualizer():
    import matplotlib.pyplot as plt, matplotlib.animation

    def images():
        while True:
            data = []
            data.append(q1.get())
            try:
                while True:
                    data.append(q1.get(block = False))
            except queue.Empty:
                pass
            minx = min([e[1] for e in data])
            maxx = min([e[2] for e in data])
            miny = min([e[3] for e in data])
            maxy = min([e[4] for e in data])
            image = np.concatenate([e[0] for e in data], axis = 1)[:, -image_width:]
            image = np.pad(image, ((0, 0), (image_width - image.shape[1], 0)), constant_values = 0)
            image = np.repeat(image[:, :, None], 3, axis = -1)
            yield image, minx, maxx, miny, maxy
    it = images()
    im = None

    fig = plt.figure(figsize = (image_width / dpi, image_height / dpi), dpi = dpi)

    def animate_func(i):
        nonlocal it, im, fig
        image, minx, maxx, miny, maxy = next(it)
        print(f'.', end = '', flush = True)
        if im is None:
            im = plt.imshow(image, interpolation = 'none', aspect = 'auto')
        else:
            im.set_array(image)
        im.set_extent((minx, maxx, miny, maxy))
        return [im]

    anim = matplotlib.animation.FuncAnimation(fig, animate_func, frames = round(save_nsec * fps), interval = 1000 / fps)

    print('saving...', end = '', flush = True)
    #anim.save('result.mp4', fps = fps, dpi = dpi, extra_args = ['-vcodec', 'libx264'])
    anim.save('result.gif', fps = fps, dpi = dpi, writer = 'imagemagick')
    print('saved!', end = '', flush = True)

    plt.show()
threading.Thread(target = visualizer, daemon = True).start()

while True:
    time.sleep(0.1)
Then I decided to play a bit and colored the last image with an RGB palette: the higher the peak, the more red-ish it is; if it is more in the middle, it is more green-ish; if it is low enough, it is more blue-ish. The resulting image below was achieved by this coloring code:
And another colored animation below, line-style instead of bar-style, with the help of this code:
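The actual coloring code isn't included in this page. As an illustration only (a minimal sketch of the idea described above, not the original code; the band shape is an assumption), coloring each filled bar pixel by its height could look like:

```python
import numpy as np

def color_by_height(mask):
    """mask: (H, W) uint8 bar image, 255 where the bar is filled.
    Returns an (H, W, 3) RGB image: high rows red-ish, middle rows
    green-ish, low rows blue-ish."""
    h, w = mask.shape
    height = np.linspace(1.0, 0.0, h)[:, None]  # 1.0 at the top row .. 0.0 at the bottom
    r = np.broadcast_to(height, (h, w))                             # red grows with height
    g = np.broadcast_to(1.0 - np.abs(height - 0.5) * 2.0, (h, w))   # green peaks mid-height
    b = np.broadcast_to(1.0 - height, (h, w))                       # blue grows toward the bottom
    rgb = (np.stack([r, g, b], axis=-1) * 255).astype(np.uint8)
    rgb[mask == 0] = 0  # keep the background black
    return rgb

# usage sketch: colored = color_by_height(whole_image) for a rendered frame from above
```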

python pyparrot image processing question

I'm trying to build code that flies a drone with a camera, using demoMamboVisionGUI.py below. When the code is executed, the camera screen comes up, and pressing the button starts the flight. The first code displays four cam screens and detects a straight line while detecting a specified color value, blue (BGR2HSV). Using these two pieces of code, the camera recognizes the blue straight line and flies forward little by little, turns left and right at a certain angle, recognizes the bottom in the specified color (red), lands, and starts flying again with another button. I want to make code that recognizes green and lands. I would appreciate it if you could throw me a simple hint.
import cv2
import numpy as np

def im_trim(img):
    x = 160
    y = 50
    w = 280
    h = 180
    img_trim = img[y:y + h, x:x + w]
    return img_trim

def go():
    minimum = 9999
    min_theta = 0
    try:
        cap = cv2.VideoCapture(0)
    except:
        return
    while True:
        ret, P = cap.read()
        img1 = P
        cv2.imshow('asdf', img1)
        img_HSV = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
        img_h, img_s, img_v = cv2.split(img_HSV)
        cv2.imshow("HSV", img_HSV)
        lower_b = np.array([100, 80, 100])
        upper_b = np.array([120, 255, 255])
        blue = cv2.inRange(img_HSV, lower_b, upper_b)
        cv2.imshow('root', blue)
        edges = cv2.Canny(blue, 50, 150, apertureSize=3)
        lines = cv2.HoughLines(edges, 1, np.pi/180, threshold=100)
        if lines is not None:
            for line in lines:
                r, theta = line[0]
                #if (r<minimum and r>0) and (np.rad2deg(theta)>-90 and np.rad2deg(theta)<90):
                #    minimum = r
                #    min_theta = theta
                #if (r > 0 and r < 250) and (np.rad2deg(theta) > 170 or np.rad2deg(theta) < 10):
                #    self.drone_object.fly_direct(pitch=0, roll=-7, yaw=0, vertical_movement=0,
                #                                 duration=1)
                #    print("right")
                #elif (r > 400 and r < 650) and (np.rad2deg(theta) > 170 or np.rad2deg(theta) < 10):
                #    self.drone_object.fly_direct(pitch=0, roll=7, yaw=0, vertical_movement=0,
                #                                 duration=1)
                print(r, np.rad2deg(theta))
                # repeat the if statement below in a while loop to turn the path into a straight course, then proceed
                #if np.rad2deg(min_theta) >= some angle (or below):
                #    below -> turn left, above -> turn right, in between -> go straight
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a * r
                y0 = b * r
                x1 = int(x0 + 1000 * (-b))
                y1 = int(y0 + 1000 * a)
                x2 = int(x0 - 1000 * (-b))
                y2 = int(y0 - 1000 * a)
                cv2.line(img1, (x1, y1), (x2, y2), (0, 255, 0), 3)
        cv2.imshow('hough', img1)
        k = cv2.waitKey(1)
        if k == 27:
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    go()
    print("??")
================================================================================================
"""
Demo of the Bebop vision using DroneVisionGUI that relies on libVLC. It is a different
multi-threaded approach than DroneVision
Author: Amy McGovern
"""
from pyparrot.Minidrone import Mambo
from pyparrot.DroneVisionGUI import DroneVisionGUI
import cv2
# set this to true if you want to fly for the demo
testFlying = True
class UserVision:
def __init__(self, vision):
self.index = 0
self.vision = vision
def save_pictures(self, args):
# print("in save pictures on image %d " % self.index)
img = self.vision.get_latest_valid_picture()
if (img is not None):
filename = "test_image_%06d.png" % self.index
# uncomment this if you want to write out images every time you get a new one
#cv2.imwrite(filename, img)
self.index +=1
#print(self.index)
def demo_mambo_user_vision_function(mamboVision, args):
"""
Demo the user code to run with the run button for a mambo
:param args:
:return:
"""
mambo = args[0]
if (testFlying):
print("taking off!")
mambo.safe_takeoff(5)
if (mambo.sensors.flying_state != "emergency"):
print("flying state is %s" % mambo.sensors.flying_state)
print("Flying direct: going up")
mambo.fly_direct(roll=0, pitch=0, yaw=0, vertical_movement=15, duration=2)
print("flip left")
print("flying state is %s" % mambo.sensors.flying_state)
success = mambo.flip(direction="left")
print("mambo flip result %s" % success)
mambo.smart_sleep(5)
print("landing")
print("flying state is %s" % mambo.sensors.flying_state)
mambo.safe_land(5)
else:
print("Sleeeping for 15 seconds - move the mambo around")
mambo.smart_sleep(15)
# done doing vision demo
print("Ending the sleep and vision")
mamboVision.close_video()
mambo.smart_sleep(5)
print("disconnecting")
mambo.disconnect()
if __name__ == "__main__":
# you will need to change this to the address of YOUR mambo
mamboAddr = "B0:FC:36:F4:37:F9"
# make my mambo object
# remember to set True/False for the wifi depending on if you are using the wifi or the BLE to connect
mambo = Mambo(mamboAddr, use_wifi=True)
print("trying to connect to mambo now")
success = mambo.connect(num_retries=3)
print("connected: %s" % success)
if (success):
# get the state information
print("sleeping")
mambo.smart_sleep(1)
mambo.ask_for_state_update()
mambo.smart_sleep(1)
print("Preparing to open vision")
mamboVision = DroneVisionGUI(mambo, is_bebop=False, buffer_size=200,
user_code_to_run=demo_mambo_user_vision_function, user_args=(mambo, ))
userVision = UserVision(mamboVision)
mamboVision.set_user_callback_function(userVision.save_pictures, user_callback_args=None)
mamboVision.open_video()
==========================================================================
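As a hint for the green-landing part (a sketch only; the HSV bounds are assumptions to tune against your own camera and lighting): detecting green works exactly like the blue mask already in the first script, just with a different cv2.inRange() window, and landing can be triggered once enough of the frame is green.

```python
import cv2
import numpy as np

def green_visible(frame_bgr, min_fraction=0.3):
    """Return True when at least min_fraction of the frame is green-ish."""
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    # rough green band in OpenCV's 0-179 hue scale; tune for your floor marker
    lower_g = np.array([40, 80, 80])
    upper_g = np.array([80, 255, 255])
    mask = cv2.inRange(hsv, lower_g, upper_g)
    return cv2.countNonZero(mask) / float(mask.size) >= min_fraction

# inside the vision loop, something like:
# if green_visible(img):
#     mambo.safe_land(5)
```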

Python Script for Art Museum Installation intermittently locks up, Removing Thermal Camera Sensor read function seems to work?

I have a Python script for an installation in an art museum that is meant to run continuously, playing sounds, driving an LED matrix, and sensing people via OpenCV and a thermal camera.
Each of the parts of the script works, and all of them work together, but randomly the script locks up and I need to restart it. I want the script not to lock up so no one has to reset it during the exhibition.
I have the code running on a spare Raspberry Pi and a spare LED matrix, and it continues to cycle through fine. The only changes I made were commenting out the start of a thread that checks the IR sensor and a call to a function that gets the max temp from the sensor.
To be clear: if I leave these bits of code in, the script runs fine 1-3 or sometimes 10 times, but then it seems to lock up in the first "state", when IRcount = 0.
I am stuck. Any help is greatly appreciated.
```
#!/usr/bin/python
import glob
import queue
import sys
import pygame
import cv2
import random
import math
import colorsys
import time
from rpi_ws281x import *
from PIL import Image
import numpy as np
import threading
global thresh
sys.path.insert(0, "/home/pi/irpython/build/lib.linux-armv7l-3.5")
import MLX90640 as mlx
currentTime = int(round(time.time() * 1000))
InflateWait = int(round(time.time() * 1000))
minTime = 6000
maxTime = 12000
lineHeight1 = 0
lineHue1 = float(random.randrange(1,360))/255
# IR Functions
# Function to just grab the Max Temp detected. If over threshold then start
# the sequence, if not stay in state 0
def maxTemp():
    mlx.setup(8) #set frame rate of MLX90640
    f = mlx.get_frame()
    mlx.cleanup()
    # get max and min temps from sensor
    # v_min = min(f)
    v_max = int(max(f))
    return v_max

# Function to detect individual people's heat blob group of pixels
# run in a thread only at the end of the script
def irCounter():
    img = Image.new('L', (24, 32), "black") # make IR image
    mlx.setup(8) #set frame rate of MLX90640
    f = mlx.get_frame()
    mlx.cleanup()
    for x in range(24):
        row = []
        for y in range(32):
            val = f[32 * (23 - x) + y]
            row.append(val)
            img.putpixel((x, y), (int(val)))
    # convert raw temp data to numpy array
    imgIR = np.array(img)
    # increase the 24x32 px image to 240x320px for ease of seeing
    bigIR = cv2.resize(imgIR, dsize=(240, 320), interpolation=cv2.INTER_CUBIC)
    # Use a bilateral filter to blur while hopefully retaining edges
    brightBlurIR = cv2.bilateralFilter(bigIR, 9, 150, 150)
    # Threshold the image to black and white
    retval, threshIR = cv2.threshold(brightBlurIR, 26, 255, cv2.THRESH_BINARY)
    # Define kernel for erosion, dilation and closing operations
    kernel = np.ones((5, 5), np.uint8)
    erosionIR = cv2.erode(threshIR, kernel, iterations = 1)
    dilationIR = cv2.dilate(erosionIR, kernel, iterations = 1)
    closingIR = cv2.morphologyEx(dilationIR, cv2.MORPH_CLOSE, kernel)
    # Detect contours
    contours, hierarchy = cv2.findContours(closingIR, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Get the number of contours (contours count when touching edge of image while blobs don't)
    ncontours = str(len(contours))
    # Show images in window during testing
    cv2.imshow("Combined", closingIR)
    cv2.waitKey(1)
#initialize pygame
pygame.init()
pygame.mixer.init()
pygame.mixer.set_num_channels(30)
print("pygame initialized")
# assign sound chennels for pygame
channel0 = pygame.mixer.Channel(0)
channel1 = pygame.mixer.Channel(1)
channel2 = pygame.mixer.Channel(2)
channel3 = pygame.mixer.Channel(3)
channel4 = pygame.mixer.Channel(4)
channel5 = pygame.mixer.Channel(5)
channel6 = pygame.mixer.Channel(6)
channel7 = pygame.mixer.Channel(7)
channel8 = pygame.mixer.Channel(8)
channel9 = pygame.mixer.Channel(9)
channel10 = pygame.mixer.Channel(10)
channel11 = pygame.mixer.Channel(11)
channel12 = pygame.mixer.Channel(12)
channel13 = pygame.mixer.Channel(13)
channel14 = pygame.mixer.Channel(14)
channel15 = pygame.mixer.Channel(15)
channel16 = pygame.mixer.Channel(16)
channel17 = pygame.mixer.Channel(17)
channel18 = pygame.mixer.Channel(18)
channel19 = pygame.mixer.Channel(19)
channel20 = pygame.mixer.Channel(20)
channel21 = pygame.mixer.Channel(21)
channel22 = pygame.mixer.Channel(22)
channel23 = pygame.mixer.Channel(23)
channel24 = pygame.mixer.Channel(24)
channel25 = pygame.mixer.Channel(25)
channel26 = pygame.mixer.Channel(26)
channel27 = pygame.mixer.Channel(27)
channel28 = pygame.mixer.Channel(28)
# load soundfiles
echoballs = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/echo balls FIX.ogg")
organbounce = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/ORGAN BOUNCE fix.ogg")
jar = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/jar whoop fix.ogg")
garland = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/GARLAND_fix.ogg")
dribble= pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/dribble.ogg")
cowbell = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/cowbell fix.ogg")
clackyballs = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/clacky balls boucne.ogg")
burpees = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/burpees_fix.ogg")
brokensynth = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/broken synth bounce.ogg")
woolballs = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/wool balls in jar FIX.ogg")
wiimoye = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/wiimoye_fix.ogg")
warpyorgan = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/warpy organ bounce#.2.ogg")
vibrate = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/vibrate fix.ogg")
turtlesbounce = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/turtles fix.ogg")
timer = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/timer.ogg")
tape = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/tape fix.ogg")
tambourine = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/TAMBOURINE.ogg")
springybounce = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/springy bounce.ogg")
smash3 = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/smash fix.ogg")
bristle2 = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/BRISTLE FIX.ogg")
blackkeys = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/black keys FIX.ogg")
zipper = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/zipper.ogg")
presatisfactionsweep = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/pre-satisfaction sweep .ogg")
satisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/SATISFACTION.ogg")
altsatisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/alt_satisfaction_trimmed.ogg")
solosatisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/SOLO_SATISFACTION.ogg")
print("sound files loaded")
# initializing sounds list
soundsList = [echoballs, organbounce, zipper, jar, garland, dribble, cowbell, clackyballs, burpees, brokensynth, woolballs,
              wiimoye, warpyorgan, vibrate, turtlesbounce, timer, tambourine, springybounce, smash3, bristle2, blackkeys, zipper]
IRcount = 0 # define initial state for main loop
pygame.display.set_mode((32, 8))
print("pygame dispaly open")
# LED strip configuration:
LED_COUNT = 256 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 100 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
# Define functions which animate LEDs in various ways.
# PNG to LED function used to shuffle through folders of numbered PNGs exported
# from animations created
def pngToLED(strip, pngfile):
    RGBimage = Image.open(pngfile).convert('RGB')
    np_image = np.array(RGBimage)
    colours = [Color(x[0], x[1], x[2]) for rows in np_image for x in rows]
    colours2d = np.reshape(colours, (32, 8), order='F')
    colours2d[1::2, :] = colours2d[1::2, ::-1]
    pic = colours2d.flatten('C')
    for i in range(0, strip.numPixels(), 1): # iterate over all LEDs - range(start_value, end_value, step)
        strip.setPixelColor(i, int(pic[i]))
    strip.show()
def colorWipe(strip, color, wait_ms=10):
    """Wipe color across display a pixel at a time."""
    for i in range(strip.numPixels()):
        strip.setPixelColor(i, color)
    strip.show()
    time.sleep(1)

def theaterChase(strip, color, wait_ms, iterations=10):
    """Movie theater light style chaser animation."""
    for j in range(iterations):
        for q in range(3):
            for i in range(0, strip.numPixels(), 3):
                strip.setPixelColor(i+q, color)
            strip.show()
            time.sleep(wait_ms/1000.0)
            for i in range(0, strip.numPixels(), 3):
                strip.setPixelColor(i+q, 0)

def wheel(pos):
    """Generate rainbow colors across 0-255 positions."""
    if pos < 85:
        return Color(pos * 3, 255 - pos * 3, 0)
    elif pos < 170:
        pos -= 85
        return Color(255 - pos * 3, 0, pos * 3)
    else:
        pos -= 170
        return Color(0, pos * 3, 255 - pos * 3)

def rainbow(strip, wait_ms=20, iterations=1):
    """Draw rainbow that fades across all pixels at once."""
    for j in range(256*iterations):
        for i in range(strip.numPixels()):
            strip.setPixelColor(i, wheel((i+j) & 255))
        strip.show()
        time.sleep(wait_ms/1000.0)

def rainbowCycle(strip, wait_ms=20, iterations=5):
    """Draw rainbow that uniformly distributes itself across all pixels."""
    for j in range(256*iterations):
        for i in range(strip.numPixels()):
            strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))
        strip.show()
        time.sleep(wait_ms/1000.0)

def theaterChaseRainbow(strip, wait_ms=90):
    """Rainbow movie theater light style chaser animation."""
    for j in range(256):
        for q in range(3):
            for i in range(0, strip.numPixels(), 3):
                strip.setPixelColor(i+q, wheel((i+j) % 255))
            strip.show()
            time.sleep(wait_ms/1000.0)
            for i in range(0, strip.numPixels(), 3):
                strip.setPixelColor(i+q, 0)
# Plasma LED Function from Root 42
def plasmaLED(plasmaTime):
    h = 8
    w = 32
    out = [Color(0, 0, 0) for x in range(h * w)]
    plasmaBright = 100.0
    for x in range(h):
        for y in range(w):
            hue = (4.0 + math.sin(plasmaTime + x) + math.sin(plasmaTime + y / 4.5) \
                + math.sin(x + y + plasmaTime) + math.sin(math.sqrt((x + plasmaTime) ** 2.0 + (y + 1.5 * plasmaTime) ** 2.0) / 4.0)) / 8
            hsv = colorsys.hsv_to_rgb(hue, 1, 1)
            if y % 2 == 0: #even
                out[x + (h * y)] = Color(*[int(round(c * plasmaBright)) for c in hsv])
            else: #odd
                out[(y * h) + (h - 1 - x)] = Color(*[int(round(c * plasmaBright)) for c in hsv])
    for i in range(0, strip.numPixels(), 1): # iterate over all LEDs - range(start_value, end_value, step)
        strip.setPixelColor(i, out[i]) # set pixel to color in picture
    strip.show()
# variables for plasma
plasmaTime = 5.0 # time
plasmaSpeed = 0.05 # speed of time
# thread for IRcounter function
class TempTask:
    def __init__(self):
        self.ir_temp = 0
        self.lock = threading.Lock() #control concurrent access for safe multi thread access
        self.thread = threading.Thread(target=self.update_temp)

    def update_temp(self):
        while True:
            with self.lock:
                self.ir_temp = irCounter()
            time.sleep(0.1)

    def start(self):
        self.thread.start()

# Millis timer count function
def CheckTime(lastTime, wait):
    if currentTime - lastTime >= wait:
        lastTime += wait
        return True
    return False
# Main program logic follows:
if __name__ == '__main__':
    # not currently starting the thread because program is locking up without it
    # want to figure out initial problem first
    #start thread
    #task = TempTask()
    #task.start()

    # Create NeoPixel object with appropriate configuration.
    strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
    # Initialize the library (must be called once before other functions).
    strip.begin()
    print('Press Ctrl-C to quit.')

    try:
        while True:
            currentTime = int(round(time.time() * 1000))
            if IRcount == 0:
                #random solid color
                colorWipe(strip, Color(random.randint(60, 255), random.randint(60, 255), random.randint(60, 255)))
                # use random.sample() to shuffle sounds list
                shuffledSounds = random.sample(soundsList, len(soundsList))
                if pygame.mixer.Channel(0).get_busy() == False:
                    channel0.play(shuffledSounds[0], loops=-1)
                thresh = 0
                '''
                # the threshold check below is the only thing I have taken out of
                # the program on my test Raspberry Pi. It seems to not lock up without it;
                # not sure why this would be a problem.
                thresh = int(maxTemp())
                print(thresh)
                if thresh >= 27:
                    InflateWait = int(round(time.time() * 1000))
                    print(thresh)
                    IRcount = 1
                    print("Threshold Temp Detected: Begin Sound Sequence")
                else:
                    IRcount = 0
                '''
                if CheckTime(InflateWait, random.randint(minTime, maxTime)):
                    InflateWait = int(round(time.time() * 1000))
                    IRcount += 1
                    print(IRcount)
            elif IRcount == 1:
                LEDimages = glob.glob("/home/pi/ruff-wavs/Crystal_Mirror/*.png")
                for LEDimage in sorted(LEDimages):
                    pngToLED(strip, LEDimage)
                if pygame.mixer.Channel(1).get_busy() == False:
                    channel1.play(shuffledSounds[1], loops=-1)
                waitTime = random.randint(minTime, maxTime)
                if CheckTime(InflateWait, waitTime):
                    InflateWait = int(round(time.time() * 1000))
                    IRcount += 1
                    print(IRcount)
            elif IRcount == 2:
                LEDimages = glob.glob("/home/pi/ruff-wavs/Mercury_Loop/*.png")
                for LEDimage in sorted(LEDimages):
                    pngToLED(strip, LEDimage)
                if pygame.mixer.Channel(2).get_busy() == False:
                    channel2.play(shuffledSounds[2], loops=-1)
                waitTime = random.randint(minTime, maxTime)
                if CheckTime(InflateWait, waitTime):
                    InflateWait = int(round(time.time() * 1000))
                    IRcount += 1
                    print(IRcount)
            elif IRcount == 3:
                LEDimages = glob.glob("/home/pi/ruff-wavs/Pink_Lava/*.png")
                for LEDimage in sorted(LEDimages):
                    pngToLED(strip, LEDimage)
                if pygame.mixer.Channel(3).get_busy() == False:
                    channel3.play(shuffledSounds[3], loops=-1)
                waitTime = random.randint(minTime, maxTime)
                if CheckTime(InflateWait, waitTime):
                    InflateWait = int(round(time.time() * 1000))
                    IRcount += 1
                    print(IRcount)
            elif IRcount == 4:
                LEDimages = glob.glob("/home/pi/ruff-wavs/Horiz_Mosaic/*.png")
                for LEDimage in sorted(LEDimages):
                    pngToLED(strip, LEDimage)
                if pygame.mixer.Channel(4).get_busy() == False:
                    channel4.play(shuffledSounds[4], loops=-1)
                waitTime = random.randint(minTime, maxTime)
                if CheckTime(InflateWait, waitTime):
                    InflateWait = int(round(time.time() * 1000))
                    IRcount += 1
                    print(IRcount)
            elif IRcount == 5:
                plasmaLED(plasmaTime)
                plasmaTime = plasmaTime + plasmaSpeed # increment plasma time
                if pygame.mixer.Channel(5).get_busy() == False:
                    channel5.play(shuffledSounds[5], loops=-1)
                waitTime = random.randint(minTime, maxTime)
                if CheckTime(InflateWait, waitTime):
                    InflateWait = int(round(time.time() * 1000))
                    IRcount += 1
                    print(IRcount)
            elif IRcount == 6:
                LEDimages = glob.glob("/home/pi/ruff-wavs/Radio_Loop/*.png")
                for LEDimage in sorted(LEDimages):
                    pngToLED(strip, LEDimage)
                if pygame.mixer.Channel(6).get_busy() == False:
                    channel6.play(shuffledSounds[6], loops=-1)
                waitTime = random.randint(minTime, maxTime)
                if CheckTime(InflateWait, waitTime):
                    InflateWait = int(round(time.time() * 1000))
                    IRcount += 1
                    print(IRcount)
            elif IRcount == 7:
                LEDimages = glob.glob("/home/pi/ruff-wavs/Star_Loop/*.png")
                for LEDimage in sorted(LEDimages):
                    pngToLED(strip, LEDimage)
                if pygame.mixer.Channel(7).get_busy() == False:
                    channel7.play(shuffledSounds[7], loops=-1)
                waitTime = random.randint(minTime, maxTime)
                if CheckTime(InflateWait, waitTime):
                    InflateWait = int(round(time.time() * 1000))
                    IRcount += 1
            elif IRcount == 14:
                plasmaLED(plasmaTime)
                plasmaTime = plasmaTime + plasmaSpeed # increment plasma time
                if pygame.mixer.Channel(14).get_busy() == False:
                    channel14.play(shuffledSounds[14], loops=-1)
                waitTime = random.randint(minTime, maxTime)
                if CheckTime(InflateWait, waitTime):
                    InflateWait = int(round(time.time() * 1000))
                    IRcount += 1
                    print(IRcount)
                    print(thresh)
            elif IRcount == 15:
                plasmaLED(plasmaTime)
                plasmaTime = plasmaTime + plasmaSpeed # increment plasma time
                if pygame.mixer.Channel(15).get_busy() == False:
                    channel15.play(shuffledSounds[15], loops=-1)
                waitTime = random.randint(minTime, maxTime)
                if CheckTime(InflateWait, waitTime):
                    InflateWait = int(round(time.time() * 1000))
                    IRcount += 1
                    print(IRcount)
            elif IRcount == 16:
                # random color theater chase increment random ms to speed up with sounds
                theaterChase(strip, Color(random.randint(1, 255), random.randint(1, 255), random.randint(1, 255)), random.randint(40, 50))
                pygame.mixer.fadeout(45000)
                if pygame.mixer.Channel(22).get_busy() == False:
                    channel22.play(presatisfactionsweep)
                    IRcount = 17
                    print(IRcount)
                    print("sweep end start")
            elif IRcount == 18:
                # random color theater chase increment random ms to speed up with sounds
                theaterChase(strip, Color(random.randint(1, 255), random.randint(1, 255), random.randint(1, 255)), random.randint(30, 40))
                if pygame.mixer.Channel(22).get_busy() == False:
                    pygame.mixer.stop()
                    channel23.play(satisfaction)
                    IRcount = 19
                    print(IRcount)
                    print("Play Satisfaction Sound")
            elif IRcount == 19:
                rainbowCycle(strip, 5)
                if pygame.mixer.Channel(23).get_busy() == False:
                    IRcount = 0
    except KeyboardInterrupt:
        colorWipe(strip, Color(0, 0, 0), 1)
        pygame.mixer.stop()
        pygame.quit()
```
Update 1 - Suspected Function(s)
When I left the script running overnight and came to the exhibit in the morning, it was stuck in the 1st state, IRcount = 0. The only things that happen in that state are the maxTemp() function to get the max temp and the LED colorWipe function to cycle colors.
When I would come in in the morning it would be stuck: playing a single sound from pygame, as it should, but not cycling colors. I removed maxTemp() from my test Pi and it has been working fine.
def maxTemp():
    mlx.setup(8) #set frame rate of MLX90640
    f = mlx.get_frame()
    mlx.cleanup()
    # get max and min temps from sensor
    # v_min = min(f)
    v_max = int(max(f))
    return v_max
Update #2
I thought that the thread might be the problem, so I commented out the thread start call. That is why I made the simpler maxTemp() function, to see if that would work better than the thread. So while I was using maxTemp(), the thread wasn't being called.
I don't understand threads very well. Is it possible to have the max temp variable update continuously while the simple OpenCV/NumPy manipulations also run continuously? That would be ideal. When I originally added the thread, it seemed to stop after a few cycles.
I do not have a join on the thread. I know threads don't "restart", but do I need to call it again as the state machine starts over?
# not currently starting the thread because program is locking up without it
# want to figure out initial problem first
#start thread
#task = TempTask()
#task.start()
Update #3
I uploaded new code that eliminated the duplicate functions. Everything is handled in the TempTask thread now. That seems to work fine. I also implemented the github suggestion of re-polling the thermal sensor if the frame is a duplicate, but that case has not happened yet.
I left the program running overnight, and when I came in in the morning it was locked up. The SD card is set to read-only mode. I ssh'd into the Pi. I have my auto-start Python script in /etc/profile.
It seems to start the script each time I log into ssh. When I logged in this morning to see if the Pi was still up, it gave a "no space left on device" error.
```
Traceback (most recent call last):
File "/home/pi/ruff-wavs/shufflewavdemo.py", line 210, in <module>
altsatisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/alt_satisfaction_trimmed.ogg")
pygame.error: Unable to open file '/home/pi/ruff-wavs/sounds/alt_satisfaction_trimmed.ogg'
OSError: [Errno 28] No space left on device
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.5/dist-packages/virtualenvwrapper/hook_loader.py", line 223, in <module>
main()
File "/usr/local/lib/python3.5/dist-packages/virtualenvwrapper/hook_loader.py", line 145, in main
output.close()
OSError: [Errno 28] No space left on device
-bash: cannot create temp file for here-document: No space left on device
```
Could that be because it is in read-only mode?
I used this script to switch from writable to read-only and back:
https://github.com/JasperE84/root-ro
I suspect the issue is that you're accessing the mlx device both in the main thread via maxTemp() and in the irCounter() thread. The fact that it works when you take out the maxTemp() call, and that that call happens in the if IRcount == 0: state, supports this.
I would move the maxTemp functionality into the irCounter thread, so that the device is accessed from only a single thread, and update a global variable (protected by a lock) with the maxTemp result if you need to keep this functionality.
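A minimal sketch of what that could look like (hypothetical names; it folds the max-temp read into the TempTask thread from the script above, so the sensor is only ever touched from one place; read_sensor_once() is a stand-in for the mlx + OpenCV work):

```python
import threading, time

class TempTask:
    def __init__(self):
        self.ir_temp = 0    # latest blob count from the irCounter logic
        self.max_temp = 0   # latest max temperature, updated by the same thread
        self.lock = threading.Lock()
        self.thread = threading.Thread(target=self.update_temp, daemon=True)

    def update_temp(self):
        while True:
            # the single place that talks to the mlx sensor
            frame_max, blob_count = read_sensor_once()  # hypothetical helper
            with self.lock:
                self.max_temp = frame_max
                self.ir_temp = blob_count
            time.sleep(0.1)

    def start(self):
        self.thread.start()

# in the main loop, instead of calling maxTemp() directly:
# with task.lock:
#     thresh = task.max_temp
```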

reading an opencv image in python through a socket

I am trying to read an OpenCV image in a Python socket that is sent from C++.
I am able to read the image into another C++ program or a VB program and rebuild the image, but with Python I don't understand what's happening.
The sending code where I send the mat.data:
char *start_s = "<S><size>43434234<cols>64<rows>64<SE>";//plus I send the image size, cols, rows, which varies, not like the static char string shown
char *end_e = "<E>";
cv::Mat image_send = some_mat;
iResult = send( ConnectSocket, start_s, (int)strlen(start_s), 0 );
iResult = send( ConnectSocket, (const char *) image_send.data, i_buffer_size, 0 );
iResult = send( ConnectSocket, end_e, (int)strlen(end_e), 0 );
This is what I have tried in Python, without success so far. image_cols and image_rows are parsed from the socket (not shown here); only the image_mat.data from the C++ Mat is in the data that I am trying to turn into an image:
data = conn.recv(4757560)
if (i_Read_Image == 2) & (image_cols != 0) & (image_rows != 0):
    print("Entering")
    #print(data)
    data2 = np.fromstring(data, dtype='uint8')
    img_np = cv2.imdecode(data2, cv2.IMREAD_COLOR)
    cv2.imshow('image', img_np)
    cv2.waitKey(0)
    #Also tried this
    #img = Image.new('RGB', (image_cols, image_rows))
    #img.putdata(data)
    #img5 = np.reshape(data2,(image_rows,image_cols))
    i_Read_Image = 0
With the help of the comments I was able to get a working answer. The original image arrives as a single flat array of RGB bytes; this needs to be reshaped and placed into an 'RGB' image, which can be done in one line:

img = Image.fromarray(data2.reshape(image_rows, image_cols, 3), 'RGB')

And when reading an OpenCV data array from a socket, this works:
data = conn.recv(567667)
if (i_Read_Image == 2) & (image_cols != 0) & (image_rows != 0):
    data2 = np.fromstring(data, dtype='uint8')
    img = Image.fromarray(data2.reshape(image_rows, image_cols, 3), 'RGB')
    img.show()
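One caveat worth adding (the same stream property as in the first question above): a single conn.recv(n) may legally return fewer than n bytes, so the reshape() can fail on a short read. A sketch of accumulating until a full rows*cols*3 frame has arrived (it assumes conn, np, and PIL's Image from the snippets above; the framing markers from the C++ sender are ignored here for brevity):

```python
expected = image_rows * image_cols * 3  # raw BGR bytes for one cv::Mat frame

buf = b''
while len(buf) < expected:
    chunk = conn.recv(expected - len(buf))
    if not chunk:
        raise ConnectionError("sender closed before a full frame arrived")
    buf += chunk

data2 = np.frombuffer(buf, dtype=np.uint8)  # np.fromstring is deprecated
img = Image.fromarray(data2.reshape(image_rows, image_cols, 3), 'RGB')
```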
