Issues with filtering Tweepy streams - python

I am working on a project where my code needs to react when a Twitter user tweets #foo along with #bar. I followed the tutorial at https://docs.tweepy.org/en/latest/streaming_how_to.html and my code is partially working: it reacts when I tweet "#foo I like #bar", but I get no response when I tweet "#foo #bar" (see my code below). Has anyone else had this issue?
import tweepy
import threading
import random
import board
import neopixel
import random
import colorsys
import numpy as np
import time
from adafruit_servokit import ServoKit
import busio
import adafruit_pca9685
from scipy.interpolate import interp1d
i2c = busio.I2C(board.SCL, board.SDA)
hat = adafruit_pca9685.PCA9685(i2c)
# Set channels to the number of servo channels on your kit.
# 8 for FeatherWing, 16 for Shield/HAT/Bonnet.
kit = ServoKit(channels=16, frequency=50)
pixel_pin = board.D18
num_pixels = 300
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(
pixel_pin, num_pixels, brightness=0.2, auto_write=False, pixel_order=ORDER
)
access_token = "1345547423154888706-VibtuSn31Aeu4e827GKaZmEnCVxsj7"
access_token_secret = "VfciVdPUW5STkVePYewyMTEWYo5rXhgOOHqyLFW5vk7tU"
consumer_key = "jrJFgRGHTg8MH8gcwgUMiL9lO"
consumer_secret = "UXOCkkEj1Y2eDzqzfKtqnC8K0IvsrdtzBe2sprIwDkqrLTHQes"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# for tweet in tweepy.Cursor(api.search, q="#PerotMuseum", rpp=100).items():
# inCaps = tweet.text.upper()
# if not "RT " in inCaps:
# if "#FOSSILFRIDAY" in inCaps:
# print(tweet.user.name+":")
# print(tweet.text.encode(encoding='UTF-8',errors='strict'))
# #print("\n")
#
class Listener(tweepy.StreamListener):
    def __init__(self):
        super(Listener, self).__init__()

    def on_status(self, status):
        inCaps = status.text.upper()
        if not "RT " in inCaps:
            print("========== " + status.user.name + " ==========")
            print(status.text + "\n")
            t_end = time.time() + 12
            t1 = threading.Thread(target=marquee, args=(10, pickColor(), t_end,))
            t2 = threading.Thread(target=loop, args=(t_end,))
            # t2 = threading.Thread(target=wave, args=())
            t1.start()
            t2.start()
            t1.join()
            t2.join()
            motorDefault()
            detachServos()
            pixels.fill((0, 0, 0))
            pixels.show()

    def on_error(self, status_code):
        print(status_code)
        return False
def SMOOTHERSTEP(x):
    # return((x) * (x) * (x) * ((x) * ((x) * 6 - 15) + 10))
    return np.sin(x * 3.1415 / 2)

def interp(start, stop, t, totsteps):
    v = t / totsteps
    v = SMOOTHERSTEP(v)
    val = (start * v) + (stop * (1 - v))
    # print (val)
    return val

def pickColor():
    h = random.random()
    color = colorsys.hsv_to_rgb(h, 1, 1)
    color = np.multiply(color, 255)
    color = np.floor(color).astype(int)
    return color
def loop(t_end):
    steps = 130
    step = 0.0
    steps = steps / 4
    while time.time() < t_end:
        # marquee(10, color, int(step))
        if step < steps:
            i = step
            kit.servo[0].angle = interp(130, 95, i, steps)
            kit.servo[1].angle = interp(180, 30, i, steps)
            kit.servo[2].angle = interp(110, 140, i, steps)
            kit.servo[3].angle = interp(120, 90, i, steps)
            step += 1
        elif step < steps * 2:
            i = step - steps
            kit.servo[0].angle = interp(95, 130, i, steps)
            kit.servo[1].angle = interp(30, 180, i, steps)
            kit.servo[2].angle = interp(140, 110, i, steps)
            kit.servo[3].angle = interp(90, 120, i, steps)
            step += 1
        elif step < steps * 3:
            i = step - steps * 2
            kit.servo[0].angle = interp(130, 95, i, steps)
            kit.servo[1].angle = interp(60, 30, i, steps)
            kit.servo[2].angle = interp(110, 140, i, steps)
            kit.servo[3].angle = interp(60, 90, i, steps)
            step += 1
        elif step < steps * 4:
            i = step - steps * 3
            kit.servo[0].angle = interp(95, 130, i, steps)
            kit.servo[1].angle = interp(30, 60, i, steps)
            kit.servo[2].angle = interp(140, 110, i, steps)
            kit.servo[3].angle = interp(90, 60, i, steps)
            step += 1
        else:
            step = 0
def marquee(width, color, t_end):
    t = 0
    while time.time() < t_end:
        i = (t % width)
        while i < num_pixels:
            for x in range(int(width / 2)):
                pixels[int(i - x)] = color
            i += width
        pixels.show()
        pixels.fill((0, 0, 0))
        t += 1
    pixels.fill((0, 0, 0))
    pixels.show()
# def marquee(width, color, step):
# for i in range(num_pixels):
# if(i % width < width/2):
# if(i + step < num_pixels):
# pixels[i+step] = color
# else:
# pixels[i+step-num_pixels] = color
# pixels.show()
# pixels.fill((0,0,0))
def wave():
    kit.servo[1].angle = 180  # 30 right arm
    time.sleep(2)
    kit.servo[1].angle = 160  # 30 right arm
    time.sleep(.25)
    kit.servo[1].angle = 180  # 30 right arm
    time.sleep(.25)
    kit.servo[1].angle = 160  # 30 right arm
    time.sleep(.25)
    kit.servo[1].angle = 180  # 30 right arm
    time.sleep(.25)
    kit.servo[1].angle = 30   # 30 right arm

def detachServos():
    for i in range(16):
        channel = hat.channels.__getitem__(i)
        channel.duty_cycle = 0

def motorDefault():
    kit.servo[0].angle = 100  # 90 skull
    kit.servo[1].angle = 30   # 30 right arm
    kit.servo[2].angle = 140  # 150 left arm
    kit.servo[3].angle = 90   # 90 head
    time.sleep(.5)

try:
    marquee(10, pickColor(), time.time() + 3)
    myStreamListener = Listener()
    myStream = tweepy.Stream(auth=api.auth, listener=myStreamListener)
    myStream.filter(track=['#Foo', '#Bar'], languages=['en'])
except:
    motorDefault()
    detachServos()
    print("something went wrong")

I figured it out. By filtering for English-only results, the stream was excluding tweets that contain only hashtags and handles. I assume this is because hashtags and handles are left out of the language detection, since they can be in any language and don't necessarily reflect the language of the tweet itself.
So the solution was changing this:
myStream.filter(track=['#Foo', '#Bar'], languages=['en'])
to this:
myStream.filter(track=['#Foo', '#Bar'])
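If you still want to avoid non-English tweets without dropping the hashtag-only ones, one option (a minimal sketch against the same StreamListener pattern used above, not tested on the live stream) is to leave languages out of filter() and check the tweet's lang field yourself in on_status, accepting 'und' (undetermined), which is how hashtag-only tweets are typically tagged:

import tweepy

class Listener(tweepy.StreamListener):
    def on_status(self, status):
        text = status.text.upper()
        if "RT " in text:          # skip retweets, as in the original listener
            return
        # Client-side language check: keep English tweets and tweets whose
        # language Twitter could not determine (hashtag/handle-only tweets).
        if status.lang not in ("en", "und"):
            return
        print(status.user.name + ": " + status.text)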

Related

Python async dataprocessing function

I want to run the runBacktest() function asynchronously. Is this possible?
import pandas as pd
from pathlib import Path
from datetime import datetime
from indicators import *
#Loading the file.
dfCryptoCap = pd.read_csv(f"{Path(__file__).parent.resolve()}\CRYPTOCAP_TOTAL, 720_b2571.csv")
dfBtcUsd = pd.read_csv(f"{Path(__file__).parent.resolve()}\INDEX_BTCUSD, 720_17561.csv")
# Add Column for converted unix timestamp to datetime
dfCryptoCap['timeiso'] = pd.to_datetime(dfCryptoCap['time'],unit='s')
dfBtcUsd['timeiso'] = pd.to_datetime(dfBtcUsd['time'],unit='s')
dfCryptoCapHA = generateHeikinAshi(dfCryptoCap)
dfBtcUsdHA = generateHeikinAshi(dfBtcUsd)
results = []
def runBacktest(lenSmooth1, winningLenSmooth1, winningPNL):
    dfCryptoCapEMA = dfCryptoCapHA.copy()
    dfCryptoCapEMA['open'] = calculateEMA(dfCryptoCapHA['open'], lenSmooth1)
    dfCryptoCapEMA['high'] = calculateEMA(dfCryptoCapHA['high'], lenSmooth1)
    dfCryptoCapEMA['low'] = calculateEMA(dfCryptoCapHA['low'], lenSmooth1)
    dfCryptoCapEMA['close'] = calculateEMA(dfCryptoCapHA['close'], lenSmooth1)
    # print(dfCryptoCapSMA1)
    portfoliosize = 1000
    entryPrice = 0.0
    traderesult = 0.0
    for i in range(1, len(dfCryptoCapEMA)):
        if dfCryptoCapEMA.iloc[i]['close'] > dfCryptoCapEMA.iloc[i]['open'] and dfCryptoCapEMA.iloc[i - 1]['close'] <= dfCryptoCapEMA.iloc[i - 1]['open']:
            btcOHLC = dfBtcUsd.loc[dfBtcUsd['time'] == dfCryptoCapEMA.iloc[i]['time']]
            entryPrice = btcOHLC.iloc[0]['close'].tolist()
        elif dfCryptoCapEMA.iloc[i]['close'] < dfCryptoCapEMA.iloc[i]['open'] and dfCryptoCapEMA.iloc[i - 1]['close'] >= dfCryptoCapEMA.iloc[i - 1]['open']:
            btcOHLC = dfBtcUsd.loc[dfBtcUsd['time'] == dfCryptoCapEMA.iloc[i]['time']]
            try:
                traderesult = (btcOHLC.iloc[0]['close'].tolist() - entryPrice) / entryPrice * 100
            except:
                traderesult = 0
            if traderesult > 0:
                portfoliosize = portfoliosize * (1 + (traderesult / 100))
            elif traderesult < 0:
                portfoliosize = portfoliosize * (1 - (abs(traderesult) / 100))
    result = f"Round - lenSmooth1 = {lenSmooth1} | PNL = {round(portfoliosize,2)} || currentWinner = {winningLenSmooth1} | currentWinnerPNL = {round(winningPNL,2)}"
    # print(result)
    if portfoliosize > winningPNL:
        results.append(result)
        winningPNL = portfoliosize
        winningLenSmooth1 = lenSmooth1
    return [winningLenSmooth1, winningPNL]

result = []
for x in range(1, 151, 1):
    if x == 1:
        result = runBacktest(x, 0, 0)
    else:
        result = runBacktest(x, result[0], result[1])
print(results[len(results) - 1])
Currently, the backtest runs synchronously, and with larger datasets each iteration takes up to a minute. I want to speed up the process by running runBacktest() asynchronously with different lenSmooth1 values and reviewing the results at the end.
I tried adding the following to my script, but I don't see any improvement in duration:
import asyncio
async def run_tasks():
    tasks = [runBacktest(x, 0, 0) for x in range(1, 151, 1)]
    # await asyncio.wait(tasks)
    await asyncio.gather(*tasks)

def main():
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(run_tasks())
    loop.close()

main()
print(results)
Async will improve performance when you are working with IO operations (e.g. waiting for a response over a network, loading a file, etc.) but it won't do much to help with CPU-bound processes.
This article does a great job of breaking down different means of achieving concurrency in Python. What you're looking for is likely an implementation of the multiprocessing library.
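As a rough sketch of that approach (untested; it assumes runBacktest and the data loading above are importable, and uses concurrent.futures.ProcessPoolExecutor, which is built on multiprocessing), you can fan the lenSmooth1 values out over a process pool and pick the winner from the returned values. Note that the global results list is not shared between worker processes, so each worker has to return what it finds:

from concurrent.futures import ProcessPoolExecutor

def backtest_worker(lenSmooth1):
    # Run one parameter value in its own process and return (lenSmooth1, PNL).
    # Globals mutated inside runBacktest (like `results`) live in the worker
    # process only, so we pass 0/0 and rely on the return value instead.
    winner = runBacktest(lenSmooth1, 0, 0)
    return lenSmooth1, winner[1]

if __name__ == "__main__":
    with ProcessPoolExecutor() as pool:   # one worker per CPU core by default
        outcomes = list(pool.map(backtest_worker, range(1, 151)))
    best_len, best_pnl = max(outcomes, key=lambda item: item[1])
    print(f"best lenSmooth1 = {best_len} | PNL = {round(best_pnl, 2)}")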

Mpi4py: printing and plotting during execution

I recently started working with MPI in order to accelerate some code (an SPH/gravity simulation).
So far it seems to be working: the positions of my particles have changed between the beginning and the end of the program, and the task manager shows several Python processes working...
But I have two problems:
1/ I can't print text during the execution of the program; the text is only printed once it has finished.
2/ I'm unable to create a graph using matplotlib.
In both cases, neither Python nor MPI returns any error. My guess, for the text at least, is that it is only printed when the execution ends under mpiexec.
Thanks for any insight!
Here is the code; I run it with !mpiexec -n 8 python mpi4py_test.py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from mpi4py import MPI
class particule:
def __init__(self, h, pos, vel = [0, 0], m = 1, P = 0, density = 0, acc = [0, 0]):
self.x, self.y = pos
self.u, self.v = vel
self.acc_x, self.acc_y = acc
self.m = m
self.h = h # kernel lenght
self.P = P # Pressure
self.density = density
def kernel(part_a, part_b ):
" monaghan cubic spline "
cst = 1 / (np.pi * part_a.h**3)
dist = np.sqrt((part_b.x-part_a.x)**2 + (part_b.y-part_a.y)**2 )
r = dist / h
tmp = 0
if r < 1:
tmp = cst * (1 - 3/2* r**2 + 3/4*r**3)
elif r < 2:
tmp = cst * (1/4*(2-r)**3)
return tmp
def grad_kernel(part_a, part_b):
cst = 1 / (np.pi * part_a.h**3)
dist = np.sqrt((part_b.x-part_a.x)**2 + (part_b.y-part_a.y)**2 )
r = dist / part_a.h
tmp = 0
if r < 1:
tmp = cst * (9/4 * r**2 - 3*r)
elif r < 2:
tmp = cst * (-3/4*(2-r)**2)
return tmp
class hash_grid:
def __init__(self, cell_size):
self.cell_size = cell_size
self.cell = {}
self.part_list = []
def key(self, part):
return (part.x//self.cell_size,
part.y//self.cell_size)
def add(self, part):
tmp = self.key(part)
self.cell.setdefault(tmp, []).append(part)
self.part_list.append(part)
def neighbours(self, part):
idx = self.key(part)
cell_N = None
cell_S = None
cell_E = None
cell_W = None
cell_NE = None
cell_NW = None
cell_SE = None
cell_SW = None
if (idx[0], idx[1]+1) in self.cell: cell_N = (idx[0], idx[1]+1)
if (idx[0], idx[1]-1) in self.cell: cell_S = (idx[0], idx[1]-1)
if (idx[0]+1, idx[1]) in self.cell: cell_E = (idx[0]+1, idx[1])
if (idx[0]-1, idx[1]) in self.cell: cell_W = (idx[0]-1, idx[1])
if (idx[0]+1, idx[1]+1) in self.cell: cell_NE = (idx[0]+1, idx[1]+1)
if (idx[0]-1, idx[1]+1) in self.cell: cell_NW = (idx[0]-1, idx[1]+1)
if (idx[0]+1, idx[1]-1) in self.cell: cell_SE = (idx[0]+1, idx[1]-1)
if (idx[0]-1, idx[1]-1) in self.cell: cell_SW = (idx[0]-1, idx[1]-1)
return [value for cel in (idx, cell_N, cell_S, cell_E, cell_W, cell_NE, cell_NW, cell_SE, cell_SW) if cel!=None for value in self.cell.get(cel) ]
def split(to_split, nb_chunk):
"take a list and split it most evenly possible"
result = []
q, r = divmod(len(to_split), nb_chunk)
curr = 0
last = 0
for i in range(nb_chunk):
if r>0:
last = curr + q + 1
result.append(to_split[curr: last])
r = r-1
curr = last
else:
last = curr + q
result.append(to_split[curr: last])
curr = last
return result
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
if rank == 0:
n_part = 2000
h = 2
grid = hash_grid(h)
points = np.zeros((n_part, 2))
sim_range = 20
dt = 0.005
n_iter = 2500
k = 5
for _i in range(n_part):
pos = np.random.uniform(-sim_range, sim_range, 2)
vel = np.random.uniform(-.2, .2, 2)
p = particule(h, pos, vel)
grid.add(p)
points[_i, :] = pos
for part in grid.part_list:
part.density = 0
part.P = 0
for p in grid.neighbours(part):
part.density = part.density + p.m * kernel(part, p)
part.P = k * ( part.density )**2 # - density_0)
data = split(grid.part_list,8)
for t in range(10):
if rank == 0 :
"1 - verlet 1/2 ; serial"
for i, part in enumerate(grid.part_list):
part.u, part.v = part.u + part.acc_x * dt/2, part.v + part.acc_y * dt/2
part.x, part.y = part.x + part.u * dt, part.y + part.v * dt
"2 - update grid ; serial"
jnk = grid.part_list
del grid
grid=hash_grid(h)
for p in jnk:
grid.add(p)
grid_bcast = grid
data_b = grid
chunk_of_part_list = split(grid.part_list,8)
else:
grid_bcast=None
chunk_of_part_list = None
data_b=None
grid_bcast = comm.bcast(grid_bcast, root=0)
chunk_of_part_list = comm.scatter(chunk_of_part_list, root=0)
"3 - get acc ; parallel"
for part in chunk_of_part_list:
part.acc_x = 0
part.acc_y = 0
for p in grid_bcast.neighbours(part):
if p != part:
r = np.sqrt((p.x-part.x)**2 + (p.y-part.y)**2)
if r==0: pass
else:
part.acc_x = part.acc_x - p.m * (part.P/part.density**2 + p.P/p.density**2) * grad_kernel(part, p) * (p.x - part.x)/r
part.acc_y = part.acc_y - p.m * (part.P/part.density**2 + p.P/p.density**2) * grad_kernel(part, p) * (p.y - part.y)/r
dist = np.sqrt(part.x**2+part.y**2)
part.acc_x = part.acc_x - .5 * part.x -1*part.u
part.acc_y = part.acc_y - .5 * part.y -1*part.v
chunk_of_part_list = comm.gather(chunk_of_part_list,root=0)
"4 - verlet 2/2 ; serial"
if rank == 0:
grid.part_list = list(matplotlib.cbook.flatten(chunk_of_part_list))
for i, part in enumerate(grid.part_list):
part.u, part.v = part.u + part.acc_x * dt/2, part.v + part.acc_y * dt/2
points[i,0], points[i,1] = part.x, part.y # for the figure
if rank==0:
print(t, flush=True)
if rank == 0:
print('point 0', points[0,:])
fig = plt.figure()
ax = fig.add_subplot()
sc = ax.scatter(points[:, 0], points[:, 1],s=3)
ax.set_aspect('equal', 'box')
ax.set_xlim(-sim_range,sim_range)
ax.set_ylim(-sim_range,sim_range)
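One note on problem 2 that is visible in the code itself: the rank-0 plotting block builds the figure but never renders it, so nothing will appear even after the run finishes. Under mpiexec there is usually no interactive display attached to the workers, so saving to a file from rank 0 is the more robust option (a small addition; the filename here is made up):

# at the end of the rank-0 plotting block
if rank == 0:
    fig.savefig("sph_positions.png", dpi=150)   # or plt.show() when a display is available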

python pyparrot image processing question

I'm trying to build code that flies a camera drone using demoMamboVisionGUI.py (below). When the code is executed, the camera screen comes up and I press a button to start the flight. The first script below displays four camera windows and detects a straight line after isolating a specified color value, blue (via BGR2HSV). Using these two scripts, the drone recognizes the blue straight line and flies forward little by little, turns left and right at a certain angle, recognizes ground of a specified color (red), lands, and then starts flying again with another button. I want to make the code recognize green and land. I would appreciate a simple hint (a sketch addressing this follows the two scripts below).
import cv2
import numpy as np
def im_trim(img):
x = 160
y = 50
w = 280
h = 180
img_trim = img[y:y + h, x:x + w]
return img_trim
def go():
minimum = 9999;
min_theta=0;
try:
cap = cv2.VideoCapture(0)
except:
return
while True:
ret, P = cap.read()
img1 = P
cv2.imshow('asdf',img1)
img_HSV = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
img_h, img_s, img_v = cv2.split(img_HSV)
cv2.imshow("HSV", img_HSV)
lower_b = np.array([100, 80, 100])
upper_b = np.array([120, 255, 255])
blue = cv2.inRange(img_HSV, lower_b, upper_b)
cv2.imshow('root',blue)
edges = cv2.Canny(blue, 50, 150, apertureSize =3)
lines = cv2.HoughLines(edges, 1, np.pi/180, threshold = 100)
if lines is not None:
for line in lines:
r, theta = line[0]
#if (r<minimum and r>0) and (np.rad2deg(theta)>-90 and np.rad2deg(theta)<90):
#minimum = r
#min_theta = theta
#if (r > 0 and r < 250) and (np.rad2deg(theta) > 170 or np.rad2deg(theta) < 10):
# self.drone_object.fly_direct(pitch=0, roll=-7, yaw=0, vertical_movement=0,
# duration=1)
#print("right")
#elif (r > 400 and r < 650) and (np.rad2deg(theta) > 170 or np.rad2deg(theta) < 10):
# self.drone_object.fly_direct(pitch=0, roll=7, yaw=0, vertical_movement=0,
# duration=1)
print(r, np.rad2deg(theta))
#Repeat the if statements below inside a while loop to straighten the path before moving on
#if(np.rad2deg(min_theta) >= some angle or <= some angle):
# below -> turn left, above -> turn right, in between -> go straight
a = np.cos(theta)
b = np.sin(theta)
x0 = a * r
y0 = b * r
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * a)
cv2.line(img1, (x1,y1), (x2,y2), (0,255,0), 3)
cv2.imshow('hough',img1)
k = cv2.waitKey(1)
if k == 27:
break
cv2.destroyAllWindows()
if __name__ == "__main__":
go()
print("??")
================================================================================================
"""
Demo of the Bebop vision using DroneVisionGUI that relies on libVLC. It is a different
multi-threaded approach than DroneVision
Author: Amy McGovern
"""
from pyparrot.Minidrone import Mambo
from pyparrot.DroneVisionGUI import DroneVisionGUI
import cv2
# set this to true if you want to fly for the demo
testFlying = True
class UserVision:
def __init__(self, vision):
self.index = 0
self.vision = vision
def save_pictures(self, args):
# print("in save pictures on image %d " % self.index)
img = self.vision.get_latest_valid_picture()
if (img is not None):
filename = "test_image_%06d.png" % self.index
# uncomment this if you want to write out images every time you get a new one
#cv2.imwrite(filename, img)
self.index +=1
#print(self.index)
def demo_mambo_user_vision_function(mamboVision, args):
"""
Demo the user code to run with the run button for a mambo
:param args:
:return:
"""
mambo = args[0]
if (testFlying):
print("taking off!")
mambo.safe_takeoff(5)
if (mambo.sensors.flying_state != "emergency"):
print("flying state is %s" % mambo.sensors.flying_state)
print("Flying direct: going up")
mambo.fly_direct(roll=0, pitch=0, yaw=0, vertical_movement=15, duration=2)
print("flip left")
print("flying state is %s" % mambo.sensors.flying_state)
success = mambo.flip(direction="left")
print("mambo flip result %s" % success)
mambo.smart_sleep(5)
print("landing")
print("flying state is %s" % mambo.sensors.flying_state)
mambo.safe_land(5)
else:
print("Sleeeping for 15 seconds - move the mambo around")
mambo.smart_sleep(15)
# done doing vision demo
print("Ending the sleep and vision")
mamboVision.close_video()
mambo.smart_sleep(5)
print("disconnecting")
mambo.disconnect()
if __name__ == "__main__":
# you will need to change this to the address of YOUR mambo
mamboAddr = "B0:FC:36:F4:37:F9"
# make my mambo object
# remember to set True/False for the wifi depending on if you are using the wifi or the BLE to connect
mambo = Mambo(mamboAddr, use_wifi=True)
print("trying to connect to mambo now")
success = mambo.connect(num_retries=3)
print("connected: %s" % success)
if (success):
# get the state information
print("sleeping")
mambo.smart_sleep(1)
mambo.ask_for_state_update()
mambo.smart_sleep(1)
print("Preparing to open vision")
mamboVision = DroneVisionGUI(mambo, is_bebop=False, buffer_size=200,
user_code_to_run=demo_mambo_user_vision_function, user_args=(mambo, ))
userVision = UserVision(mamboVision)
mamboVision.set_user_callback_function(userVision.save_pictures, user_callback_args=None)
mamboVision.open_video()
==========================================================================
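As a hint for the green-landing part of the question above (a sketch only; the HSV bounds are assumptions you would need to tune for your lighting), the same cv2.inRange approach already used for blue works for green, which sits roughly around hue 40-80 in OpenCV's HSV space. Trigger the landing when enough of the frame is green:

import cv2
import numpy as np

def green_ratio(frame_bgr):
    """Return the fraction of pixels that fall inside a rough 'green' HSV range."""
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    lower_g = np.array([40, 80, 80])     # assumed bounds; tune for your lighting
    upper_g = np.array([80, 255, 255])
    mask = cv2.inRange(hsv, lower_g, upper_g)
    return cv2.countNonZero(mask) / mask.size

# Inside the vision loop, something like:
# if green_ratio(img) > 0.5:
#     mambo.safe_land(5)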

Python Script for Art Museum Installation intermittently locks up, Removing Thermal Camera Sensor read function seems to work?

I have a Python script for an installation in an art museum that is meant to run continuously, playing sounds, driving an LED matrix, and sensing people via OpenCV and a thermal camera.
Each part of the script works, and they all work together, but the script randomly locks up and I need to restart it. I want the script not to lock up so that no one has to reset it during the exhibition.
I have the code running on a spare Raspberry Pi with a spare LED matrix and it continues to cycle through fine. The only changes I made were commenting out the start of the thread that checks the IR sensor and the call to the function that gets the max temp from the sensor.
To be clear, if I leave these bits of code in, the script runs fine 1-3 or sometimes 10 times, but it seems to lock up in the first "state", when IRcount = 0.
I am stuck. Any help is greatly appreciated.
```
#!/usr/bin/python
import glob
import queue
import sys
import pygame
import cv2
import random
import math
import colorsys
import time
from rpi_ws281x import *
from PIL import Image
import numpy as np
import threading
global thresh
sys.path.insert(0, "/home/pi/irpython/build/lib.linux-armv7l-3.5")
import MLX90640 as mlx
currentTime = int(round(time.time() * 1000))
InflateWait = int(round(time.time() * 1000))
minTime = 6000
maxTime = 12000
lineHeight1 = 0
lineHue1 = float(random.randrange(1,360))/255
# IR Functions
# Function to just grab the Max Temp detected. If over threshold then start
# the sequence, if not stay in state 0
def maxTemp():
mlx.setup(8) #set frame rate of MLX90640
f = mlx.get_frame()
mlx.cleanup()
# get max and min temps from sensor
# v_min = min(f)
v_max = int(max(f))
return v_max
# Function to detect individual people's heat blob group of pixels
# run in a thread only at the end of the script
def irCounter():
img = Image.new( 'L', (24,32), "black") # make IR image
mlx.setup(8) #set frame rate of MLX90640
f = mlx.get_frame()
mlx.cleanup()
for x in range(24):
row = []
for y in range(32):
val = f[32 * (23-x) + y]
row.append(val)
img.putpixel((x, y), (int(val)))
# convert raw temp data to numpy array
imgIR = np.array(img)
# increase the 24x32 px image to 240x320px for ease of seeing
bigIR = cv2.resize(depth_uint8, dsize=(240,320), interpolation=cv2.INTER_CUBIC)
# Use a bilateral filter to blur while hopefully retaining edges
brightBlurIR = cv2.bilateralFilter(bigIR,9,150,150)
# Threshold the image to black and white
retval, threshIR = cv2.threshold(brightBlurIR, 26, 255, cv2.THRESH_BINARY)
# Define kernal for erosion and dilation and closing operations
kernel = np.ones((5,5),np.uint8)
erosionIR = cv2.erode(threshIR,kernel,iterations = 1)
dilationIR = cv2.dilate(erosionIR,kernel,iterations = 1)
closingIR = cv2.morphologyEx(dilationIR, cv2.MORPH_CLOSE, kernel)
# Detect countours
contours, hierarchy = cv2.findContours(closingIR, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# Get the number of contours ( contours count when touching edge of image while blobs don't)
ncontours = str(len(contours))
# Show images in window during testing
cv2.imshow("Combined", closingIR)
cv2.waitKey(1)
#initialize pygame
pygame.init()
pygame.mixer.init()
pygame.mixer.set_num_channels(30)
print("pygame initialized")
# assign sound chennels for pygame
channel0 = pygame.mixer.Channel(0)
channel1 = pygame.mixer.Channel(1)
channel2 = pygame.mixer.Channel(2)
channel3 = pygame.mixer.Channel(3)
channel4 = pygame.mixer.Channel(4)
channel5 = pygame.mixer.Channel(5)
channel6 = pygame.mixer.Channel(6)
channel7 = pygame.mixer.Channel(7)
channel8 = pygame.mixer.Channel(8)
channel9 = pygame.mixer.Channel(9)
channel10 = pygame.mixer.Channel(10)
channel11 = pygame.mixer.Channel(11)
channel12 = pygame.mixer.Channel(12)
channel13 = pygame.mixer.Channel(13)
channel14 = pygame.mixer.Channel(14)
channel15 = pygame.mixer.Channel(15)
channel16 = pygame.mixer.Channel(16)
channel17 = pygame.mixer.Channel(17)
channel18 = pygame.mixer.Channel(18)
channel19 = pygame.mixer.Channel(19)
channel20 = pygame.mixer.Channel(20)
channel21 = pygame.mixer.Channel(21)
channel22 = pygame.mixer.Channel(22)
channel23 = pygame.mixer.Channel(23)
channel24 = pygame.mixer.Channel(24)
channel25 = pygame.mixer.Channel(25)
channel26 = pygame.mixer.Channel(26)
channel27 = pygame.mixer.Channel(27)
channel28 = pygame.mixer.Channel(28)
# load soundfiles
echoballs = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/echo balls FIX.ogg")
organbounce = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/ORGAN BOUNCE fix.ogg")
jar = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/jar whoop fix.ogg")
garland = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/GARLAND_fix.ogg")
dribble= pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/dribble.ogg")
cowbell = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/cowbell fix.ogg")
clackyballs = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/clacky balls boucne.ogg")
burpees = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/burpees_fix.ogg")
brokensynth = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/broken synth bounce.ogg")
woolballs = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/wool balls in jar FIX.ogg")
wiimoye = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/wiimoye_fix.ogg")
warpyorgan = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/warpy organ bounce#.2.ogg")
vibrate = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/vibrate fix.ogg")
turtlesbounce = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/turtles fix.ogg")
timer = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/timer.ogg")
tape = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/tape fix.ogg")
tambourine = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/TAMBOURINE.ogg")
springybounce = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/springy bounce.ogg")
smash3 = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/smash fix.ogg")
bristle2 = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/BRISTLE FIX.ogg")
blackkeys = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/black keys FIX.ogg")
zipper = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/zipper.ogg")
presatisfactionsweep = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/pre-satisfaction sweep .ogg")
satisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/SATISFACTION.ogg")
altsatisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/alt_satisfaction_trimmed.ogg")
solosatisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/SOLO_SATISFACTION.ogg")
print("sound files loaded")
# initializing sounds list
soundsList = [echoballs, organbounce, zipper, jar, garland, dribble, cowbell, clackyballs, burpees, brokensynth, woolballs,
wiimoye, warpyorgan, vibrate, turtlesbounce, timer, tambourine, springybounce, smash3, bristle2, blackkeys, zipper ]
IRcount = 0 # define initial state for main loop
pygame.display.set_mode((32, 8))
print("pygame dispaly open")
# LED strip configuration:
LED_COUNT = 256 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 100 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
# Define functions which animate LEDs in various ways.
# PNG to LED function used to shuffle througfh folders of numbered PNGs exported
# from animations created
def pngToLED (strip, pngfile):
RGBimage = Image.open(pngfile).convert('RGB')
np_image = np.array(RGBimage)
colours = [Color(x[0],x[1],x[2]) for rows in np_image for x in rows]
colours2d = np.reshape(colours, (32, 8), order='F')
colours2d[1::2, :] = colours2d[1::2, ::-1]
pic = colours2d.flatten('C')
for i in range( 0, strip.numPixels(), 1 ):# iterate over all LEDs - range(start_value, end_value, step)
strip.setPixelColor(i, int(pic[ i ]))
strip.show()
def colorWipe(strip, color,wait_ms=10):
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(1)
def theaterChase(strip, color, wait_ms, iterations=10):
"""Movie theater light style chaser animation."""
for j in range(iterations):
for q in range(3):
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, color)
strip.show()
time.sleep(wait_ms/1000.0)
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, 0)
def wheel(pos):
"""Generate rainbow colors across 0-255 positions."""
if pos < 85:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return Color(0, pos * 3, 255 - pos * 3)
def rainbow(strip, wait_ms=20, iterations=1):
"""Draw rainbow that fades across all pixels at once."""
for j in range(256*iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.show()
time.sleep(wait_ms/1000.0)
def rainbowCycle(strip, wait_ms=20, iterations=5):
"""Draw rainbow that uniformly distributes itself across all pixels."""
for j in range(256*iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))
strip.show()
time.sleep(wait_ms/1000.0)
def theaterChaseRainbow(strip, wait_ms=90):
"""Rainbow movie theater light style chaser animation."""
for j in range(256):
for q in range(3):
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, wheel((i+j) % 255))
strip.show()
time.sleep(wait_ms/1000.0)
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, 0)
# Plasma LED Function from Root 42
def plasmaLED (plasmaTime):
h = 8
w = 32
out = [ Color( 0, 0, 0 ) for x in range( h * w ) ]
plasmaBright = 100.0
for x in range( h ):
for y in range( w ):
hue = (4.0 + math.sin( plasmaTime + x ) + math.sin( plasmaTime + y / 4.5 ) \
+ math.sin( x + y + plasmaTime ) + math.sin( math.sqrt( ( x + plasmaTime ) ** 2.0 + ( y + 1.5 * plasmaTime ) ** 2.0 ) / 4.0 ))/8
hsv = colorsys.hsv_to_rgb( hue , 1, 1 )
if y % 2 == 0: #even
out[ x + (h * y)] = Color( *[ int( round( c * plasmaBright ) ) for c in hsv ] )
else: #odd
out[ (y * h) + (h -1 -x) ] = Color( *[ int( round( c * plasmaBright ) ) for c in hsv ] )
for i in range( 0, strip.numPixels(), 1 ):# iterate over all LEDs - range(start_value, end_value, step)
strip.setPixelColor(i, out[ i ]) # set pixel to color in picture
strip.show()
# variables for plasma
plasmaTime = 5.0 # time
plasmaSpeed = 0.05 # speed of time
# thread for IRcounter function
class TempTask:
def __init__(self):
self.ir_temp = 0
self.lock = threading.Lock() #control concurrent access for safe multi thread access
self.thread = threading.Thread(target=self.update_temp)
def update_temp(self):
while True:
with self.lock:
self.ir_temp = irCounter()
time.sleep(0.1)
def start(self):
self.thread.start()
# Millis timer count function
def CheckTime( lastTime, wait):
if currentTime - lastTime >= wait:
lastTime += wait
return True
return False
# Main program logic follows:
if __name__ == '__main__':
# not currently starting the trhead because program is locking up without it
# want to figure out initial problem first
#start thread
#task = TempTask()
#task.start()
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
# Intialize the library (must be called once before other functions).
strip.begin()
print ('Press Ctrl-C to quit.')
try:
while True:
currentTime = int(round(time.time() * 1000))
if IRcount == 0:
#random solid color
colorWipe(strip, Color(random.randint(60,255), random.randint(60,255), random.randint(60,255)))
# use random.sample() to shuffle sounds list
shuffledSounds = random.sample(soundsList, len(soundsList))
if pygame.mixer.Channel(0).get_busy() == False: channel0.play(shuffledSounds[0],loops = -1)
thresh = 0
'''
# the threshold check below is the only thing I have taken out of
# Program on my test Raspberry Pi. It seems to not lock up without it
# not sure why this would be a problem.
thresh = int(maxTemp())
print (thresh)
if thresh >= 27:
InflateWait = int(round(time.time() * 1000))
print (thresh)
IRcount = 1
print("Threshold Temp Detected: Begin Sound Sequence")
else:
IRcount = 0
'''
if CheckTime(InflateWait,random.randint(minTime, maxTime)):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 1:
LEDimages = glob.glob("/home/pi/ruff-wavs/Crystal_Mirror/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(1).get_busy() == False:
channel1.play(shuffledSounds[1],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 2:
LEDimages = glob.glob("/home/pi/ruff-wavs/Mercury_Loop/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(2).get_busy() == False:
channel2.play(shuffledSounds[2],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 3:
LEDimages = glob.glob("/home/pi/ruff-wavs/Pink_Lava/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(3).get_busy() == False:
channel3.play(shuffledSounds[3],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 4:
LEDimages = glob.glob("/home/pi/ruff-wavs/Horiz_Mosaic/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(4).get_busy() == False:
channel4.play(shuffledSounds[4],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 5:
plasmaLED()
plasmaTime = plasmaTime + plasmaSpeed # increment plasma time
if pygame.mixer.Channel(5).get_busy() == False:
channel5.play(shuffledSounds[5],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 6:
LEDimages = glob.glob("/home/pi/ruff-wavs/Radio_Loop/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(6).get_busy() == False:
channel6.play(shuffledSounds[6],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 7:
LEDimages = glob.glob("/home/pi/ruff-wavs/Star_Loop/*.png")
for LEDimage in sorted(LEDimages):
pngToLED (strip, LEDimage)
if pygame.mixer.Channel(7).get_busy() == False:
channel7.play(shuffledSounds[7],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
elif IRcount == 14:
plasmaLED()
plasmaTime = plasmaTime + plasmaSpeed # increment plasma time
if pygame.mixer.Channel(14).get_busy() == False:
channel14.play(shuffledSounds[14],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
print (thresh)
elif IRcount == 15:
plasmaLED()
plasmaTime = plasmaTime + plasmaSpeed # increment plasma time
if pygame.mixer.Channel(15).get_busy() == False:
channel15.play(shuffledSounds[15],loops = -1)
waitTime = random.randint(minTime, maxTime)
if CheckTime(InflateWait,waitTime):
InflateWait = int(round(time.time() * 1000))
IRcount += 1
print(IRcount)
elif IRcount == 16:
# random color theater chase increment random ms to speed up with sounds
theaterChase(strip, Color(random.randint(1,255), random.randint(1,255), random.randint(1,255)), random.randint(40,50))
pygame.mixer.fadeout(45000)
if pygame.mixer.Channel(22).get_busy() == False:
channel22.play(presatisfactionsweep)
IRcount = 17
print(IRcount)
print("sweep end start")
elif IRcount == 18:
# random color theater chase increment random ms to speed up with sounds
theaterChase(strip, Color(random.randint(1,255), random.randint(1,255), random.randint(1,255)), random.randint(30,40))
if pygame.mixer.Channel(22).get_busy() == False:
pygame.mixer.stop()
channel23.play(satisfaction)
IRcount = 19
print(IRcount)
print("Play Satisfaction Sount")
elif IRcount == 19:
rainbowCycle(strip, 5)
if pygame.mixer.Channel(23).get_busy() == False: IRcount = 0
except KeyboardInterrupt:
colorWipe(strip, Color(0,0,0), 1)
pygame.mixer.stop()
pygame.quit()
```
Update 1 - Suspected Function(s)
When I left the script running overnight and came to the exhibit in the morning, it would be stuck in the first state, IRcount = 0. The only things that happen in that state are the maxTemp() call to get the max temp and the LED colorWipe function to cycle colors.
When I came in in the morning it would be stuck, playing a single sound from pygame as it should, but not cycling colors. I removed maxTemp() from my test Pi and it has been working fine.
def maxTemp():
    mlx.setup(8)  # set frame rate of MLX90640
    f = mlx.get_frame()
    mlx.cleanup()
    # get max and min temps from sensor
    # v_min = min(f)
    v_max = int(max(f))
    return v_max
Update #2
I thought the thread might be the problem, so I commented out the thread start call. That is why I made the simpler maxTemp() function, to see if it would work better than the thread. So when I was using maxTemp(), the thread wasn't being called.
I don't understand threads very well. Is it possible to have the max temp variable update continuously and still have the simple OpenCV/numpy manipulations running continuously? That would be ideal. When I originally added the thread it seemed to stop after a few cycles.
I do not have a join on the thread. I know threads don't "restart", but do I need to start it again when the state machine starts over?
# not currently starting the thread because program is locking up without it
# want to figure out initial problem first
#start thread
#task = TempTask()
#task.start()
Update #3
I uploaded new code that eliminates the duplicate functions; everything is handled by the TempTask thread now, and that seems to work fine. I also added the GitHub suggestion of re-polling the thermal sensor when a frame is a duplicate, but that case has not occurred.
I left the program running overnight, and when I came in in the morning it was locked up. The SD card is set to read-only mode. I SSH'd into the Pi; my auto-start Python script is in /etc/profile.
It seems to start the script each time I log in over SSH. When I logged in this morning to see if the Pi was still up, it gave the "no space left on device" error below.
```
Traceback (most recent call last):
  File "/home/pi/ruff-wavs/shufflewavdemo.py", line 210, in <module>
    altsatisfaction = pygame.mixer.Sound("/home/pi/ruff-wavs/sounds/alt_satisfaction_trimmed.ogg")
pygame.error: Unable to open file '/home/pi/ruff-wavs/sounds/alt_satisfaction_trimmed.ogg'
OSError: [Errno 28] No space left on device

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/lib/python3.5/runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
  File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "/usr/local/lib/python3.5/dist-packages/virtualenvwrapper/hook_loader.py", line 223, in <module>
    main()
  File "/usr/local/lib/python3.5/dist-packages/virtualenvwrapper/hook_loader.py", line 145, in main
    output.close()
OSError: [Errno 28] No space left on device
-bash: cannot create temp file for here-document: No space left on device
```
Could that be because it is in read-only mode?
I used this script to switch from writable to read-only and back:
https://github.com/JasperE84/root-ro
I suspect the issue is that you're accessing the mlx device both in the main thread via maxTemp() as well as in the irCounter() thread. The fact that it works when you take out the maxTemp call, and that that call happens in the if IRcount == 0: state supports this.
I would move the maxTemp functionality into the irCounter thread, so that the device is accessed from only a single thread, and have the thread update a global variable (protected by a lock) with the maxTemp result if you need to retain that functionality.
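As a rough sketch of that suggestion (built on the TempTask class already in your script, with the shared-state names made up for illustration), the thread owns all mlx access and publishes both the contour count and the max temperature, while the main loop only ever reads under the lock:

import threading
import time

class TempTask:
    """Single owner of the MLX90640: polls it in one thread and publishes results."""
    def __init__(self):
        self.ir_temp = 0        # latest max temperature (written only by the thread)
        self.people = 0         # latest contour count (written only by the thread)
        self.lock = threading.Lock()
        self.thread = threading.Thread(target=self.update, daemon=True)

    def update(self):
        while True:
            # assumes irCounter() is modified to also return max(f) alongside the count
            count, v_max = irCounter()
            with self.lock:
                self.people = count
                self.ir_temp = v_max
            time.sleep(0.1)

    def start(self):
        self.thread.start()

    def max_temp(self):
        # Called from the main loop instead of maxTemp(); never touches the sensor.
        with self.lock:
            return self.ir_temp

# In the main loop:
# task = TempTask(); task.start()
# ...
# thresh = task.max_temp()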

Optimizing a complex algorithm

I know this is not an ideal place for questions of this scope, but I'm not sure where else to ask this or how to break it down. I've been working on a function for the past couple of weeks; it runs, but for it to be feasible for my purposes I need to speed it up 200-300x.
I have an image array, where all pixels of similar color have been averaged and set to that average value. Then I have a 2D array of the same height and width, which labels each unique and non-contiguous feature of the image.
Using these I need to assess the size of each feature and its level of contrast to each of its neighbors. These values are used in an equation and if the output of that equation is below a certain threshold, that feature is merged with its most similar neighbor.
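To make the merge criterion concrete (read off the code below, so the constant comes from there; the helper names are just for illustration): a region is merged with its most similar neighbor when the Euclidean distance between the two mean RGB colors falls below a size-dependent threshold.

import numpy as np

MAXIMUMCONTRAST = 443.405          # sqrt(3) * 256, roughly the largest possible RGB distance

def merge_threshold(region_size, max_size, ratio=1.0):
    # Larger regions get a smaller threshold, so big features resist merging.
    return (ratio - region_size / max_size) * MAXIMUMCONTRAST

def contrast(mean_a, mean_b):
    # Euclidean distance between two mean RGB colors.
    return np.sqrt(np.sum((np.asarray(mean_a) - np.asarray(mean_b)) ** 2))

# e.g. a region covering 1% of the labeled pixels:
# merge_threshold(1_000, 100_000) -> about 438.97, so almost any neighbor with
# contrast below that value is a merge candidate.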
I've uploaded the image and the feature label array (written out with numpy.savetxt()) to OneDrive and attached the links below.
code:
def textureRemover(pix, labeledPix, ratio = 1.0):
    numElements = numpy.amax(labeledPix)
    maxSize = numpy.count_nonzero(labeledPix)
    MAXIMUMCONTRAST = 443.405
    for regionID in range(numElements):
        start = time.clock()
        regionID += 1
        if regionID not in labeledPix:
            continue
        #print(regionID)
        #print((regionID / numElements) * 100, '%')
        neighborIDs = getNeighbors(labeledPix, regionID)
        if 0 in neighborIDs:
            neighborIDs.remove(0)  #remove white value
        regionMask = labeledPix == regionID
        region = pix[regionMask]
        size = numpy.count_nonzero(regionMask)
        contrastMin = (ratio - (size / maxSize)) * MAXIMUMCONTRAST
        regionMean = region.mean(axis = 0)
        if len(neighborIDs) > 200:
            contrast = numpy.zeros(labeledPix.shape)
            contrast[labeledPix != 0] = numpy.sqrt(numpy.sum((regionMean - pix[labeledPix != 0])**2, axis = -1))
            significantMask = (contrast < contrastMin)
            significantContrasts = list(numpy.unique(contrast[significantMask]))
            significantNeighbors = {}
            for significantContrast in significantContrasts:
                minContrast = min(significantContrasts)
                if labeledPix[contrast == minContrast][0] in neighborIDs:
                    significantNeighbors[minContrast] = labeledPix[contrast == minContrast][0]
                else:
                    significantContrasts.pop(significantContrasts.index(minContrast))
        else:
            significantNeighbors = {}
            for neighborID in neighborIDs:
                neighborMask = labeledPix == neighborID
                neighbor = pix[neighborMask]
                neighborMean = neighbor.mean(axis = 0)
                contrast = numpy.sqrt(numpy.sum((regionMean - neighborMean)**2, axis = -1))
                if contrast < contrastMin:
                    significantNeighbors[contrast] = neighborID
        if significantNeighbors:
            contrasts = significantNeighbors.keys()
            minContrast = min(contrasts)
            minNeighbor = significantNeighbors[minContrast]
            neighborMask = labeledPix == minNeighbor
            neighborSize = numpy.count_nonzero(neighborMask)
            if neighborSize <= size:
                labeledPix[neighborMask] = regionID
                pix[neighborMask] = regionMean
            else:
                labeledPix[regionMask] = minNeighbor
                pix[regionMask] = pix[neighborMask].mean(axis = 0)
        print(time.clock() - start)
    return pix
pix
labeledPix
I know I'm asking for a lot of help, but I've been stuck on this for a few weeks and am unsure what else I can do. Any help will be greatly appreciated!
Here is an optimized version of most of your logic (I underestimated how much work that would be...). I skipped the >200 branch and am using fake data because I couldn't access your link. When I switch off your >200 branch, your code and mine appear to give the same result, but mine is quite a bit faster on the fake example.
Sample output:
original
26.056154000000003
optimized
0.763613000000003
equal
True
Code:
import numpy as np
from numpy.lib.stride_tricks import as_strided
def mockdata(m, n, k):
colors = np.random.random((m, n, 3))
i, j = np.ogrid[:m, :n]
labels = np.round(k*k * (np.sin(0.05 * i) + np.sin(0.05 * j)**2)).astype(int) % k
return colors, labels
DIAG_NEIGHBORS = True
MAXIMUMCONTRAST = 443.405
def textureRemover2(pix, labeledPix, ratio=1.0):
start = time.clock()
pix, labeledPix = pix.copy(), labeledPix.copy()
pixf, labeledPixf = pix.reshape(-1, 3), labeledPix.ravel()
m, n = labeledPix.shape
s, t = labeledPix.strides
# find all sizes in O(n)
sizes = np.bincount(labeledPixf)
n_ids = len(sizes)
# make index for quick access to labeled areas
lblidx = np.split(np.argsort(labeledPixf), np.cumsum(sizes[:-1]))
lblidx[0] = None
# find all mean colors in O(n)
regionMeans = np.transpose([np.bincount(labeledPix.ravel(), px)
/ np.maximum(sizes, 1)
for px in pix.reshape(-1, 3).T])
# find all neighbors in O(n)
horz = set(frozenset(p) for bl in as_strided(labeledPix, (m,n-1,2), (s,t,t))
for p in bl)
vert = set(frozenset(p) for bl in as_strided(labeledPix, (m-1,n,2), (s,t,s))
for p in bl)
nb = horz|vert
if DIAG_NEIGHBORS:
dwnrgt = set(frozenset(p) for bl in as_strided(
labeledPix, (m-1,n-1,2), (s,t,s+t)) for p in bl)
dwnlft = set(frozenset(p) for bl in as_strided(
labeledPix[::-1], (m-1,n-1,2), (-s,t,t-s)) for p in bl)
nb = nb|dwnrgt|dwnlft
nb = {p for p in nb if len(p) == 2 and not 0 in p}
nb_dict = {}
for a, b in nb:
nb_dict.setdefault(a, set()).add(b)
nb_dict.setdefault(b, set()).add(a)
maxSize = labeledPix.size - sizes[0]
for id_ in range(1, n_ids):
nbs = list(nb_dict.get(id_, set()))
if not nbs:
continue
d = regionMeans[id_] - regionMeans[nbs]
d = np.einsum('ij,ij->i', d, d)
mnd = np.argmin(d)
if d[mnd] < ((ratio - sizes[id_]/maxSize) * MAXIMUMCONTRAST)**2:
mn = nbs[mnd]
lrg, sml = (id_, mn) if sizes[id_] >= sizes[mn] else (mn, id_)
sizes[lrg], sizes[sml] = sizes[lrg] + sizes[sml], 0
for nb in nb_dict[sml]:
nb_dict[nb].remove(sml)
nb_dict[nb].add(lrg)
nb_dict[lrg].update(nb_dict[sml])
nb_dict[lrg].remove(lrg)
nb_dict[sml] = set()
pixf[lblidx[sml]] = regionMeans[lrg]
labeledPixf[lblidx[sml]] = lrg
lblidx[lrg], lblidx[sml] = np.r_[lblidx[lrg],lblidx[sml]], None
print(time.clock() - start)
return pix
from scipy.ndimage.morphology import binary_dilation
import time
STRUCTEL = np.ones((3,3), int) if DIAG_NEIGHBORS else np.array([[0,1,0],[1,1,1],[0,1,0]], int)
def getNeighbors(labeledPix, regionID):
nb = set(labeledPix[binary_dilation(labeledPix == regionID, structure=STRUCTEL)])
nb.remove(regionID)
return sorted(nb)
numpy = np
def textureRemover(pix, labeledPix, ratio = 1.0):
pix, labeledPix = pix.copy(), labeledPix.copy()
numElements = numpy.amax(labeledPix)
maxSize = numpy.count_nonzero(labeledPix)
MAXIMUMCONTRAST = 443.405
start = time.clock()
for regionID in range(numElements):
regionID += 1
if regionID not in labeledPix:
continue
#print(regionID)
#print((regionID / numElements) * 100, '%')
neighborIDs = getNeighbors(labeledPix, regionID)
if 0 in neighborIDs:
neighborIDs.remove(0) #remove white value
regionMask = labeledPix == regionID
region = pix[regionMask]
size = numpy.count_nonzero(regionMask)
contrastMin = (ratio - (size / maxSize)) * MAXIMUMCONTRAST
regionMean = region.mean(axis = 0)
if len(neighborIDs) > 20000:
contrast = numpy.zeros(labeledPix.shape)
contrast[labeledPix!=0] = numpy.sqrt(numpy.sum((regionMean - pix[labeledPix!=0])**2, axis = -1))
significantMask = (contrast < contrastMin)
significantContrasts = list(numpy.unique(contrast[significantMask]))
significantNeighbors = {}
for significantContrast in significantContrasts:
minContrast = min(significantContrasts)
if labeledPix[contrast == minContrast][0] in neighborIDs:
significantNeighbors[minContrast] = labeledPix[contrast == minContrast][0]
else:
significantContrasts.pop(significantContrasts.index(minContrast))
else:
significantNeighbors = {}
for neighborID in neighborIDs:
neighborMask = labeledPix == neighborID
neighbor = pix[neighborMask]
neighborMean = neighbor.mean(axis = 0)
contrast = numpy.sqrt(numpy.sum((regionMean - neighborMean)**2, axis = -1))
if contrast < contrastMin:
significantNeighbors[contrast] = neighborID
if significantNeighbors:
contrasts = significantNeighbors.keys()
minContrast = min(contrasts)
minNeighbor = significantNeighbors[minContrast]
neighborMask = labeledPix == minNeighbor
neighborSize = numpy.count_nonzero(neighborMask)
if neighborSize <= size:
labeledPix[neighborMask] = regionID
pix[neighborMask] = regionMean
else:
labeledPix[regionMask] = minNeighbor
pix[regionMask] = pix[neighborMask].mean(axis = 0)
print(time.clock() - start)
return pix
data = mockdata(200, 200, 1000)
print('original')
res0 = textureRemover(*data)
print('optimized')
res2 = textureRemover2(*data)
print('equal')
print(np.allclose(res0, res2))
