running two different methods at the same time - python

I am trying to run two methods at the same time in Python. One of them plays the sound of a sine wave and the other one measures it. Both methods work fine on their own, but I could not figure out how to start them at the same time.
import sounddevice as sd
from numpy import linalg as LA
import numpy as np
from pysine import sine
import time
from threading import Thread
import math

duration = 10  # seconds

def print_sound(indata, outdata, frames, time, status):
    volume_norm = np.linalg.norm(indata) * 10
    dB = 20 * math.log(volume_norm)
    print(int(dB))

def sine_wave():
    frequency = 5000
    sine(frequency=(frequency), duration=1.0)

sine_wave()

with sd.Stream(callback=print_sound):
    sd.sleep(duration * 10000)

def main():
    thr1 = Thread(target=print_sound)
    thr2 = Thread(target=sine_wave)

if __name__ == "__main__":
    thr1.start()
    thr2.start()

Your syntax is wrong. Try this (if it helps, mark it as the answer):
import sounddevice as sd
from numpy import linalg as LA
import numpy as np
from pysine import sine
import time
from threading import Thread
import math

duration = 10  # seconds

def print_sound(indata, outdata, frames, time, status):
    volume_norm = np.linalg.norm(indata) * 10
    dB = 20 * math.log(volume_norm)
    print(int(dB))

def sine_wave():
    frequency = 5000
    sine(frequency=(frequency), duration=1.0)

with sd.Stream(callback=print_sound):
    sd.sleep(duration * 10000)

def main():
    thr1 = Thread(target=print_sound)
    thr2 = Thread(target=sine_wave)
    thr1.start()
    thr2.start()

if __name__ == "__main__":
    main()
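For reference, here is a minimal sketch of one way to run the two pieces concurrently (assuming sounddevice and pysine are installed; the measure() and play() wrapper functions are not in the original code, they just give each Thread one task):

import math
import numpy as np
import sounddevice as sd
from pysine import sine
from threading import Thread

duration = 10  # seconds

def print_sound(indata, outdata, frames, time, status):
    # input/output callback: indata holds the recorded block
    volume_norm = np.linalg.norm(indata) * 10
    print(int(20 * math.log(volume_norm)))

def measure():
    # keep the stream open for the whole duration; the callback does the printing
    with sd.Stream(callback=print_sound):
        sd.sleep(duration * 1000)  # sd.sleep() takes milliseconds

def play():
    sine(frequency=5000, duration=duration)

if __name__ == "__main__":
    thr1 = Thread(target=measure)
    thr2 = Thread(target=play)
    thr1.start()
    thr2.start()
    thr1.join()
    thr2.join()

Putting the stream inside its own function keeps it out of module-level code, so nothing blocks before the threads are started.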


How to use multiprocessing in another code and function? [duplicate]

This question already has answers here:
What does if __name__ == "__main__": do?
(45 answers)
Closed 6 months ago.
I am trying to multiprocess cell detection via the deepcell package. The deepcell detection works fine on small images, but for bigger images it either does not work or takes a really, really long time.
So I'm trying to cut the images into small patches and then use multiprocessing to feed them to the cell detection.
I need to be able to run the pool_cell_detection() function from another module and get its return value (allPoints), whereas if I use it inside an if __name__ == '__main__' wrapper, I cannot get the return value. Can you suggest how I can do this?
Here is my code 1:
import numpy as np
import os
import matplotlib.pyplot as plt
import cv2 as cv
from multiprocessing import Pool
from deepcell.mesmer import Mesmer
import time

blevel_image = cv.imread("./images/blevel_eq_p.png", 0)
app = Mesmer()

def deepcell_detection(image0, mpp):
    print(type(image0))
    cv.imwrite("./images/image1.png", image0)
    image = np.stack((image0, image0), axis=-1)
    image = np.expand_dims(image, 0)
    labeled_image, coords = app.predict(image, image_mpp=mpp)
    print(len(coords))
    return coords

def pool_cell_detection(img_channel):
    blobs_log = []
    r, c = img_channel.shape[0:2]
    mpp = 2
    rstep = r // 10
    cstep = c // 10
    patches = []
    for i in range(10):
        for j in range(10):
            img_patch = img_channel[i*rstep:(i+1)*rstep, j*cstep:(j+1)*cstep]
            patches.append([img_patch, mpp])
    with Pool(4) as p:
        print("pooling")
        allPoints = p.map(deepcell_detection, patches)
    return allPoints

def main():
    allPoints = pool_cell_detection(blevel_image)

if __name__ == '__main__':
    main()
In code 2, I need something like the following:
import code1

def func_something():
    # Many operations
    allPoints = pool_cell_detection(blevel_image)
But I'm not sure how to write code 2 so that I can get allPoints.
As the multiprocessing documentation says, the entry point of a multiprocessing program must be wrapped in if __name__ == '__main__': when using the spawn start method, which is the only available option on Windows, which you're on.
Change the final invocation from
blobs_log = pool_cell_detection(blevel_image)
to e.g.
def main():
    blobs_log = pool_cell_detection(blevel_image)

if __name__ == '__main__':
    main()
Following up on the comment threads, all in all, you might have, let's say, multiprocessing_cell_detect.py:
import multiprocessing
import cv2 as cv
import numpy as np
from deepcell.mesmer import Mesmer

def deepcell_detection(image0, mpp):
    cv.imwrite("./images/image1.png", image0)
    image = np.stack((image0, image0), axis=-1)
    image = np.expand_dims(image, 0)
    labeled_image, coords = Mesmer().predict(image, image_mpp=mpp)
    return coords

def pool_cell_detection(img_channel):
    r, c = img_channel.shape[0:2]
    mpp = 2
    rstep = r // 10
    cstep = c // 10
    patches = []
    for i in range(10):
        for j in range(10):
            img_patch = img_channel[i * rstep:(i + 1) * rstep, j * cstep:(j + 1) * cstep]
            patches.append([img_patch, mpp])
    with multiprocessing.Pool(4) as p:
        # starmap unpacks each [img_patch, mpp] pair into the two arguments
        return p.starmap(deepcell_detection, patches)
and my_cell_program.py:
import cv2 as cv
from multiprocessing_cell_detect import pool_cell_detection

def main():
    blevel_image = cv.imread("./images/blevel_eq_p.png", 0)
    allPoints = pool_cell_detection(blevel_image)
    print(allPoints)

if __name__ == '__main__':
    main()
and you run python my_cell_program.py.
So long as the main entry point of the program is guarded, things will work. The module you import a multiprocessing thing from will not need to be guarded (unless you also wish to use that module stand-alone).
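For example, a hypothetical addition to multiprocessing_cell_detect.py, needed only if you also want to run that module by itself:

# optional: stand-alone entry point; the test image path here is illustrative
if __name__ == '__main__':
    import cv2 as cv
    image = cv.imread("./images/blevel_eq_p.png", 0)
    print(len(pool_cell_detection(image)))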

How to implement synchronization in multithreading with Python and ROS Services?

I'm trying to implement multithreading (parallel processing) with Python, using a mutex (threading.Lock). The first process checks the pressure value and the modem update (implemented in the code as the odom_callback and callback_modem functions), and the second process calls ROS Services (implemented as the ros_serice_server server and imu_client client functions). Here is the implementation code in Python:
#!/usr/bin/env python3
from __future__ import print_function
import rospy
import numpy as np
from os import system
import time
import threading
import Microcontroller_Manager_Serial as Serial
import IMU_Functions as IMU
import Motors_Functions as Motor
import Pressure_Functions as Pressure
from geometry_msgs.msg import Vector3
import Modem_Functions as Modem
import threading
import time
import serial
import serial.tools.list_ports
from time import sleep
from std_msgs.msg import Float32
from std_msgs.msg import String
from demo_teleop.srv import ImuValue, ImuValueResponse

P0 = 1.01325  # Default Pressure
mutex = threading.Lock()
Communication_Mode_ = 0
pub_pressure = rospy.Publisher('depth', Vector3, queue_size=1)
pub_modem = rospy.Publisher('modem_data', Float32, queue_size=1)

def handle_ros_services(req):
    mutex.acquire(blocking=True)
    print("Server Read Data:")
    global T0
    data_received = Pressure.Pressure_Get_Final_Values(1, 1)
    #print("Server Read Data:")
    T0 = (np.int16((data_received[6]<<24) | (data_received[7]<<16) | (data_received[8]<<8) | (data_received[9])))/10000
    T = T0
    temperature = T
    current_x_orientation_s = temperature
    print("Returning Service Temperature Data", current_x_orientation_s)
    return ImuValueResponse(current_x_orientation_s, True)
    mutex.release()

def ros_serice_server():
    s = rospy.Service('imu_value', ImuValue, handle_ros_services)
    print("Ready to get_value")

def odom_callback():
    # reentrant processing
    mutex.acquire(blocking=True)
    # work serial port here, e.g. send msg to serial port
    global P0
    data_received = Pressure.Pressure_Get_Final_Values(1, 1)
    #P1 = (np.int16((data_received_pressure[6]<<24) | (data_received_pressure[7]<<16) | (data_received_pressure[8]<<8) | (data_received_pressure[9])))/10000
    P1 = (np.int16((data_received[6]<<24) | (data_received[7]<<16) | (data_received[8]<<8) | (data_received[9])))/10000
    #P0 = (np.int16((data_received_pressure[6]<<24) | (data_received_pressure[7]<<16) | (data_received_pressure[8]<<8) | (data_received_pressure[9])))/10000
    P0 = (np.int16((data_received[6]<<24) | (data_received[7]<<16) | (data_received[8]<<8) | (data_received[9])))/10000
    P = P0  # Relative Measured Pressure
    feedback = Vector3()
    feedback.x = 0  # Angular Velocity
    feedback.y = 0
    feedback.z = P/9.81  # Depth
    pressure = feedback.z
    print("Pressure : ", pressure)
    pub_pressure.publish(feedback)
    # reentrant processing
    mutex.release()

def callback_modem(event):
    # reentrant processing
    mutex.acquire(blocking=True)
    # work serial port here, e.g. check for incoming data
    event = Serial.Serial_Port_Receive_Data(20, 0.2)
    if (event == 1):  # Received data from acoustic modem
        modem_data = event
        pub_modem.publish(modem_data)
        print("received ")
    else:
        print("not received...... ")
    mutex.release()

if __name__ == '__main__':
    # initialize serial port here
    Serial.Serial_Port_Standard()
    rospy.init_node('imu_value')
    ros_serice_server()
    rospy.Timer(rospy.Duration(1), callback_modem)
    while not rospy.is_shutdown():
        try:
            odom_callback()
        except:
            print('pass')
And the client node
#!/usr/bin/env python3
from __future__ import print_function
import rospy
import sys
import numpy as np
from os import system
import threading
import Microcontroller_Manager_Serial as Serial
import IMU_Functions as IMU
import Motors_Functions as Motor
import Pressure_Functions as Pressure
from geometry_msgs.msg import Vector3
import Modem_Functions as Modem
import time
import serial
import serial.tools.list_ports
from time import sleep
from std_msgs.msg import Float32
from std_msgs.msg import String
from demo_teleop.srv import *

mutex = threading.Lock()
Communication_Mode_ = 0
pub_modem = rospy.Publisher('modem_data', Float32, queue_size=1)

def imu_client():
    mutex.acquire(blocking=True)
    rospy.wait_for_service('imu_value')
    imu_value = rospy.ServiceProxy('imu_value', ImuValue)
    print("Request call send")
    resp1 = imu_value(0.05)
    return resp1.current_x_orientation_s
    mutex.release()

if __name__ == "__main__":
    rospy.init_node('client_node_f')
    while not rospy.is_shutdown():
        try:
            print("entering client")
            value = imu_client()
            print(value)
            time.sleep(1)
        except:
            print('pass')
So the output is the following. The output of the first process, with the ROS Services server, is:
Pressure : 0.10602446483180428
Server Read Data:
Returning Service Temperature Data 1.0401
And then after calling the client I got
entering client
Request call send
1.0401
entering client
The problem is that after calling the ROS Service client node, the process stops and doesn't continue with the first process (pressure value and modem update). The ROS Services process should be called only on demand; it should halt the first process (pressure and modem), and then the first process should resume its work. So, do I need to implement semaphores for the ROS Services call? If yes, how should that look in the code? So I do need some kind of synchronization, right? Please, any help?
Your problem is:
def handle_ros_services(req):
    mutex.acquire(blocking=True)
    ...
    return ImuValueResponse(current_x_orientation_s, True)
    mutex.release()
Because of the return statement, the release is never executed.
You need at the end:
value = ImuValueResponse(...)
mutex.release()
return value
Even better would be to use your mutex as part of a with statement:
with mutex:
    do anything you want, knowing that the lock will be released
    at the end, even if you return or throw an exception.
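A minimal sketch of the handler rewritten that way, using the same names as in the question (the temperature computation is abbreviated):

def handle_ros_services(req):
    global T0
    with mutex:  # released automatically, even on return or on an exception
        print("Server Read Data:")
        data_received = Pressure.Pressure_Get_Final_Values(1, 1)
        T0 = (np.int16((data_received[6] << 24) | (data_received[7] << 16) |
                       (data_received[8] << 8) | (data_received[9]))) / 10000
        print("Returning Service Temperature Data", T0)
        return ImuValueResponse(T0, True)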

Getting Pool apply_async return too slow

I'm trying to make a bot for IQ Option.
I already did it, but I did it one by one; that is, I had to open 10 bots to be able to check 10 pairs.
I've been trying all day long with ThreadPool, threading, map and starmap (I think I didn't use them as well as they could be used).
The thing is: I'm checking the values of pairs (EURUSD, EURAUD...) over the last 100 minutes. When I do it one by one, each call takes between 80 and 300 ms to return. I'm now trying to make all the calls at roughly the same time and get their results around the same time into their respective variables.
At the moment my code is like this:
from iqoptionapi.stable_api import IQ_Option
from functools import partial
from multiprocessing.pool import ThreadPool as Pool
from time import *
from datetime import datetime, timedelta
import os
import sys
import dados  # my login data
import config  # atm is just payoutMinimo = 0.79

parAtivo = {}

class PAR:
    def __init__(self, par, velas):
        self.par = par
        self.velas = velas
        self.lucro = 0
        self.stoploss = 50000
        self.stopgain = 50000

def verificaAbertasPayoutMinimo(API, payoutMinimo):
    status = API.get_all_open_time()
    profits = API.get_all_profit()
    abertasPayoutMinimo = []
    for x in status['turbo']:
        if status['turbo'][x]['open'] and profits[x]['turbo'] >= payoutMinimo:
            abertasPayoutMinimo.append(x)
    return abertasPayoutMinimo

def getVelas(API, par, tempoAN, segundos, numeroVelas):
    return API.get_candles(par, tempoAN*segundos, numeroVelas, time()+50)

def logVelas(velas, par):
    global parAtivo
    parAtivo[par] = PAR(par, velas)

def verificaVelas(API, abertasPayoutMinimo, tempoAN, segundos, numeroVelas):
    pool = Pool()
    global parAtivo
    for par in abertasPayoutMinimo:
        print(f"Verificando par {par}")
        pool = Pool()
        if par not in parAtivo:
            callbackFunction = partial(logVelas, par=par)
            pool.apply_async(
                getVelas,
                args=(API, par, tempoAN, segundos, numeroVelas),
                callback=callbackFunction
            )
        pool.close()
        pool.join()

def main():
    tempoAN = 1
    segundos = 60
    numeroVelas = 20
    tempoUltimaVerificacao = datetime.now() - timedelta(days=99)
    global parAtivo
    conectado = False
    while not conectado:
        API = IQ_Option(dados.user, dados.pwd)
        API.connect()
        if API.check_connect():
            os.system("cls")
            print("Conectado com sucesso.")
            sleep(1)
            conectado = True
        else:
            print("Erro ao conectar.")
            sleep(1)
            conectado = False
    API.change_balance("PRACTICE")
    while True:
        if API.get_balance() < 2000:
            API.reset_practice_balance()
        if datetime.now() > tempoUltimaVerificacao + timedelta(minutes=5):
            abertasPayoutMinimo = verificaAbertasPayoutMinimo(API, config.payoutMinimo)
            tempoUltimaVerificacao = datetime.now()
            verificaVelas(API, abertasPayoutMinimo, tempoAN, segundos, numeroVelas)
            for item in parAtivo:
                print(parAtivo[item])
        break  # execute only 1 time for testing

if __name__ == "__main__":
    main()
Edit 1: just complemented with more info; actually, this is the whole code right now.
Edit 2: when I print it like this:
for item in parAtivo:
    print(parAtivo[item].velas[-1]['close'])
I get:
0.26671
0.473878
0.923592
46.5628
1.186974
1.365679
0.86263
It's correct; the problem is that it takes too long, almost 3 seconds, the same as if I were doing it without ThreadPool.
Solved.
Did it using threading.Thread, like this:
for par in abertasPayoutMinimo:
    t = threading.Thread(
        target=getVelas,
        args=(API, par, tempoAN, segundos)
    )
    t.start()
    t.join()
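Note that with t.join() inside the loop, each thread still finishes before the next one starts. A sketch of a variant (using the getVelas signature from the question) that lets the requests overlap starts every thread first and only joins them afterwards:

threads = []
for par in abertasPayoutMinimo:
    t = threading.Thread(
        target=getVelas,
        args=(API, par, tempoAN, segundos, numeroVelas)
    )
    t.start()
    threads.append(t)

# only wait once every request is already running
for t in threads:
    t.join()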

Accelerating DEAP using multiprocessing reports OSError

I want to accelerate DEAP using multiprocessing but always get an OSError. Here is an abbreviated version of my code:
import operator
import math
import random
import numpy as np
import pandas as pd
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import gp
import multiprocessing

# protectedDiv
def protectedDiv(left, right):
    try:
        return left / right
    except ZeroDivisionError:
        return 1

# omitting some other functions

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax)

# here is the DEAP strongly typed GP setting
pset = gp.PrimitiveSetTyped("MAIN", [np.ndarray] * 12, np.ndarray)
pset.addPrimitive(operator.add, [np.ndarray, np.ndarray], np.ndarray)
pset.addPrimitive(operator.sub, [np.ndarray, np.ndarray], np.ndarray)
pset.renameArguments(ARG0='close')
pset.renameArguments(ARG1='open')

# here is the fitness function. My goal is to maximize the stock return's ICIR.
def evalSymbReg(individual):
    # omitting code
    return icir,

toolbox = base.Toolbox()
toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=3)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("compile", gp.compile, pset=pset)
toolbox.register("evaluate", evalSymbReg)
toolbox.register("select", tools.selTournament, tournsize=10)
toolbox.register("mate", gp.cxOnePoint)
toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
toolbox.register("mutUniform", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=10))
toolbox.decorate("mutUniform", gp.staticLimit(key=operator.attrgetter("height"), max_value=10))

def main():
    n_sample = 5000
    n_gen = 40
    cxpb = 0.6
    mutUniformpb = 0.4
    pop = toolbox.population(n=n_sample)
    hof = tools.HallOfFame(10)
    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", np.nanmean)
    mstats.register("min", np.nanmin)
    mstats.register("max", np.nanmax)
    pop, log = algorithms.my_eaSimple(pop, toolbox, cxpb, mutUniformpb, mutNodeReplacementpb, mutEphemeralpb, mutShrinkpb,
                                      n_gen, stats=mstats, halloffame=hof, verbose=True)
    # print log
    return pop, log, hof, info, top10

# here is my data file.
df = pd.read_csv(r'C:\Users\xxyao\research\国债期货\data\data_summary.csv')
df['pct-1'] = df['close'].pct_change().shift(-1)
df['month'] = [x[0:7] for x in df['date']]

if __name__ == "__main__":
    pool = multiprocessing.Pool(processes=6)
    toolbox.register('map', pool.map)
    pop, log, hof, info, top10 = main()
When I run the code I get an error message like this:
This message shows repeatedly in the window, very quickly. I don't know what is wrong. I protected the Pool() inside if __name__ == "__main__" as the DEAP documentation says, but it still doesn't work. Can somebody help me, please?
Put this code inside main() and the function will work:
pool = multiprocessing.Pool(processes=6)
toolbox.register('map', pool.map)
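A sketch of what that placement might look like (assuming the same toolbox and evolution loop as above; the pool is closed once the run is finished):

def main():
    pool = multiprocessing.Pool(processes=6)
    toolbox.register('map', pool.map)  # DEAP uses toolbox.map to evaluate individuals in parallel
    pop = toolbox.population(n=5000)
    # ... run the evolution exactly as before ...
    pool.close()
    pool.join()
    return pop

if __name__ == "__main__":
    main()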

django time checker database

I am trying to create a thread function that allows me to check a database field to see whether time.now() is greater than the time recorded in the database (PostgreSQL); the problem is that the view.py where I am calling this is blocked by the thread. This is my actual code:
PS: expire_pet is a text field, which I then cast to datetime.
import socket
import struct
from time import *
from datetime import datetime
from models import Zone
from multiprocessing import pool
import threading

class ControlHora(threading.Thread):
    def __init__(self, zone_id):
        threading.Thread.__init__(self)
        self.zone_id = zone_id

    def run(self):
        while(True):
            zone_pet = Zone.objects.filter(id=self.zone_id)
            for i in zone_pet:
                if i.pet_state == True:
                    hour = datetime.datetime.strptime(i.expire_pet, '%I:%M')
                    if hour <= datetime.datetime.now():
                        Zone.objects.filter(id=self.zone_id).update(vitrasa_pet=False)
                        Zone.objects.filter(id=self.zone_id).update(esycsa_pet=False)
                        Zone.objects.filter(id=self.zone_id).update(pet_state=False)
                        Zone.objects.filter(id=self.zone_id).update(expire_pet='')
            sleep(5)
It works; the problem was that I had been calling run in the wrong place. Thanks.
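For reference, a minimal sketch of starting the checker with start() so it runs in the background instead of blocking the view (the view function name and import path are hypothetical):

# views.py (sketch; start_pet_timer and the import path are made up for illustration)
from control_hora import ControlHora

def start_pet_timer(zone_id):
    checker = ControlHora(zone_id)
    checker.daemon = True   # don't keep the process alive just for this thread
    checker.start()         # start() runs run() in a new thread; calling run() directly would block the view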
