Discord py - multiprocessing blocks tasks.loop - python

I am creating a Discord bot with Python on Replit.
One function of the bot checks whether the current time is equal to a given time, so I have a tasks.loop event that loops every second. Another function of the bot is a command that generates a graph from data taken from an API.
Both blocks of code run fine on their own. But sometimes, after calling the graph command, the tasks.loop stops: now is no longer printed every second after bot.pt_list is printed. The following is my code:
import datetime
from discord.ext import tasks
from multiprocessing import Pool
import requests

@tasks.loop(seconds = 1)
async def notif():
    now = datetime.datetime.now() + datetime.timedelta(hours = 8)
    now = now.strftime("%H:%M:%S")
    print(now)

bot.pt_list = []

@bot.command(name = 'graph')
async def graph(ctx):
    bot.rank = rank
    timestamp_url = "https://api.sekai.best/event/29/rankings/time?region=tw"
    timestamp_response = requests.get(timestamp_url)
    timestamp_data = timestamp_response.json()["data"]
    i = 1
    timestamp_filtered = []
    while i <= len(timestamp_data):
        timestamp_filtered.append(timestamp_data[i])
        i += 12
    timestamp_url = []
    if __name__ == '__main__':
        for timestamp in timestamp_filtered:
            timestamp_url.append("https://api.sekai.best/event/29/rankings?region=tw&timestamp=" + timestamp)
        with Pool(20) as p:
            bot.pt_list = p.map(pt, timestamp_url)
        print(bot.pt_list)

def pt(timestamp_url):
    pt_response = requests.get(timestamp_url)
    pt_data = pt_response.json()["data"]["eventRankings"]
    for data in pt_data:
        if data["rank"] == 1:
            return data["score"]
And below is the output:
# prints time every second
15:03:01
15:03:02
15:03:03
15:03:04
[414505, 6782930, 13229090, 19650440, 27690605, 34044730, 34807680, 38346228, 43531083, 48973205, 52643633, 56877023, 62323476, 67464731, 69565641, 74482140, 78791756, 84277236, 87191476, 91832031, 97207348, 102692443, 104280559, 106288572, 111710142, 112763082, 112827552, 113359257, 116211652, 117475362, 117529967, 117560102, 118293877, 118293877, 118430000, 118430000]
15:03:15
15:03:15
# printing stops
However, the tasks.loop does not get stopped every time; sometimes it works and continues to print now after bot.pt_list is printed. I'm relatively new to Python and I don't know what the issue is. Could someone help explain why this is happening and how to fix it? Thank you!
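A likely factor is that requests.get and Pool.map are blocking calls running inside the graph coroutine, so the event loop that also drives notif cannot tick while they work, and mixing multiprocessing with an already-running asyncio loop (especially on Replit) can leave things wedged afterwards. Below is a sketch of one way to keep the event loop free, assuming the same bot object and pt function as in the question, with the URL-building factored into a hypothetical build_timestamp_urls() helper:

import asyncio
from concurrent.futures import ProcessPoolExecutor

@bot.command(name='graph')
async def graph(ctx):
    urls = build_timestamp_urls()  # hypothetical helper: same URL-building logic as in the question
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor(max_workers=20) as pool:
        # Run each blocking pt() call in a worker process and await the results,
        # so the event loop keeps running notif() every second in the meantime.
        futures = [loop.run_in_executor(pool, pt, url) for url in urls]
        bot.pt_list = await asyncio.gather(*futures)
    print(bot.pt_list)

A ThreadPoolExecutor (or asyncio.to_thread) would work here too, since the work is mostly waiting on HTTP.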

Related

I want to build an alert for the EMA indicator for crypto symbols in a specific list

Here is the first one, my config file:
### configuration details
TELEGRAM_TOKEN = '' # telegram bot token
TELEGRAM_CHANNEL ='' # channel id
INTERVAL = '1m' # binance time interval
SHORT_EMA = 7 # short interval for ema
LONG_EMA = 21 # long interval for ema
Here is my second file:
import requests
import talib
import time
import numpy as np
import websocket
from config import TELEGRAM_TOKEN, TELEGRAM_CHANNEL, INTERVAL, SHORT_EMA, LONG_EMA

def streamKline(currency, interval):
    websocket.enableTrace(False)
    socket = f'wss://stream.binance.com:9443/ws/{currency}#kline_{interval}'
    ws = websocket.WebSocketApp(socket)
    ws.run_forever()

# SYMBOLS TO LOOK FOR ALERTS
SYMBOLS = [
    "ETHUSDT",
    "BTCUSDT",
    "ATOMUSDT",
    "BNBUSDT",
    "FTMBUSD",
    "ENJUSDT",
    "WAXPUSDT"
]

# sending alerts to telegram
def send_message(message):
    url = "https://api.telegram.org/bot{}/sendMessage?chat_id={}&text={}&parse_mode=markdown".format(TELEGRAM_TOKEN, TELEGRAM_CHANNEL, message)
    res = requests.get(url);print(url);
    return res

# getting klines data to process
def streamKline(symbol):
    data = socket.streamKline(symbol=symbol, interval=INTERVAL, limit=300)  # more data means more precision but at the trade off between speed and time
    return_data = []
    # taking closing data for each kline
    for each in data:
        return_data.append(float(each[4]))  # 4 is the index of the closing data in each kline
    return np.array(return_data)  # returning as numpy array for better precision and performance

def main():
    # making an infinite loop that keeps checking for the condition
    while True:
        # looping through each coin
        for each in SYMBOLS:
            data = streamKline(each)
            ema_short = talib.EMA(data, int(SHORT_EMA))
            ema_long = talib.EMA(data, int(LONG_EMA))
            last_ema_short = ema_short[-2]
            last_ema_long = ema_long[-2]
            ema_short = ema_short[-1]
            ema_long = ema_long[-1]
            # conditions for alerts
            if (ema_short > ema_long and last_ema_short < last_ema_long):
                message = each + "bullcoming " + str(SHORT_EMA) + " over " + str(LONG_EMA);print(each, "alert came");
                send_message(message);
            time.sleep(0.5);

# calling the function
if __name__ == "__main__":
    main()
The config part is all set up; the problem is the second file, getting the kline data. The error I keep getting looks like this:
data = socket.streamKline(symbol=symbol,interval=INTERVAL,limit=300) # more data means more precision but at the trade off between speed and time
NameError: name 'socket' is not defined
I just don't know how to do it. I want to build an EMA alert that can send me a message when I am not watching the chart, but this way does not seem to work. I have tried many times and watched many videos, but I am just a beginner and not improving at all.
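The NameError happens because nothing named socket exists at module level: the first streamKline opens a WebSocket and never returns data, and the second streamKline then calls a socket object that was never created. If the intent is simply to grab the last 300 closes per symbol, here is a sketch of one way to do that with Binance's public REST klines endpoint (no streaming, no API key; the helper name get_closes is mine):

import requests
import numpy as np

def get_closes(symbol, interval, limit=300):
    # Public candlestick endpoint; index 4 of each kline is the close price.
    url = "https://api.binance.com/api/v3/klines"
    params = {"symbol": symbol, "interval": interval, "limit": limit}
    klines = requests.get(url, params=params, timeout=10).json()
    return np.array([float(k[4]) for k in klines])

With that in place, main() could call data = get_closes(each, INTERVAL) instead of streamKline(each) and feed the result to talib.EMA exactly as before.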

Overwrite appended list during background scheduling process using apscheduler python

Every time I run the program below, the output keeps adding to previous outputs because object_list keeps appending in the background (apscheduler is set to run on an interval). What I need is the real-time, up-to-date list of the objects in the bucket. Right now I get an appended list that includes objects that are no longer in the bucket, because the list never resets. When I run the program manually, I get the expected results because the list appends once and the process completes. Is there a way to run this program in the background and have a fresh list built each time the program produces its output? The program uses an exclude list to filter out unwanted results.
import boto3
from plyer import notification
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.background import BlockingScheduler
from time import sleep
import datetime
import schedule
import time

Exclude_List = []
Object_List = []
FTP_File_List = []

file = open('ftp_exclude.txt', 'r')
excplist = file.readlines()
file.close()
for x in excplist:
    Exclude_List.append(x.strip())

def AWS_PROD_Check():
    print(f"AWS_PROD START: {datetime.datetime.now()}")
    session = boto3.Session(profile_name='My_Profile')
    s3 = session.resource('s3')
    my_bucket = s3.Bucket('my_bucket')
    objects = my_bucket.objects.filter(Prefix = 'My_folder/')
    for object in objects:
        Object_List.append(object.key)
    FTP_File_List = set(Object_List) - {x for y in Exclude_List for x in Object_List if y in x}
    FTP_File_List_Sorted = sorted(FTP_File_List)
    for x in FTP_File_List_Sorted:
        if '/My_directory/' in x and '.' in x:
            print(x)
    print(f"AWS_PROD END: {datetime.datetime.now()}")
    notification.notify(
        title='AWS_PROD Check',
        message='Report Generated',
        app_icon=None,
        timeout=20, )

AWS_PROD_Check()

sched = BackgroundScheduler()
sched.add_job(AWS_PROD_Check, 'interval', minutes = 5)
sched.start()

while True:
    sleep(1)
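The list keeps growing because Object_List is a module-level global and every scheduled run appends to it. One sketch of a fix is to build a fresh list inside the function on each run (same profile, bucket, and prefix names as above, kept only for illustration):

def AWS_PROD_Check():
    # Rebuild the object list from scratch on every run so objects deleted
    # from the bucket don't linger from earlier runs.
    object_list = []
    session = boto3.Session(profile_name='My_Profile')
    s3 = session.resource('s3')
    my_bucket = s3.Bucket('my_bucket')
    for obj in my_bucket.objects.filter(Prefix='My_folder/'):
        object_list.append(obj.key)
    ftp_file_list = set(object_list) - {x for y in Exclude_List for x in object_list if y in x}
    for key in sorted(ftp_file_list):
        if '/My_directory/' in key and '.' in key:
            print(key)

The timestamp prints and the notification call from the original function would work unchanged around this.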

Fetching the order depth with python-binance, but the code does not complete

I am running a Python script to fetch the current order books for all symbols that end with USDT.
Whenever I try to run it, it fetches the order book for the first three symbols (in this case BTCUSDT, ETHUSDT and BNBUSDT) and never gets any further. Any takers on what I am messing up here?
I am using this logic to get a list of the symbols and the order book:
import asyncio
import config as c  # from config.py
import infinity as inf  # user-defined function for infinity (probably not needed)
from binance import AsyncClient, DepthCacheManager, Client

client = Client(c.API_KEY, c.API_SECRET, tld = 'com')
info = client.get_exchange_info()
symbols = info['symbols']

ls = []
for s in symbols:
    if 'USDT' in s['symbol']:
        #if 'BUSD' not in s['symbol']:
        ls.append(s['symbol'])

async def main():
    # initialise the client
    client = await AsyncClient.create()
    for i in ls:
        async with DepthCacheManager(client, symbol=i, limit=10000) as dcm_socket:
            depth_cache = await dcm_socket.recv()
            symbol = i
            asks = depth_cache.get_asks()[:5]
            bids = depth_cache.get_bids()[:5]
            full = [symbol, asks, bids]
            print(full)

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
It wouldn't complete, because it's not supposed to.
DepthCacheManager is designed to establish a WebSocket connection, take a snapshot of the order information, and then subscribe to a stream of updates to the current outstanding orders, which it applies locally in its "DepthCache". Each time that cache gets updated, it delivers the updated set of current asks/bids, as you can see.
The trading and orders never stop, so why would it stop?
Maybe you want to try: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-local-depth-cache
import unicorn_binance_local_depth_cache
ubldc = unicorn_binance_local_depth_cache.BinanceLocalDepthCacheManager(exchange="binance.com")
ubldc.create_depth_cache("LUNABTC")
asks = ubldc.get_asks("LUNABTC")
bids = ubldc.get_bids("LUNABTC")
That's it :)
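Alternatively, if all you need is a one-off snapshot of the top of the book per symbol rather than a continuously maintained cache, a plain REST call is probably simpler. A sketch, assuming python-binance's synchronous get_order_book and reusing the client and ls list from the question:

for symbol in ls:
    # One-shot REST snapshot of the order book; returns once instead of streaming updates.
    book = client.get_order_book(symbol=symbol, limit=5)
    print([symbol, book['asks'], book['bids']])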

Get output as a list from time module

I have code that runs every 2 seconds. It prints coordinate information on each run. I want to collect these coordinates in a list, but I cannot. How can I do that?
Code:
import time
import requests
import schedule

def executeSomething():
    r = requests.get('https://get.geojs.io/')
    ip_request = requests.get("https://get.geojs.io/v1/ip.json")
    ippAdd = ip_request.json()["ip"]
    url = 'https://get.geojs.io/v1/ip/geo/' + ippAdd + '.json'
    geo_request = requests.get(url)
    geo_data = geo_request.json()
    co = []
    co.append([float(geo_data["latitude"]), float(geo_data["longitude"])])
    print(co)

schedule.every(2).seconds.do(executeSomething)  # this code runs every 2 seconds
#schedule.every().hour.do(executeSomething())

while 1:
    schedule.run_pending()
    time.sleep(1)
Output:
[[39.9208, 32.8375]]
[[39.7856, 32.2174]]
But I want output like this:
[[39.9208, 32.8375], [39.7856, 32.2174]]
Edit:
I have another problem. When I change print(co) to return co, import this function into another module, and try to get the "co" list, I cannot get it.
import dynamic
d = dynamic.executeSomething()
print(d)
What am I doing wrong?
You're resetting the list every time the loop runs, because co=[] is inside your function and the function re-creates it on every call.
Move co=[] above and outside of the function.
import time
import requests
import schedule

co = []

def executeSomething():
    r = requests.get('https://get.geojs.io/')
    ip_request = requests.get("https://get.geojs.io/v1/ip.json")
    ippAdd = ip_request.json()["ip"]
    url = 'https://get.geojs.io/v1/ip/geo/' + ippAdd + '.json'
    geo_request = requests.get(url)
    geo_data = geo_request.json()
    co.append([float(geo_data["latitude"]), float(geo_data["longitude"])])
    print(co)

schedule.every(2).seconds.do(executeSomething)  # this code runs every 2 seconds
#schedule.every().hour.do(executeSomething())

while 1:
    schedule.run_pending()
    time.sleep(1)
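As for the edit: schedule.run_pending() calls your function and discards whatever it returns, and dynamic.executeSomething() in the other module only returns the result of that single call, not the accumulated list. One sketch of reading the accumulated list from another module, assuming the code above is saved as dynamic.py with co at module level and with the while 1 loop moved under an if __name__ == '__main__': guard so that importing the module does not block:

import time
import dynamic  # assumed to be the module above, minus the blocking loop at import time

for _ in range(3):
    dynamic.executeSomething()  # each call appends one reading to dynamic.co
    time.sleep(2)

print(dynamic.co)  # the accumulated coordinates live on the module, not in a return value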

Multiprocessing function not writing to file or printing

I'm working on a Raspberry Pi (3 B+), making a data collection device, and I'm trying to spawn a process to record the data coming in and write it to a file. I have a function for the writing that works fine when I call it directly.
When I call it using the multiprocessing approach, however, nothing seems to happen. I can see in the Linux task monitor that the process does in fact get spawned, but no file gets written, and when I try to pass a flag to it to shut down it doesn't respond, meaning I end up terminating the process and nothing seems to have happened.
I've been over this every which way and can't see what I'm doing wrong; can anyone else? In case it's relevant, these are functions inside a parent class, and one of the functions is meant to spawn the other as a process.
Code I'm using:
Code I'm using:
from datetime import datetime, timedelta
import csv
from drivers.IMU_SEN0 import IMU_SEN0
import multiprocessing, os

class IMU_data_logger:
    _output_filename = ''
    _csv_headers = []
    _accelerometer_headers = ['Accelerometer X', 'Accelerometer Y', 'Accelerometer Z']
    _gyroscope_headers = ['Gyroscope X', 'Gyroscope Y', 'Gyroscope Z']
    _magnetometer_headers = ['Bearing']
    _log_accelerometer = False
    _log_gyroscope = False
    _log_magnetometer = False
    IMU = None
    _writer = []
    _run_underway = False
    _process = []
    _stop_value = 0

    def __init__(self, output_filename='/home/pi/blah.csv', log_accelerometer=True, log_gyroscope=True, log_magnetometer=True):
        """data logging device
        NOTE! Multiple instances of this class should not use the same IMU devices simultaneously!"""
        self._output_filename = output_filename
        self._log_accelerometer = log_accelerometer
        self._log_gyroscope = log_gyroscope
        self._log_magnetometer = log_magnetometer

    def __del__(self):
        # TODO Update this
        if self._run_underway:  # If there's still a run underway, end it first
            self.end_recording()

    def _set_up(self):
        self.IMU = IMU_SEN0(self._log_accelerometer, self._log_gyroscope, self._log_magnetometer)
        self._set_up_headers()

    def _set_up_headers(self):
        """Set up the headers of the CSV file based on the header substrings at top and the input flags on what will be measured"""
        self._csv_headers = []
        if self._log_accelerometer is not None:
            self._csv_headers += self._accelerometer_headers
        if self._log_gyroscope is not None:
            self._csv_headers += self._gyroscope_headers
        if self._log_magnetometer is not None:
            self._csv_headers += self._magnetometer_headers

    def _record_data(self, frequency, stop_value):
        self._set_up()  # Run setup in thread
        """Record data function, which takes a recording frequency, in herz, as an input"""
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    previous_read_time = datetime.now()  # Update previous readtime
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    if self._log_gyroscope:
                        # Read gyro values
                        gyro = self.IMU.read_gyroscope_values()
                        next_row += [gyro['x'], gyro['y'], gyro['z']]
                    if self._log_magnetometer:
                        # Read magnetometer value
                        b = self.IMU.read_magnetometer_bearing()
                        next_row += b
                    self._writer.writerow(next_row)
            # Close the csv when done
            outcsv.close()

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("Logging Accelerometer:", self._log_accelerometer)
        print("Logging Gyroscope:", self._log_gyroscope)
        print("Logging Magnetometer:", self._log_magnetometer)
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording multithread that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")
====================================================================
ANSWER: For anyone following up here, it turns out the problem was my use of the VS Code debugger, which apparently doesn't work with multiprocessing and was somehow preventing the spawned process from succeeding. Many thanks to Tomasz Swider below for helping me work through the issues and, eventually, find my idiocy. The help was very deeply appreciated!!
I can see a few things wrong in your code:
First, stop_value == 0 will not work, since multiprocessing.Value('i', 0) != 0; change that line to
while stop_value.value == 0
Second, you never update previous_read_time, so it will write the readings as fast as it can and you will run out of disk quickly.
Third, try using time.sleep(); what you are doing is called busy looping, and it is bad because it wastes CPU cycles needlessly (see the sketch after this list).
Fourth, terminating with self._stop_value = 1 probably will not work; there must be another way to set that value, maybe self._stop_value.value = 1.
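On the busy-looping point, here is a sketch of pacing the loop with time.sleep (names taken from the code above; the exact sleep length is a judgment call):

while stop_value.value == 0:
    elapsed = (datetime.now() - previous_read_time).total_seconds()
    if elapsed < Period:
        # Sleep off the remaining part of the period instead of spinning on the CPU.
        time.sleep(Period - elapsed)
        continue
    previous_read_time = datetime.now()
    # ... take the readings and write the row here ...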
Well, here is a piece of example code, based on the code that you have provided, that is working just fine:
import csv
import multiprocessing
import time
from datetime import datetime, timedelta
from random import randint

class IMU(object):
    @staticmethod
    def read_accelerometer_values():
        return dict(x=randint(0, 100), y=randint(0, 100), z=randint(0, 10))

class Foo(object):
    def __init__(self, output_filename):
        self._output_filename = output_filename
        self._csv_headers = ['xxxx', 'y', 'z']
        self._log_accelerometer = True
        self.IMU = IMU()

    def _record_data(self, frequency, stop_value):
        # self._set_up()  # Run setup functions for the data collection device and store it in the self.IMU variable
        """Record data function, which takes a recording frequency, in herz, as an input"""
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    previous_read_time = datetime.now()
                    self._writer.writerow(next_row)
            # Close the csv when done
            outcsv.close()

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording multithread that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")

if __name__ == '__main__':
    foo = Foo('/tmp/foometer.csv')
    foo.start_recording(20)
    time.sleep(5)
    print('Ending recording')
    foo.end_recording()
