Get output as a list from time module - python

I have code that runs every 2 seconds and prints coordinate information on each run. I want to collect these coordinates in a list, but I cannot. How can I do that?
Code:
import time
import requests
import schedule

def executeSomething():
    r = requests.get('https://get.geojs.io/')
    ip_request = requests.get("https://get.geojs.io/v1/ip.json")
    ippAdd = ip_request.json()["ip"]
    url = 'https://get.geojs.io/v1/ip/geo/' + ippAdd + '.json'
    geo_request = requests.get(url)
    geo_data = geo_request.json()
    co = []
    co.append([float(geo_data["latitude"]), float(geo_data["longitude"])])
    print(co)

schedule.every(2).seconds.do(executeSomething)  # this code runs every 2 seconds
#schedule.every().hour.do(executeSomething())

while 1:
    schedule.run_pending()
    time.sleep(1)
Output:
[[39.9208, 32.8375]]
[[39.7856, 32.2174]]
But I want output like this:
[[39.9208, 32.8375], [39.7856, 32.2174]]
Edit:
I have another problem. When I change print(co) to return co, import this function into another script, and try to get the "co" list, I cannot get it.
import dynamic
d = dynamic.executeSomething()
print(d)
What am I doing wrong?

You're resetting the list on every run because co = [] is inside your function, and the scheduler calls that function every time.
Move co = [] above and outside of the function:
import time
import requests
import schedule

co = []

def executeSomething():
    r = requests.get('https://get.geojs.io/')
    ip_request = requests.get("https://get.geojs.io/v1/ip.json")
    ippAdd = ip_request.json()["ip"]
    url = 'https://get.geojs.io/v1/ip/geo/' + ippAdd + '.json'
    geo_request = requests.get(url)
    geo_data = geo_request.json()
    co.append([float(geo_data["latitude"]), float(geo_data["longitude"])])
    print(co)

schedule.every(2).seconds.do(executeSomething)  # this code runs every 2 seconds
#schedule.every().hour.do(executeSomething())

while 1:
    schedule.run_pending()
    time.sleep(1)
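Regarding the edit: executeSomething() only appends to co and returns nothing, so d = dynamic.executeSomething() gives you None. A minimal sketch, assuming the code above is saved as dynamic.py with the module-level co, and that the scheduling loop in that file is guarded by if __name__ == "__main__": so the import does not block, is to drive the scheduler from the importing script and read dynamic.co afterwards:

import time
import schedule
import dynamic  # assumed to define co = [] and executeSomething() at module level

schedule.every(2).seconds.do(dynamic.executeSomething)

# let the job run a few times, then read the accumulated list
for _ in range(10):
    schedule.run_pending()
    time.sleep(1)

print(dynamic.co)  # e.g. [[39.9208, 32.8375], [39.7856, 32.2174]]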

Related

I want to build an EMA indicator alert for a specific list of crypto pairs

Here is my first file, config.py:
### configuration details
TELEGRAM_TOKEN = '' # telegram bot token
TELEGRAM_CHANNEL ='' # channel id
INTERVAL = '1m' # binance time interval
SHORT_EMA = 7 # short interval for ema
LONG_EMA = 21 # long interval for ema
Here is my second code:
import requests
import talib
import time
import numpy as np
import websocket
from config import TELEGRAM_TOKEN, TELEGRAM_CHANNEL, INTERVAL, SHORT_EMA, LONG_EMA

def streamKline(currency, interval):
    websocket.enableTrace(False)
    socket = f'wss://stream.binance.com:9443/ws/{currency}@kline_{interval}'
    ws = websocket.WebSocketApp(socket)
    ws.run_forever()

# SYMBOLS TO LOOK FOR ALERTS
SYMBOLS = [
    "ETHUSDT",
    "BTCUSDT",
    "ATOMUSDT",
    "BNBUSDT",
    "FTMBUSD",
    "ENJUSDT",
    "WAXPUSDT"
]

# sending alerts to telegram
def send_message(message):
    url = "https://api.telegram.org/bot{}/sendMessage?chat_id={}&text={}&parse_mode=markdown".format(TELEGRAM_TOKEN, TELEGRAM_CHANNEL, message)
    res = requests.get(url); print(url)
    return res

# getting klines data to process
def streamKline(symbol):
    data = socket.streamKline(symbol=symbol, interval=INTERVAL, limit=300)  # more data means more precision, but at the trade-off between speed and time
    return_data = []
    # taking the closing price from each kline
    for each in data:
        return_data.append(float(each[4]))  # 4 is the index of the closing price in each kline
    return np.array(return_data)  # returning as a numpy array for better precision and performance

def main():
    # an infinite loop that keeps checking the condition
    while True:
        # looping through each coin
        for each in SYMBOLS:
            data = streamKline(each)
            ema_short = talib.EMA(data, int(SHORT_EMA))
            ema_long = talib.EMA(data, int(LONG_EMA))
            last_ema_short = ema_short[-2]
            last_ema_long = ema_long[-2]
            ema_short = ema_short[-1]
            ema_long = ema_long[-1]
            # conditions for alerts
            if (ema_short > ema_long and last_ema_short < last_ema_long):
                message = each + "bullcoming " + str(SHORT_EMA) + " over " + str(LONG_EMA); print(each, "alert came")
                send_message(message)
            time.sleep(0.5)

# calling the function
if __name__ == "__main__":
    main()
The config part is all settled; it is the second file, the one getting the kline data, that fails. The error it keeps raising looks like this:
data = socket.streamKline(symbol=symbol, interval=INTERVAL, limit=300)  # more data means more precision, but at the trade-off between speed and time
NameError: name 'socket' is not defined
I just don't know how to fix it. I want to build an EMA alert that can message me when I am not watching the chart, but this approach does not seem to work. I have tried many times and watched many videos, but I am just a beginner and nothing is improving.
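For reference, one way around the undefined socket name is a small REST helper (my own sketch, not from the original code) that pulls recent candles from Binance's public /api/v3/klines endpoint and returns the closing prices; main() would call this instead of streamKline:

import requests
import numpy as np

def getKlines(symbol, interval="1m", limit=300):
    # fetch recent klines from Binance's public REST API (assumed endpoint)
    url = "https://api.binance.com/api/v3/klines"
    resp = requests.get(url, params={"symbol": symbol, "interval": interval, "limit": limit})
    data = resp.json()
    # index 4 of each kline is the closing price
    return np.array([float(each[4]) for each in data])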

Discord py - multiprocessing blocks tasks.loop

I am creating a discord bot with Python on Replit.
One function of the bot is that it checks whether the current time is equal to a given time, so I have a tasks.loop event that loops every second. Another function of the bot is a command that generates a graph with data taken from an api.
Both blocks of code run fine on their own, but sometimes calling the graph command stops the tasks.loop: now is no longer printed every second after bot.pt_list is printed. The following is my code:
import datetime
from discord.ext import tasks
from multiprocessing import Pool
import requests

@tasks.loop(seconds = 1)
async def notif():
    now = datetime.datetime.now() + datetime.timedelta(hours = 8)
    now = now.strftime("%H:%M:%S")
    print(now)

bot.pt_list = []

@bot.command(name = 'graph')
async def graph(ctx):
    bot.rank = rank
    timestamp_url = "https://api.sekai.best/event/29/rankings/time?region=tw"
    timestamp_response = requests.get(timestamp_url)
    timestamp_data = timestamp_response.json()["data"]
    i = 1
    timestamp_filtered = []
    while i <= len(timestamp_data):
        timestamp_filtered.append(timestamp_data[i])
        i += 12
    timestamp_url = []
    if __name__ == '__main__':
        for timestamp in timestamp_filtered:
            timestamp_url.append("https://api.sekai.best/event/29/rankings?region=tw&timestamp=" + timestamp)
        with Pool(20) as p:
            bot.pt_list = p.map(pt, timestamp_url)
        print(bot.pt_list)

def pt(timestamp_url):
    pt_response = requests.get(timestamp_url)
    pt_data = pt_response.json()["data"]["eventRankings"]
    for data in pt_data:
        if data["rank"] == 1:
            return data["score"]
And below is the output:
# prints time every second
15:03:01
15:03:02
15:03:03
15:03:04
[414505, 6782930, 13229090, 19650440, 27690605, 34044730, 34807680, 38346228, 43531083, 48973205, 52643633, 56877023, 62323476, 67464731, 69565641, 74482140, 78791756, 84277236, 87191476, 91832031, 97207348, 102692443, 104280559, 106288572, 111710142, 112763082, 112827552, 113359257, 116211652, 117475362, 117529967, 117560102, 118293877, 118293877, 118430000, 118430000]
15:03:15
15:03:15
# printing stops
However, the tasks.loop does not get stopped every time; sometimes it works and continues to print now after printing bot.pt_list. I'm relatively new to Python and I don't know what the issue is. Could someone help explain why this is happening and how to fix it? Thank you!
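For context, a commonly suggested workaround (not from the original post) is to keep the blocking requests/Pool work out of the coroutine, so the event loop that drives tasks.loop stays responsive. A rough sketch, assuming Python 3.9+ for asyncio.to_thread and a hypothetical build_pt_list() helper wrapping the multiprocessing code from graph() above:

import asyncio
from multiprocessing import Pool

def build_pt_list(urls):
    # hypothetical helper: the blocking Pool/requests work moved out of the coroutine
    with Pool(20) as p:
        return p.map(pt, urls)

@bot.command(name = 'graph')
async def graph(ctx):
    # ... build the timestamp_url list exactly as in the original command ...
    # hand the blocking work to a worker thread so the event loop
    # (and the 1-second tasks.loop) keeps ticking while the data downloads
    bot.pt_list = await asyncio.to_thread(build_pt_list, timestamp_url)
    print(bot.pt_list)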

Overwrite appended list during background scheduling process using apscheduler python

Every time I run the program below, the output keeps adding to previous outputs, because Object_List keeps appending in the background while apscheduler runs on an interval. What I need is the real-time, up-to-date list of the objects in the bucket. Right now I get an appended list that includes objects that are no longer in the bucket, because the list keeps growing. When I run the program manually, I get the expected results, because the list appends once and the process completes. Is there a way to run this program in the background and have a new list created each time the program produces output? The program uses an exclude list to filter out unwanted results.
import boto3
from plyer import notification
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.background import BlockingScheduler
from time import sleep
import datetime
import schedule
import time

Exclude_List = []
Object_List = []
FTP_File_List = []

file = open('ftp_exclude.txt', 'r')
excplist = file.readlines()
file.close()
for x in excplist:
    Exclude_List.append(x.strip())

def AWS_PROD_Check():
    print(f"AWS_PROD START: {datetime.datetime.now()}")
    session = boto3.Session(profile_name='My_Profile')
    s3 = session.resource('s3')
    my_bucket = s3.Bucket('my_bucket')
    objects = my_bucket.objects.filter(Prefix = 'My_folder/')
    for object in objects:
        Object_List.append(object.key)
    FTP_File_List = set(Object_List) - {x for y in Exclude_List for x in Object_List if y in x}
    FTP_File_List_Sorted = sorted(FTP_File_List)
    for x in FTP_File_List_Sorted:
        if '/My_directory/' in x and '.' in x:
            print(x)
    print(f"AWS_PROD END: {datetime.datetime.now()}")
    notification.notify(
        title='AWS_PROD Check',
        message='Report Generated',
        app_icon=None,
        timeout=20, )

AWS_PROD_Check()

sched = BackgroundScheduler()
sched.add_job(AWS_PROD_Check, 'interval', minutes = 5)
sched.start()

while True:
    sleep(1)
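One straightforward fix (a sketch of my own, not from the original post) is to build Object_List inside AWS_PROD_Check(), so each scheduled run starts from an empty list instead of appending to the global one:

def AWS_PROD_Check():
    # start from a fresh list on every scheduled run instead of appending to a global
    Object_List = []
    print(f"AWS_PROD START: {datetime.datetime.now()}")
    session = boto3.Session(profile_name='My_Profile')
    s3 = session.resource('s3')
    my_bucket = s3.Bucket('my_bucket')
    for object in my_bucket.objects.filter(Prefix='My_folder/'):
        Object_List.append(object.key)
    # the rest of the function (exclude filtering, sorting, printing, notification) stays the same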

How to Speed Up This Python Loop

downloadStart = datetime.now()

while (True):
    requestURL = transactionAPI.format(page = tempPage, limit = 5000)
    response = requests.get(requestURL, headers=headers)
    json_data = json.loads(response.content)
    tempMomosTransactionHistory.extend(json_data["list"])
    if (datetime.fromtimestamp(json_data["list"][-1]["crtime"]) < datetime(datetime.today().year, datetime.today().month, datetime.today().day - dateRange)):
        break
    tempPage += 1

downloadEnd = datetime.now()
Any suggestions, please? Threading or something like that?
Outputs here
downloadtime 0:00:02.056010
downloadtime 0:00:05.680806
downloadtime 0:00:05.447945
You need to improve it in two ways.
Optimise the code within the loop
Parallelise the code execution
#1
Looking at your code, one improvement is to create the datetime.today() object once instead of three times. Check other parts, such as the transactionAPI call, to optimise further.
#2
If you have a multi-core CPU machine, you can take advantage of it by spawning a thread per page. Refer to the modified version of your code below.
import threading

def processRequest(tempPage):
    global json_data
    requestURL = transactionAPI.format(page = tempPage, limit = 5000)
    response = requests.get(requestURL, headers=headers)
    json_data = json.loads(response.content)
    tempMomosTransactionHistory.extend(json_data["list"])

downloadStart = datetime.now()

while (True):
    # create a thread per page
    t1 = threading.Thread(target=processRequest, args=(tempPage, ))
    t1.start()
    # fetch the datetime.today object once instead of 3 times
    datetimetoday = datetime.today()
    # note: json_data is written by the worker thread, so this stop check races with it
    if (datetime.fromtimestamp(json_data["list"][-1]["crtime"]) < datetime(datetimetoday.year, datetimetoday.month, datetimetoday.day - dateRange)):
        break
    tempPage += 1

downloadEnd = datetime.now()
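Alternatively, here is a sketch of the same idea with concurrent.futures (my own variant, not the answer above, assuming the transactionAPI, headers, dateRange and tempPage names from the question). It downloads pages in concurrent batches and checks the cutoff once per batch:

from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
import json
import requests

cutoff = datetime.today() - timedelta(days=dateRange)  # same cutoff as the original check

def fetch_page(page):
    # download one page of transactions
    response = requests.get(transactionAPI.format(page=page, limit=5000), headers=headers)
    return json.loads(response.content)["list"]

transactions = []
page = tempPage
done = False
with ThreadPoolExecutor(max_workers=8) as pool:
    while not done:
        # fetch a batch of pages concurrently, then check the stop condition once per batch
        batch = list(pool.map(fetch_page, range(page, page + 8)))
        for rows in batch:
            transactions.extend(rows)
            if rows and datetime.fromtimestamp(rows[-1]["crtime"]) < cutoff:
                done = True
        page += 8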

python selenium, slow xpath 'all elements'. add timeout

I need to get all the elements on a page and iterate through them to search each element.
Currently I am using driver.find_elements_by_xpath('//*[@*]').
However, there can be a delay in completing the line of code above on larger pages. Is there a way to retrieve the results in increments of 100 elements? Or at least add a timeout?
Terminating driver.find_elements_by_xpath('//*[@*]') inside a separate thread is the only way I can currently think of to solve this.
I need to find all elements on a page that contain certain strings. For example, elem.get_attribute('outerHTML').find('type="submit"') != -1 … and so on and so forth … I also need their proximity to each other to compare index positions.
Thanks!
If anyone is interested, here is a solution: I've created a wrapper called stopwatch().

import Globalz  ###### globals import is an empty .py file
import threading
import time
import ctypes

def find_xpath():
    for i in range(5):
        print(i)
        time.sleep(1)
    Globalz.curr_value = 'DONE!'
    ### this is where the xpath retrieval goes (ABOVE loop is for example purposes only)

def stopwatch(info):
    curr_time = 0
    failed = False
    Globalz.curr_value = ''
    thread1 = threading.Thread(target=info['function'])
    thread1.start()
    while thread1.is_alive() is True:
        if curr_time >= info['timeout']:
            failed = True
            ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread1.ident), ctypes.py_object(SystemExit))
        curr_time += 1
        time.sleep(1)
    if failed is True: return info['failed_returns']
    if failed is False: return Globalz.curr_value

betty = stopwatch({'function': find_xpath, 'timeout': 10, 'failed_returns': 'failed'})
print(betty)
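As an aside (not part of the original answer), if the goal is only to scan every element's markup for strings like type="submit" and compare positions, one sketch that avoids a WebDriver round trip per element is to pull page_source once and scan it with BeautifulSoup:

from bs4 import BeautifulSoup

html = driver.page_source  # a single round trip instead of one per element
soup = BeautifulSoup(html, 'html.parser')

# collect every tag once so index positions can be compared later
all_tags = list(soup.find_all(True))
submit_indexes = [i for i, tag in enumerate(all_tags) if tag.get('type') == 'submit']
print(submit_indexes)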
