I have a large number of import statements (with prints for debug...) for my code:
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, struct, json, time, socket
print("start import BS")
from BS import run_BS # custom library
print("end import BS")
print("Import 1")
print("Import 2")
import numpy as np
print("Import 3")
import platform
import datetime
from posixpath import join as pjoin
print("Import 4")
from multiprocessing import Process, Queue
import matplotlib
import tkinter
print("Import 5")
print("Import 6")
print("Import 7")
from send_tune_request import tune
print("Import 8")
print("Import 9")
and the code structure is roughly:
class ed_bs:
    """Wrapper that launches the BS worker in a child process."""

    def __init__(self, check=False, test_mode=False):
        # NOTE(review): a, b and c are not defined in this excerpt -- confirm
        # where they come from in the full file.  On Windows (and macOS with
        # the default 'spawn' start method) every child process re-imports
        # the main module, which is why the top-level import prints appear
        # once per process; keep module top level free of side effects.
        curr_BS_proc = Process(target=run_BS, args=(a, b, c))

    def main_loop(self):
        while True:
            pass  # run forever...


if __name__ == '__main__':
    e_d = ed_bs(check=True)
    test_mode = True
    if test_mode:
        test_start_thread = Process(target=tune)
        # FIX: multiprocessing.Process has no setDaemon() method; the original
        # line merely attached an unused attribute named 'setDaemon'.  The
        # daemon flag must be set via the 'daemon' attribute before start().
        test_start_thread.daemon = True
        test_start_thread.start()
The print statements look like as follows:
start import BS
end import BS
Import 1
Import 2
Import 3
Import 4
Import 5
Import 6
Import 7
Import 8
Import 9
start import BS
end import BS
Import 1
Import 2
Import 3
Import 4
Import 5
Import 6
Import 7
Import 8
Import 9
start import BS
end import BS
Import 1
Import 2
Import 3
Import 4
Import 5
Import 6
Import 7
Import 8
Import 9
I'm guessing that it's printing three times because there are 3 processes in this flow (the __main__ process, tune, and run_BS), but neither BS nor send_tune_request imports the __file__ file, so I am a little confused. The imports take a long time (run_BS has TensorFlow trace compiling) and use a lot of memory, importing everything three times across three processes, so I'd like to limit the imports more appropriately.
Why are the imports taking a long time, and how do I make everything import only once?
Thanks for the help.
EDIT: BS.py looks something like the below:
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, struct, json, time
from posixpath import join as pjoin
from termcolor import colored
import numpy as np
import scipy.signal as sig
import tensorflow as tf
import platform
import tensorflow_probability as tfp
def run_BS():
    """Construct a BS instance and pump its processing loop forever."""
    worker = BS()
    while True:
        worker.process_data()
class BS:
    """Signal-processing worker driven by run_BS."""

    def process_data(self):
        """Run one pass: two TensorFlow stages, then a NumPy stage."""
        self.tf_process_1()
        self.tf_process_2()
        self.non_tf_process()

    # FIX: the '@' of the decorator was lost in the paste -- '#tf.function'
    # is only a comment and never enabled graph tracing/compilation.
    @tf.function
    def tf_process_1(self):
        tf.cast()
        ...

    @tf.function
    def tf_process_2(self):
        tf.cast()
        ...

    def non_tf_process(self):
        # pure NumPy work; runs eagerly, outside any tf.function trace
        x = np.zeros(100)
        x = x + 1
        ...
Related
I have to compare the lag in a server-client model in 2,4G and 5G.
My expectation is that 5G is faster than 2.4G by a large margin. I have already taken the 5G measurements. The average lag turned out to be 40.2 ms — above what I was predicting. The issue became visible when I tried the same with the 2.4G setup, and the lag was calculated to be a negative value. The two computers on which I was running the code weren't really synchronised. I would appreciate any input on how to solve this issue.
I wrote the code on Jupyter as a Notebook.
Below you can find powerpoints of the setups used and the respective code used for the client and the server. The results displayed are in micro seconds.
5G Setup
2,4G Setup
Server Code:
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from flask import Flask
from flask import request
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
import time
from time import sleep
from decimal import Decimal
# In[2]:
test = 1

# create the csv file and write the header row; data rows are appended later
file_name = "2.4G_Time_Data_" + str(test)
test = test + 1
print(file_name)
with open(file_name + '.csv', 'w', newline='') as time_file:
    header_writer = csv.writer(time_file, delimiter=',',
                               quotechar='|', quoting=csv.QUOTE_MINIMAL)
    header_writer.writerow(['Packet', 'Lag(uS)'])
# In[ ]:
# start running a server; saves the incoming timing data in a csv file
received_package = 0
app = Flask(__name__)


# FIX: the decorator must be '@app.route', not '#app.route' -- as a comment
# the route was never registered, so every POST returned 404.
@app.route('/postjson', methods=['POST'])
def postJsonHandler():
    """Record one timing packet: lag = receiver clock minus sender timestamp,
    appended to the CSV opened earlier as file_name."""
    global received_package
    received_package = received_package + 1
    print(request.is_json)
    content = request.get_json()
    print(content)
    now = datetime.now()
    # renamed from 'time' so it no longer shadows the imported time module
    sent_time = content["time"]
    time_now = datetime.timestamp(now)
    print("Sent : " + str(sent_time))
    print("Received : " + str(time_now))
    # NOTE(review): this difference mixes two machines' clocks; it is only
    # meaningful if both hosts are synchronised (e.g. NTP).  Unsynchronised
    # clocks can make it arbitrarily wrong -- even negative, as observed.
    delta_time = (time_now - sent_time) * (10**6)  # in microseconds
    print("Packet Travel Time(s) : " + str(delta_time))
    with open(file_name + '.csv', 'a') as f:
        writer = csv.writer(f)
        writer.writerow([str(received_package), str(delta_time)])
    return 'JSON Received'


app.run(host='0.0.0.0', port=8090)
Client Code:
from datetime import datetime
import requests
import signal
from time import sleep
import time
import os
import sys
import json

sample_size = 1000
for i in range(sample_size):
    now = datetime.now()
    # human-readable timestamp; renamed from 'time' so it no longer shadows
    # the imported time module
    time_str = now.strftime("%H:%M:%S") + ":" + str(now.microsecond)
    timestamp = datetime.timestamp(now)
    payload = {'time': timestamp}
    requests.post('http://myIP:8090/postjson', json=payload)
    # FIX: sys.getsizeof(json) measured the size of the json *module* object
    # (a constant), not the request body; measure the serialised payload.
    print("Estimated size: " + str(sys.getsizeof(json.dumps(payload)) / 1024) + "KB")
    sleep(0.1)
My 2.4G Measurement
I am using Windows and Python. The objective is simple: running main.py starts speech recognition, and once it recognizes what was said, it returns the text to main.py. The speech recognition program recognizes speech without any issues; the problem lies in the multithreading and in getting the result back to main.py.
Here is the main.py:
import threading
from speechEngine.recognize import *
from SpeechSynthesis.speech import *
from core.AI import *
spch = "default"

# FIX: pass the function itself, not its result -- 'target=speechrec()'
# *called* the recogniser in the main thread (blocking forever) and handed
# Thread its return value (None) as the target.
newthread = threading.Thread(target=speechrec)
newthread.start()

while True:
    if spch == "default":
        # NOTE(review): this busy-wait burns a full core; a queue.Queue or a
        # threading.Event would let the loop block until speech arrives.
        print("at default")
        continue
    else:
        print(spch)
        result = process(spch)
        speak(result)
        spch = "default"
And here is speech recognition which is called as a new thread:
import argparse
import os
import queue
from typing import Text
import sounddevice as sd
import vosk
import sys
import json
#from vosk import SetLogLevel
#SetLogLevel(-1)
def speechrec():
    """Continuously capture microphone audio with sounddevice and run Vosk
    speech recognition; each recognised non-empty utterance is published into
    the module-level global `spch` (polled by the loop in main.py)."""
    q = queue.Queue()
    "a lot of argument lines have been deleted to increase readability"
    # NOTE(review): `args`, `parser` and `callback` are defined in the lines
    # removed from this excerpt -- the code below depends on them; confirm
    # against the full file.
    try:
        if args.model is None:
            args.model = "model"
        if args.samplerate is None:
            # fall back to the input device's native sample rate
            device_info = sd.query_devices(args.device, 'input')
            args.samplerate = int(device_info['default_samplerate'])
        model = vosk.Model(args.model)
        # callback (defined in the removed lines) presumably feeds raw audio
        # blocks into q -- TODO confirm
        with sd.RawInputStream(samplerate=args.samplerate, blocksize = 8000, device=args.device, dtype='int16', channels=1, callback=callback):
            rec = vosk.KaldiRecognizer(model, args.samplerate)
            while True:
                data = q.get()
                if rec.AcceptWaveform(data):
                    vc=rec.FinalResult() #produces raw output of what the user said
                    vc=json.loads(vc)
                    vc=vc['text'] #converts the user speech to text format
                    if vc != '':
                        # hand the utterance to the main loop via the global
                        global spch
                        spch = vc
    except KeyboardInterrupt:
        parser.exit(0)
I am running the script below, and it takes almost 35 seconds for all stocks. Is there any library that would let me fetch all stocks at once, faster?
import schedule
import time
from kiteconnect import KiteConnect
# FIX: Python module names are case-sensitive -- 'import CSV' and
# 'import JSON' raise ImportError; the stdlib modules are lowercase.
import csv
import json
import requests
import pandas_datareader.data as pdr
import pandas as pd
import matplotlib.pyplot as plt
import subprocess


def job():  # FIX: 'Def' is not Python -- the keyword is lowercase 'def'
    """Fetch OHLC data for a fixed NSE watch list once and place a bracket
    sell order for every stock that opened exactly at its high.

    NOTE(review): the ~35 s runtime is dominated by one sequential blocking
    requests.get per symbol.  Fetching the symbols concurrently
    (concurrent.futures.ThreadPoolExecutor) or using the API's bulk quote
    endpoint reduces this to roughly a single round-trip.
    """
    api_key = 'YOUR_API'
    api_secret = 'YOUR_SECRETKEY'
    api_token = 'YOUR_ACESSTOKEN'
    kite = KiteConnect(api_key=api_key)
    kite.set_access_token('YOUR_ACCESStoken')
    Stocks = ['BANKINDIA', 'CAPF', 'CHENNPETRO', 'DLF',
              'EQUITAS', 'ESCORTS', 'FORTIS', 'HEXAWARE',
              'IDBI', 'IDFCBANK', 'IOC', 'IRB', 'ITC', 'JUBLFOOD',
              'KPIT', 'OFSS', 'ONGC', 'PFC', 'PNB',
              'RPOWER', 'TATAPOWER', 'VGUARD', 'WOCKPHARMA']
    # loop-invariant URL template, hoisted out of the loop
    Kite_TODAY = "https://api.kite.trade/instruments/NSE/%s?api_key='YOUR_API'&access_token='ACCESS_TOKEN'"
    for testst in Stocks:
        print(testst)
        print(Kite_TODAY % testst)
        r = requests.get(Kite_TODAY % testst)
        rjson = r.json()
        r1 = rjson['data']['last_price']
        Open = rjson['data']['ohlc']['open']
        High = rjson['data']['ohlc']['high']
        Low = rjson['data']['ohlc']['low']
        Close = rjson['data']['ohlc']['close']
        # FIX: %-formatting needs the % operator; the original printed the
        # raw format string followed by a tuple.
        print(" Stock %s Open %s High %s Low %s Close %s"
              % (testst, Open, High, Low, Close))
        if Open == High:
            # FIX: call the API directly instead of formatting source code
            # into a string and exec()ing it -- exec returns None (so the
            # original printed nothing useful) and is unsafe/fragile.
            order_id = kite.order_place(tradingsymbol=testst, exchange='NSE',
                                        quantity=1, price=Open,
                                        squareoff_value=1, stoploss_value=5,
                                        variety='bo', transaction_type='SELL',
                                        order_type='LIMIT', product='MIS',
                                        validity='DAY')
            print(order_id)
    print(" working...")
    return


schedule.every().day.at("09:15").do(job)
while True:  # FIX: 'While' is not Python -- the keyword is lowercase 'while'
    schedule.run_pending()
    time.sleep(1)
Is there a way of printing the time output on one line instead of scrolling down the page.
# FIX: the imports were inside the loop body, re-executed on every
# iteration; import once, at the top.
import datetime
import pytz

while True:
    T = datetime.datetime.now()
    print(T)
In Python 2, do the following; I tried it on my Python 2.7, and it also works even without the sys.stdout.flush():
while True:
import datetime
T = datetime.datetime.now()
print "{}\r".format(T), ;sys.stdout.flush()
in python 3
# Python 3 version: end='\r' overwrites the same line, flush=True makes the
# update visible immediately.
import datetime  # FIX: hoisted -- the import ran on every loop iteration

while True:
    T = datetime.datetime.now()
    print(T, end='\r', flush=True)
import datetime
import sys
import os
import time

# hide the terminal cursor while we repeatedly overwrite the line
os.system('setterm -cursor off')
try:
    while True:
        T = datetime.datetime.now()
        out = str(T)
        sys.stdout.write(out)
        sys.stdout.flush()  # FIX: without a flush the text may never appear
        time.sleep(0.1)     # FIX: avoid a 100%-CPU busy loop
        # FIX: back up exactly as many characters as were written, instead
        # of a hard-coded 50 backspaces
        sys.stdout.write('\b' * len(out))
finally:
    os.system('setterm -cursor on')  # restore the cursor on exit
you can import print from python 3 and do:
from __future__ import print_function
import time
import sys
import datetime
while True:
T = datetime.datetime.now()
print(T, end="\r")
sys.stdout.flush()
time.sleep(.4)
This solution is based on the following gist
I have been working on a program where ICAO codes are parsed from an online text file as well as their latitudes and longitudes using urllib. The program takes the ICAO codes and plugs them into a url to a different website. So far I've been successful in seeing which urls work and which ones do not, but when I try to print the latitude with the urls that work, they end up giving me a false latitude.
Here is my code...
import re
import cookielib
from cookielib import CookieJar
import time
import scipy.interpolate
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
from string import *
import urllib2
from urllib2 import urlopen
from urllib2 import Request,HTTPError, URLError
import time
import csv
from StringIO import StringIO
from mpl_toolkits.basemap import Basemap
import scipy
cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor)
opener.addheaders = [('User-agent','mr_anderson')]
keywords = map(''.join, product(ascii_lowercase, repeat=3))
keywords = ["k"+a+b+c for a,b,c in product(ascii_lowercase, repeat=3)]
start_time = time.time()
print("--- %s seconds ---" % (time.time() - start_time))
text_file = open("nws_gov.txt","a")
try:
a = 1
b = 1
c = 0
List=[""]
for element in range(1,10):
i=1
i+=1
a+=1
b+=1
c+=1
keywargs = str(keywords[a]).upper()
argument = 'http://weather.rap.ucar.edu/surface/stations.txt'
sourceCode = opener.open(argument).read()
airportcode = re.findall(r'\K\w\w\w.*?',str(sourceCode))
lat = re.findall(r'\d{1,2}\s\d{1,2}\N',str(sourceCode))
lata = lat[a]
arg = 'http://w1.weather.gov/xml/current_obs/'+str(airportcode[a])+'.rss'
try:
page_open = opener.open(arg)
except:
None
else:
print(arg+str(lata))
except Exception, e:
print(str(e))
Thanks,
Scott Reinhardt