I am running the script below and it takes almost 35 seconds to cover all stocks. Is there any library or approach to fetch all stocks at once and make it run faster?
import schedule
import time
from kiteconnect import KiteConnect
import csv
import json
import requests
import pandas_datareader.data as pdr
import pandas as pd
import matplotlib.pyplot as plt
import subprocess
def job():
    api_key = 'YOUR_API'
    api_secret = 'YOUR_SECRETKEY'
    api_token = 'YOUR_ACCESSTOKEN'
    kite = KiteConnect(api_key=api_key)
    kite.set_access_token('YOUR_ACCESSTOKEN')
    Stocks = ['BANKINDIA', 'CAPF', 'CHENNPETRO', 'DLF',
              'EQUITAS', 'ESCORTS', 'FORTIS', 'HEXAWARE',
              'IDBI', 'IDFCBANK', 'IOC', 'IRB', 'ITC', 'JUBLFOOD',
              'KPIT', 'OFSS', 'ONGC', 'PFC', 'PNB',
              'RPOWER', 'TATAPOWER', 'VGUARD', 'WOCKPHARMA']
    for testst in Stocks:
        print(testst)
        Kite_TODAY = "https://api.kite.trade/instruments/NSE/%s?api_key=YOUR_API&access_token=YOUR_ACCESS_TOKEN"
        print(Kite_TODAY % testst)
        r = requests.get(Kite_TODAY % testst)
        rjson = r.json()
        r1 = rjson['data']['last_price']
        Open = rjson['data']['ohlc']['open']
        High = rjson['data']['ohlc']['high']
        Low = rjson['data']['ohlc']['low']
        Close = rjson['data']['ohlc']['close']
        print(" Stock %s Open %s High %s Low %s Close %s" % (testst, Open, High, Low, Close))
        if Open == High:
            order1 = ("kite.order_place(tradingsymbol='%s',exchange='NSE',quantity=1,price=%s,"
                      "squareoff_value=1,stoploss_value=5,variety='bo',transaction_type='SELL',"
                      "order_type='LIMIT',product='MIS',validity='DAY')" % (testst, Open))
            exec(order1)  # exec() returns None, so there is nothing useful to print here
    print(" working...")
    return
schedule.every().day.at("09:15").do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
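Most of those 35 seconds are sequential HTTP round trips, one per symbol. As a rough sketch (not tested against your account, keeping the same per-symbol quote URL used above with placeholder credentials), the requests can be fired concurrently with concurrent.futures so the network waits overlap; kiteconnect may also offer batched quote calls, but the threading approach needs no API change:

# Sketch: fetch all quotes concurrently instead of one at a time.
# The endpoint and response shape follow the script above; the API key and
# access token are placeholders.
from concurrent.futures import ThreadPoolExecutor
import requests

KITE_TODAY = "https://api.kite.trade/instruments/NSE/%s?api_key=YOUR_API&access_token=YOUR_ACCESS_TOKEN"

def fetch_quote(symbol):
    r = requests.get(KITE_TODAY % symbol, timeout=10)
    data = r.json()['data']
    return symbol, data['ohlc'], data['last_price']

def fetch_all(symbols, workers=10):
    # One worker thread per in-flight request (capped at `workers`); total time
    # is roughly the slowest single request rather than the sum of all of them.
    with ThreadPoolExecutor(max_workers=workers) as pool:
        return list(pool.map(fetch_quote, symbols))

# Example: quotes = fetch_all(Stocks)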
I have to compare the lag in a server-client model over 2.4G and 5G.
My expectation is that 5G is faster than 2.4G by a large margin. I have already taken the 5G measurements; the average lag turned out to be 40.2 ms, which is higher than I was predicting. The issue became visible when I tried the same with the 2.4G setup: the lag was calculated to be a negative value. The two computers on which I was running the code weren't really synchronised. I would appreciate any input on how to solve this issue.
I wrote the code in Jupyter as a notebook.
Below you can find PowerPoints of the setups used and the respective code used for the client and the server. The results displayed are in microseconds.
5G Setup
2.4G Setup
Server Code:
#!/usr/bin/env python
# coding: utf-8

# In[1]:
from flask import Flask
from flask import request
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
import time
from time import sleep
from decimal import Decimal

# In[2]:
test = 1

# In[3]:
# create csv file to append data to
file_name = "2.4G_Time_Data_" + str(test)
test = test + 1
print(file_name)
with open(file_name + '.csv', 'w', newline='') as time_file:
    spamwriter = csv.writer(time_file, delimiter=',',
                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
    spamwriter.writerow(['Packet', 'Lag(uS)'])

# In[ ]:
# start running a server; saves the incoming data in a csv file
received_package = 0
app = Flask(__name__)

@app.route('/postjson', methods=['POST'])
def postJsonHandler():
    global received_package
    received_package = received_package + 1
    print(request.is_json)
    content = request.get_json()
    print(content)
    now = datetime.now()
    sent_time = content["time"]  # renamed so it does not shadow the time module
    time_now = datetime.timestamp(now)
    print("Sent : " + str(sent_time))
    print("Received : " + str(time_now))
    delta_time = (time_now - sent_time) * (10**6)  # in microseconds
    print("Packet Travel Time(s) : " + str(delta_time))
    with open(file_name + '.csv', 'a') as f:
        writer = csv.writer(f)
        writer.writerow([str(received_package), str(delta_time)])
    return 'JSON Received'

app.run(host='0.0.0.0', port=8090)
Client Code:
from datetime import datetime
import requests
import signal
from time import sleep
import time
import os
import sys
import json

sample_size = 1000

for i in range(sample_size):
    now = datetime.now()
    time_str = now.strftime("%H:%M:%S") + ":" + str(now.microsecond)
    # time_str = str(now)
    timestamp = datetime.timestamp(now)
    requests.post('http://myIP:8090/postjson', json={'time': timestamp})
    # note: sys.getsizeof(json) measures the json module object, not the request payload
    print("Estimated size: " + str(sys.getsizeof(json) / 1024) + "KB")
    sleep(0.1)
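One way to sidestep the clock-synchronisation problem entirely is to measure round-trip time on the client alone, so only one clock is involved. A minimal sketch under that assumption (the server URL is the same placeholder as in the client code above; halving the RTT only approximates one-way lag if the two directions are symmetric):

# Sketch: time the full request/response on the client with a monotonic clock,
# so the server's clock never enters the measurement.
import time
import requests

SERVER_URL = 'http://myIP:8090/postjson'  # placeholder, as in the client code above

def measure_rtt(n=1000, pause=0.1):
    rtts = []
    for i in range(n):
        t0 = time.monotonic()                       # immune to NTP adjustments
        requests.post(SERVER_URL, json={'seq': i})
        rtts.append((time.monotonic() - t0) * 1e6)  # microseconds
        time.sleep(pause)
    return rtts

# Example: rtts = measure_rtt(100); print(sum(rtts) / len(rtts))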
My 2.4G Measurement
I have a large number of import statements (with prints for debug...) for my code:
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, struct, json, time, socket
print("start import BS")
from BS import run_BS # custom library
print("end import BS")
print("Import 1")
print("Import 2")
import numpy as np
print("Import 3")
import platform
import datetime
from posixpath import join as pjoin
print("Import 4")
from multiprocessing import Process, Queue
import matplotlib
import tkinter
print("Import 5")
print("Import 6")
print("Import 7")
from send_tune_request import tune
print("Import 8")
print("Import 9")
and the code structure is roughly:
class ed_bs:
    def __init__(self, check=False, test_mode=False):
        curr_BS_proc = Process(target=run_BS, args=(a, b, c))

    def main_loop(self):
        while 1:
            pass  # run forever...

if __name__ == '__main__':
    e_d = ed_bs(check=True)
    test_mode = True
    if test_mode:
        test_start_thread = Process(target=tune)
        test_start_thread.setDaemon = True
        test_start_thread.start()
The printed output looks as follows:
start import BS
end import BS
Import 1
Import 2
Import 3
Import 4
Import 5
Import 6
Import 7
Import 8
Import 9
start import BS
end import BS
Import 1
Import 2
Import 3
Import 4
Import 5
Import 6
Import 7
Import 8
Import 9
start import BS
end import BS
Import 1
Import 2
Import 3
Import 4
Import 5
Import 6
Import 7
Import 8
Import 9
I'm guessing that it prints three times because there are three processes in this flow (the __main__ process, tune, and run_BS), but neither BS nor send_tune_request imports the __file__ file, so I am a little confused. The imports take a long time (run_BS does TensorFlow trace compilation) and use a lot of memory because everything is imported three times across three processes, so I'd like to limit the imports more appropriately.
Why are the imports taking so long, and how do I make everything import only once?
Thanks for the help.
EDIT: BS.py looks something like the below:
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, struct, json, time
from posixpath import join as pjoin
from termcolor import colored
import numpy as np
import scipy.signal as sig
import tensorflow as tf
import platform
import tensorflow_probability as tfp

def run_BS():
    bs = BS()
    while 1:
        bs.process_data()

class BS:
    def process_data(self):
        self.tf_process_1()
        self.tf_process_2()
        self.non_tf_process()

    @tf.function
    def tf_process_1(self):
        tf.cast()
        ...

    @tf.function
    def tf_process_2(self):
        tf.cast()
        ...

    def non_tf_process(self):
        x = np.zeros(100)
        x = x + 1
        ...
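If the processes are started with the spawn start method (the default on Windows and macOS), each child re-imports the main module and everything it imports at the top level, which would explain the prints appearing three times. A hedged sketch of one way to limit that, assuming run_BS and tune are only needed inside their own child processes: defer the heavy imports into the target functions so only the process that actually runs them pays the import cost.

# Sketch: keep the TensorFlow-heavy import out of the module top level so it is
# only executed in the process that needs it. Argument names are placeholders.
from multiprocessing import Process

def _bs_entry(a, b, c):
    # Imported here, not at the top of the file: the parent and the tune
    # process never import BS (and therefore never import TensorFlow).
    from BS import run_BS
    run_BS(a, b, c)

def _tune_entry():
    from send_tune_request import tune
    tune()

if __name__ == '__main__':
    bs_proc = Process(target=_bs_entry, args=(1, 2, 3))
    tune_proc = Process(target=_tune_entry, daemon=True)  # daemon=True rather than setDaemon
    bs_proc.start()
    tune_proc.start()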
I have a lot of images on my Apache server that I want to move to Azure.
I cannot afford to do it sequentially, so I will add threading afterwards. I can access those images from a given URL and build a list from that. Easy.
However, I do not have enough disk space to download each image, upload it, and then delete it. I would like something cleaner.
Is there a method to do that?
Something like:
block_blob_service.AZURECOMMAND(container, source_URL, target_blob_name)
If not possible, is there a workaround?
Here is the complete code I have today (download and then upload, which I want to avoid):
EDIT: Thanks to Gaurav Mantri, I got it now. I have updated the code.
import requests
from bs4 import BeautifulSoup
from os.path import basename
import os
import sys
import urllib
import urllib2
import urlparse
import argparse
import json
import config
import random
import base64
import datetime
import time
import string
from azure.storage import CloudStorageAccount, AccessPolicy
from azure.storage.blob import BlockBlobService, PageBlobService, AppendBlobService
from azure.storage.models import CorsRule, Logging, Metrics, RetentionPolicy, ResourceTypes, AccountPermissions
from azure.storage.blob.models import BlobBlock, ContainerPermissions, ContentSettings
#from azure.storage.blob import BlobService
from azure.storage import *
#from azure.storage.blob.blobservice import BlobService

CURRENT_DIR = os.getcwd()
STORING_DIRECTORY_NAME = "stroage_scrapped_images"
STORING_DIRECTORY = CURRENT_DIR + "/" + STORING_DIRECTORY_NAME

if not os.path.exists(STORING_DIRECTORY):
    os.makedirs(STORING_DIRECTORY)

def randomword(length):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(length))

startdate = time.clock()
metadata_loaded = {'Owner': 'ToBeAddedSoon', 'Date_Of_Upload': startdate, 'VAR_2': 'VAL_VAR_2', 'VAR_3': 'VAL_VAR_3', 'VAR_4': 'VAL_VAR_4'}

with open("credentials.json", 'r') as f:
    data = json.loads(f.read())
    StoAcc_var_name = data["storagacc"]["Accountname"]
    StoAcc_var_key = data["storagacc"]["AccountKey"]
    StoAcc_var_container = data["storagacc"]["Container"]
    #print StoAcc_var_name, StoAcc_var_key, StoAcc_var_container

def copy_azure_files(source_url, destination_object, destination_container):
    blob_service = BlockBlobService(account_name=StoAcc_var_name, account_key=StoAcc_var_key)
    blob_service.copy_blob(destination_container, destination_object, source_url)

block_blob_service = BlockBlobService(account_name=StoAcc_var_name, account_key=StoAcc_var_key)

def upload_func(container, blobname, filename):
    start = time.clock()
    block_blob_service.create_blob_from_path(
        container,
        blobname,
        filename)
    elapsed = time.clock()
    elapsed = elapsed - start
    print "*** DEBUG *** Time spent uploading API ", filename, " is : ", elapsed, " in Bucket/container : ", container

#URL_TARGET = "https://mouradcloud.westeurope.cloudapp.azure.com/blog/blog/category/food/"
URL_TARGET = "https://www.cdiscount.com/search/10/telephone.html"
base_url = URL_TARGET
out_folder = '/tmp'
r = requests.get(URL_TARGET)
data = r.text
soup = BeautifulSoup(data, "lxml")

for link in soup.find_all('img'):
    src = link
    image_url = link.get("src")
    while image_url is not None:
        if 'http' in image_url:
            blocks = []
            if image_url.endswith(('.png', '.jpg', '.jpeg')):
                print " ->>>>>>>>>>>>>> THIS IS AN IMAGE ... PROCESSING "
                file_name_downloaded = basename(image_url)
                file_name_path_local = STORING_DIRECTORY + "/" + file_name_downloaded
                with open(file_name_path_local, "wb") as f:
                    f.write(requests.get(image_url).content)
                filename_in_clouddir = "uploads" + "/" + file_name_downloaded
                #upload_func(StoAcc_var_container, filename_in_clouddir, file_name_path_local)
                copy_azure_files(image_url, filename_in_clouddir, StoAcc_var_container)
                break
            else:
                print " ->>>>>>>>>>>>>> THIS NOT AN IMAGE ... SKIPPING "
                break
        else:
            print " ->>>>>>>>>>>>>> THIS IS A LOCAL IMAGE ... SKIPPING "
            break
    continue
Indeed there's something exactly like this: copy_blob
block_blob_service.copy_blob(container, target_blob_name, source_URL)
Please keep in mind that this copy operation is an asynchronous, server-side copy, thus:
The source of the copy must be publicly accessible.
You must wait for the copy operation to finish before deleting the source items (a polling sketch is shown below).
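A minimal sketch of that wait, assuming the same legacy BlockBlobService client used in the question (the copy state is read back through get_blob_properties):

# Sketch: start the server-side copy and poll until it leaves the 'pending'
# state before touching the source. Names mirror the question's variables.
import time
from azure.storage.blob import BlockBlobService

def copy_and_wait(service, container, blob_name, source_url, poll_seconds=1):
    service.copy_blob(container, blob_name, source_url)
    while True:
        props = service.get_blob_properties(container, blob_name)
        status = props.properties.copy.status
        if status != 'pending':   # 'success', 'failed' or 'aborted'
            return status
        time.sleep(poll_seconds)

# Example:
# service = BlockBlobService(account_name=StoAcc_var_name, account_key=StoAcc_var_key)
# copy_and_wait(service, StoAcc_var_container, 'uploads/img.jpg', image_url)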
UPDATE
Modified code (I have not tried running it)
import requests
from bs4 import BeautifulSoup
from os.path import basename
import os
import sys
import urllib
import urllib2
import urlparse
import argparse
import json
import config
import random
import base64
import datetime
import time
import string
from azure.storage import CloudStorageAccount, AccessPolicy
from azure.storage.blob import BlockBlobService, PageBlobService, AppendBlobService
from azure.storage.models import CorsRule, Logging, Metrics, RetentionPolicy, ResourceTypes, AccountPermissions
from azure.storage.blob.models import BlobBlock, ContainerPermissions, ContentSettings

CURRENT_DIR = os.getcwd()
STORING_DIRECTORY_NAME = "stroage_scrapped_images"
STORING_DIRECTORY = CURRENT_DIR + "/" + STORING_DIRECTORY_NAME

if not os.path.exists(STORING_DIRECTORY):
    os.makedirs(STORING_DIRECTORY)

def randomword(length):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(length))

startdate = time.clock()
metadata_loaded = {'Owner': 'ToBeAddedSoon', 'Date_Of_Upload': startdate, 'VAR_2': 'VAL_VAR_2', 'VAR_3': 'VAL_VAR_3', 'VAR_4': 'VAL_VAR_4'}

with open("credentials.json", 'r') as f:
    data = json.loads(f.read())
    StoAcc_var_name = data["storagacc"]["Accountname"]
    StoAcc_var_key = data["storagacc"]["AccountKey"]
    StoAcc_var_container = data["storagacc"]["Container"]
    #print StoAcc_var_name, StoAcc_var_key, StoAcc_var_container

block_blob_service = BlockBlobService(account_name=StoAcc_var_name, account_key=StoAcc_var_key)

def upload_func(container, blobname, sourceurl):
    start = time.clock()
    block_blob_service.copy_blob(
        container,
        blobname,
        sourceurl)
    elapsed = time.clock()
    elapsed = elapsed - start
    print "*** DEBUG *** Time spent uploading API ", blobname, " is : ", elapsed, " in Bucket/container : ", container

#URL_TARGET = "https://mouradcloud.westeurope.cloudapp.azure.com/blog/blog/category/food/"
URL_TARGET = "https://www.cdiscount.com/search/10/telephone.html"
base_url = URL_TARGET
out_folder = '/tmp'
r = requests.get(URL_TARGET)
data = r.text
soup = BeautifulSoup(data, "lxml")

for link in soup.find_all('img'):
    src = link
    image_url = link.get("src")
    while image_url is not None:
        if 'http' in image_url:
            blocks = []
            if image_url.endswith(('.png', '.jpg', '.jpeg')):
                print " ->>>>>>>>>>>>>> THIS IS AN IMAGE ... PROCESSING "
                file_name_downloaded = basename(image_url)
                filename_in_clouddir = "uploads" + "/" + file_name_downloaded
                upload_func(StoAcc_var_container, filename_in_clouddir, image_url)
                break
            else:
                print " ->>>>>>>>>>>>>> THIS NOT AN IMAGE ... SKIPPING "
                break
        else:
            print " ->>>>>>>>>>>>>> THIS IS A LOCAL IMAGE ... SKIPPING "
            break
    continue
Is there a way of printing the time output on one line instead of scrolling down the page?
while True:
    import datetime
    import pytz
    T = datetime.datetime.now()
    print(T)
In Python 2, do the below. I tried it on my Python 2.7; it also works even without the sys.stdout.flush():
import sys
while True:
    import datetime
    T = datetime.datetime.now()
    print "{}\r".format(T), ; sys.stdout.flush()
In Python 3:
while True:
    import datetime
    T = datetime.datetime.now()
    print(T, end='\r', flush=True)
import datetime
import sys
import os

os.system('setterm -cursor off')
while True:
    T = datetime.datetime.now()
    sys.stdout.write(str(T))
    sys.stdout.write('\b' * 50)
You can import the Python 3 print function and do:
from __future__ import print_function
import time
import sys
import datetime

while True:
    T = datetime.datetime.now()
    print(T, end="\r")
    sys.stdout.flush()
    time.sleep(.4)
This solution is based on the following gist.
I have been working on a program where ICAO codes, along with their latitudes and longitudes, are parsed from an online text file using urllib. The program takes the ICAO codes and plugs them into a URL for a different website. So far I've been successful in seeing which URLs work and which ones do not, but when I try to print the latitude for the URLs that work, I end up with a false latitude.
Here is my code...
import re
import cookielib
from cookielib import CookieJar
import time
import scipy.interpolate
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
from string import *
import urllib2
from urllib2 import urlopen
from urllib2 import Request, HTTPError, URLError
import time
import csv
from StringIO import StringIO
from mpl_toolkits.basemap import Basemap
import scipy

cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor)
opener.addheaders = [('User-agent', 'mr_anderson')]

keywords = map(''.join, product(ascii_lowercase, repeat=3))
keywords = ["k"+a+b+c for a, b, c in product(ascii_lowercase, repeat=3)]

start_time = time.time()
print("--- %s seconds ---" % (time.time() - start_time))

text_file = open("nws_gov.txt", "a")

try:
    a = 1
    b = 1
    c = 0
    List = [""]
    for element in range(1, 10):
        i = 1
        i += 1
        a += 1
        b += 1
        c += 1
        keywargs = str(keywords[a]).upper()
        argument = 'http://weather.rap.ucar.edu/surface/stations.txt'
        sourceCode = opener.open(argument).read()
        airportcode = re.findall(r'\K\w\w\w.*?', str(sourceCode))
        lat = re.findall(r'\d{1,2}\s\d{1,2}\N', str(sourceCode))
        lata = lat[a]
        arg = 'http://w1.weather.gov/xml/current_obs/'+str(airportcode[a])+'.rss'
        try:
            page_open = opener.open(arg)
        except:
            None
        else:
            print(arg+str(lata))
except Exception, e:
    print(str(e))
Thanks,
Scott Reinhardt
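One likely source of the false latitude is that the two findall calls build separate lists whose indices do not necessarily line up, so airportcode[a] and lat[a] can come from different stations. For illustration, here is a hedged sketch of pairing the code and latitude from the same line instead; the line layout of stations.txt (a four-letter code starting with K, followed later by a latitude written as degrees, minutes and a hemisphere letter) is an assumption here and should be checked against the real file:

# Sketch: pull the ICAO code and latitude out of the *same* line so they stay
# paired. The assumed line format is 'KXXX ... DD MMN ...'.
import re

line_re = re.compile(r'\b(K[A-Z]{3})\b.*?(\d{1,2}) (\d{2})([NS])')

def parse_stations(text):
    stations = []
    for line in text.splitlines():
        m = line_re.search(line)
        if m:
            icao, deg, minutes, hemi = m.groups()
            latitude = int(deg) + int(minutes) / 60.0
            if hemi == 'S':
                latitude = -latitude
            stations.append((icao, latitude))
    return stations

# Example: parse_stations(sourceCode) -> [('KABC', 34.53), ...]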