Matching ICAO codes with latitudes in Python using urllib

I have been working on a program that uses urllib to parse ICAO codes, along with their latitudes and longitudes, from an online text file. The program then plugs the ICAO codes into a URL for a different website. So far I've been able to tell which URLs work and which do not, but when I try to print the latitude alongside the working URLs, the latitude that comes out is wrong.
Here is my code...
import re
import cookielib
from cookielib import CookieJar
import time
import scipy.interpolate
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
from string import *
import urllib2
from urllib2 import urlopen
from urllib2 import Request,HTTPError, URLError
import time
import csv
from StringIO import StringIO
from mpl_toolkits.basemap import Basemap
import scipy
cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent','mr_anderson')]
keywords = map(''.join, product(ascii_lowercase, repeat=3))
keywords = ["k"+a+b+c for a,b,c in product(ascii_lowercase, repeat=3)]
start_time = time.time()
print("--- %s seconds ---" % (time.time() - start_time))
text_file = open("nws_gov.txt","a")
try:
    a = 1
    b = 1
    c = 0
    List = [""]
    for element in range(1,10):
        i = 1
        i += 1
        a += 1
        b += 1
        c += 1
        keywargs = str(keywords[a]).upper()
        argument = 'http://weather.rap.ucar.edu/surface/stations.txt'
        sourceCode = opener.open(argument).read()
        airportcode = re.findall(r'\K\w\w\w.*?',str(sourceCode))
        lat = re.findall(r'\d{1,2}\s\d{1,2}\N',str(sourceCode))
        lata = lat[a]
        arg = 'http://w1.weather.gov/xml/current_obs/'+str(airportcode[a])+'.rss'
        try:
            page_open = opener.open(arg)
        except:
            None
        else:
            print(arg+str(lata))
except Exception, e:
    print(str(e))
Thanks,
Scott Reinhardt
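As an aside for anyone hitting the same problem: below is a rough, untested sketch of one way to keep each station's ICAO code and latitude together, instead of two independent findall() calls whose indices can drift apart. The regular expression is only a guess at the stations.txt layout and may need adjusting.
import re

def parse_stations(text):
    # Assumes each data line holds a 4-letter identifier starting with 'K'
    # and a latitude written as degrees, minutes and hemisphere, e.g. '33 34N'.
    # Station names that themselves look like ICAO codes would need extra care.
    line_re = re.compile(r'\b(K[A-Z]{3})\b.*?(\d{1,3})\s+(\d{2})([NS])')
    for line in text.splitlines():
        m = line_re.search(line)
        if m:
            lat = int(m.group(2)) + int(m.group(3)) / 60.0
            if m.group(4) == 'S':
                lat = -lat
            yield m.group(1), lat

# usage, with sourceCode already fetched as in the question:
# for icao, lat in parse_stations(str(sourceCode)):
#     print(icao, lat)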

Related

Problem with Time Synchronisation in 5G and 2.4G Packet Lag Measurement

I have to compare the lag in a server-client model over 2.4G and 5G.
My expectation is that 5G is faster than 2.4G by a large margin. I have already taken the 5G measurements; the average lag turned out to be 40.2 ms, which is higher than I was predicting. The issue became visible when I tried the same with the 2.4G setup and the lag came out as a negative value. The two computers on which I was running the code weren't synchronised. I would appreciate any input on how to solve this issue.
I wrote the code in a Jupyter notebook.
Below you can find PowerPoints of the setups used and the respective code used for the client and the server. The results displayed are in microseconds.
5G Setup
2.4G Setup
Server Code:
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from flask import Flask
from flask import request
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
import time
from time import sleep
from decimal import Decimal
# In[2]:
test = 1
# In[3]:
#create csv. file to append data
file_name = "2.4G_Time_Data_" + str(test)
test = test + 1
print(file_name)
with open(file_name+'.csv', 'w', newline='') as time_file:
    spamwriter = csv.writer(time_file, delimiter=',',
                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
    spamwriter.writerow(['Packet', 'Lag(uS)'])
# In[ ]:
# start running a server; saves the incoming data in a csv file
received_package = 0
app = Flask(__name__)
@app.route('/postjson', methods=['POST'])
def postJsonHandler():
    global received_package
    received_package = received_package + 1
    print(request.is_json)
    content = request.get_json()
    print(content)
    now = datetime.now()
    time = content["time"]
    time_now = datetime.timestamp(now)
    print("Sent : " + str(time))
    print("Received : " + str(time_now))
    delta_time = (time_now - time) * (10**6)  # in microseconds
    print("Packet Travel Time(s) : " + str(delta_time))
    with open(file_name+'.csv', 'a') as f:
        writer = csv.writer(f)
        writer.writerow([str(received_package), str(delta_time)])
    return 'JSON Received'
app.run(host='0.0.0.0', port=8090)
Client Code:
from datetime import datetime
import requests
import signal
from time import sleep
import time
import os
import sys
import json
sample_size = 1000
for i in range(sample_size):
    now = datetime.now()
    time = now.strftime("%H:%M:%S") + ":" + str(now.microsecond)
    #time = str(now)
    timestamp = datetime.timestamp(now)
    requests.post('http://myIP:8090/postjson', json={'time': timestamp})
    print("Estimated size: " + str(sys.getsizeof(json) / 1024) + "KB")
    sleep(0.1)
My 2.4G Measurement
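The negative values come from subtracting timestamps taken on two machines whose clocks are not synchronised. If NTP/PTP synchronisation is not an option, one workaround is to measure the round trip on the client alone, where only one clock is involved, and treat half of it as a rough one-way estimate. Below is a sketch of that idea; the endpoint placeholder is the one from the client code above, everything else is illustrative.
import time
import requests

SAMPLE_SIZE = 100
URL = 'http://myIP:8090/postjson'   # same placeholder endpoint as in the client code above

round_trips_us = []
for i in range(SAMPLE_SIZE):
    start = time.perf_counter()                       # one clock, on the client only
    requests.post(URL, json={'time': time.time()})    # payload kept compatible with the server above
    rtt_us = (time.perf_counter() - start) * 1e6      # microseconds
    round_trips_us.append(rtt_us)
    time.sleep(0.1)

# Half the round trip is a rough upper bound on the one-way lag, and it can
# never come out negative because no second clock is involved.
print('mean RTT (us):', sum(round_trips_us) / len(round_trips_us))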

(Python) How can I get instant GPS data from Thingspeak?

#!/usr/bin/env python
from urllib import request
import requests
from urllib.request import urlopen
import threading # import threadding
import json # import json
READ_API_KEY= "ZG0YZXYKP9LOMMB9"
CHANNEL_ID= "1370649"
while True:
    TS = urllib3.urlopen("http://api.thingspeak.com/channels/%s/feeds/last.json?api_key=%s"
                         % (CHANNEL_ID, READ_API_KEY))
    response = TS.read()
    data = json.loads(response)
    b = data['field1']
    c = data['field2']
    print(b)
    print(c)
    time.sleep(15)
    TS.close()
I uploaded the data from a Raspberry Pi to ThingSpeak, but in order to use it in the Folium map I need to access these data instantly. The code here seems like it should work, but I keep getting errors. Can you help me?
I would recommend you research a bit more about Python imports.
In the meantime, this change should fix the issue:
import time  # you forgot to import it

# TS = urllib3.urlopen(url)
# should be
# TS = urlopen(url)

while True:
    TS = urlopen("http://api.thingspeak.com/channels/1370649/feeds/last.json?api_key=ZG0YZXYKP9LOMMB9")
    response = TS.read()
    data = json.loads(response)
    b = data['field1']
    c = data['field2']
    print(b)
    print(c)
    time.sleep(15)
    TS.close()
data=json.loads(response)
^
SyntaxError: invalid syntax
I'm searching, but I haven't found much.
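For reference, here is a cleaned-up version of the polling loop with the imports it actually needs. It is a sketch, not tested against the channel, and which ThingSpeak field holds latitude or longitude depends on how the channel was configured.
import json
import time
from urllib.request import urlopen

READ_API_KEY = "ZG0YZXYKP9LOMMB9"
CHANNEL_ID = "1370649"
URL = ("http://api.thingspeak.com/channels/%s/feeds/last.json?api_key=%s"
       % (CHANNEL_ID, READ_API_KEY))

while True:
    response = urlopen(URL)          # plain urllib.request, no urllib3 needed
    data = json.loads(response.read())
    response.close()
    print(data['field1'])            # e.g. latitude, depending on the channel setup
    print(data['field2'])            # e.g. longitude
    time.sleep(15)                   # poll every 15 seconds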

Azure Storage Python SDK : Uploading file to Azure blob storage without writing it on my disk

I have a lot of images on my Apache server that I want to push to Azure.
I cannot afford to do it sequentially, so I will add threading afterwards. I can access those images from a given URL and build a list from that. Easy.
But I do not have enough disk space to download each image, upload it, and then delete it. I would like something cleaner.
Is there a method to do that?
Something like:
block_blob_service.AZURECOMMAND(container, source_URL, target_blob_name)
If that is not possible, is there a workaround?
Here is the complete code I have today (download and then upload, which is what I want to avoid):
EDIT: Thanks to Gaurav Mantri, I got it now. I have updated the code.
import requests
from bs4 import BeautifulSoup
from os.path import basename
import os
import sys
import urllib
import urllib2
import urlparse
import argparse
import json
import config
import random
import base64
import datetime
import time
import string
from azure.storage import CloudStorageAccount, AccessPolicy
from azure.storage.blob import BlockBlobService, PageBlobService, AppendBlobService
from azure.storage.models import CorsRule, Logging, Metrics, RetentionPolicy, ResourceTypes, AccountPermissions
from azure.storage.blob.models import BlobBlock, ContainerPermissions, ContentSettings
#from azure.storage.blob import BlobService
from azure.storage import *
#from azure.storage.blob.blobservice import BlobService
CURRENT_DIR = os.getcwd()
STORING_DIRECTORY_NAME = "stroage_scrapped_images"
STORING_DIRECTORY = CURRENT_DIR+"/"+STORING_DIRECTORY_NAME
if not os.path.exists(STORING_DIRECTORY):
    os.makedirs(STORING_DIRECTORY)
def randomword(length):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(length))
startdate = time.clock()
metadata_loaded = {'Owner': 'ToBeAddedSoon', 'Date_Of_Upload': startdate, 'VAR_2': 'VAL_VAR_2','VAR_3': 'VAL_VAR_3','VAR_4': 'VAL_VAR_4'}
with open("credentials.json", 'r') as f:
data = json.loads(f.read())
StoAcc_var_name = data["storagacc"]["Accountname"]
StoAcc_var_key = data["storagacc"]["AccountKey"]
StoAcc_var_container = data["storagacc"]["Container"]
#print StoAcc_var_name, StoAcc_var_key, StoAcc_var_container
def copy_azure_files(source_url,destination_object,destination_container):
    blob_service = BlockBlobService(account_name=StoAcc_var_name, account_key=StoAcc_var_key)
    blob_service.copy_blob(destination_container, destination_object, source_url)
block_blob_service = BlockBlobService(account_name=StoAcc_var_name, account_key=StoAcc_var_key)
def upload_func(container,blobname,filename):
    start = time.clock()
    block_blob_service.create_blob_from_path(
        container,
        blobname,
        filename)
    elapsed = time.clock()
    elapsed = elapsed - start
    print "*** DEBUG *** Time spent uploading API " , filename , " is : " , elapsed , " in Bucket/container : " , container
#URL_TARGET = "https://mouradcloud.westeurope.cloudapp.azure.com/blog/blog/category/food/"
URL_TARGET = "https://www.cdiscount.com/search/10/telephone.html"
base_url = URL_TARGET
out_folder = '/tmp'
r = requests.get(URL_TARGET)
data = r.text
soup = BeautifulSoup(data, "lxml")
for link in soup.find_all('img'):
    src = link
    image_url = link.get("src")
    while image_url is not None:
        if 'http' in image_url:
            blocks = []
            if image_url.endswith(('.png', '.jpg', '.jpeg')):
                print " ->>>>>>>>>>>>>> THIS IS AN IMAGE ... PROCESSING "
                file_name_downloaded = basename(image_url)
                file_name_path_local = STORING_DIRECTORY+"/"+file_name_downloaded
                with open(file_name_path_local, "wb") as f:
                    f.write(requests.get(image_url).content)
                filename_in_clouddir = "uploads"+"/"+file_name_downloaded
                #upload_func(StoAcc_var_container,filename_in_clouddir,file_name_path_local)
                copy_azure_files(image_url,filename_in_clouddir,StoAcc_var_container)
                break
            else:
                print " ->>>>>>>>>>>>>> THIS NOT AN IMAGE ... SKIPPING "
                break
        else:
            print " ->>>>>>>>>>>>>> THIS IS A LOCAL IMAGE ... SKIPPING "
            break
    continue
Indeed there's something exactly like this: copy_blob
block_blob_service.copy_blob(container, target_blob_name, source_URL)
Please keep in mind that this copy operation is asynchronous server side copying, thus:
Source of the copy should be publicly available.
You must wait for the copy operation to finish before deleting source items.
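A minimal sketch of that wait, assuming the same legacy azure-storage SDK (BlockBlobService) used in the question; attribute names differ in the newer azure-storage-blob package, so check them against the version installed:
import time

def copy_and_wait(blob_service, container, blob_name, source_url, poll_seconds=1):
    # Start the server-side copy; the call returns before the copy is done.
    copy = blob_service.copy_blob(container, blob_name, source_url)
    # Poll the destination blob until the copy leaves the 'pending' state.
    while copy.status == 'pending':
        time.sleep(poll_seconds)
        copy = blob_service.get_blob_properties(container, blob_name).properties.copy
    return copy.status  # 'success', 'failed' or 'aborted'
Only once copy_and_wait(...) reports 'success' is it safe to delete the source image.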
UPDATE
Modified code (I have not tried running it)
import requests
from bs4 import BeautifulSoup
from os.path import basename
import os
import sys
import urllib
import urllib2
import urlparse
import argparse
import json
import config
import random
import base64
import datetime
import time
import string
from azure.storage import CloudStorageAccount, AccessPolicy
from azure.storage.blob import BlockBlobService, PageBlobService, AppendBlobService
from azure.storage.models import CorsRule, Logging, Metrics, RetentionPolicy, ResourceTypes, AccountPermissions
from azure.storage.blob.models import BlobBlock, ContainerPermissions, ContentSettings
CURRENT_DIR = os.getcwd()
STORING_DIRECTORY_NAME = "stroage_scrapped_images"
STORING_DIRECTORY = CURRENT_DIR+"/"+STORING_DIRECTORY_NAME
if not os.path.exists(STORING_DIRECTORY):
    os.makedirs(STORING_DIRECTORY)
def randomword(length):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(length))
startdate = time.clock()
metadata_loaded = {'Owner': 'ToBeAddedSoon', 'Date_Of_Upload': startdate, 'VAR_2': 'VAL_VAR_2','VAR_3': 'VAL_VAR_3','VAR_4': 'VAL_VAR_4'}
with open("credentials.json", 'r') as f:
data = json.loads(f.read())
StoAcc_var_name = data["storagacc"]["Accountname"]
StoAcc_var_key = data["storagacc"]["AccountKey"]
StoAcc_var_container = data["storagacc"]["Container"]
#print StoAcc_var_name, StoAcc_var_key, StoAcc_var_container
block_blob_service = BlockBlobService(account_name=StoAcc_var_name, account_key=StoAcc_var_key)
def upload_func(container,blobname,sourceurl):
    start = time.clock()
    block_blob_service.copy_blob(
        container,
        blobname,
        sourceurl)
    elapsed = time.clock()
    elapsed = elapsed - start
    print "*** DEBUG *** Time spent uploading API " , blobname , " is : " , elapsed , " in Bucket/container : " , container
#URL_TARGET = "https://mouradcloud.westeurope.cloudapp.azure.com/blog/blog/category/food/"
URL_TARGET = "https://www.cdiscount.com/search/10/telephone.html"
base_url = URL_TARGET
out_folder = '/tmp'
r = requests.get(URL_TARGET)
data = r.text
soup = BeautifulSoup(data, "lxml")
for link in soup.find_all('img'):
    src = link
    image_url = link.get("src")
    while image_url is not None:
        if 'http' in image_url:
            blocks = []
            if image_url.endswith(('.png', '.jpg', '.jpeg')):
                print " ->>>>>>>>>>>>>> THIS IS AN IMAGE ... PROCESSING "
                file_name_downloaded = basename(image_url)
                filename_in_clouddir = "uploads"+"/"+file_name_downloaded
                upload_func(StoAcc_var_container,filename_in_clouddir,image_url)
                break
            else:
                print " ->>>>>>>>>>>>>> THIS NOT AN IMAGE ... SKIPPING "
                break
        else:
            print " ->>>>>>>>>>>>>> THIS IS A LOCAL IMAGE ... SKIPPING "
            break
    continue

How to add multithreading or multiprocessing

I am running the script below and it takes almost 35 seconds for all stocks. Is there any library that can fetch all the stocks faster, at the same time?
import schedule
import time
from kiteconnect import KiteConnect
import csv
import json
import requests
import pandas_datareader.data as pdr
import pandas as pd
import matplotlib.pyplot as plt
import time
import subprocess
def job():
    api_key='YOUR_API'
    api_secret='YOUR_SECRETKEY'
    api_token='YOUR_ACESSTOKEN'
    kite=KiteConnect(api_key=api_key)
    kite.set_access_token('YOUR_ACCESStoken')
    Stocks = ['BANKINDIA','CAPF','CHENNPETRO','DLF',
              'EQUITAS','ESCORTS','FORTIS','HEXAWARE',
              'IDBI','IDFCBANK','IOC','IRB','ITC','JUBLFOOD',
              'KPIT','OFSS','ONGC','PFC','PNB',
              'RPOWER','TATAPOWER','VGUARD','WOCKPHARMA']
    for testst in Stocks:
        print(testst)
        Kite_TODAY="https://api.kite.trade/instruments/NSE/%s?api_key='YOUR_API'&access_token='ACCESS_TOKEN'"
        print(Kite_TODAY % testst)
        r = requests.get(Kite_TODAY % testst)
        rjson=r.json()
        r1=rjson['data']['last_price']
        Open = rjson['data']['ohlc']['open']
        High = rjson['data']['ohlc']['high']
        Low = rjson['data']['ohlc']['low']
        Close = rjson['data']['ohlc']['close']
        print(" Stock %s Open %s High %s Low %s Close %s" % (testst,Open,High,Low,Close))
        if Open == High:
            testkite = (("kite.order_place(tradingsymbol='%s',exchange='NSE',quantity=1,price=%s,squareoff_value=1,stoploss_value=5,variety='bo',transaction_type='SELL',order_type='LIMIT',product='MIS',validity='DAY')") % (testst,Open))
            order1=testkite
            order2=exec(order1)
            print(order2)
    print(" working...")
    return

schedule.every().day.at("09:15").do(job)
while True:
    schedule.run_pending()
    time.sleep(1)
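Since each iteration is dominated by a blocking HTTP request, a thread pool is the usual first step. Below is a rough sketch using concurrent.futures for the quote-fetching part only; the order-placement logic would stay as in the question, and the URL and API-key placeholders are copied from it.
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed

Stocks = ['BANKINDIA', 'CAPF', 'CHENNPETRO', 'DLF', 'EQUITAS', 'ESCORTS']  # shortened; use the full list
Kite_TODAY = ("https://api.kite.trade/instruments/NSE/%s"
              "?api_key='YOUR_API'&access_token='ACCESS_TOKEN'")

def fetch_quote(symbol):
    # One blocking GET per symbol; the threads let the requests overlap.
    r = requests.get(Kite_TODAY % symbol)
    return symbol, r.json()['data']['ohlc']

with ThreadPoolExecutor(max_workers=10) as pool:
    futures = [pool.submit(fetch_quote, s) for s in Stocks]
    for future in as_completed(futures):
        symbol, ohlc = future.result()
        print(symbol, ohlc['open'], ohlc['high'], ohlc['low'], ohlc['close'])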

Urllib syntax translation from python 3.5 to 2.7

I have a piece of code which was written in Python 3.5 and uses the urllib module. I tried to convert it so that it works with Python 2.7, but I get errors from the urllib imports.
For example:
Traceback (most recent call last):
File "alert.py", line 13, in <module>
import urllib.request as urllib
ImportError: No module named request
Now, I know that the urllib modules are organised differently in Python 2.7, so I'm asking for some help with the lines that use urllib.
import urllib.request as urllib
from http.cookiejar import CookieJar
from os.path import isfile
from os.path import join as joinPath
from sys import exc_info
from traceback import print_tb
from urllib.parse import urlencode
# constant
APPLICATION_PATH = '/srv/path/'
ALERT_POINT_PATH = joinPath(APPLICATION_PATH, 'alert_contact')
URL_REQUEST_TIMEOUT = 42
SMS_BOX_URL = 'xx.xxxx.xxx.xxx'
def initWebConnection():  # init web connection
    response = 0
    initUrlLibResponse = initUrlLib()  # init urllib
    if initUrlLibResponse:
        response = 1
    return response
def initUrlLib():  # init urllib
    response = 0
    try:
        cookieJar = CookieJar()  # cookies
        opener = urllib.build_opener(urllib.HTTPCookieProcessor(cookieJar))
        urllib.install_opener(opener)
    except Exception as e:
        response = 1
        # ex_type, ex, tb = exc_info()
    return response
def urlRequest(url, data=None):  # make url request
    contentResponse = None
    try:
        request = None
        if data:
            dataRequest = urlencode(data)
            dataRequest = dataRequest.encode('UTF-8')
            request = urllib.Request(url, dataRequest)
        else:
            request = urllib.Request(url)
        response = urllib.urlopen(url=request, timeout=URL_REQUEST_TIMEOUT)  # make request
        # get response
        contentResponse = response.read()
    except Exception as e:
        contentResponse = None
        # ex_type, ex, tb = exc_info()
    return contentResponse
try:
    evt.data = 'Some name'
    # check production state
    isInProduction = False
    if evt.prodState == 1000:
        isInProduction = True
    if isInProduction:
        initWebConnection()
        # check alert point
        if isfile(ALERT_POINT_PATH):
            alertContactContent = None
            with open(ALERT_POINT_PATH, 'r') as alertContactFile:
                alertContactContent = alertContactFile.read()
            alertContactContent = alertContactContent.splitlines()
            if alertContactContent:
                evt.summary = '#[ DNS: ALERT ]# {}'.format(evt.summary)
                for alertContactContentLine in alertContactContent:
                    webRequestData = dict(
                        ## TO DO: set the url parameters appropriately
                        phone=alertContactContentLine,
                        message='NEW ALERT: {}'.format(evt.ipAddress),
                    )
                    webRequestResponse = urlRequest(SMS_BOX_URL, webRequestData)
        else:
            evt.summary = '#[ ERROR: SMS ALERT NO CONTACT ]# {}'.format(evt.summary)
except Exception as e:
    ex_type, ex, tb = exc_info()
    print('\n #[ERROR]#exception: {ex}\n'.format(ex=e))
    print('\n #[ERROR]#exception traceback: {trace}\n'.format(trace=print_tb(tb)))
    evt.summary = '#[ DNS:ERROR traceback in event message ]# {}'.format(evt.summary)
    evt.message = '#[ DNS:ERROR ex_type:\n {} \nex: {} \n traceback:\n {} \n]# {}'.format(ex_type, ex,
                                                                                          print_tb(tb),
                                                                                          evt.message)
You can change the import lines from
import urllib.request as urllib
from http.cookiejar import CookieJar
from urllib.parse import urlencode
to
import urllib2 as urllib
from cookielib import CookieJar
from urllib import urlencode
for Python 2.7
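If the same script has to run under both interpreters, one common pattern is to wrap the imports in a try/except so that either version ends up bound to the same names; a sketch:
try:                                   # Python 3
    import urllib.request as urllib
    from urllib.parse import urlencode
    from http.cookiejar import CookieJar
except ImportError:                    # Python 2.7
    import urllib2 as urllib
    from urllib import urlencode
    from cookielib import CookieJar

# After this, urllib.build_opener, urllib.HTTPCookieProcessor, urllib.Request
# and urllib.urlopen resolve to the right module on either version.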
