Hey there!
# import the module
from __future__ import print_function
import aerospike
import urllib2
import json

config = {
    'hosts': [ ('127.0.0.1', 3000) ]
}

try:
    client = aerospike.client(config).connect()
except:
    import sys
    print("failed to connect to the cluster with", config['hosts'])
    sys.exit(1)

key = ('ip', 'hit', 'trial')
try:
    for i in range(0,255):
        for j in range(0,255):
            for k in range(0,255):
                for l in range(0,255):
                    if not((i == 198 and j == 168) or (i == 172 and j > 15 and j < 32) or (i == 10)):
                        response = urllib2.urlopen('http://ip-api.com/json/'+str(i)+'.'+str(j)+'.'+str(k)+'.'+str(l)).read()
                        html = response.read()
                        client.put(key, json.load(response))
except Exception as e:
    import sys
    print("error: {0}".format(e), file=sys.stderr)
client.close()
The error:
error: 'str' object has no attribute 'read'
Can anyone please help in resolving this error? Any help would be highly appreciated.
You've already called read() here:
response = urllib2.urlopen('http://ip-api.com/json/'+str(i)+'.'+str(j)+'.'+str(k)+'.'+str(l)).read()
And then you are trying to call read() again:
html = response.read()
Also, you are then passing the result to json.load(), which accepts a file-like object, not a string. Either keep the read() and parse the resulting string with json.loads():
response = urllib2.urlopen('http://ip-api.com/json/'+str(i)+'.'+str(j)+'.'+str(k)+'.'+str(l)).read()
client.put(key, json.loads(response))
Or drop the read() and pass the response object straight to json.load():
response = urllib2.urlopen('http://ip-api.com/json/'+str(i)+'.'+str(j)+'.'+str(k)+'.'+str(l))
client.put(key, json.load(response))
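As a quick illustration of the load()/loads() distinction, here is a minimal sketch for Python 2 (since your code uses urllib2); both lines print the same dict:

import json
from StringIO import StringIO

s = '{"status": "success"}'
print(json.loads(s))           # loads() parses a JSON *string*
print(json.load(StringIO(s)))  # load() reads from a *file-like object*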
Related
I am trying to use pyipinfo, a Python tool that gets IP locations for a list of IPs in a .txt file. I use the following command:
$ cat iptest.txt | ipinfo >iptest_location1.csv
which fails with the error below:
Traceback (most recent call last):
  File "/usr/local/bin/ipinfo", line 11, in <module>
    sys.exit(main())
  File "/usr/local/lib/python2.7/dist-packages/ipinfo/__init__.py", line 45, in main
    text += u'{}\t'.format(info[k])
TypeError: 'NoneType' object has no attribute '__getitem__'
I have both Python 2.7 and Python 3 installed.
Code of the file /usr/local/lib/python2.7/dist-packages/ipinfo/__init__.py:
from __future__ import print_function
from __future__ import absolute_import

import sys
import requests

from ipinfo.utils import ip_list_from_string
from ipinfo.utils import is_piped


def request_to_ipinfo(ip):
    ''' return a json from the request '''
    full_url = 'http://ipinfo.io/{}'.format(ip)
    headers = {'User-Agent': 'curl/7.30.0'}
    req = requests.get(full_url, headers=headers)
    if req.status_code == 200:
        return req.json()


def main():
    if is_piped():
        input_string = sys.stdin.read()
        ip_list = ip_list_from_string(input_string)
    else:
        ip_list = ['']
    if ip_list == []:
        exit()
    ip_set = set(ip_list)
    ips_info = []
    for ip in ip_set:
        ips_info.append(request_to_ipinfo(ip))
    all_keys = [
        'ip', 'city', 'region', 'country',
        'hostname', 'org', 'postal', 'loc'
    ]
    for info in ips_info:
        text = ''
        for k in all_keys:
            try:
                text += u'{}\t'.format(info[k])
            except KeyError:
                text += '\t'
        print(text)


if __name__ == '__main__':
    main()
How can I get rid of this problem?
In your request_to_ipinfo function, you return req.json() if the req.status_code is 200:
if req.status_code == 200:
    return req.json()
What do you return if the req.status_code is not 200?
You don't have an explicit return statement, so the function returns None, and you append that to ips_info:
for ip in ip_set:
    ips_info.append(request_to_ipinfo(ip))
Then, at some point, you try to get info[k] from that None object (which is not a dict, but a NoneType), and of course you get TypeError: 'NoneType' object has no attribute '__getitem__'.
So that's your problem. The correct solution depends on what you want to do if you don't get a 200 response.
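For example, if it is acceptable to simply skip addresses that don't return a 200, a minimal sketch (my suggestion, not part of the original tool) would make the miss explicit and filter it out before printing:

def request_to_ipinfo(ip):
    ''' return the parsed JSON for the request, or None on failure '''
    full_url = 'http://ipinfo.io/{}'.format(ip)
    headers = {'User-Agent': 'curl/7.30.0'}
    req = requests.get(full_url, headers=headers)
    if req.status_code == 200:
        return req.json()
    return None  # explicit: non-200 responses yield no info

# ...then drop the misses when collecting results:
ips_info = [info for info in (request_to_ipinfo(ip) for ip in ip_set)
            if info is not None]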
The problem was with ipinfo.utils in the project, because those functions live in another file. So here is the full code, which I rewrote for Python 3.x:
from __future__ import print_function
from __future__ import absolute_import

import sys
import requests
import re
import os
import stat


def ip_list_from_string(string):
    return re.findall(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', string)


def is_piped():
    '''
    script is called from a pipe
    echo ip | ipinfo
    '''
    mode = os.fstat(0).st_mode
    return stat.S_ISFIFO(mode)


def request_to_ipinfo(ip):
    ''' return a json from the request '''
    full_url = 'http://ipinfo.io/{}'.format(ip)
    headers = {'User-Agent': 'curl/7.30.0'}
    req = requests.get(full_url, headers=headers)
    if req.status_code == 200:
        return req.json()


def main():
    if is_piped():
        input_string = sys.stdin.read()
        ip_list = ip_list_from_string(input_string)
    else:
        ip_list = ['']
    if ip_list == []:
        exit()
    ip_set = set(ip_list)
    ips_info = []
    for ip in ip_set:
        ips_info.append(request_to_ipinfo(ip))
    all_keys = [
        'ip', 'city', 'region', 'country',
        'hostname', 'org', 'postal', 'loc'
    ]
    for info in ips_info:
        text = ''
        for k in all_keys:
            try:
                text += u'{}\t'.format(info[k])
            except KeyError:
                text += '\t'
        print(text)


if __name__ == '__main__':
    main()
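With the utils functions inlined like this, the script can be piped the same way as before (assuming it is saved as, say, ipinfo_fixed.py; the name is just for illustration):

$ cat iptest.txt | python3 ipinfo_fixed.py > iptest_location1.csv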
I made a Worksheet class using the gspread Python library. Here is my code:
from oauth2client.service_account import ServiceAccountCredentials
import time
import gspread
import requests
import os

DIRNAME = os.path.dirname(__file__)


class Wksh:
    credentials_google_sheet = ServiceAccountCredentials.from_json_keyfile_name(
        os.path.join(DIRNAME, 'credentials/gs_credentials.json'),
        ['https://spreadsheets.google.com/feeds'])
    gc = gspread.authorize(credentials_google_sheet)
    worksheet_id = None
    sheet_name = None
    sheet = None

    def __init__(self, wksh_id, sh_name):
        while True:
            try:
                self.worksheet_id = wksh_id
                self.sheet_name = sh_name
                self.sheet = self.gc.open_by_key(wksh_id).worksheet(sh_name)
                break
            except (gspread.exceptions.HTTPError, gspread.exceptions.requests, gspread.exceptions.RequestError) as e:
                if e[0] == 401:
                    self.gc.login()
                elif e[0] == 500:
                    time.sleep(10)
                else:
                    print 'init'
                    print e
                    break
            except requests.exceptions.ConnectionError as e:
                time.sleep(10)
            except requests.exceptions.ChunkedEncodingError as e:
                time.sleep(20)

    def write(self, cell, value):
        while True:
            try:
                self.sheet.update_acell(cell, value)
                break
            except (gspread.exceptions.HTTPError, gspread.exceptions.requests, gspread.exceptions.RequestError) as e:
                if e[0] == 401:
                    self.gc.login()
                elif e[0] == 500:
                    time.sleep(10)
                else:
                    print 'write'
                    print e
                    break
            except requests.exceptions.ConnectionError as e:
                time.sleep(10)
            except requests.exceptions.ChunkedEncodingError as e:
                time.sleep(20)
This works fine but whenever there is a request error, even though I try to catch the error I get:
AttributeError: 'module' object has no attribute 'RequestError' or
AttributeError: 'module' object has no attribute 'HTTPError'
Why is this happening? Do I need to use self. before the exceptions?
Thanks
HTTPError was removed; see the gspread change history: https://github.com/burnash/gspread/blob/87583c562dd3951122411f7029963e53ef16a610/HISTORY.rst#062-2016-12-20
Here you can see all the available exceptions:
https://github.com/burnash/gspread/blob/master/gspread/exceptions.py
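For example, a retry loop against a newer gspread might look like this minimal sketch. I'm only assuming the base classes here: gspread.exceptions.GSpreadException (gspread's base exception) and requests.exceptions.RequestException (the base for requests' transport errors); check the linked exceptions.py for the exact classes your installed version provides:

import time
import gspread
import requests

def safe_write(sheet, cell, value, retries=5):
    # retry the update, sleeping on transport errors and reporting
    # gspread's own errors instead of referencing removed classes
    for _ in range(retries):
        try:
            sheet.update_acell(cell, value)
            return True
        except requests.exceptions.RequestException:
            time.sleep(10)  # network hiccup: wait and retry
        except gspread.exceptions.GSpreadException as e:
            print('write failed: {}'.format(e))
            return False
    return False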
I have a piece of code which was written for Python 3.5 and uses the urllib module. I tried to convert it so that it will work with Python 2.7, but I get some errors from urllib. E.g.:
Traceback (most recent call last):
  File "alert.py", line 13, in <module>
    import urllib.request as urllib
ImportError: No module named request
Now, I know that urllib was reorganized between Python 2 and Python 3, so I'm coming here to ask for some help with the lines that use it.
import urllib.request as urllib
from http.cookiejar import CookieJar
from os.path import isfile
from os.path import join as joinPath
from sys import exc_info
from traceback import print_tb
from urllib.parse import urlencode

# constant
APPLICATION_PATH = '/srv/path/'
ALERT_POINT_PATH = joinPath(APPLICATION_PATH, 'alert_contact')
URL_REQUEST_TIMEOUT = 42
SMS_BOX_URL = 'xx.xxxx.xxx.xxx'


def initWebConnection():  # init web connection
    response = 0
    initUrlLibResponse = initUrlLib()  # init urllib
    if initUrlLibResponse:
        response = 1
    return response


def initUrlLib():  # init urllib
    response = 0
    try:
        cookieJar = CookieJar()  # cookies
        opener = urllib.build_opener(urllib.HTTPCookieProcessor(cookieJar))
        urllib.install_opener(opener)
    except Exception as e:
        response = 1
        # ex_type, ex, tb = exc_info()
    return response


def urlRequest(url, data=None):  # make url request
    contentResponse = None
    try:
        request = None
        if data:
            dataRequest = urlencode(data)
            dataRequest = dataRequest.encode('UTF-8')
            request = urllib.Request(url, dataRequest)
        else:
            request = urllib.Request(url)
        response = urllib.urlopen(url=request, timeout=URL_REQUEST_TIMEOUT)  # make request
        # get response
        contentResponse = response.read()
    except Exception as e:
        contentResponse = None
        # ex_type, ex, tb = exc_info()
    return contentResponse


try:
    evt.data = 'Some name'
    # check production state
    isInProduction = False
    if evt.prodState == 1000:
        isInProduction = True
    if isInProduction:
        initWebConnection()
        # check alert point
        if isfile(ALERT_POINT_PATH):
            alertContactContent = None
            with open(ALERT_POINT_PATH, 'r') as alertContactFile:
                alertContactContent = alertContactFile.read()
            alertContactContent = alertContactContent.splitlines()
            if alertContactContent:
                evt.summary = '#[ DNS: ALERT ]# {}'.format(evt.summary)
                for alertContactContentLine in alertContactContent:
                    webRequestData = dict(
                        ## TO DO: set the url parameters appropriately
                        phone=alertContactContentLine,
                        message='NEW ALERT: {}'.format(evt.ipAddress),
                    )
                    webRequestResponse = urlRequest(SMS_BOX_URL, webRequestData)
            else:
                evt.summary = '#[ ERROR: SMS ALERT NO CONTACT ]# {}'.format(evt.summary)
except Exception as e:
    ex_type, ex, tb = exc_info()
    print('\n #[ERROR]#exception: {ex}\n'.format(ex=e))
    print('\n #[ERROR]#exception traceback: {trace}\n'.format(trace=print_tb(tb)))
    evt.summary = '#[ DNS:ERROR traceback in event message ]# {}'.format(evt.summary)
    evt.message = '#[ DNS:ERROR ex_type:\n {} \nex: {} \n traceback:\n {} \n]# {}'.format(ex_type, ex,
                                                                                          print_tb(tb),
                                                                                          evt.message)
You can change the import lines from
import urllib.request as urllib
from http.cookiejar import CookieJar
from urllib.parse import urlencode
to
import urllib2 as urllib
from cookielib import CookieJar
from urllib import urlencode
for Python 2.7
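If the same file has to run under both versions, a try/except around the imports is a common pattern. A minimal sketch: the Python 3 names are tried first, falling back to the Python 2 equivalents on ImportError:

try:
    # Python 3
    import urllib.request as urllib
    from http.cookiejar import CookieJar
    from urllib.parse import urlencode
except ImportError:
    # Python 2
    import urllib2 as urllib
    from cookielib import CookieJar
    from urllib import urlencode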
I'm trying to retrieve the response from ip-api.com for most IP ranges and store that data in Aerospike, but I'm having some errors.
Here is the Python script:
# import the module
from __future__ import print_function
import aerospike
import urllib2

config = {
    'hosts': [ ('127.0.0.1', 3000) ]
}

try:
    client = aerospike.client(config).connect()
except:
    import sys
    print("failed to connect to the cluster with", config['hosts'])
    sys.exit(1)

key = ('ip', 'hit', 'trial')
try:
    for i in range(0,255):
        for j in range(0,255):
            for k in range(0,255):
                for l in range(0,255):
                    if not((i == 198 and j == 168) or (i == 172 and j > 15 and j < 32) or (i == 10)):
                        response = urllib2.urlopen('http://ip-api.com/json/'+str(i)+'.'+str(j)+'.'+str(k)+'.'+str(l))
                        html = response.read()
                        client.put(key, html)
except Exception as e:
    import sys
    print("error: {0}".format(e), file=sys.stderr)
client.close()
I'm new to Python as well as Aerospike, in fact any NoSQL database. Any help would be appreciated.
All the code is right from the Aerospike perspective, except that you would want to change
html = response.read()
client.put(key, html)
to
import json
client.put(key, json.load(response))
The response body is a JSON string, which needs to be parsed into a Python dict before it can be written as the record's bins.
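Putting it together, the body of the innermost loop would look something like this (a minimal sketch, reusing the key, client, and loop variables from the question):

import json

response = urllib2.urlopen('http://ip-api.com/json/{}.{}.{}.{}'.format(i, j, k, l))
record = json.load(response)  # the response is file-like, so json.load() works directly
client.put(key, record)       # Aerospike bins are written as a dict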
So I have this code that is creating an output in Excel.
What I want to do now is make the lid parameter in payload loop through a list of other IDs. This list is stored in a .txt file. Can anyone modify my code to show me how to do that, please?
The text file has these values:
1654,
3457,
4327,
1234
(I can also hard-code these somewhere in the script if it is easier.)
from __future__ import print_function
import sys
import csv
import collections
import itertools

try:
    import requests
    from requests import exceptions
    import base64
    import json
except ImportError as e:
    import requests
    from requests import exceptions
    import base64
    import json
    print("Import Error: %s" % e)

API_TOKEN = u''
b64token = base64.b64encode(bytes(API_TOKEN))
REST_BASE_URL = u'https://visdasa.dsds.com/rest/'

# API URL request examples (choose one)
REST_URL = u'rawdata/'
FULL_URL = REST_BASE_URL + REST_URL


def retrieve_data(api_url):
    try:
        # connect to the API and retrieve data
        bauth_header = {'Authorization': 'Basic ' + b64token.decode('UTF-8')}
        payload = {'start': '2014-08-01T00:00:01', 'stop': '2014- 8-01T23:59:59', 'category': 'ots', 'lid': '9263'}
        response = requests.get(api_url, headers=bauth_header, params=payload)
        # check the api response
        if response.status_code == requests.codes.ok:
            # Convert from json data
            json_data = json.loads(response.text)
            Header_String = "ID", "Site Name", "Network ID", "Network Lablel", "Company Branch ID", "Comapany Label", "Count", "timestamp", "ots_duration", "notsure1", "notsure2"
            for location_row in json_data["data"]["locations"]:
                Location_string = (location_row["id"], location_row["label"], location_row["site"]["network"]["id"], location_row["site"]["network"]["label"],
                                   location_row["site"]["id"], location_row["site"]["label"])
            try:
                with open('C:\\Users\\teddy\\Desktop\\party\\test.csv', 'w') as wFile:
                    writer = csv.writer(wFile, delimiter=',')
                    writer.write(Header_string)
                    for row in json_data["data"]["raw_data"]:
                        writer.writerow(row)
            except IOError as e:
                logger.error("I/O error({0}): {1}".format(e.errno, e.strerror))
                print("I/O error({0}): {1}".format(e.errno, e.strerror))
        else:
            json_data = json.loads(response.text)
            # If not successful api call the throw an error
            raise requests.RequestException("Error with the api. Status code : %i \n Json response: %s"
                                            % (response.status_code, json_data))
    except (requests.exceptions.ProxyError, requests.RequestException) as e:
        print(e)


def main():
    # retrieve_data(FULL_URL, PROXY_SETTINGS)
    retrieve_data(FULL_URL)
    sys.exit()


if __name__ == '__main__':
    main()
Why not just pass each lid value as a parameter to your retrieve_data function?
def retrieve_data(api_url):
would become
def retrieve_data(api_url, lid_value):
You would remove the hardcoded lid section of your payload so the payload would look like this
payload = {'start': '2014-08-01T00:00:01', 'stop': '2014- 8-01T23:59:59','category': 'ots'}
Then on the next line you can add
payload['lid'] = lid_value
In your main function you could then loop through the values from the text file. Here is a simple loop with a hardcoded list:
def main():
    lid_values = ['1654', '3457', '4327', '1234']
    for lid in lid_values:
        retrieve_data(FULL_URL, lid)
    sys.exit()
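And since the IDs actually live in a .txt file, here is one way to read them instead of hardcoding the list. A minimal sketch: I'm assuming the file is called lids.txt (a name chosen for illustration) and holds the comma-separated values shown in the question:

def main():
    # read the IDs from the text file, dropping whitespace and empty entries
    with open('lids.txt') as f:
        lid_values = [v.strip() for v in f.read().split(',') if v.strip()]
    for lid in lid_values:
        retrieve_data(FULL_URL, lid)
    sys.exit()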