Problems with urllib2 when requesting the Twitter API - Python

I'm using the Twitter API wrapper to make some calls, but I get this traceback:
Traceback (most recent call last):
File "tweets.py", line 100, in <module>
print stream_country('marketing','fr') + stream_worldwide('marketing') + stream_country('marketing','it') + stream_country('marketing','en')
File "tweets.py", line 41, in stream_country
search = t.search.tweets(q = keyword, lang = 'fr', count = 100, since_id = max_ID)
File "/usr/local/lib/python2.7/dist-packages/twitter/api.py", line 204, in __call__
return self._handle_response(req, uri, arg_data, _timeout)
File "/usr/local/lib/python2.7/dist-packages/twitter/api.py", line 211, in _handle_response
handle = urllib_request.urlopen(req, **kwargs)
File "/usr/lib/python2.7/urllib2.py", line 126, in urlopen
return _opener.open(url, data, timeout)
File "/usr/lib/python2.7/urllib2.py", line 400, in open
response = self._open(req, data)
File "/usr/lib/python2.7/urllib2.py", line 418, in _open
'_open', req)
File "/usr/lib/python2.7/urllib2.py", line 378, in _call_chain
result = func(*args)
File "/usr/lib/python2.7/urllib2.py", line 1215, in https_open
return self.do_open(httplib.HTTPSConnection, req)
File "/usr/lib/python2.7/urllib2.py", line 1177, in do_open
raise URLError(err)
urllib2.URLError: <urlopen error [Errno -3] Temporary failure in name resolution>
Code:
from twitter import *

t = Twitter(
    auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET,
               CONSUMER_KEY, CONSUMER_SECRET)
)

since_ID = -1
max_ID = 0
tcount = 0
while(since_ID != max_ID):
    search = t.search.tweets(q = keyword, lang = 'fr', count = 100, since_id = max_ID)
    #print len(search['statuses'])
    if len(search['statuses']) == 0:
        #print 'end'
        break
    since_ID = search['search_metadata']['since_id_str']
    max_ID = search['search_metadata']['max_id_str']
    for doc in search['statuses']:
        #print doc['text']
        tcount += 1
return tcount
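Note that the URLError here is a DNS failure ("Temporary failure in name resolution"), so the machine running the script could not resolve api.twitter.com at that moment; the code itself is not at fault. If the outages are intermittent, one option is to wrap the call in a retry. A minimal sketch, assuming the URLError propagates out of the wrapper exactly as in the traceback (the attempt count and wait time are arbitrary):

import time
from urllib2 import URLError  # the error type shown in the traceback

def search_with_retry(t, keyword, max_id, attempts=3, wait=10):
    # Retry transient DNS/network failures before giving up.
    for attempt in range(1, attempts + 1):
        try:
            return t.search.tweets(q=keyword, lang='fr', count=100,
                                   since_id=max_id)
        except URLError as err:
            if attempt == attempts:
                raise
            print "Network/DNS error (%s), retrying in %ds..." % (err, wait)
            time.sleep(wait)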

Related

Docker python error. urllib.error.URLError: <urlopen error [Errno 99] Cannot assign requested address>

I was trying to run a script in a Docker container and got this:
Traceback (most recent call last):
File "experiments/caffemodel2pytorch.py", line 387, in <module>
net_param = initialize(args.caffe_proto).NetParameter()
File "experiments/caffemodel2pytorch.py", line 35, in initialize
mybytes = urlopen(caffe_proto).read()
File "/usr/lib/python3.6/urllib/request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "/usr/lib/python3.6/urllib/request.py", line 526, in open
response = self._open(req, data)
File "/usr/lib/python3.6/urllib/request.py", line 544, in _open
'_open', req)
File "/usr/lib/python3.6/urllib/request.py", line 504, in _call_chain
result = func(*args)
File "/usr/lib/python3.6/urllib/request.py", line 1361, in https_open
context=self._context, check_hostname=self._check_hostname)
File "/usr/lib/python3.6/urllib/request.py", line 1320, in do_open
raise URLError(err)
urllib.error.URLError: <urlopen error [Errno 99] Cannot assign requested address>
Part of the code is here.
def initialize(caffe_proto = 'https://raw.githubusercontent.com/BVLC/caffe/master/src/caffe/proto/caffe.proto', codegen_dir = tempfile.mkdtemp(), shadow_caffe = True):
    global caffe_pb2
    if caffe_pb2 is None:
        local_caffe_proto = os.path.join(codegen_dir, os.path.basename(caffe_proto))
        with open(local_caffe_proto, 'w') as f:
            mybytes = urlopen(caffe_proto).read()
            mystr = mybytes.decode('ascii', 'ignore')
            f.write(mystr)
            #f.write((urlopen if 'http' in caffe_proto else open)(caffe_proto).read())
        subprocess.check_call(['protoc', '--proto_path', os.path.dirname(local_caffe_proto), '--python_out', codegen_dir, local_caffe_proto])
        sys.path.insert(0, codegen_dir)
        old_pool = google.protobuf.descriptor._message.default_pool
        old_symdb = google.protobuf.symbol_database._DEFAULT
        google.protobuf.descriptor._message.default_pool = google.protobuf.descriptor_pool.DescriptorPool()
        google.protobuf.symbol_database._DEFAULT = google.protobuf.symbol_database.SymbolDatabase(pool = google.protobuf.descriptor._message.default_pool)
        import caffe_pb2 as caffe_pb2
        google.protobuf.descriptor._message.default_pool = old_pool
        google.protobuf.symbol_database._DEFAULT = old_symdb
        sys.modules[__name__ + '.proto'] = sys.modules[__name__]
        if shadow_caffe:
            sys.modules['caffe'] = sys.modules[__name__]
            sys.modules['caffe.proto'] = sys.modules[__name__]
    return caffe_pb2
I think it has something to do with the urlopen call, but I don't know how to fix this in a Docker container. Any help would be appreciated.
BTW, I start the container like this:
sudo nvidia-docker run -itv /home/ljh/mobilepose:/home/mobilepose -p 7777:8888 ufoym/deepo:all-py36-jupyter /bin/bash
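Errno 99 ("Cannot assign requested address") from urlopen usually means the container's networking cannot reach the remote host at all (DNS, proxy, or network-namespace issues), not that the Python code is wrong. Two common workarounds are starting the container with --network=host, or downloading caffe.proto on the host and reading it from the mounted volume. A minimal sketch of the local-file fallback, with a hypothetical path inside the mounted volume:

from urllib.request import urlopen

def read_caffe_proto(caffe_proto):
    # Use urlopen only for URLs; fall back to a plain file read for local paths.
    if caffe_proto.startswith(('http://', 'https://')):
        return urlopen(caffe_proto).read().decode('ascii', 'ignore')
    with open(caffe_proto, 'r') as f:
        return f.read()

# e.g. after copying the file into the volume mounted at /home/mobilepose:
# mystr = read_caffe_proto('/home/mobilepose/caffe.proto')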

OSError, TypeError, and a few other errors that have me stumped

So basically this all stems from a previous question I had, so I'll post that question and my edit in their entirety below:
So I have a script I've been working with for a few days trying to get a list of emails from a csv I have, but now I've run into this roadblock. Here is the code:
import sys
try:
    import urllib.request as urllib2
except ImportError:
    import urllib2
import re
import csv

list1 = []
list2 = []
list3 = []

def addList():
    with open('file.csv', 'rt') as f:
        reader = csv.reader(f)
        for row in reader:
            for s in row:
                list2.append(s)

def getAddress(url):
    http = "http://"
    https = "https://"
    if http in url:
        return url
    elif https in url:
        return url
    else:
        url = "http://" + url
        return url

def parseAddress(url):
    global list3
    try:
        website = urllib2.urlopen(getAddress(url))
        html = website.read()
        addys = re.findall('''[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*#(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?''', html, flags=re.IGNORECASE)
        global list1
        list1.append(addys)
    except urllib2.HTTPError as err:
        print ("Cannot retrieve URL: HTTP Error Code: "), err.code
        list3.append(url)
    except urllib2.URLError as err:
        print ("Cannot retrive URL: ") + err.reason[1]
        list3.append(url)

def execute():
    global list2
    addList()
    totalNum = len(list2)
    atNum = 1
    for s in list2:
        parseAddress(s)
        print ("Processing ") + str(atNum) + (" out of ") + str(totalNum)
        atNum = atNum + 1
    print ("Completed. Emails parsed: ") + str(len(list1)) + "."

### MAIN
def main():
    global list2
    execute()
    global list1
    myFile = open("finishedFile.csv", "w+")
    wr = csv.writer(myFile, quoting=csv.QUOTE_ALL)
    for s in list1:
        wr.writerow(s)
    myFile.close
    global list3
    failFile = open("failedSites.csv", "w+")
    write = csv.writer(failFile, quoting=csv.QUOTE_ALL)
    for j in list3:
        write.writerow(j)
    failFile.close

main()
and when I run it I get this error:
Traceback (most recent call last):
File "pagescanner.py", line 85, in <module>
main()
File "pagescanner.py", line 71, in main
execute()
File "pagescanner.py", line 60, in execute
parseAddress(s)
File "pagescanner.py", line 42, in parseAddress
addys = re.findall('''[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*#(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?''', html, flags=re.IGNORECASE)
File "/usr/lib/python3.5/re.py", line 213, in findall
return _compile(pattern, flags).findall(string)
TypeError: cannot use a string pattern on a bytes-like object
So I've figured out that I need to decode the HTML bytes into a string before running the regex, and Tyler's answer below helped me do so, but now I'm getting this error:
Traceback (most recent call last):
File "/usr/lib/python3.5/urllib/request.py", line 1254, in do_open
h.request(req.get_method(), req.selector, req.data, headers)
File "/usr/lib/python3.5/http/client.py", line 1107, in request
self._send_request(method, url, body, headers)
File "/usr/lib/python3.5/http/client.py", line 1152, in _send_request
self.endheaders(body)
File "/usr/lib/python3.5/http/client.py", line 1103, in endheaders
self._send_output(message_body)
File "/usr/lib/python3.5/http/client.py", line 934, in _send_output
self.send(msg)
File "/usr/lib/python3.5/http/client.py", line 877, in send
self.connect()
File "/usr/lib/python3.5/http/client.py", line 849, in connect
(self.host,self.port), self.timeout, self.source_address)
File "/usr/lib/python3.5/socket.py", line 712, in create_connection
raise err
File "/usr/lib/python3.5/socket.py", line 703, in create_connection
sock.connect(sa)
OSError: [Errno 22] Invalid argument
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "pagescanner.py", line 39, in parseAddress
website = urllib2.urlopen(getAddress(url))
File "/usr/lib/python3.5/urllib/request.py", line 163, in urlopen
return opener.open(url, data, timeout)
File "/usr/lib/python3.5/urllib/request.py", line 466, in open
response = self._open(req, data)
File "/usr/lib/python3.5/urllib/request.py", line 484, in _open
'_open', req)
File "/usr/lib/python3.5/urllib/request.py", line 444, in _call_chain
result = func(*args)
File "/usr/lib/python3.5/urllib/request.py", line 1282, in http_open
return self.do_open(http.client.HTTPConnection, req)
File "/usr/lib/python3.5/urllib/request.py", line 1256, in do_open
raise URLError(err)
urllib.error.URLError: <urlopen error [Errno 22] Invalid argument>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "pagescanner.py", line 85, in <module>
main()
File "pagescanner.py", line 71, in main
execute()
File "pagescanner.py", line 60, in execute
parseAddress(s)
File "pagescanner.py", line 51, in parseAddress
print ("Cannot retrive URL: ") + err.reason[1]
TypeError: 'OSError' object is not subscriptable
Does this mean that one of the URLs from the list isn't a valid URL? I thought I had finally removed all of the bad URLs from my CSV file, but I may need to take another look.
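Two separate things are going on here. The [Errno 22] Invalid argument often points to a malformed entry from the CSV (for example a value with a stray space or an empty string), and the final TypeError comes from the except handler itself: in Python 3, err.reason is usually an OSError rather than a tuple, so err.reason[1] blows up and hides the real problem. A minimal sketch of the combined fix, decoding the response bytes before the regex and printing err.reason without indexing it (the simplified email pattern below is illustrative only, not the original one):

import re
import urllib.request
import urllib.error

# Simplified pattern for illustration; the real script uses a stricter regex.
EMAIL_RE = re.compile(r"[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,}", re.IGNORECASE)

def parse_address(url):
    try:
        resp = urllib.request.urlopen(url, timeout=10)
        html = resp.read().decode('utf-8', errors='ignore')  # bytes -> str
        return EMAIL_RE.findall(html)
    except urllib.error.HTTPError as err:
        print("Cannot retrieve URL: HTTP Error Code:", err.code)
    except urllib.error.URLError as err:
        print("Cannot retrieve URL:", err.reason)  # don't subscript err.reason
    return []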

Urllib error when using BioPython

I am currently working on a project for which I need to download a few thousand citations from PubMed. I am using BioPython and have written this code:
from Bio import Entrez
from Bio import Medline
from pandas import *
from sys import argv
import os

Entrez.email = "email"
df = read_csv("/Users/.../Desktop/sr_dataset/adhd/excluded/adhdExcluded.csv")

i = 0
withoutMesh = 0
withoutMeshID = ""
withoutAbstract = 0
withoutAbstractID = ""
path = '/Users/.../Desktop/sr_dataset/adhd/excluded'

for index, row in df.iterrows():
    print (row.id)
    handle = Entrez.efetch(db="pubmed", rettype="medline", retmode="text", id=str(row.id))
    records = Medline.parse(handle)
    for record in records:
        try:
            abstract = str(record["AB"])
        except:
            abstract = "none"
            withoutAbstract = withoutAbstract + 1
            withoutAbstractID = withoutAbstractID + str(row.id) + "\n"
        try:
            title = str(record["TI"])
        except:
            title = "none"
        try:
            mesh = str(record["MH"])
        except:
            mesh = "none"
            withoutMesh = withoutMesh + 1
            withoutMeshID = withoutMeshID + str(row.id) + "\n"
        filename = str(row.id) + '.txt'
        filename = os.path.join(path, filename)
        file = open(filename, "w")
        output = "title: " + str(title) + "\n\n" + "abstract: " + str(abstract) + "\n\n" + "mesh: " + str(mesh) + "\n\n"
        file.write(output)
        file.close()
    print (i)
    i = i + 1

filename = os.path.join(path, "overview.txt")
file = open(filename, "w")
output = "Without MeSH terms:" + str(withoutMesh) + "\n" + "ID's: " + str(withoutMeshID) + "\n\n" + "Without abstract: " + str(withoutAbstract) + "\n" + "ID's: " + str(withoutAbstractID)
file.write(output)
file.close()
The code works for the first few hundred rows in the table but then stops executing and the error I receive is:
Traceback (most recent call last):
File "/Users/.../anaconda/lib/python3.5/urllib/request.py", line 1254, in do_open
h.request(req.get_method(), req.selector, req.data, headers)
File "/Users/.../anaconda/lib/python3.5/http/client.py", line 1106, in request
self._send_request(method, url, body, headers)
File "/Users/.../anaconda/lib/python3.5/http/client.py", line 1151, in _send_request
self.endheaders(body)
File "/Users/.../anaconda/lib/python3.5/http/client.py", line 1102, in endheaders
self._send_output(message_body)
File "/Users/.../anaconda/lib/python3.5/http/client.py", line 934, in _send_output
self.send(msg)
File "/Users/.../anaconda/lib/python3.5/http/client.py", line 877, in send
self.connect()
File "/Users/.../anaconda/lib/python3.5/http/client.py", line 1260, in connect
server_hostname=server_hostname)
File "/Users/.../anaconda/lib/python3.5/ssl.py", line 377, in wrap_socket
_context=self)
File "/Users/.../anaconda/lib/python3.5/ssl.py", line 752, in __init__
self.do_handshake()
File "/Users/.../anaconda/lib/python3.5/ssl.py", line 988, in do_handshake
self._sslobj.do_handshake()
File "/Users/.../anaconda/lib/python3.5/ssl.py", line 633, in do_handshake
self._sslobj.do_handshake()
ConnectionResetError: [Errno 54] Connection reset by peer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/.../Desktop/sr_dataset/ace_inhibitor/excluded/pumbedMedline.py", line 18, in <module>
handle = Entrez.efetch(db="pubmed",rettype="medline",retmode="text", id=str(row.id))
File "/Users/.../anaconda/lib/python3.5/site-packages/biopython-1.68-py3.5-macosx-10.6-x86_64.egg/Bio/Entrez/__init__.py", line 180, in efetch
return _open(cgi, variables, post=post)
File "/Users/.../anaconda/lib/python3.5/site-packages/biopython-1.68-py3.5-macosx-10.6-x86_64.egg/Bio/Entrez/__init__.py", line 524, in _open
handle = _urlopen(cgi)
File "/Users/.../anaconda/lib/python3.5/urllib/request.py", line 163, in urlopen
return opener.open(url, data, timeout)
File "/Users/.../anaconda/lib/python3.5/urllib/request.py", line 466, in open
response = self._open(req, data)
File "/Users/.../anaconda/lib/python3.5/urllib/request.py", line 484, in _open
'_open', req)
File "/Users/.../anaconda/lib/python3.5/urllib/request.py", line 444, in _call_chain
result = func(*args)
File "/Users/.../anaconda/lib/python3.5/urllib/request.py", line 1297, in https_open
context=self._context, check_hostname=self._check_hostname)
File "/Users/.../anaconda/lib/python3.5/urllib/request.py", line 1256, in do_open
raise URLError(err)
urllib.error.URLError: <urlopen error [Errno 54] Connection reset by peer>
Here are the first few rows of the CSV file:
id
10029645
10073846
10078088
10080457
10088066
...
Biopython does follow the "up to three queries per second" rule to avoid abusing the NCBI servers, but you may have missed the first bullet point in our tutorial (http://biopython.org/DIST/docs/tutorial/Tutorial.html) on the guidelines:
"For any series of more than 100 requests, do this at weekends or
outside USA peak times. This is up to you to obey."
That said, sometimes you will get intermittent errors from Entrez, and using a try/except block to handle this with a retry is suggested. There is an example in the tutorial.
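A minimal retry sketch along those lines (not the tutorial's exact example; the attempt count and wait time are arbitrary):

import time
from urllib.error import HTTPError, URLError
from Bio import Entrez

def efetch_with_retry(pmid, attempts=3, wait=15):
    # Retry transient network/Entrez failures before giving up.
    for attempt in range(1, attempts + 1):
        try:
            return Entrez.efetch(db="pubmed", rettype="medline",
                                 retmode="text", id=str(pmid))
        except (HTTPError, URLError, ConnectionResetError) as err:
            if attempt == attempts:
                raise
            print("Attempt %d failed (%s); retrying in %ds" % (attempt, err, wait))
            time.sleep(wait)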

"certificate verify failed (_ssl.c:645)" for one particular domain

Every request to this one particular domain now ends in a "certificate verify failed (_ssl.c:645)" error.
I am not sure what caused this. I've been searching for an answer since last night trying to figure out how to fix it, but somehow I can't get it running.
I tried pip uninstall -y certifi && pip install certifi==2015.04.28 but it did not help.
Here is my code:
def trade_spider(max_pages):
    page = -1
    partner_ID = 2
    location_ID = 25
    already_printed = set()
    for page in range(0, 20):
        response = urllib.request.urlopen("http://www.getyourguide.de/s/search.json?q=" + str(Region) + "&page=" + str(page))
        jsondata = json.loads(response.read().decode("utf-8"))
        format = (jsondata['activities'])
        g_data = format.strip("'<>()[]\"` ").replace('\'', '\"')
        soup = BeautifulSoup(g_data)
        hallo = soup.find_all("article", {"class": "activity-card activity-card-horizontal "})
        for item in hallo:
            headers = item.find_all("h3", {"class": "activity-card-title"})
            for header in headers:
                header_final = header.text.strip()
                if header_final not in already_printed:
                    already_printed.add(header_final)
            prices = item.find_all("span", {"class": "price"})
            for price in prices:
                #itemStr += ("\t" + price.text.strip().replace(",","")[2:])
                price_final = price.text.strip().replace(",","")[2:]
                #if itemStr2 not in already_printed:
                    #print(itemStr2)
                    #already_printed.add(itemStr2)
            deeplinks = item.find_all("a", {"class": "activity-card-link"})
            for t in set(t.get("href") for t in deeplinks):
                #itemStr += "\t" + t
                deeplink_final = t
                if deeplink_final not in already_printed:
                    #print(itemStr3)
                    already_printed.add(deeplink_final)
                    Language = "Deutsch"
                    end_final = "Header: " + header_final + " | " + "Price: " + str(price_final) + " | " + "Deeplink: " + deeplink_final + " | " + "PartnerID: " + str(partner_ID) + " | " + "LocationID: " + str(location_ID) + " | " + "Language: " + Language
                    if end_final not in already_printed:
                        print(end_final)
                        already_printed.add(end_final)

trade_spider(int(Spider))
This is the output:
Traceback (most recent call last):
File "C:\Python34\lib\urllib\request.py", line 1240, in do_open
h.request(req.get_method(), req.selector, req.data, headers)
File "C:\Python34\lib\http\client.py", line 1083, in request
self._send_request(method, url, body, headers)
File "C:\Python34\lib\http\client.py", line 1128, in _send_request
self.endheaders(body)
File "C:\Python34\lib\http\client.py", line 1079, in endheaders
self._send_output(message_body)
File "C:\Python34\lib\http\client.py", line 911, in _send_output
self.send(msg)
File "C:\Python34\lib\http\client.py", line 854, in send
self.connect()
File "C:\Python34\lib\http\client.py", line 1237, in connect
server_hostname=server_hostname)
File "C:\Python34\lib\ssl.py", line 376, in wrap_socket
_context=self)
File "C:\Python34\lib\ssl.py", line 747, in __init__
self.do_handshake()
File "C:\Python34\lib\ssl.py", line 983, in do_handshake
self._sslobj.do_handshake()
File "C:\Python34\lib\ssl.py", line 628, in do_handshake
self._sslobj.do_handshake()
ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:645)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/Rj/Desktop/ crawling scripts/GetyourGuide_International_Final.py", line 84, in <module>
trade_spider(int(Spider))
File "C:/Users/Raju/Desktop/scripts/GetyourGuide_International_Final.py", line 36, in trade_spider
response = urllib.request.urlopen("http://www.getyourguide.com/s/search.json?q=" + str(Region) +"&page=" + str(page))
File "C:\Python34\lib\urllib\request.py", line 162, in urlopen
return opener.open(url, data, timeout)
File "C:\Python34\lib\urllib\request.py", line 471, in open
response = meth(req, response)
File "C:\Python34\lib\urllib\request.py", line 581, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python34\lib\urllib\request.py", line 503, in error
result = self._call_chain(*args)
File "C:\Python34\lib\urllib\request.py", line 443, in _call_chain
result = func(*args)
File "C:\Python34\lib\urllib\request.py", line 686, in http_error_302
return self.parent.open(new, timeout=req.timeout)
File "C:\Python34\lib\urllib\request.py", line 465, in open
response = self._open(req, data)
File "C:\Python34\lib\urllib\request.py", line 483, in _open
'_open', req)
File "C:\Python34\lib\urllib\request.py", line 443, in _call_chain
result = func(*args)
File "C:\Python34\lib\urllib\request.py", line 1283, in https_open
context=self._context, check_hostname=self._check_hostname)
File "C:\Python34\lib\urllib\request.py", line 1242, in do_open
raise URLError(err)
urllib.error.URLError:
Can someone help me out? Any feedback is appreciated :)
I would investigate further by checking if openssl can verify the certificate:
openssl s_client -showcerts -connect www.getyourguide.de:443
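If openssl verifies the chain but Python still fails, a common culprit is an outdated CA bundle on the Python side. A hedged workaround sketch, assuming certifi is installed and up to date and that your Python is 3.4.3 or newer (where urlopen accepts a context argument): build an explicit SSL context from certifi's bundle and pass it to urlopen.

import ssl
import urllib.request
import certifi  # assumes the certifi package is installed

# Verify against certifi's CA bundle instead of the system default.
context = ssl.create_default_context(cafile=certifi.where())
response = urllib.request.urlopen("https://www.getyourguide.de/", context=context)
print(response.status)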

Problems with uploading content to website

I am a newly self-taught programmer. I was hoping to upload some numbers to my website with Python, but somehow I failed. Could you help me figure out what is wrong?
Here is my original python code.
#!/usr/bin/python
import time
import urllib.request
import random
import datetime
from urllib.request import Request, urlopen

basic_web = 'http://ihome.ust.hk/~xxxxx/cgi-bin/datafile.php?'
message = ""

while(True):
    local_time = time.time()
    web_x = basic_web

    file1 = open("datalist1.txt", "r")
    queue1 = file1.read()
    file1.close()
    web_x += "&queue1=" + queue1

    file2 = open("datalist2.txt", "r")
    queue2 = file2.read()
    file2.close()
    web_x += "&queue2=" + queue2

    web_x += "&local_time=" + str(local_time)
    print (web_x)

    #req = Request (web_x)
    #html = urlopen(req).read()
    response = urllib.request.urlopen(web_x, timeout = 1)
    html = response.read()
    print(html)
    time.sleep(0.1)
    print ("hehe")
And here is the output error that I got:
Traceback (most recent call last):
File "C:\web bus stop\local\datauploader.py", line 25, in <module>
response = urllib.request.urlopen(web_x)
File "C:\Users\ad\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 162, in urlopen
return opener.open(url, data, timeout)
File "C:\Users\ad\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 465, in open
response = self._open(req, data)
File "C:\Users\ad\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 483, in _open
'_open', req)
File "C:\Users\ad\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 443, in _call_chain
result = func(*args)
File "C:\Users\ad\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 1268, in http_open
return self.do_open(http.client.HTTPConnection, req)
File "C:\Users\ad\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 1243, in do_open
r = h.getresponse()
File "C:\Users\ad\AppData\Local\Programs\Python\Python35-32\lib\http\client.py", line 1174, in getresponse
response.begin()
File "C:\Users\ad\AppData\Local\Programs\Python\Python35-32\lib\http\client.py", line 282, in begin
version, status, reason = self._read_status()
File "C:\Users\ad\AppData\Local\Programs\Python\Python35-32\lib\http\client.py", line 264, in _read_status
raise BadStatusLine(line)
http.client.BadStatusLine: connected! queue1queue2 finish sir!
I would really appreciate it if you guys could help me figure out what the bug is.
Never mind.
I ran it on a different computer and it works now.
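For anyone hitting the same BadStatusLine: the text after the colon ("connected! queue1queue2 finish sir!") is the PHP script's plain-text output being read where an HTTP status line should be, so the server side is most likely not returning proper HTTP headers. On the client side, the raw file contents are also pasted into the query string unencoded. A hedged sketch that at least percent-encodes the parameters (the PHP side may still need fixing):

import time
import urllib.parse
import urllib.request

basic_web = 'http://ihome.ust.hk/~xxxxx/cgi-bin/datafile.php?'

with open("datalist1.txt") as f1, open("datalist2.txt") as f2:
    # urlencode escapes spaces, newlines, etc. in the file contents.
    params = urllib.parse.urlencode({
        "queue1": f1.read(),
        "queue2": f2.read(),
        "local_time": time.time(),
    })

response = urllib.request.urlopen(basic_web + params, timeout=5)
print(response.read())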
