grequests module: how can I simply print each URL and Response from a GET? - python

Can anyone please explain how I can split the results to get just a plain URL and response? I have tried many times with no luck; for now all I can print is:
50
0.4110674999999999
........, [<Response [200]>], [<Response [200]>], [<Response [200]>]]
[......, ['http://example.com.com/catalogue/page-48.html'], ['http://example.com.com/catalogue/page-49.html'], ['http://example.com.com/catalogue/page-50.html']]
I need it like this:
<Response [200]>
https://example.com/
Thanks so much.
P.S. Also, why do I get this message in the console after installing the grequests module?
C:\P3\lib\site-packages\grequests.py:22: MonkeyPatchWarning: Monkey-patching ssl after ssl has already been imported may lead to errors, including RecursionError on Python 3.6. It may also silently lead to incorrect behaviour on Python 3.7. Please monkey-patch earlier. See https://github.com/gevent/gevent/issues/1016. Modules that had direct imports (NOT patched): ['urllib3.util.ssl_ (C:\\P3\\lib\\site-packages\\urllib3\\util\\ssl_.py)', 'urllib3.util (C:\\P3\\lib\\site-packages\\urllib3\\util\\__init__.py)'].
curious_george.patch_all(thread=False, select=False)
How can I fix it? Completely reinstall Python, install some patch, or what?
Thanks!
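(Not an answer from this thread, just a common workaround: gevent has to monkey-patch ssl before anything else imports it, so make import grequests the very first import of the whole program, ahead of requests, urllib3 or anything else that pulls in ssl. A minimal sketch:)
import grequests  # imported first so gevent can monkey-patch ssl early
import requests   # anything that uses ssl comes after
If the warning persists, something outside the script (an IDE or launcher, for example) may be importing urllib3 before your code runs.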
import grequests
from bs4 import BeautifulSoup
import time

def get_urls():
    urls = []
    for x in range(1, 51):
        urls.append(f'http://books.toscrape.com/catalogue/page-{x}.html')
    return urls

def get_data(urls):
    reqs = [grequests.get(link) for link in urls]
    resp = grequests.map(reqs)
    return resp
if __name__ == '__main__':
    start = time.perf_counter()
    urls = get_urls()
    url = len(get_urls())
    resp = get_data(urls)
    respo = len(get_data(urls))
    fin = time.perf_counter() - start
    resp_list = resp
    chunked_resp = list()
    chunk_size = respo
    urls_list = urls
    chunked_url = list()
    chunk_size = url
    print(urls)
    print(url)
    print(resp)
    print(respo)
    print(fin)
    resp_list = resp
    chunked_resp = list()
    chunk_size = 1
    for i in range(0, len(resp_list), chunk_size):
        chunked_resp.append(resp_list[i:i+chunk_size])
    print(chunked_resp)
    urls_list = urls
    chunked_url = list()
    chunk_size = 1
    for i in range(0, len(urls_list), chunk_size):
        chunked_url.append(urls_list[i:i+chunk_size])
    print(chunked_url)

OK, I have found a solution, but only for printing the URLs:
def get_data(urls):
    reqs = [grequests.get(link) for link in urls]
    resp = grequests.get(reqs)
    return resp

if __name__ == '__main__':
    start = time.perf_counter()
    urls = get_urls()
    resp = get_data(urls)
    resp = '\n'.join(resp)
    url = '\n'.join(urls)
http://books.toscrape.com/catalogue/page-48.html
http://books.toscrape.com/catalogue/page-49.html
http://books.toscrape.com/catalogue/page-50.html
resp = '\n'.join(resp)
TypeError: can only join an iterable
But for resp I get TypeError: can only join an iterable.
P.S. I only started learning Python about a month ago... :(
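(No answer appears in this thread; here is a minimal sketch of the usual approach, using the books.toscrape.com URLs from the question. grequests.get(reqs) only builds a single unsent request object, which is why the join fails; grequests.map(reqs) is what actually sends the requests and returns a list of Response objects, which can then be paired with the URLs:)
import grequests

urls = [f'http://books.toscrape.com/catalogue/page-{x}.html' for x in range(1, 51)]
reqs = [grequests.get(u) for u in urls]
for u, resp in zip(urls, grequests.map(reqs)):
    print(resp)  # <Response [200]>, or None if that request failed
    print(u)     # http://books.toscrape.com/catalogue/page-1.html ...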

Related

Is there a way to use a list of URLs in Python with the requests module?

I would like to use a list of URLs and process their data in Python. I can obtain the information using one URL, but I would like to do it for many. The following code gets me the data that I need, but I am unable to use a list (for example the one commented out as #myString). I am only a few days into learning Python.
import requests

#myString = ['https://{redacted by me}/vessels/amadi_9682552_10003796/,https://{redacted by me}/vessels/akebono-maru_9554729_2866687/,https://{redacted by me}/vessels/amani_9661869_9276632/,https://{redacted by me}/vessels/aman-sendai_9134323_2017277/,https://{redacted by me}/vessels/al-aamriya_9338266_25273/}
r = requests.get('https://{redacted by me}/vessels/amadi_9682552_10003796/')
if r.status_code == 200:
    print(r.status_code)
elif r.status_code == 300:
    print(r.status_code)  # post url separately to a defined api -- future
elif r.status_code == 404:
    print(r.status_code)  # post url separately to a defined api -- future for removal
data = r.content
data = data.decode("utf-8")
#print(data)
next_port_locode = data.split('locode: "')[1].split('"')[0].strip()
next_port_iso2 = data.split('iso2: "')[1].split('"')[0].strip()
next_port_name = data.split('iso2: "')[1].split('name: "')[1].split('"')[0].strip()
next_port_eta = data.split('eta: moment("')[1].split('"')[0].strip()
next_port_latitude = float(data.split('latitude: ')[1].split(',')[0].strip())
next_port_longitude = float(data.split('longitude: ')[1].split('\n')[0].strip())
datajson = {
    "next_port_locode": next_port_locode,
    "next_port_iso2": next_port_iso2,
    "next_port_name": next_port_name,
    "next_port_eta": next_port_eta,
    "next_port_latitude": next_port_latitude,
    "next_port_longitude": next_port_longitude,
}
print(datajson)
requests.post("https://{redacted by me}/api/Moments", json=datajson)
You just need to work on your Python basics; this should do it. Check out lists and for loops. Good luck!
import requests

myString = ['https://{redacted by me}/vessels/amadi_9682552_10003796/','https://{redacted by me}/vessels/akebono-maru_9554729_2866687/','https://{redacted by me}/vessels/amani_9661869_9276632/','https://{redacted by me}/vessels/aman-sendai_9134323_2017277/','https://{redacted by me}/vessels/al-aamriya_9338266_25273/']
for myURL in myString:
    r = requests.get(myURL)
    if r.status_code == 200:
        .
        .
        .
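(A fuller sketch, not from the original answer: the whole scrape-and-post body from the question simply moves inside the loop; the URL placeholders and the field parsing are assumed to work exactly as in the question:)
import requests

myString = ['https://{redacted by me}/vessels/amadi_9682552_10003796/',
            'https://{redacted by me}/vessels/akebono-maru_9554729_2866687/']
for myURL in myString:
    r = requests.get(myURL)
    if r.status_code != 200:
        print(r.status_code)  # could post the url to a defined api here
        continue
    data = r.content.decode("utf-8")
    datajson = {
        "next_port_locode": data.split('locode: "')[1].split('"')[0].strip(),
        "next_port_eta": data.split('eta: moment("')[1].split('"')[0].strip(),
    }
    print(datajson)
    requests.post("https://{redacted by me}/api/Moments", json=datajson)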

Skip exceptions in python

I have a newbie question: let's say I have this list of stocks in Python.
import requests

list = ["AMZN", "APPL", "BAC"]
try:
    for x in list:
        url = 'https://financialmodelingprep.com/api/v3/quote-short/' + x + '?apikey=demo'
        response = requests.request('GET', url)
        result = response.json()
        print(result[0]["price"])
except:
    pass
The second ticker will throw an exception. How do I make Python carry on to the third ticker no matter what happens with the second ticker's request?
Use try-except inside the for loop, like below:
import requests

list = ["AMZN", "APPL", "BAC"]
for x in list:
    try:
        url = 'https://financialmodelingprep.com/api/v3/quote-short/' + x + '?apikey=demo'
        response = requests.request('GET', url)
        result = response.json()
        print(result[0]["price"])
    except:
        pass
You can use continue:
import requests

list = ["AMZN", "APPL", "BAC"]
for x in list:
    try:
        url = 'https://financialmodelingprep.com/api/v3/quote-short/' + x + '?apikey=demo'
        response = requests.request('GET', url)
        result = response.json()
        print(result[0]["price"])
    except:
        continue
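(A side note beyond these answers: a bare except also swallows typos, KeyboardInterrupt and real bugs; catching just the failures you expect is usually safer. A minimal sketch, assuming the errors here are network, JSON or missing-data related:)
import requests

tickers = ["AMZN", "APPL", "BAC"]
for ticker in tickers:
    url = 'https://financialmodelingprep.com/api/v3/quote-short/' + ticker + '?apikey=demo'
    try:
        response = requests.get(url)
        response.raise_for_status()  # turns 4xx/5xx answers into requests.HTTPError
        result = response.json()
        print(result[0]["price"])
    except (requests.RequestException, ValueError, IndexError, KeyError):
        continue  # skip this ticker and move on to the next one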

Deploy a simple Python bot

Code:
import requests as rq
from bs4 import BeautifulSoup as bs

url = "https://apod.nasa.gov/apod/astropix.html"
page = rq.get(url).content
soup = bs(page, 'html.parser')
response = soup.find('img')
if response == None:
    imglink = soup.find('iframe')['src']
else:
    imglink = 'https://apod.nasa.gov/apod/' + response['src']

def main():
    sess = rq.Session()
    cid = '**************'
    turl = 'https://api.telegram.org/bot*******************/'
    if response == None:
        imglink = soup.find('iframe')['src']
        params = {'chat_id': cid, 'text': imglink}
        sess.post(turl + 'sendMessage', data=params)
    else:
        imglink = 'https://apod.nasa.gov/apod/' + response['src']
        title = soup.find('b').get_text()
        params = {'chat_id': cid, 'photo': imglink, 'caption': title}
        sess.post(turl + 'sendPhoto', data=params)

if __name__ == '__main__':
    main()
This is a simple bot for sending the NASA picture of the day to my Telegram channel. I will be modifying this script to make it run every day. But the question is: where do I host it so that it runs all the time, for free? What is the correct way of doing this?
I don't know of any providers that would host this for free. For cheap, AWS and Google Cloud both have simple solutions.
Ex: https://cloud.google.com/blog/products/application-development/how-to-schedule-a-recurring-python-script-on-gcp
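(A further note, not from the original answer: once you have any always-on machine, the simplest free scheduler is the script itself. A minimal sketch; note that the page fetch at the top of the bot would have to move inside main() so each run sees the current picture:)
import time

if __name__ == '__main__':
    while True:
        main()  # assumes main() re-fetches the APOD page on each run
        time.sleep(24 * 60 * 60)  # wait one day before posting again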

I can't get data from JSON with python

I can't seem to get the last recorded price from a website API using JSON. I tried to find the error, but the code looks okay to me. The code is in Python.
This is the URL that I have to GET: https://api.independentreserve.com/Public/GetMarketSummary?primaryCurrencyCode=xbt&secondaryCurrencyCode=aud
Python 3.7
import requests
URL = "https://api.independentreserve.com/Public/GetMarketSummary?"
CurrencyCode = "xbt"
SecondaryCode = "aud"
PARAMS = {'primaryCurrencyCode': CurrencyCode, '&secondaryCurrencyCode': SecondaryCode}
r = requests.get(url=URL, params=PARAMS)
data = r.json()
lastprice = data['LastPrice']
print("Last Price:%s" % lastprice)
Here is the fixed code:
import requests
URL = "https://api.independentreserve.com/Public/GetMarketSummary?"
CurrencyCode = "xbt"
SecondaryCode = "aud"
PARAMS = {'primaryCurrencyCode': CurrencyCode, 'SecondaryCurrencyCode': SecondaryCode}
r = requests.get(url=URL, params=PARAMS)
data = r.json()
lastprice = data['LastPrice']
print("Last Price:%s" % lastprice)
The problem is in the PARAMS dict: you need to change "&secondaryCurrencyCode" to "SecondaryCurrencyCode".
If you had printed the data dict, you would have seen this:
{'Message': 'Secondary Currency Code is required'}
Removing the & in "&secondaryCurrencyCode" will fix the issue. Fixed code below:
import requests
URL = "https://api.independentreserve.com/Public/GetMarketSummary?"
CurrencyCode = "xbt"
SecondaryCode = "aud"
PARAMS = {'primaryCurrencyCode': CurrencyCode, 'secondaryCurrencyCode': SecondaryCode}
r = requests.get(url=URL, params=PARAMS)
data = r.json()
lastprice = data['LastPrice']
print("Last Price:%s" % lastprice)
The API is expecting secondaryCurrencyCode, not &secondaryCurrencyCode. You don't need the & sign when you use params.
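(An extra note beyond these answers: requests percent-encodes whatever you put in params, so the stray & was being sent literally as %26secondaryCurrencyCode, a key the API does not recognise. Printing r.url is a quick way to check what was actually requested:)
import requests

r = requests.get(
    "https://api.independentreserve.com/Public/GetMarketSummary",
    params={'primaryCurrencyCode': 'xbt', 'secondaryCurrencyCode': 'aud'},
)
print(r.url)  # ...?primaryCurrencyCode=xbt&secondaryCurrencyCode=aud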

Equivalent Python code for the following Java http get requests

I am trying to convert the following Java code to Python. Not sure what I am doing wrong, but I end up with an Internal Server Error (500) on the Python side.
Is the "body" argument of httplib.HTTPConnection's request method equivalent to Java's HttpEntity?
Any other thoughts on what could be wrong? The input information I collect is definitely correct.
Any help will be really appreciated; I have tried several things, but always end up with the same internal server error.
Java Code:
HttpEntity reqEntitiy = new StringEntity("loginTicket="+ticket);
HttpRequestBase request = reMethod.getRequest(uri, reqEntitiy);
request.addHeader("ticket", ticket);
HttpResponse response = httpclient.execute(request);
HttpEntity responseEntity = response.getEntity();
StatusLine responseStatus = response.getStatusLine();
Python code:
url = serverURL + "resources/slmservices/templates/"+templateId+"/options"
#Create the request
ticket = ticket.replace("'",'"')
headers = {"ticket":ticket}
print "ticket",ticket
reqEntity = "loginTicket="+ticket
body = "loginTicket="+ticket
url2 = urlparse.urlparse(serverURL)
h1 = httplib.HTTPConnection(url2.hostname,8580)
print "h1",h1
url3 = urlparse.urlparse(url)
print "url path",url3.path
ubody = {"loginTicket":ticket}
data = urllib.urlencode(ubody)
conn = h1.request("GET",url3.path,data,headers)
#conn = h1.request("GET",url3.path)
response = h1.getresponse()
lines = response.read()
print "response.status",response.status
print "response.reason",response.reason
You don't need to go this low-level. Use urllib2 instead:
import urllib2
from urllib import urlencode

url = "{}resources/slmservices/templates/{}/options".format(
    serverURL, templateId)
headers = {"ticket": ticket}
params = {"loginTicket": ticket}
url = '{}?{}'.format(url, urlencode(params))
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
print 'Status', response.getcode()
print 'Response data', response.read()
Note that the parameters are added to the URL to form URL query parameters.
You can make this simpler still by installing the requests library:
import requests

url = "{}resources/slmservices/templates/{}/options".format(
    serverURL, templateId)
headers = {"ticket": ticket}
params = {"loginTicket": ticket}
response = requests.get(url, params=params, headers=headers)
print 'Status', response.status_code
print 'Response data', response.content  # or response.text for Unicode
Here requests takes care of URL-encoding the URL query string parameters and adding it to the URL for you, just like Java does.
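(Not from the original answer: urllib2 exists only on Python 2; on Python 3 the same approach goes through urllib.request. A sketch with placeholder values standing in for the question's serverURL, templateId and ticket:)
from urllib.parse import urlencode
from urllib.request import Request, urlopen

serverURL = "http://example.com/"  # placeholder values, not from the question
templateId = "1"
ticket = "some-ticket"

url = "{}resources/slmservices/templates/{}/options".format(serverURL, templateId)
request = Request(
    '{}?{}'.format(url, urlencode({"loginTicket": ticket})),
    headers={"ticket": ticket},
)
response = urlopen(request)
print('Status', response.getcode())
print('Response data', response.read())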
