import pandas as pd
import pandas_datareader as web
spy_etf = web.DataReader('SPY','google')
spy_etf.info()
#ERROR
/Users/mac/anaconda3/envs/pyfinance/bin/python /Users/mac/Desktop/M_BOT_AND_TOOLS/Anaconda/PYTHON/_root_exercises/udemy_class_python_for_finance/jobs/Test/test_1.py
Traceback (most recent call last):
File "/Users/mac/Desktop/M_BOT_AND_TOOLS/Anaconda/PYTHON/_root_exercises/udemy_class_python_for_finance/jobs/Test/test_1.py", line 4, in <module>
spy_etf = web.DataReader('SPY','google')
File "/Users/mac/anaconda3/envs/pyfinance/lib/python3.6/site-packages/pandas_datareader/data.py", line 137, in DataReader
session=session).read()
File "/Users/mac/anaconda3/envs/pyfinance/lib/python3.6/site-packages/pandas_datareader/base.py", line 181, in read
params=self._get_params(self.symbols))
File "/Users/mac/anaconda3/envs/pyfinance/lib/python3.6/site-packages/pandas_datareader/base.py", line 79, in _read_one_data
out = self._read_url_as_StringIO(url, params=params)
File "/Users/mac/anaconda3/envs/pyfinance/lib/python3.6/site-packages/pandas_datareader/base.py", line 90, in _read_url_as_StringIO
response = self._get_response(url, params=params)
File "/Users/mac/anaconda3/envs/pyfinance/lib/python3.6/site-packages/pandas_datareader/base.py", line 139, in _get_response
raise RemoteDataError('Unable to read URL: {0}'.format(url))
pandas_datareader._utils.RemoteDataError: Unable to read URL: http://www.google.com/finance/historical?q=SPY&startdate=Jan+01%2C+2010&enddate=Aug+17%2C+2018&output=csv
Process finished with exit code 1
Not being able to acess google financial historial data
The problem is not with your code, but google's support of your query.
If you were to manually GET the generated url:
import requests
requests.get('http://www.google.com/finance/historical?q=SPY&startdate=Jan+01%2C+2010&enddate=Aug+17%2C+2018&output=csv')
# results in <Response [403]>
You will notice the url results in a 403 response, meaning access to the url is forbidden. As a result you get the pandas_datareader._utils.RemoteDataError: Unable to read URL
Related
(I used a translator when writing this article. Please understand that some words may be incorrect.)
I tested it using the requests module. If the site cannot be found, a 404 code should be returned, but with an error. I don't know what the reason is. Any help would be appreciated. How to properly return a 404 code?
---Below is the code.
import requests as re
a = re.get(input())
print(a.status_code)
error :
Traceback (most recent call last):
File "", line 1, in
File "C:\Users\82104_dvfqr9f\AppData\Local\Programs\Python\Python310\lib\site-packages\requests\api.py", line 75, in get
return request('get', url, params=params, **kwargs)
File "C:\Users\82104_dvfqr9f\AppData\Local\Programs\Python\Python310\lib\site-packages\requests\api.py", line 61, in request
return session.request(method=method, url=url, **kwargs)
File "C:\Users\82104_dvfqr9f\AppData\Local\Programs\Python\Python310\lib\site-packages\requests\sessions.py", line 515, in request
prep = self.prepare_request(req)
File "C:\Users\82104_dvfqr9f\AppData\Local\Programs\Python\Python310\lib\site-packages\requests\sessions.py", line 443, in prepare_request
p.prepare(
File "C:\Users\82104_dvfqr9f\AppData\Local\Programs\Python\Python310\lib\site-packages\requests\models.py", line 318, in prepare
self.prepare_url(url, params)
File "C:\Users\82104_dvfqr9f\AppData\Local\Programs\Python\Python310\lib\site-packages\requests\models.py", line 392, in prepare_url
raise MissingSchema(error)
requests.exceptions.MissingSchema: Invalid URL 'eeee.com': No scheme supplied. Perhaps you meant http://eeee.com?
You can use this link for how to work with requests module.
import requests
try:
r = requests.get('https://www.google.com/search?q=ggg')
print(r.status_code)
if r.status_code==404:
print("this url dosn't exist")
except Exception as error:
print(error)
Hi I am high school senior and I want to do a small program for a school project.
I need to get information from a google sheet into my code. I followed this guide to try to do this. I followed every step and put in all my info properly, but I keep getting this error.
Traceback (most recent call last):
File "C:/Users/Conner Boggan/Desktop/Senior Project/Python Code/Google Sheets/Sheets.py", line 11, in
sheet = client.open("tpol2").sheet1 # Open the spreadhseet
File "C:\ProgramData\Anaconda3\envs\Android app\lib\site-packages\gspread\client.py", line 119, in open
self.list_spreadsheet_files(title),
File "C:\ProgramData\Anaconda3\envs\Android app\lib\site-packages\gspread\client.py", line 95, in list_spreadsheet_files
res = self.request('get', url, params=params).json()
File "C:\ProgramData\Anaconda3\envs\Android app\lib\site-packages\gspread\client.py", line 67, in request
headers=headers,
File "C:\ProgramData\Anaconda3\envs\Android app\lib\site-packages\requests\sessions.py", line 543, in get
return self.request('GET', url, **kwargs)
File "C:\ProgramData\Anaconda3\envs\Android app\lib\site-packages\google\auth\transport\requests.py", line 440, in request
self.credentials.before_request(auth_request, method, url, request_headers)
File "C:\ProgramData\Anaconda3\envs\Android app\lib\site-packages\google\auth\credentials.py", line 124, in before_request
self.refresh(request)
File "C:\ProgramData\Anaconda3\envs\Android app\lib\site-packages\google\oauth2\service_account.py", line 334, in refresh
access_token, expiry, _ = _client.jwt_grant(request, self._token_uri, assertion)
File "C:\ProgramData\Anaconda3\envs\Android app\lib\site-packages\google\oauth2_client.py", line 153, in jwt_grant
response_data = _token_endpoint_request(request, token_uri, body)
File "C:\ProgramData\Anaconda3\envs\Android app\lib\site-packages\google\oauth2_client.py", line 124, in _token_endpoint_request
_handle_error_response(response_body)
File "C:\ProgramData\Anaconda3\envs\Android app\lib\site-packages\google\oauth2_client.py", line 60, in _handle_error_response
raise exceptions.RefreshError(error_details, response_body)
google.auth.exceptions.RefreshError: ('invalid_grant: Invalid JWT: Token must be a short-lived token (60 minutes) and in a reasonable timeframe. Check your iat and exp values in the JWT claim.', '{"error":"invalid_grant","error_description":"Invalid JWT: Token must be a short-lived token (60 minutes) and in a reasonable timeframe. Check your iat and exp values in the JWT claim."}')
This is my code for reference:
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from pprint import pprint
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name("TPOL2.json", scope)
client = gspread.authorize(creds)
sheet = client.open("tpol2").sheet1 # Open the spreadhseet
data = sheet.get_all_records() # Get a list of all records
print(data)
I can run each line except the last 3 which leads me to believe the code is able to get online properly, but there is something in the way between the code and the google sheet.
Any help would very much appreciated.
I'm currently new to programming and I'm following along on one of Qazi tutorials and I'm on a section for web scraping but unfortunately I'm getting errors that I can't seem to find a solution for, can you please help me out. Thanks
The error code is bellow.
Traceback (most recent call last):
File "D:\Users\Vaughan\Qazi\Web Scrapping\webscraping.py", line 6, in <module>
page = requests.get(
File "C:\Users\vaugh\AppData\Local\Programs\Python\Python38-32\lib\site-packages\requests\api.py", line 75, in get
return request('get', url, params=params, **kwargs)
File "C:\Users\vaugh\AppData\Local\Programs\Python\Python38-32\lib\site-packages\requests\api.py", line 60, in request
return session.request(method=method, url=url, **kwargs)
File "C:\Users\vaugh\AppData\Local\Programs\Python\Python38-32\lib\site-packages\requests\sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "C:\Users\vaugh\AppData\Local\Programs\Python\Python38-32\lib\site-packages\requests\sessions.py", line 640, in send
adapter = self.get_adapter(url=request.url)
File "C:\Users\vaugh\AppData\Local\Programs\Python\Python38-32\lib\site-packages\requests\sessions.py", line 731, in get_adapter
raise InvalidSchema("No connection adapters were found for '%s'" % url)
requests.exceptions.InvalidSchema: No connection adapters were found for '['https://forecast.weather.gov/MapClick.php?lat=34.09979000000004&lon=-118.32721499999997#.XkzZwCgzaUk']'
[Finished in 1.171s]
My line of code is as follows
from bs4 import BeautifulSoup
import requests
import csv
import pandas as pd
import lxml
page = requests.get('https://forecast.weather.gov/MapClick.php?lat=34.09979000000004&lon=-118.32721499999997#.XkzZwCgzaUk')
soup = BeautifulSoup(page.content, 'html.parser')
week = soup.find(id='seven-day-forecast-body')
print(week)
I'm new to coding and I suck. I hope to find help here.
I was playing around with the praw module for reddit and couldn't get the simple function upvote() to work.
Here's my code:
import praw
r = praw.Reddit(client_id='**************',
client_secret='**************',
user_agent='**************',
username='**************',
password='***********')
r.submission('https://www.reddit.com/r/aww/comments/9znyf2/today_is_my_18th_birthday_everyone/').upvote()
And here's the full error:
Traceback (most recent call last):
File "C:\Users\*****\Desktop\*****\*****\*****\*****\*****\A.py", line 11, in <module>
r.submission('https://www.reddit.com/r/aww/comments/9znyf2/today_is_my_18th_birthday_everyone/').upvote()
File "C:\Users\*****\AppData\Local\Programs\Python\Python37\lib\site-packages\praw\models\reddit\mixins\votable.py", line 80, in upvote
self._vote(direction=1)
File "C:\Users\*****\AppData\Local\Programs\Python\Python37\lib\site-packages\praw\models\reddit\mixins\votable.py", line 10, in _vote
'id': self.fullname})
File "C:\Users*****\AppData\Local\Programs\Python\Python37\lib\site-packages\praw\reddit.py", line 465, in post
params=params)
File "C:\Users\*****\AppData\Local\Programs\Python\Python37\lib\site-packages\praw\reddit.py", line 506, in request
params=params)
File "C:\Users\*****\AppData\Local\Programs\Python\Python37\lib\site-packages\prawcore\sessions.py", line 185, in request
params=params, url=url)
File "C:\Users\*****\AppData\Local\Programs\Python\Python37\lib\site-packages\prawcore\sessions.py", line 130, in _request_with_retries
raise self.STATUS_EXCEPTIONS[response.status_code](response)
prawcore.exceptions.NotFound: received 404 HTTP response
PRAW is trying to use that URL as a post ID, not as a URL. url is the second argument to reddit.submission(), so you'll need to use it as a named argument (r.submission(url='https://www.reddit.com/...')) to get the submission. After you do that everything should work as expected.
I'm making a web page scraper using BeautifulSoup4 and requests libraries. I had some trouble with BeautifulSoup working but got some help and was able to get that fixed. Now I've run into a new problem and I'm not sure how to fix it. I'm using requests 2.2.1 and I'm trying to run this program on Python 3.1.2. And when I do I get a traceback error.
here is my code:
from bs4 import BeautifulSoup
import requests
url = input("Enter a URL (start with www): ")
link = "http://" + url
page = requests.get(link).content
soup = BeautifulSoup(page)
for url in soup.find_all('a'):
print(url.get('href'))
print()
and the error:
Enter a URL (start with www): www.google.com
Traceback (most recent call last):
File "/Users/user/Desktop/project.py", line 8, in <module>
page = requests.get(link).content
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/api.py", line 55, in get
return request('get', url, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/api.py", line 44, in request
return session.request(method=method, url=url, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/sessions.py", line 349, in request
prep = self.prepare_request(req)
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/sessions.py", line 287, in prepare_request
hooks=merge_hooks(request.hooks, self.hooks),
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/models.py", line 287, in prepare
self.prepare_url(url,params)
File "/Library/Frameworks/Python.framework/Versions/3.1/lib/python3.1/site-packages/requests-2.2.1-py3.1.egg/requests/models.py", line 321, in prepare_url
url = str(url)
TypeError: 'tuple' object is not callable
I've done some looking and when others have gotten this error (in django mostly) there was a comma missing but I'm not sure where to put a comma at? Any help will be appreciated.