I was developing a price prediction model that requires Tiingo but there seems to be problem in the API authentification. I used the OS access the Tiingo API.
`
api_key =os.environ.get('TIINGO_API_KEY')
df=pdr.get_data_tiingo('AAPL',api_key)
df=pd.read_csv('AAPL.csv')
print(df.tail())
The error I got looks like:
~\AppData\Local\Temp/ipykernel_9920/1017009006.py in <module>
1 api_key =os.environ.get('TIINGO_API_KEY')
----> 2 df=pdr.get_data_tiingo('AAPL',api_key)
3 df=pd.read_csv('AAPL.csv')
4 print(df.tail())
~\anaconda3\lib\site-packages\pandas_datareader\data.py in get_data_tiingo(*args, **kwargs)
118
119 def get_data_tiingo(*args, **kwargs):
--> 120 return TiingoDailyReader(*args, **kwargs).read()
121
122
~\anaconda3\lib\site-packages\pandas_datareader\tiingo.py in __init__(self, symbols, start, end, retry_count, pause, timeout, session, freq, api_key)
181 api_key = os.getenv("TIINGO_API_KEY")
182 if not api_key or not isinstance(api_key, str):
--> 183 raise ValueError(
184 "The tiingo API key must be provided either "
185 "through the api_key variable or through the "
ValueError: The tiingo API key must be provided either through the api_key variable or through the environmental variable TIINGO_API_KEY.
Any assistance is highly appreciated
It seems api_key is coming as None. You should check that.
Related
It looks like the error is related to an invalid API key or access token. Everything I did is correct and mentioned below what steps are taken by me:
I've created https://developers.kite.trade/ (from the Zerodha Kite Connect dashboard)
Here is Zerodha API key created image
To get data from Zerodha in Python, I am trying the Zerodha Kite Connect API. Kite Connect is a set of REST-like APIs that expose many capabilities required to build a complete investment and trading platform. To use the API, I first needed to create a Zerodha account and then applied for API access. After I have received your API key, I can use it to make requests to the Kite Connect API using a Python library such as kiteconnect or kiteconnect-python.
Here is an example of how you could use the kiteconnect library to get historical data for a stock:
This python code:
from kiteconnect import KiteConnect
import datetime
kite = KiteConnect(api_key='0cv9cnax7bmgjclh')
# Get historical data for a stock
today = datetime.datetime.now().date()
historical_data = kite.historical_data(
instrument_token=6048, # Instrument token of a stock
from_date=today - datetime.timedelta(days=365), # From date
to_date=today, # To date
interval="daily" # Interval (minute, hourly, daily, weekly, monthly, yearly)
)
print(historical_data)
Error:
---------------------------------------------------------------------------
InputException Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_18768\3958108260.py in <module>
6 # Get historical data for a stock
7 today = datetime.datetime.now().date()
----> 8 historical_data = kite.historical_data(
9 instrument_token=6048, # Instrument token of a stock
10 from_date=today - datetime.timedelta(days=365), # From date
~\anaconda3\lib\site-packages\kiteconnect\connect.py in historical_data(self, instrument_token, from_date, to_date, interval, continuous, oi)
629 to_date_string = to_date.strftime(date_string_format) if type(to_date) == datetime.datetime else to_date
630
--> 631 data = self._get("market.historical",
632 url_args={"instrument_token": instrument_token, "interval": interval},
633 params={
~\anaconda3\lib\site-packages\kiteconnect\connect.py in _get(self, route, url_args, params, is_json)
849 def _get(self, route, url_args=None, params=None, is_json=False):
850 """Alias for sending a GET request."""
--> 851 return self._request(route, "GET", url_args=url_args, params=params, is_json=is_json)
852
853 def _post(self, route, url_args=None, params=None, is_json=False, query_params=None):
~\anaconda3\lib\site-packages\kiteconnect\connect.py in _request(self, route, method, url_args, params, is_json, query_params)
925 # native Kite errors
926 exp = getattr(ex, data.get("error_type"), ex.GeneralException)
--> 927 raise exp(data["message"], code=r.status_code)
928
929 return data["data"]
InputException: Invalid `api_key` or `access_token`.
I am trying to get historical data via API from Zerodha.
I could manage to get access to Azure resources with the code bellow:
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.graphrbac import GraphRbacManagementClient
subscription_id = "aaaa"
tenant_id = "bbbb"
credentials = ServicePrincipalCredentials(
client_id="cccc",
secret="dddd",
tenant=tenant_id
)
client = ResourceManagementClient(credentials, subscription_id)
for item in client.resource_groups.list():
print item
compute_client = ComputeManagementClient(credentials, subscription_id)
disks = compute_client.disks.list()
for disk in disks:
print disk
But I can't access Azure AD with the same code!!! Is there a different way to access it? Why is it different?! See the code bellow:
graphrbac_client = GraphRbacManagementClient(credentials, subscription_id)
for item in graphrbac_client.groups.list():
print item
Error:
GraphErrorExceptionTraceback (most recent call last)
in ()
1 graphrbac_client = GraphRbacManagementClient(credentials, subscription_id)
2
----> 3 for item in graphrbac_client.groups.list():
4 print item
/home/andre/.local/lib/python2.7/site-packages/msrest/paging.pyc in
next(self)
129 return response
130 else:
--> 131 self.advance_page()
132 return self.next()
133
/home/andre/.local/lib/python2.7/site-packages/msrest/paging.pyc in
advance_page(self)
115 raise StopIteration("End of paging")
116 self._current_page_iter_index = 0
--> 117 self._response = self._get_next(self.next_link)
118 self._derserializer(self, self._response)
119 return self.current_page
/home/andre/.local/lib/python2.7/site-packages/azure/graphrbac/operations/groups_operations.pyc
in internal_paging(next_link, raw)
336
337 if response.status_code not in [200]:
--> 338 raise models.GraphErrorException(self._deserialize, response)
339
340 return response
GraphErrorException: Access Token missing or malformed.
azure-common version = 1.1.14
Access Token missing or malformed.
ComputeManagementClient resource path is https://management.azure.com
But for GraphRbacManagementClient the resource path is https://graph.windows.net. So you got the exception.
How to access Azure AD with Python SDK?
You could get the answer from this link. The following code is the snippet from the document.
from azure.graphrbac import GraphRbacManagementClient
from azure.common.credentials import UserPassCredentials
# See above for details on creating different types of AAD credentials
credentials = UserPassCredentials(
'user#domain.com', # Your user
'my_password', # Your password
resource="https://graph.windows.net"
)
tenant_id = "myad.onmicrosoft.com"
graphrbac_client = GraphRbacManagementClient(
credentials,
tenant_id
)
i'm trying to count all restaurants in my city using python-google-places api
but is not working, i'm getting "failed with response code: INVALID_REQUEST"
What could be causing this?
My code is like that
from googleplaces import GooglePlaces, types, lang
from time import sleep
YOUR_API_KEY = '<<MYKEY>>'
google_places = GooglePlaces(YOUR_API_KEY)
# You may prefer to use the text_search API, instead.
query_result = google_places.nearby_search(
lat_lng={'lat': -16.6824083, 'lng': -49.2556573}
,
location='Goiania',
radius=50000,types=[types.TYPE_RESTAURANT])
counter = 0;
while (query_result.has_next_page_token):
counter = counter + len(query_result.places)
query_result = google_places.nearby_search(
lat_lng={'lat': -16.6824083, 'lng': -49.2556573},
location='Goiania',
radius=50000,types=[types.TYPE_RESTAURANT],
pagetoken=query_result.next_page_token)
print(counter)
i'm getting this
---------------------------------------------------------------------------
GooglePlacesError Traceback (most recent call last)
<ipython-input-42-9cc6675b31bc> in <module>()
21 location='Goiania',
22 radius=50000,types=[types.TYPE_RESTAURANT],
---> 23 pagetoken=query_result.next_page_token)
24
25 print(counter)
C:\ProgramData\Anaconda3\lib\site-packages\googleplaces\__init__.py in nearby_search(self, language, keyword, location, lat_lng, name, radius, rankby, sensor, type, types, pagetoken)
303 url, places_response = _fetch_remote_json(
304 GooglePlaces.NEARBY_SEARCH_API_URL, self._request_params)
--> 305 _validate_response(url, places_response)
306 return GooglePlacesSearchResult(self, places_response)
307
C:\ProgramData\Anaconda3\lib\site-packages\googleplaces\__init__.py in _validate_response(url, response)
173 error_detail = ('Request to URL %s failed with response code: %s' %
174 (url, response['status']))
--> 175 raise GooglePlacesError(error_detail)
176
177
GooglePlacesError: Request to URL https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=-16.6824083%2C-49.2556573&radius=50000&type=restaurant&pagetoken=CqQCGAEAAAzE3wT0DnczXFlzyjvAaka8vRLZMlsAjF2aqezA8dtGcLIV7ePoqXAUOm0MyxgroXBcKydzt3U3rB2RFvqLijFCbJ3-ucQ-nijN1E7d4aEcC2UlKUR2gNnHfmKYmFVmfQ70lbW-UmCm79WOl2s5oQ8VYoE9bRnr01IphBbVeiS_IDBsCwmsALU4ti5z-7RSYT9ACTCgFs8bVwU9lQ2x_F3v2FtkdqP7UWl5MmNLteox4dSCwa_k3gKD9yd8mCzzos0CvS248uqn_24wLaVubPmxAUrDbSFDhoSx5c8O7S-XrHl4aZ2dx4QUznYXVcEcD_9c-AHKnPoqK-zwh2MVRiHLHNscTnxr4_iCJwsrrOcqlyQrN192HCq9BMADG1tLVxIQ16yZSa5g10FKIcHzFwQqrxoUxS_m8v1Lbr0IbujvfXRi74p71ws&language=en&key=AIzaSyD8YxHJjYdGMO-k7MbOdF807uzEYT-QGYo&sensor=false failed with response code: INVALID_REQUEST
I scrape some data from Amazon and I insert those data to 4 list. But when I am trying to insert those lists into a Database, I just get TypeError: not all arguments converted during string formatting.
But all the data are in string format. I tried using a tuple, but it is not working.
# Importing Requests and BeautifulSoup Module
import requests
from bs4 import BeautifulSoup
import pymysql
# Setting Base Url
base_url = "https://www.amazon.com/s/ref=lp_6503737011_pg_2?rh=n%3A16310101%2Cn%3A%2116310211%2Cn%3A2983386011%2Cn%3A6503737011&page="
# Setting range for pagination
pagination = list(range(1,3))
# Declaring Empty Data
name = []
retailer = []
price = []
image_link = []
# Looping through pagination
for num in pagination:
url = base_url + str(num)
# Connection Error Handler
try:
r = requests.get(url)
except requests.exceptions.ConnectionError:
r.status_code = "Connection refused"
print("Connection Refused by the server")
# Setting BeautifulSoup Object
soup = BeautifulSoup(r.content, "html.parser")
# Setting Div Class of Info
g_data = soup.find_all("div", {"class": "s-item-container"})
# Getting Every Data from Info Div
for item in g_data:
imgs = soup.findAll("img", {"class":"s-access-image"})
for img in imgs:
image_link.append(img['src'])
name.append(item.contents[2].find_all('h2', {'class':'s-access-title'})[0].text)
retailer.append(item.contents[2].find_all('span', {'class':'a-size-small'})[1].text)
whole_number = str(item.contents[3].find_all('span', {'class':'sx-price-whole'})[0].text)
fractional_number = str(item.contents[3].find_all('sup', {'class':'sx-price-fractional'})[0].text)
price_1 = whole_number+"."+fractional_number
price.append(price_1)
This is the code for scraping data. All is good to here.But when I try to insert data into database am getting problem.
import pymysql
db = pymysql.connect('localhost','root','','scrape')
cursor = db.cursor()
sql = """INSERT INTO wine(
NAME,RETAILER,PRICE,IMAGE_LINK) VALUES"
"""
cursor.executemany(sql, (name,retailer,price,image_link))
I am getting this error while running this code:
TypeError Traceback (most recent call last)
<ipython-input-7-0fca81edd73c> in <module>()
6 NAME,RETAILER,PRICE,IMAGE_LINK) VALUES"
7 """
----> 8 cursor.executemany(sql, (name,retailer,price,image_link))
C:\Anaconda3\lib\site-packages\pymysql\cursors.py in executemany(self, query, args)
193 self._get_db().encoding)
194
--> 195 self.rowcount = sum(self.execute(query, arg) for arg in args)
196 return self.rowcount
197
C:\Anaconda3\lib\site-packages\pymysql\cursors.py in <genexpr>(.0)
193 self._get_db().encoding)
194
--> 195 self.rowcount = sum(self.execute(query, arg) for arg in args)
196 return self.rowcount
197
C:\Anaconda3\lib\site-packages\pymysql\cursors.py in execute(self, query, args)
162 pass
163
--> 164 query = self.mogrify(query, args)
165
166 result = self._query(query)
C:\Anaconda3\lib\site-packages\pymysql\cursors.py in mogrify(self, query, args)
141
142 if args is not None:
--> 143 query = query % self._escape_args(args, conn)
144
145 return query
TypeError: not all arguments converted during string formatting
I am not able to find any solution to solve this problem.
Your query is incomplete: you need placeholders, e.g. %s
.executemany() takes a container of containers as its second argument; typically this is a list of tuples
Change to:
sql = """INSERT INTO wine(NAME,RETAILER,PRICE,IMAGE_LINK) VALUES (%s,%s,%s,%s);"""
to_insert = [(a,b,c,d) for a,b,c,d in zip(name,retailer,price,image_link)]
cursor.executemany(sql,to_insert)
I haven't used io within pandas to access google analytic's API for a few weeks but it had been working fine to my knowledge historically without hiccups. I ran it again today and it looks as though the tools.run syntax is deprecated, so I made a pull and replaced tools.py with this update and I've changed to auth.py within pandas to be:
def authenticate(flow, storage=None):
"""
Try to retrieve a valid set of credentials from the token store if possible
Otherwise use the given authentication flow to obtain new credentials
and return an authenticated http object
Parameters
----------
flow : authentication workflow
storage: token storage, default None
"""
http = httplib2.Http()
# Prepare credentials, and authorize HTTP object with them.
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(flow, storage, FLAGS)
http = credentials.authorize(http)
return http
I have a feeling my usage of FLAGS there is incorrect.
Any help? Thanks!
Here's my code and the error:
df = ga.read_ga(
account_id = id,
profile_id = profile,
property_id = property,
metrics = ['transactionRevenue', 'transactions'],
dimensions = ['transactionId', 'city', 'region', 'date', 'hour', 'minute', 'cityId'],
start_date = "2015-07-11",
end_date = "2015-07-16",
index_col = 0,
parse_dates = {'new_date': [3,4,5]})
The error thrown up:
C:\Users\mburke\AppData\Local\Continuum\Anaconda64\lib\site-packages\pandas\io\auth.py in authenticate(flow, storage)
106 credentials = storage.get()
107 if credentials is None or credentials.invalid:
--> 108 credentials = tools.run_flow(flow, storage, FLAGS)
109
110 http = credentials.authorize(http)
C:\Users\mburke\AppData\Local\Continuum\Anaconda64\lib\site-packages\oauth2client\util.pyc in positional_wrapper(*args, **kwargs)
140 else: # IGNORE
141 pass
--> 142 return wrapped(*args, **kwargs)
143 return positional_wrapper
144
C:\Users\mburke\AppData\Local\Continuum\Anaconda64\lib\site-packages\oauth2client\tools.pyc in run_flow(flow, storage, flags, http)
148 logging.getLogger().setLevel(getattr(logging, flags.logging_level))
--> 149 if not flags.noauth_local_webserver:
150 success = False
151 port_number = 0
C:\Users\mburke\AppData\Local\Continuum\Anaconda64\lib\site-packages\python_gflags-2.0-py2.7.egg\gflags.pyc in __getattr__(self, name)
1057 fl = self.FlagDict()
1058 if name not in fl:
-> 1059 raise AttributeError(name)
1060 return fl[name].value
1061
AttributeError: noauth_local_webserver
I did a little digging and you are correct in your assumption that the usage of FLAGS is incorrect. The docstring for tools.run_flow() states:
flags: ``argparse.Namespace``, The command-line flags. This is the
object returned from calling ``parse_args()`` on
``argparse.ArgumentParser`` as described above.
The quick-n-dirty fix would be something like this:
credentials = tools.run_flow(flow, storage, tools.argparser.parse_args([]))
I believe a more robust solution would be for the maintainers of pandas.io to update it to the new workflow if tools.run is really deprecated.