Can't instantiate abstract class IEXStockFetcher with abstract methods fetchImageURL, fetchPrice, fetchStockHighLow - python

I am trying to get this awesome code I found on GitHub to run: https://github.com/apryor6/stockstreamer/blob/master/data_fetcher.py
I made a few modifications to the code, as the IEX API URL has changed since this code was published:
class IEXStockFetcher(StockFetcher):
    """
    Fetches stock information using iextrading.com API
    """
    url_prefix = "https://cloud.iexapis.com/stable/stock/market/batch?token=MY TOKEN"
    url_suffix_price = "&price"
    url_suffix_img = "&logo"
    url_suffix_highlow = "&quote"
When I step through the code and get to the end, I receive the following error: "Can't instantiate abstract class IEXStockFetcher with abstract methods fetchImageURL, fetchPrice, fetchStockHighLow"
I am relatively new to object-oriented programming in Python. Does anyone have any thoughts?

class IEXStockFetcher(StockFetcher):
    """
    Fetches stock information using iextrading.com API
    """
    url_prefix = "https://cloud.iexapis.com/stable/stock/"
    url_suffix_price = "/quote/latestPrice"
    url_suffix_img = "/logo"
    url_suffix_highlow = "/quote"
    url_suffix_token = "?token=pk_44de71531a5d400bb1bd98a2c7dd011d"

    # ...

    def fetchPrice(self, stock):
        # get the price of a single stock
        try:
            resp = urlopen("{}{}{}{}".format(IEXStockFetcher.url_prefix, stock, IEXStockFetcher.url_suffix_price, IEXStockFetcher.url_suffix_token))
            resp = json.loads(resp.readlines()[0].decode('utf8'))
            price = float(resp)
            return price
        except:
            return self.fetchPrice(stock)

    def fetchImageURL(self, stock):
        # get the image URL of a single stock
        try:
            resp = urlopen("{}{}{}{}".format(IEXStockFetcher.url_prefix, stock, IEXStockFetcher.url_suffix_img, IEXStockFetcher.url_suffix_token))
            resp = json.loads(resp.readlines()[0].decode('utf8'))
            return resp['url']
        except:
            return self.fetchImageURL(stock)

    def fetchStockHighLow(self, stock):
        # get the 52-week high and low of a single stock
        try:
            resp = urlopen("{}{}{}{}".format(IEXStockFetcher.url_prefix, stock, IEXStockFetcher.url_suffix_highlow, IEXStockFetcher.url_suffix_token))
            resp = json.loads(resp.readlines()[0].decode('utf8'))
            return (resp['week52High'], resp['week52Low'])
        except:
            return self.fetchStockHighLow(stock)

I was able to get your code working with the new API. I had to make a few small modifications:
class IEXStockFetcher(StockFetcher):
    """
    Fetches stock information using iextrading.com API
    """
    url_prefix = "https://cloud.iexapis.com/stable/stock/market/batch?token=<MY TOKEN>&symbols="
    url_suffix_price = "&types=price"
    url_suffix_img = "&types=logo"
    url_suffix_highlow = "&types=quote"

    # ...

    def fetchPrice(self, stock):
        # get the price of a single stock
        try:
            resp = urlopen("{}{}{}".format(IEXStockFetcher.url_prefix, stock, IEXStockFetcher.url_suffix_price))
            resp = json.loads(resp.readlines()[0].decode('utf8'))
            price = float(resp[stock]['price'])
            return price
        except:
            return self.fetchPrice(stock)
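For reference on the error itself: Python refuses to instantiate a subclass of an abstract base class until every method marked @abstractmethod has been overridden. In practice this often comes down to indentation, since methods that drift outside the class body do not count as overrides. A minimal sketch of the mechanism, independent of the stockstreamer code (StockFetcherBase here is a hypothetical stand-in for StockFetcher):

from abc import ABC, abstractmethod

class StockFetcherBase(ABC):
    @abstractmethod
    def fetchPrice(self, stock):
        ...

class BrokenFetcher(StockFetcherBase):
    pass  # fetchPrice is never overridden

class WorkingFetcher(StockFetcherBase):
    def fetchPrice(self, stock):  # overrides the abstract method
        return 42.0

WorkingFetcher()  # fine
BrokenFetcher()   # TypeError: Can't instantiate abstract class BrokenFetcher ...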

Related

Python API script

I am making a Python script using the API of a free test automation website called TestProject.
Link to their API: https://api.testproject.io/docs/v2/
Basically what I want to do is grab the PDF reports of all tests and save them somewhere.
But to make the GET request for that, I first need the projectID and jobID, which I already wrote functions for getting and saving in lists.
But now I have a problem where it loops through both lists without using the correct projectID and jobID pairs, and it throws errors because the combination does not exist.
So what I need is something to check which jobIDs belong to which projectID, so that I can make a GET request to get all the executionIDs and then the PDF of the report.
I am kinda new to programming, so I would love any help I can get. If anyone has any better solutions, please feel free to let me know.
My script:
import requests
import json
import csv
from datetime import datetime
from jsonpath_ng import jsonpath, parse

API_key = 'api_key'
headers = {'Authorization': '{}'.format(API_key)}

list_projectId = []
list_jobId = []
list_executionId = []

ParseData_projectId = parse('$..id')
ParseData_jobId = parse('$..id')
ParseData_executionId = parse('$..id')

def parsing(response, ParseData, list_data):
    # parses data and appends it to the list
    Data = json.loads(response)
    Parsaj = ParseData
    Podatki = Parsaj.find(Data)
    for i in range(0, len(Podatki)):
        vrednost = Podatki[i].value
        list_data.append(vrednost)

def projectId():
    # gets all projectId's and saves them in list_projectId
    url = 'https://api.testproject.io/v2/projects?_start=0'
    response = requests.get(url, headers=headers)
    response_json = response.json()
    converted = json.dumps(response_json)
    parsing(converted, ParseData_projectId, list_projectId)

def jobId():
    # gets all jobId's and saves them in list_jobId
    for i in range(0, len(list_projectId)):
        id = list_projectId[i]
        url = 'https://api.testproject.io/v2/projects/{}'.format(id) + '/jobs?onlyScheduled=false&_start=0'
        response = requests.get(url, headers=headers)
        response_json = response.json()
        converted = json.dumps(response_json)
        parsing(converted, ParseData_jobId, list_jobId)

def executionId():
    # Their API link:
    # https://api.testproject.io/v2/projects/{projectId}/jobs/{jobId}/reports?_start=0
    # the for loop below does not work; here is where I need the help:
    for i in range(0, len(list_projectId)):
        project_id = list_projectId[i]
        job_id = list_jobId[i]
        url = 'https://api.testproject.io/v2/projects/{}'.format(project_id) + '/jobs/{}'.format(job_id) + '/reports?_start=0'
        response = requests.get(url, headers=headers)
        response_json = response.json()
        converted = json.dumps(response_json)
        parsing(converted, ParseData_executionId, list_executionId)

projectId()
print("----------LIST PROJECT ID: ----------")
print(list_projectId)
print("")
jobId()
print("----------LIST JOB ID: ----------")
print(list_jobId)
executionId()
print("----------LIST EXECUTION ID: ----------")
print(list_executionId)
You have to use the 'in' operator to check whether a value exists in a list data structure.
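Beyond that, the underlying problem in the script is that the two parallel lists lose the association between projects and their jobs: a project can have several jobs, so list_jobId[i] does not correspond to list_projectId[i]. One way around this, sketched here against the question's own endpoints and parsing helper, is to group the job IDs per project in a dictionary so each report request only combines pairs that actually belong together:

jobs_by_project = {}

def jobId():
    # gets all jobId's, grouped per project instead of in one flat list
    for project_id in list_projectId:
        url = 'https://api.testproject.io/v2/projects/{}/jobs?onlyScheduled=false&_start=0'.format(project_id)
        response = requests.get(url, headers=headers)
        job_ids = []
        parsing(json.dumps(response.json()), ParseData_jobId, job_ids)
        jobs_by_project[project_id] = job_ids

def executionId():
    # only request reports for (projectId, jobId) pairs that exist
    for project_id, job_ids in jobs_by_project.items():
        for job_id in job_ids:
            url = 'https://api.testproject.io/v2/projects/{}/jobs/{}/reports?_start=0'.format(project_id, job_id)
            response = requests.get(url, headers=headers)
            parsing(json.dumps(response.json()), ParseData_executionId, list_executionId)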

Get json api data from another api flask

I created this REST API with Flask and SQLAlchemy, and I want to not enter the JSON data manually but get it from another JSON source and add it to my database:
https://www.habitat.fr/api/qDbBye4V7vtMu8qL97vvHTAnLQuEhC/product/911095/sku
My add product route in Flask:
# add product
@app.route('/product', methods=['POST'])
def add_product():
    name = request.json['name']
    description = request.json['description']
    price = request.json['price']
    qty = request.json['qty']
    new_product = Product(name, description, price, qty)
    db.session.add(new_product)
    db.session.commit()
    return product_schema.jsonify(new_product)
All you need to do is write a function using the requests and json modules to fetch the JSON at that URL, like this:

import requests
import json

def get_data():
    r = requests.get('https://www.habitat.fr/api/qDbBye4V7vtMu8qL97vvHTAnLQuEhC/product/911095/sku')
    return json.loads(r.content)  # convert content to dict

You may now call the function in your Flask app:

@app.route('/product', methods=['POST'])
def add_product():
    my_data = get_data()
    name = my_data['name']
    description = my_data['description']
    price = my_data['price']
    qty = my_data['qty']
    new_product = Product(name, description, price, qty)
    db.session.add(new_product)
    db.session.commit()
    return product_schema.jsonify(new_product)
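Two small refinements worth considering for get_data (a sketch, not required for the answer above to work): requests can decode JSON responses itself, and checking the HTTP status first avoids trying to parse an error page as product data.

import requests

def get_data():
    r = requests.get('https://www.habitat.fr/api/qDbBye4V7vtMu8qL97vvHTAnLQuEhC/product/911095/sku')
    r.raise_for_status()  # raise an exception on 4xx/5xx instead of parsing an error body
    return r.json()       # requests decodes the JSON body to a dict for you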

Can't I call function in function in python?

Below is a part of my code.
class Financial_Statements:
    def __init__(self, API_KEY, company_code, year, report_sort):
        self.API_KEY = API_KEY
        self.company_code = company_code
        self.year = year
        self.report_sort = report_sort

    def get_request(self):
        request = Request('https://opendart.fss.or.kr/api/fnlttSinglAcnt.json?crtfc_key='+self.API_KEY+'&corp_code='+self.company_code+'&bsns_year='+self.year+'&reprt_code='+self.report_sort)
        response = urlopen(request)
        elevations = response.read()
        data = json.loads(elevations)
        data = json_normalize(data['list'])  ##--- json to dataframe
        data = data.loc[:, ['fs_nm','sj_nm','account_nm','thstrm_dt','thstrm_amount','frmtrm_nm','frmtrm_amount','bfefrmtrm_nm','bfefrmtrm_amount']]
        return data

    def get_financial_stock_price(self, reo=0):
        data = get_request(self)
I defined get_request to fetch the data and want to use it in other methods, but when I run the code it says 'get_request' is not defined.
Can't I use a function inside another function?
If you want to call a method of the class from inside another method, you have to call it through self. The appropriate code is self.get_request().
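Applied to the class in the question, the fixed method would look like this (only the failing method shown; the rest of the class stays as-is):

class Financial_Statements:
    # ... __init__ and get_request unchanged from the question ...

    def get_financial_stock_price(self, reo=0):
        # call the method through self; Python passes the instance automatically.
        # A bare name like get_request(self) is looked up in local/global scope,
        # not on the instance, hence the "'get_request' is not defined" error.
        data = self.get_request()
        return data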

How to load web scraped data using Pandas and Beautifulsoup into Dataframe?

I have this code, which scrapes the Hacker News website with beautifulsoup4, and I am looking for a way to save the results into a DataFrame using pandas. I have already imported pandas in the code below, but I do not know how to save the results into a DataFrame. It only scrapes the most-favored Hacker News posts for now, but that can be changed.
import pandas as pd
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
from math import ceil
import json, sys, argparse, validators

MAX_NUM_POSTS = 100

class HackerNewsScraper:
    URL = 'https://news.ycombinator.com/news'

    def __init__(self, posts):
        self._total_posts = posts
        self._total_pages = int(ceil(posts/30))
        self._stories = []

    def scrape_stories(self):
        """
        Fetches all HTML data.
        Each page is limited to 30 stories, this function will ensure enough pages are fetched.
        """
        page = 1
        while(page <= self._total_pages):  # Makes sure to visit sufficient amount of pages
            url = '{}?p={}'.format(self.URL, page)
            html = get_html(url)
            self.parse_stories(html)
            page += 1

    def parse_stories(self, html):
        """
        Given a BeautifulSoup nested data structure, html, parse_stories(html) will parse the data and select the desired fields.
        After getting title, uri, author, comments, points, and rank, it will save them in dictionary form in self._stories.
        """
        for storytext, subtext in zip(html.find_all('tr', {'class': 'athing'}),
                                      html.find_all('td', {'class': 'subtext'})):
            storylink = storytext.find_all('a', {'class': 'storylink'})
            sublink = subtext.select('a')
            # All requested data being saved in the dictionary story below
            TITLE = storylink[0].text.strip()
            LINK = storylink[0]['href']
            AUTHOR = sublink[0].text
            COMMENTS = sublink[-1].text
            POINTS = subtext.select('span')[0].text
            RANK = storytext.select('span.rank')[0].text.strip('.')
            story = {
                'title': TITLE,
                'uri': LINK,
                'author': AUTHOR,
                'points': POINTS,
                'comments': COMMENTS,
                'rank': RANK
            }
            # Make sure data satisfies requirements
            story = validate_story(story)
            # self._stories is an array of dictionaries that saves the requested number of stories
            self._stories.append(story)
            # If required number of stories met, stop parsing
            if len(self._stories) >= self._total_posts:
                return

    def print_stories(self):
        """
        Outputs the stories from list of dictionary format to JSON in STDOUT.
        """
        json.dump(self._stories, sys.stdout, indent=4)

    def get_stories(self):
        """
        Returns the scraped stories to the user in a list of dictionary format.
        Used for testing purposes.
        """
        return self._stories

def get_html(url):
    """
    Runs the HTML data through BeautifulSoup to get a BeautifulSoup object, a nested data structure.
    """
    response = get_response(url)
    if response is not None:
        html = BeautifulSoup(response, 'html.parser')
        return html

def validate_story(story):
    """
    Ensures that all the story data is valid according to the task.
    Will return valid data for each field.
    """
    story['title'] = story['title'][:256]
    if not valid_title(story['title']):
        story['title'] = 'Valid title not found'
    story['author'] = story['author'][:256]
    if not valid_author(story['author']):
        story['author'] = 'Valid author not found'
    if not valid_uri(story['uri']):
        story['uri'] = 'Valid URI not found'
    story['comments'] = validate_number(story['comments'])
    story['points'] = validate_number(story['points'])
    story['rank'] = validate_number(story['rank'])
    return story

def valid_title(title):
    """
    Ensures that title is a non-empty string with <= 256 characters.
    """
    return (len(title) <= 256 and title)

def valid_author(author):
    """
    Ensures that author is a non-empty string with <= 256 characters.
    Solved the issue of not finding an author by checking the fetched data with HN username rules.
    """
    if(author.find(' ') > -1):  # Hacker News usernames don't support whitespace
        return False
    return (len(author) <= 256 and author)

def valid_uri(url):
    """
    To be able to find the scraped stories, we need their URL.
    If data is not a valid URL, return False.
    """
    if(validators.url(url)):
        return True
    return False

def validate_number(numString):
    """
    Will make sure that the returned number is an integer.
    Will strip any non-digits from the input and return the first number.
    """
    if numString.find('ago') > -1:  # If not found, 'time since posted' would replace points for example
        return 0
    digits = [int(s) for s in numString.split() if s.isdigit()]
    if len(digits) > 0:
        return digits[0]
    return 0

def get_response(url):
    """
    Attempts to get the content at 'url' by making an HTTP GET request.
    If the content-type of response is some kind of HTML/XML, return the
    text content, otherwise return None.
    """
    try:
        with closing(get(url, stream=True)) as resp:
            if is_good_response(resp):
                return resp.content
            else:
                return None
    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None

def is_good_response(resp):
    """
    Returns True if the response seems to be HTML, False otherwise.
    """
    content_type = resp.headers['Content-Type'].lower()
    return (resp.status_code == 200
            and content_type is not None
            and content_type.find('html') > -1)

def log_error(e):
    """
    Log the errors. Currently just printing them out to the user.
    """
    print(e)

def validate_input(arg, arg_max):
    """
    Validate the user input. Makes sure it is less than or equal to 100 posts.
    """
    error_msg = 'Posts cannot exceed {}'.format(arg_max)
    if arg > arg_max:
        raise argparse.ArgumentTypeError(error_msg)

def parse_arguments():
    """
    Parses the argument input from the user. Default is 10.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--posts', '-p', metavar='n', type=int, default=1, help='number of posts (max 100)')
    args = parser.parse_args()
    validate_input(args.posts, MAX_NUM_POSTS)
    return args.posts

def main():
    """
    If user input is valid, will create a scraper, fetch the requested number of posts, and print them to the user.
    """
    try:
        posts = parse_arguments()
        hnews_scraper = HackerNewsScraper(posts)
        hnews_scraper.scrape_stories()
        hnews_scraper.print_stories()
    except argparse.ArgumentTypeError as ex:
        log_error(ex)

if __name__ == '__main__':
    main()
Try this (and don't forget to import pandas). Each scraped story is already a dictionary, and self._stories collects them, so you can hand that list of dictionaries straight to the DataFrame constructor:

story = {
    'title': TITLE,
    'uri': LINK,
    'author': AUTHOR,
    'points': POINTS,
    'comments': COMMENTS,
    'rank': RANK
}

# self._stories is a list of dictionaries shaped like the one above;
# passing it to pd.DataFrame gives one row per story, one column per key
dt = pd.DataFrame(self._stories, columns=['title', 'uri', 'author', 'points', 'comments', 'rank'])
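For completeness, a sketch of how to get from the scraper to a DataFrame, using the question's own get_stories accessor (the fixed post count and the CSV filename here are my additions):

import pandas as pd

posts = 10  # assumed fixed here instead of going through parse_arguments()
hnews_scraper = HackerNewsScraper(posts)
hnews_scraper.scrape_stories()

# get_stories() returns the list of story dictionaries collected above
df = pd.DataFrame(hnews_scraper.get_stories(),
                  columns=['title', 'uri', 'author', 'points', 'comments', 'rank'])
print(df.head())
df.to_csv('hn_stories.csv', index=False)  # optional: persist the result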

Uploading files in Google App Engine. How to get the file to the upload Handler Class

I have a form in Google App Engine where I want to upload an image and all my text at the same time. Do I have to separate this into two separate pages and actions?
Here is my upload handler:
class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
    def upload(self, reseller_id, imgfile):
        upload_files = imgfile
        blob_info = upload_files[0]
        key = blob_info.key()
        r = Reseller.get_by_id(reseller_id)
        r.blob_key_logo = str(key)
        r.put()
Here is where I create a new reseller object:
class NewReseller(BaseHandler):
    def get(self):
        if self.user:
            self.render("new_reseller.html")
        else:
            self.redirect("/display_resellers")

    def post(self):
        name = self.request.get('name')
        website = self.request.get('website')
        information = self.request.get('information')
        address = self.request.get('address')
        city = self.request.get('city')
        state = self.request.get('state')
        zipcode = self.request.get('zipcode')
        email = self.request.get('email')
        phone = self.request.get('phone')
        r = Reseller(name=name,
                     website=website,
                     information=information,
                     address=address,
                     city=city,
                     state=state,
                     zipcode=zipcode,
                     email=email,
                     phone=phone)
        r.put()
        theresellerid = r.key().id()
        # And then upload the image
        u = UploadHandler()
        logo_img = u.get_uploads('logo_img')
        u.upload(theresellerid, logo_img)
        self.redirect('/display_resellers')
I think my problem here is this line:
logo_img = u.get_uploads('logo_img')
It pops out the error message:
for key, value in self.request.params.items():
AttributeError: 'NoneType' object has no attribute 'params'
Somehow I need this NewReseller class to inherit get_uploads from BlobstoreUploadHandler so I can do:
logo_img = self.get_uploads('logo_img')
Or there is probably a better way, because this seems a little messy.
So my question is how to upload files and data in one form on just one page. I could do it with two separate pages, one for adding the reseller and one for adding the image, but that seems overcomplicated.
I tried to follow some steps and clues from this question:
Upload files in Google App Engine
Edit: working implementation below:
class EditReseller(BaseHandler, blobstore_handlers.BlobstoreUploadHandler):
    def get(self, reseller_id):
        if self.user:
            reseller = Reseller.get_by_id(int(reseller_id))
            upload_url = blobstore.create_upload_url('/upload')
            image = True
            if reseller.blob_key_logo is None:
                image = False
            self.render('edit_reseller.html', r=reseller, reseller_id=reseller_id, upload_url=upload_url, image=image)
        else:
            self.redirect('/admin')

class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
    def post(self):
        reseller_id = self.request.get('reseller_id')
        upload_files = self.get_uploads('logo_img')
        if upload_files:
            blob_info = upload_files[0]
            key = blob_info.key()
            r = Reseller.get_by_id(int(reseller_id))
            r.blob_key_logo = str(key)
            r.put()
        name = self.request.get('name')
        website = self.request.get('website')
        information = self.request.get('information')
        address = self.request.get('address')
        city = self.request.get('city')
        state = self.request.get('state')
        zipcode = self.request.get('zipcode')
        email = self.request.get('email')
        phone = self.request.get('phone')
        if name and website and information and email and phone and address and city and state and zipcode:
            r = Reseller.get_by_id(int(reseller_id))
            r.name = name
            r.website = website
            r.information = information
            r.address = address
            r.city = city
            r.state = state
            r.zipcode = zipcode
            r.email = email
            r.phone = phone
            r.put()
        else:
            error = "Looks like you're missing some critical info"
            self.render("edit_reseller.html", name=name, website=website, information=information, address=address, city=city, zipcode=zipcode, email=email, phone=phone, error=error)
        self.redirect("/edit_reseller/" + reseller_id)
You just need to put the logic of UploadHandler inside the Reseller handler and make it inherit from blobstore_handlers.BlobstoreUploadHandler.
The call to get_uploads fails because the NewReseller class does not inherit from BlobstoreUploadHandler. The BlobstoreUploadHandler class takes over the upload operation, so you do not need to create a separate post method; just take the corresponding logic from post (name = self.request.get('name'), r = Reseller(), r.put(), etc.) and add it to the upload method.
You should not call or create a new handler instance by hand (unless you know what you are doing), as it would be missing the things that make it work.
The complete app sample in the official docs might also be helpful.
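Put together, the shape of the fix for the original NewReseller handler is a single class that inherits from both bases, so the form fields and the uploaded file arrive in the same post (a skeleton sketch only; field handling abbreviated to one field):

class NewReseller(BaseHandler, blobstore_handlers.BlobstoreUploadHandler):
    def post(self):
        # self.get_uploads works here because the handler inherits
        # from BlobstoreUploadHandler
        upload_files = self.get_uploads('logo_img')
        r = Reseller(name=self.request.get('name'))  # ... plus the other fields
        if upload_files:
            r.blob_key_logo = str(upload_files[0].key())
        r.put()
        self.redirect('/display_resellers')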
