Is there a way to store these inputs in a dictionary/JSON file so that it is easy to analyse once I import it into pandas? And if this code could be written in a simpler way (for example, with a loop), that would also be great.
# Append one log entry to the student's own profile file.
# (Fixed: the original opened all three files up front in append mode and
# never closed any of them; a dict lookup plus `with` opens exactly the one
# file that is written and guarantees it is closed.)

# User input to record the log.
name = input("Name:")
date = input('Enter a date in YYYY-MM-DD format:')
hours = input("Hours:")
rate = input("Rate:")
topic = input('Topic:')

# Each known student has her own log file; any other name falls through to
# Wendy's file, mirroring the original if/elif/else chain.
log_files = {'Jessica': 'jessica.txt', 'Tatiana': 'tatiana.txt'}
with open(log_files.get(name, 'wendy.txt'), 'a') as log:
    log.write("Date:" + date + '\n')
    log.write("Hours:" + hours + '\n')
    log.write("Rate:" + rate + '\n')
Here is an example:
import json

def get_inputs():
    """Prompt for one log entry; return (name, entry dict)."""
    name = input("Name:")
    entry = {}
    entry['date'] = input('Enter a date in YYYY-MM-DD format:')
    entry['hours'] = input("Hours:")
    return (name, entry)

out = {}
while True:
    # Fix: the original bound this answer to `exit`, shadowing the builtin;
    # the else-after-break was also unnecessary.
    answer = input('Do you want to add another input (y/n)? ')
    if answer.lower() == 'n':
        break
    name, entry = get_inputs()
    out[name] = entry

# One JSON object keyed by student name -- pandas reads it with pd.read_json.
with open('names.json', 'w') as f:
    json.dump(out, f, indent=2)
And then:
import pandas as pd
print(pd.read_json('names.json'))
And you have:
Jessica
date 2014-12-01
hours 12
Related
I have a script I'm writing to make pulling data from my fantasy football league easy and exported in a format that can be played with in Excel easily.
The script I have attached only contains the parts relevant to this question, as the larger script I have written has a lot of moving parts that don't apply here.
I'm essentially pulling this players.get_all_players() data from the Sleeper platform using the Sleeper-API-Wrapper (Github link here).
My script will take player data and put it into a .csv like this, with the player ID in the top row and all the info in a single cell below the ID. Screenshot of this below.
Excel .csv screenshot
How can I export this so that the data is nicely formatted into separate rows? I have a different spreadsheet where I'd like to be able to pull this data to automatically.
Alternatively, if I'm doing this in a really roundabout way, please let me know! This is the JSON response from the platform: JSON Response
# 9 All players - players.get_all_players()
# NOTE(review): indentation was lost when this snippet was pasted, so the
# block structure below is ambiguous; statements are kept verbatim.  fg/bg/attr
# (presumably a terminal-colour package), os, csv, players, league, task,
# week, year and parent_dir are all defined elsewhere in the larger script --
# confirm against the full source.
# Confirmation loop: `warning` starts as the int 1 and is then reassigned to
# the user's string answer, so `while warning == 1` only repeats via the
# explicit `warning = 1` reset in the invalid-input branch.
warning = 1
while warning == 1:
print("%s%s\n\n\nWARNING:%s" % (fg(15), bg(9), attr(0)))
print("%s%sthe 'all players' option is intensive and may freeze your PC for several minutes.%s" % (fg(15), bg(0), attr(1)))
warning = input("continue anyway? (y/n)\n")
if warning == "n":
# "n": clear the menu/loop flags so the export menu below is skipped.
pe_loop = 0
action = 0
elif warning == "y":
# "y": download the full player dict from the Sleeper API (slow call).
name = "all players"; file = name
output = players.get_all_players()
break
else:
print("Not a valid option, try again.")
warning = 1
overwrite = 0
name_change = 0
# Action menu: 1 = print, 2 = export, 3 = back to tasks, 4 = end.
while action == 0:
try:
action = int(input("%s%s\n1 - print\n2 - export to Excel\n3 - back to tasks\n4 - end\n--------------------\n%s" % (fg(14), bg(0), attr(1))))
except ValueError:
print("Not a valid option, try again.")
## Print
if action == 1 and week != 18:
print(output)
break
elif action == 1 and week == 18:
# week == 18 appears to mean "all weeks": print matchups/transactions
# week by week -- TODO confirm against the larger script.
week = 0
while week < 18:
week += 1
if task == 3:
output = league.get_matchups(week)
elif task == 4:
output = league.get_transactions(week)
print(output)
## Export
elif action == 2:
path = os.path.join(parent_dir, file)
name_change = input("\nDo you want to name the file? (y/n)\n")
if name_change == "y":
name = input("\nEnter file name now:\n")
if name_change == "n":
# NOTE(review): an .xlsx path is built here but the actual write at the
# bottom produces a .csv -- confirm which extension is intended.
file_path = path + "\\" + name + '_' + str(year) + ".xlsx"
if os.path.isfile(file_path) == True:
overwrite = input("\nFile name... '" + name + "' already exists! Would you like to overwrite this file? (y/n)\n")
if overwrite == "n":
# Keep appending _1, _2, ... until an unused file name is found.
count = 0
while os.path.isfile(file_path) == True:
count += 1
new_name = name + "_" + str(count)
file_path = path + "\\" + new_name + ".xlsx"
else:
# NOTE(review): if this `else` pairs with `if overwrite == "n"` above,
# `new_name` is used here without ever being assigned on the overwrite
# path -- likely a latent NameError; confirm the intended nesting.
name = new_name
print("\nThe new file was automatically named: " + new_name + "_wk" + str(week) + "\nand placed in: " + path)
# NOTE(review): `overwrite` is compared to the int 0 here but holds the
# user's string answer after the prompt above -- mixed types look fragile.
if os.path.isdir(path) == False and overwrite == 0:
os.mkdir(path)
print("\nCreating new file path... " + file + "\n")
elif os.path.isdir(path) == True and overwrite == 0:
print("\nFile path... '" + file + "' exists!\n")
toCSV = output
# 9 All Players CSV file
# NOTE(review): DictWriter is given the player ids as fieldnames and the whole
# players dict as ONE row -- this is exactly why every player lands in a single
# cell; writing per-player rows (e.g. pandas DataFrame.from_dict(...,
# orient="index").to_csv) is the fix suggested in the answers below.
with open(parent_dir + file + "\\" + name + ".csv", 'w', encoding='utf8', newline='') as output_file:
fc = csv.DictWriter(output_file, output.keys())
fc.writeheader()
fc.writerow(toCSV)
It turns out that sleeper_wrapper exposes a method players.get_players_df that gives you a pandas DataFrame containing all players.
Write that to a csv file using to_csv as suggested in the comments.
Strip down your code to receive better answers faster :)
This is the code that your question needs:
from sleeper_wrapper import Players
import csv

# Fetch the full player mapping (player id -> info dict) from Sleeper.
players = Players()
toCSV = players.get_all_players()
# NOTE(review): parent_dir, file and name are assumed to come from the asker's
# surrounding script (export directory, folder and base file name) -- confirm.
with open(parent_dir + file + "\\" + name + ".csv", 'w', encoding='utf8', newline='') as output_file:
    # Bug fix: the fieldnames must come from the dict actually being written
    # (toCSV); `output` was not defined anywhere in this snippet.
    fc = csv.DictWriter(output_file, toCSV.keys())
    fc.writeheader()
    fc.writerow(toCSV)
This is how you write the csv using pandas:
import pandas as pd
from sleeper_wrapper import Players

# Pull the complete player mapping (player id -> info dict) from Sleeper.
players = Players()
player_map = players.get_all_players()
# stolen from https://github.com/NotTheCrocHunter/sleeper-api-wrapper/blob/91d8cf1b64cf55884b4c4746d53ccd1259d11c1f/sleeper_wrapper/players.py#L41
# because that method is unavailable in the version of sleeper_wrapper in PyPI
# orient="index" makes one DataFrame row per player id, one column per field.
players_frame = pd.DataFrame.from_dict(player_map, orient="index")
# The frame contains some team records as well -- filter those out if needed.
players_frame.to_csv("your_output_file.csv")
Maybe this question was asked before, but since I could not find a proper answer, I dare to ask a similar one. My problem is that I have been trying to scrape a Turkish car-sale web site named 'Sahibinden'. I use the Jupyter Notebook and Sublime editors. Once I try to get the data written to a CSV file, the Turkish letters change into different characters. I tried UTF-8 encoding ('# -*- coding: utf-8 -*-'), ISO 8859-9, etc., but I could not solve the problem. The other issue is that the Sublime editor does not create the CSV file, although I did not have any problem in the Jupyter Notebook. You will find the CSV file output in the image link. If someone can reply, I would appreciate it.
Note: the program works and no problem once I run print command on the editors.
Thanks a lot.
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
import unicodedata
import codecs

# CSV column order (one comma-separated row per listing).
HEADER = ('brand, model, year, oil_type, gear, odometer, body, hp, '
          'eng_dim, color, warranty, condition, price, safe, '
          'in_fea, outs_fea, mul_fea, pai_fea, rep_fea, acklm \n')

# Single-valued spec fields, in column order; the <li> indices are taken from
# the original hand-written xpaths (3, 5, 6, ... 19).
DETAIL_XPATHS = [
    '//*[@id="classifiedDetail"]/div[1]/div[2]/div[2]/ul/li[%d]/span' % i
    for i in (3, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 19)
]
PRICE_XPATH = '//*[@id="classifiedDetail"]/div[1]/div[2]/div[2]/h3'
# Multi-valued feature lists (safety, interior, exterior, multimedia, paint,
# replaced parts): only the <li class="selected"> items count.
FEATURE_XPATHS = [
    "//div[@id='classifiedProperties']/ul[1]/li[@class='selected']",
    "//div[@id='classifiedProperties']/ul[2]/li[@class='selected']",
    "//div[@id='classifiedProperties']/ul[3]/li[@class='selected']",
    "//div[@id='classifiedProperties']/ul[4]/li[@class='selected']",
    # NOTE: the trailing space in 'custom-area ' is deliberate -- it matches
    # the site's markup as used in the original code.
    "//div[@class='classified-pair custom-area ']/ul[1]/li[@class='selected']",
    "//div[@class='classified-pair custom-area']/ul[2]/li[@class='selected']",
]
ACKLM_XPATH = ("//div[@id='classified-detail']/div[@class='uiBox'][1]"
               "/div[@id='classifiedDescription']")

# Write the header once.  Opening with codecs and "utf-8-sig" fixes the
# garbled Turkish characters: the file is genuine UTF-8 and the BOM lets
# Excel detect that, so no per-value .encode("utf-8") is needed below.
# (The original header string was also split across lines, which was a
# syntax error.)
with codecs.open('result1.csv', 'w', 'utf-8-sig') as f:
    f.write(HEADER)

chrome_path = r"C:\Users\Mike\Desktop\chromedriver.exe"
driver = webdriver.Chrome(chrome_path)

def final_page(fn_20):
    """Visit every listing URL in fn_20, print each field and append one
    comma-separated row per listing to result1.csv."""
    for lur in fn_20:
        driver.get(lur)
        values = []
        # Single-valued fields (brand ... condition).
        for xpath in DETAIL_XPATHS:
            text = driver.find_element_by_xpath(xpath).text
            print(text)
            values.append(text)
        # Price heading.
        price = driver.find_element_by_xpath(PRICE_XPATH).text
        print(price)
        values.append(price)
        # Multi-valued feature lists, joined as "item, item, ".
        # Bug fix: the original replaced-parts loop concatenated the
        # WebElement itself (`rep`) instead of its .text.
        for xpath in FEATURE_XPATHS:
            items = driver.find_elements_by_xpath(xpath)
            joined = ''.join(item.text + ', ' for item in items)
            print(joined)
            values.append(joined)
        # Free-text seller description.
        acklm = driver.find_element_by_xpath(ACKLM_XPATH).text
        print(acklm)
        values.append(acklm)
        try:
            # Append in plain "utf-8" so the BOM written with the header is
            # not repeated on every row.
            with codecs.open('result1.csv', 'a', 'utf-8') as f:
                f.write(','.join(values) + '\n')
        except Exception as e:
            print(e)

# Bug fix: the original said `driver.close` without parentheses, which never
# actually called the method.
driver.close()
import codecs

# Write a UTF-8 file, emitting the byte-order mark ourselves so editors and
# Excel can detect the encoding.  The context manager closes the file for us.
with codecs.open("utf_test", "w", "utf-8") as out_file:
    out_file.write(u'\ufeff')  # explicit BOM
    out_file.write("test with utf-8")
    out_file.write("字符")
or this also works for me
# "utf-8-sig" writes the BOM automatically, so no manual '\ufeff' is needed.
temp = codecs.open("utf_test", "w", "utf-8-sig")
try:
    temp.write("this is a utf-test\n")
    temp.write(u"test")
finally:
    # Same guarantee the original `with` block gave: always close the file.
    temp.close()
I have a python program that basically parses through some CSVs and prints out a line and then stops until the user hits enter. Here is the full code:
#!/usr/bin/python
# NOTE(review): Python 2 code (xrange, `<>`, print statements, raw_input,
# csv files opened in 'rb') whose indentation was lost in the paste; the
# statements are kept verbatim with review comments added.
import os
import csv
import sys
from datetime import datetime
# Pre-sized list of per-team records; each entry becomes
# [eid, date, team, pc] from columns 16/3/0/4 of t26.csv.
teams = [[] for x in xrange(0, 400)]
counter = 0
with open('t26.csv', 'rb') as f:
next(f)
reader = csv.reader(f)
for row in reader:
if row:
# Skip rows with a blank team cell and the "TEAM AVERAGES:" summary rows.
if row[1] <> "" and row[1] <> "TEAM AVERAGES:":
teams[counter].append(row[16])
teams[counter].append(row[3])
teams[counter].append(row[0])
teams[counter].append(row[4])
counter += 1
# NOTE(review): range(0, counter - 1) never visits the last collected team --
# looks like an off-by-one; confirm whether the final row is meant to be skipped.
for i in range(0, counter - 1):
diff = False
lastTeam = ""
firstDate = ""
eid = teams[i][0]
date = teams[i][1]
team = teams[i][2]
pc = teams[i][3]
# Scan every upload CSV for this eid, remembering the first file that
# mentions it and whether the team assignment ever changes across files.
for csvfile in os.listdir('Uploads'):
with open('Uploads/' + csvfile, 'rb') as f:
reader = csv.reader(f)
team_index = 0
eid_am_index = 0
eid_pm_index = 0
find = False
for row in reader:
# Re-detect the column positions on every row; only the header row
# actually matches these names.
index = 0
for column_name in row:
if "team" == column_name:
team_index = index
if "eid_am" in column_name:
eid_am_index = index
if "eid_pm" in column_name:
eid_pm_index = index
index += 1
if eid in row:
#print row[team_index] + ', ' + row[eid_am_index] + ', ' + row[eid_pm_index] + ', ' + ' ----> ' + csvfile
if row[team_index] <> lastTeam and lastTeam <> "":
diff = True
lastTeam = row[team_index]
if firstDate == "":
firstDate = csvfile
break
if diff:
print "\n*diff"
else: #teams are the same
# Strip the 5-char prefix and any "(...)" suffix from the team name
# before comparing/printing.
team = team[5:]
if "(" in team:
team = team[:team.index('(') - 1]
try:
lastTeam = lastTeam[:lastTeam.index(' ')]
except:
# NOTE(review): bare except that silently discards the error (no
# space found in lastTeam); `g = 0` is a do-nothing placeholder.
g = 0
print "\n*no diff: " + eid + " --> " + firstDate + " | " + date + "\tTeam: " + team + " | " + lastTeam + "\tPC: " + pc
if team <> lastTeam and lastTeam <> "":
print "*(!) teams not equal"
# Pause until the user hits enter.  Rebinding `f` here also shadows the
# file-handle name used above.
f = raw_input('') #read user input and do nothing with it
I run this program on Bash on Ubuntu on Windows, and sometimes the symbols "^#" will pop up randomly on the terminal, and then when I click enter I get an error.
Here's an example of what the terminal looks like (with some #comments to explain):
*no diff: 4903 --> 6-27-2017 3_44_01 PM.csv | 8/1/2017 1:56:39 PM Team: 180-A | 180-A PC: AGENT3-102 #this line is printed out by the python program
^# #this randomly show up
Traceback (most recent call last): #when i hit enter i get this error
File "parse.py", line 127, in <module>
f = raw_input('')
EOFError
Here's a screenshot as well:
I've been working with python for the past few days, and started working on a project. Currently trying to figure out how to execute the same function using multiple variables (In this case, Stock symbols). Preferably with one input() separated with a comma or something. I've hit a snag with this last part though. Can anyone point me in the direction of where to go next? (Running the same function with multiple variables at the same time.)
Here is my code:
#Google + Yahoo Finance Stock Lookup
# NOTE(review): indentation was lost in this paste; statements kept verbatim.
# googlefinance and yahoo_finance are third-party packages; everything below
# runs at import time (interactive script).
from googlefinance import getQuotes
from yahoo_finance import Share
import googlefinance
import datetime, time
import os
from datetime import datetime
# Today's date, used as the name of the per-day output folder.
tDate = datetime.now().strftime('%Y-%m-%d')
print (tDate)
tDateConv = str(tDate)
try:
os.chdir('/Users/Jakes_Macbook/Desktop/Python/Stocks')
except Exception:
print('Default Path does not exsist, make sure your directory is right.')
pass
# Confirm (or change) the working directory, then create and enter today's
# folder before any stock file is written.
run = True
while run == True:
print('You are currently storing the file in ')
print(os.getcwd())
print('type "yes" to continue')
confirm = input()
if confirm == 'yes':
print ('ok\n')
try:
os.makedirs(tDateConv)
except Exception:
# Today's folder already exists -- that's fine, just enter it.
pass
os.chdir(tDateConv)
print('File will be saved to:')
print(os.getcwd())
break
else:
print('Where do you want to store the file?')
changeDir = input()
os.chdir(changeDir)
print('What Stock or Stocks would you like to look up?')
# NOTE(review): a single symbol only -- runStocks() reads this module-level
# variable instead of taking a parameter, which is exactly why it cannot be
# reused for several symbols (see the answer below that parameterises it).
stockSymbol = input()
def runStocks():
# Look up the symbol, log the open/stored price, then poll the live price
# five times (5 s apart), printing "buy" when it dips below 99.5% of the
# stored price.  Everything is appended to <symbol>.txt.
print (" ")
print ("Stock Symbol: " + stockSymbol)
stockSymbolYhoo = Share(stockSymbol)
stockFile = open(str(stockSymbol)+'.txt', 'a')
dicStored = googlefinance.getQuotes(stockSymbol)[0]
numStoredPrice = float(dicStored['LastTradePrice'])
print('Stock Open: ' + stockSymbolYhoo.get_open())
print ("Stored Price: " + str(numStoredPrice))
stockFile.write(str("\n" + "Stock Symbol: " + stockSymbol + "\n"))
stockFile.write(str("\n" + "Open Price: " + stockSymbolYhoo.get_open() + "\n"))
stockFile.write(str("Stored Price: " + str(numStoredPrice)+'\n'))
runs = 0
while runs < 5:
stor = googlefinance.getQuotes(stockSymbol)[0]
price = stor['LastTradePrice']
print(str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + " | " + price)
stockFile.write(str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + " | Price " + price + ' \n')
numPrice = float(price)
# Buy signal: current price more than 0.5% below the stored price.
if numPrice < numStoredPrice*float(.995):
print ("buy")
time.sleep(5)
runs = runs + 1
# NOTE(review): only closed on the happy path; a `with` block would be safer.
stockFile.close()
runStocks()
My goal is to have each stock symbol, that is inputted, create its own file in the folder for today. I'm pretty sure i can figure out how to do that once i get multiple functions going. Thanks in advance.
Also, let me know if you have any important suggestions or best practices. This is like my second day working with python. Thanks Again.
Just pass them into the function:
# Note the definition was updated to be able to pass in a stockSymbol
def runStocks(stockSymbol):
print (" ")
print ("Stock Symbol: " + stockSymbol)
stockSymbolYhoo = Share(stockSymbol)
stockFile = open(str(stockSymbol)+'.txt', 'a')
dicStored = googlefinance.getQuotes(stockSymbol)[0]
numStoredPrice = float(dicStored['LastTradePrice'])
print('Stock Open: ' + stockSymbolYhoo.get_open())
print ("Stored Price: " + str(numStoredPrice))
stockFile.write(str("\n" + "Stock Symbol: " + stockSymbol + "\n"))
stockFile.write(str("\n" + "Open Price: " + stockSymbolYhoo.get_open() + "\n"))
stockFile.write(str("Stored Price: " + str(numStoredPrice)+'\n'))
stockSymbols = input("Enter stock symbols separated by commas").split(",")
for stockSymbol in stockSymbols:
runStocks(stockSymbol) # Call your function in a loop
I am new to Python and I am trying to run this code, which I found on GitHub, but it does not work — is something wrong with the code, or is it my fault? I always get the
"no data found"
message.
skyscanner.py :
#!/usr/bin/python
"""The script obtains prices and flight information for a given
input (departure, arrival airports and date), outputs this
data to the console and writes it to a csv file."""
__author__ = "Ingvaras Merkys"
# NOTE(review): Python 2 code (urllib2, print statements below).  The
# endpoints listed here are skyscanner.net's internal data services, not a
# supported public API, so they can change or stop responding at any time --
# see the answer at the bottom of this page.
import json
import urllib2
import re
import sys
import time
# Global vars:
AUTOSUGGEST_URL = "http://www.skyscanner.net/dataservices/geo/v1.0/autosuggest/uk/en/"
# e. g. http://www.skyscanner.net/dataservices/geo/v1.0/autosuggest/uk/en/edinb
SKYSCANNER_URL = "http://www.skyscanner.net/flights/"
# e. g. http://www.skyscanner.net/flights/vno/edi/130419
ROUTEDATA_URL = "http://www.skyscanner.net/dataservices/routedate/v2.0/"
# e. g. http://www.skyscanner.net/dataservices/routedate/v2.0/a00765d2-7a39-404b-86c0-e8d79cc5f7e3
SUGGESTIONS_URL = "http://www.skyscanner.net/db.ashx?ucy=UK&lid=en&ccy=GBP"
# e. g. http://www.skyscanner.net/db.ashx?ucy=UK&lid=en&ccy=GBP&fp=KAUN&tp=EDIN&dd=20130410
def main(argv):
# Fetch and print the cheapest price for (departure, arrival, date) given in
# argv; returns the cheapest price, or 0 when no flights are found.
# (Indentation was lost in this paste; statements kept verbatim.)
input_from = argv[0].replace(" ", "%20").replace("\"", "")
input_to = argv[1].replace(" ", "%20").replace("\"", "")
date = argv[2].replace("/", "")
place_id_from, place_id_to, name_from, name_to = get_codes(input_from, input_to)
# testjuly = map (lambda x: len(x) == 1 and '13070'+x or '1307'+x, [ str(i+1) for i in range(31) ])
# for date in testjuly:
session_key = get_session_key(place_id_from, place_id_to, date)
# Retry up to 3 times: the route-data endpoint sometimes returns an empty
# body, which makes json.loads raise ValueError.
for attempt in range(3):
# if script is run repeatedly sometimes an empty html is returned
try:
response = urllib2.urlopen(ROUTEDATA_URL + session_key)
html = response.read()
data = json.loads(html)
except ValueError:
# NOTE(review): error.log is opened but never closed here.
f = open("error.log", "a")
f.write(ROUTEDATA_URL + session_key + "\n")
f.write("Returned:\n" + html + "\n")
time.sleep(1)
else:
break
else:
# for/else: all three attempts failed to produce valid JSON.
sys.exit(1)
query = data['Query']
if data['Stats']['OutboundLegStats']['TotalCount'] == 0:
print "No flights found from", name_from, "to", name_to
return 0
#show_suggestions(query['OriginPlace'], query['DestinationPlace'], date)
#sys.exit(2)
stations = data['Stations']
quotes = data['Quotes']
carriers = data['Carriers']
cheapest_price = data['Stats']['ItineraryStats']['Total']['CheapestPrice']
print "Results for flight from", name_from, "to", name_to
print "Outbound date:", re.split('T', query['OutboundDate'])[0]
# NOTE(review): the query currency is GBP (see SUGGESTIONS_URL and the
# commented-out "GBP" below) but the label here says "RMB" -- confirm.
print "Cheapest Journey:", cheapest_price, "RMB"
return cheapest_price
# f = open(place_id_from + '-' + place_id_to + '-' + date + '.csv','w')
# for leg in data['OutboundItineraryLegs']:
# leg_price = get_leg_price(leg['PricingOptions'], quotes)
# depart_time = leg['DepartureDateTime'].replace("T", " ")
# arrive_time = leg['ArrivalDateTime'].replace("T", " ")
# duration = leg['Duration']
# carrier_names = get_carrier_names(leg['MarketingCarrierIds'], carriers)[1]
# print "\n\tPrice:", leg_price, "GBP"
# print "\tDeparting:", depart_time
# print "\tArriving:", arrive_time
# print "\tDuration:", duration/60, "h", duration%60, "min"
# print "\tCarriers:", carrier_names
# print "\t# of stops: ", leg['StopsCount']
# stop_ids = leg.get('StopIds', [])
# stop_ids_string = ", ".join([ get_station_name(stop_id, stations) for stop_id in stop_ids ])
# print "\t\t", stop_ids_string
# row = str(leg_price) + "\t" + depart_time + "\t" + arrive_time + "\t" + str(duration) + "\t" + carrier_names + "\t" + stop_ids_string
# f.write(row + "\n")
# Functions
def get_codes(input_from, input_to):
"""Returns place id codes and names, e. g. ("EDI", "KUN", "Edinburgh", "Kaunas")"""
# Resolve both free-text airport names through the autosuggest endpoint and
# take the first concrete result; exits with status 3 when nothing matches.
try:
i = 0
autosuggest_json_from = json.load(urllib2.urlopen(AUTOSUGGEST_URL + input_from))
if len(autosuggest_json_from[0]['PlaceId']) == 4:
# for cases where the first result is abstract (e. g. Glasgow (Any))
i = 1
place_id_from = autosuggest_json_from[i]['PlaceId']
name_from = autosuggest_json_from[i]['PlaceName']
j = 0
autosuggest_json_to = json.load(urllib2.urlopen(AUTOSUGGEST_URL + input_to))
if len(autosuggest_json_to[0]['PlaceId']) == 4:
# Same "abstract place" skip (4-char PlaceId) for the destination.
j = 1
place_id_to = autosuggest_json_to[j]['PlaceId']
name_to = autosuggest_json_to[j]['PlaceName']
except IndexError:
print "No code found for:"
print input_from, "AND/OR", input_to
sys.exit(3)
return (place_id_from, place_id_to, name_from, name_to)
def get_session_key(place_id_from, place_id_to, date):
"""Returns a session key for a given query, on failure exits
NB. distant or past dates cause failures"""
# Load the public flights page and scrape the "SessionKey" value out of the
# embedded page data; the route-data endpoint requires this key.
response = urllib2.urlopen(SKYSCANNER_URL + place_id_from + "/" + place_id_to + "/" + date)
html = response.read()
regex = ur'"SessionKey":"(.+?)"'
# e. g. "SessionKey":"a00765d2-7a39-404b-86c0-e8d79cc5f7e3"
try:
session_key = re.findall(regex, html)[0]
except IndexError:
# No key in the page -- this is the "no data found" symptom the asker
# reports; typically an unsupported/expired date or a changed endpoint.
print "No data found for this date"
sys.exit(4)
return session_key
def show_suggestions(from_id, to_id, date):
"""Prints alternative departure airports"""
suggest_places_string = ""
suggestions_json = json.load(urllib2.urlopen(SUGGESTIONS_URL + "&fp=" + from_id + "&tp=" + to_id + "&dd=20" + date))
try:
suggest_places = suggestions_json['rs']
for place in suggest_places:
# Skip the origin itself; collect the names of nearby alternatives.
if place['fpid'] != from_id:
suggest_places_string += place['fan'] + ", "
if suggest_places_string[:-2] != "":
# Drop the trailing ", " before printing.
print "Try airports: ", suggest_places_string[:-2]
except (KeyError, IndexError):
print "Suggestions unavailable"
def get_station_name(station_id, stations):
    """Return the display name of the station with *station_id*,
    e. g. "London Heathrow"; "" when the id is unknown."""
    # First station whose Id matches wins, as in the original loop.
    match = next((s for s in stations if s['Id'] == station_id), None)
    return match['Name'] if match is not None else ""
def get_leg_price(pricing, quotes):
    """Return the lowest total price among the leg's pricing options."""
    # min() over a generator instead of building an intermediate list.
    return min(get_quote_price(option['QuoteIds'], quotes) for option in pricing)
def get_quote_price(quote_ids, quotes):
    """Find quotes by quote id and return the sum of their prices."""
    total = 0
    # Every quote matching a requested id contributes; duplicate ids count
    # multiple times, exactly as in the original nested loops.
    for wanted in quote_ids:
        total += sum(q['Price'] for q in quotes if q['Id'] == wanted)
    return total
def get_carrier_names(carrier_ids, carriers):
    """Return a tuple (list, string) with carrier names,
    e.g. (["airBaltic", "KLM"], "airBaltic, KLM")."""
    # join() gives the same result as the original concatenate-then-[:-2].
    names = [get_carrier_name(cid, carriers) for cid in carrier_ids]
    return (names, ", ".join(names))
def get_carrier_name(carrier_id, carriers):
    """Return the carrier's name for *carrier_id*; "" when not found."""
    # First match wins, mirroring the original early-return loop.
    return next((c['Name'] for c in carriers if c['Id'] == carrier_id), "")
if __name__ == "__main__":
# Expect exactly three arguments (departure, arrival, date yy/mm/dd);
# otherwise print usage and exit.
if len(sys.argv) == 4:
main(sys.argv[1:])
else:
print "Enter arguments in this way:\n"
print "python skyscanner.py {departure airport} {arrival airport} {departure date (yy/mm/dd)}\n\n"
print "e. g. python skyscanner.py \"glasgow prestwick\" kaunas 13/07/21\n"
sys.exit()
These endpoints are not supported as external APIs, they are used by the site itself. They can/do change without notice and some require a level of "state" to operate.
However, we do have an API that would allow you access to the same auto-suggest / flight data that the site is driven from. More details can be found at http://business.skyscanner.net