I am trying to get about 10 stock attributes from yahooquery. When some data is not available (e.g. when the company is not making a profit, there is no PE ratio) it raises KeyError. I want to return zero in that case. Is there any way to simplify my code so that I don't have to put a try/except block around every attribute?
def data(ticker):
    """Pull ~10 stock attributes for *ticker* from Yahoo Finance (yahooquery).

    Returns a list of 11 values in the same order as before:
    [company_name, stock_price, growth_or_loose, recommendation, market_cap,
     pe, pb, rev_growth, ern_growth, profit_margin, debt2equity].
    Any attribute missing from the API response (e.g. no PE ratio for an
    unprofitable company) is reported as 0 instead of raising KeyError.
    """
    # Build the Ticker once instead of once per attribute (each construction
    # triggers network work in yahooquery).
    t = Ticker(ticker)

    def safe(mapping_name, key):
        # Fetch t.<mapping_name>[ticker][key], falling back to 0 when the key
        # is absent. TypeError is also caught because yahooquery returns an
        # error *string* instead of a dict for unknown tickers.
        try:
            return getattr(t, mapping_name)[ticker][key]
        except (KeyError, TypeError):
            return 0

    company_name = safe("quote_type", "shortName")
    stock_price = safe("financial_data", "currentPrice")
    try:
        # 90-day price change based on monthly open prices.
        history = t.history(
            interval='1mo',
            start=(datetime.datetime.today() - datetime.timedelta(days=90)),
            end=datetime.datetime.today(),
        )
        opens = history["open"]
        growth_or_loose = (opens.iloc[-1] / opens.iloc[0]) - 1
    except Exception:  # history may come back empty or malformed
        growth_or_loose = 0
    recommendation = safe("financial_data", "recommendationKey")
    market_cap = safe("summary_detail", "marketCap")
    pe = safe("summary_detail", "trailingPE")
    pb = safe("key_stats", "priceToBook")
    rev_growth = safe("financial_data", "revenueGrowth")
    ern_growth = safe("financial_data", "earningsGrowth")
    # BUG FIX: profit_margin was the only attribute NOT guarded against a
    # missing key in the original code.
    profit_margin = safe("financial_data", "profitMargins")
    debt2equity = safe("financial_data", "debtToEquity")
    return [company_name, stock_price, growth_or_loose, recommendation,
            market_cap, pe, pb, rev_growth, ern_growth, profit_margin,
            debt2equity]
In this case you could use the dictionary's get method instead, which returns None (rather than raising KeyError) when the dictionary doesn't contain the key; if a default value is supplied as the second argument, it returns that default instead.
# A small dict used to demonstrate dict.get() versus try/except KeyError.
my_dict = {"hello": "world"}

# Verbose approach: attempt the lookup and fall back on KeyError.
try:
    hello = my_dict["NONEXISTING"]
except KeyError:
    hello = "greetings"

# Concise equivalent: dict.get() returns its second argument ("greetings")
# whenever the key is missing, so no exception is ever raised.
hello = my_dict.get("NONEXISTING", "greetings")
You can also use defaultdict from collections to give a default value to any variable that does not have a value.
First of all convert your dictionary to defaultdict
# Demonstrate collections.defaultdict: a missing key yields the value
# produced by the factory callable instead of raising KeyError.
from collections import defaultdict


def def_value():
    """Factory invoked for keys that are not present."""
    return "Not Present"


d = defaultdict(def_value)
d["a"] = 1
d["b"] = 2

print(d["a"])  # 1
print(d["b"])  # 2
print(d["c"])  # "Not Present" (note: the access also inserts the key)
Output:
1
2
Not Present
from yahooquery import Ticker
import time
# Map each ticker symbol to the Excel cell its price will be written into.
symbols = {
'AAPL': 'B12','BABA': 'B13','MSFT': 'B14',
}
# One async Ticker object for all symbols at once.
tickers = Ticker(list(symbols.keys()), asynchronous=True)
try:
# Poll forever; each pass re-reads the price payload for every symbol.
while True:
prices = tickers.price
for k, v in symbols.items():
try:
a = str(prices[k]['regularMarketPrice'])
print ("currentPrice : "+a)
except Exception as e:print(0)
try:
b = str(prices[k]['marketCap'])
print ("marketCap : "+b)
except Exception as e:print(0)
try:
# NOTE(review): 'payoutRation' looks like a typo for 'payoutRatio' —
# verify the key against the yahooquery price payload.
c = str(prices[k]['payoutRation'])
print ("payoutRation : "+c)
except Exception as e:print(0)
except Exception as e:
print(e)
# Throttle the polling loop between iterations.
time.sleep(2)
except Exception as e:
print(e)
Also you can export this data to excel with:
# Export the fetched prices to Excel via xlwings: column B receives the
# current price, column C the day high, column D the day low. The row
# comes from the cell reference stored in `symbols` (e.g. 'B12').
import xlwings as xw

wb = xw.Book('Filename.xlsx')
sht1 = wb.sheets['Sheet1']
for k, v in symbols.items():
    try:
        sht1.range(v).value = str(prices[k]['regularMarketPrice'])
        high_cell = v.replace("B", "C")          # 'B12' -> 'C12'
        sht1.range(high_cell).value = str(prices[k]['regularMarketDayHigh'])
        low_cell = high_cell.replace("C", "D")   # 'C12' -> 'D12'
        sht1.range(low_cell).value = str(prices[k]['regularMarketDayLow'])
    except Exception as e:
        print(e)
Related
I don't understand why the replace call gets an "unresolved attribute reference 'replace' for class 'int'" warning. And then, when the money reaches more than 1000, I get a ValueError, even though I tried to handle it with an exception handler that removes the comma from the string:
Traceback (most recent call last):
File "C:\Users\DELL\PycharmProjects\Day48_selenium_cookie_clicker\main.py", line 37, in <module>
money = int(cookies_owned.text)
ValueError: invalid literal for int() with base 10: '1,012'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\DELL\PycharmProjects\Day48_selenium_cookie_clicker\main.py", line 50, in <module>
money = int(cookies_owned.text)
ValueError: invalid literal for int() with base 10: '1,012'
This is my code:
from selenium import webdriver
import time
from timeit import default_timer as timer

chrome_driver_path = "C:\Development\chromedriver_win32\chromedriver.exe"
driver = webdriver.Chrome(executable_path=chrome_driver_path)
driver.get("http://orteil.dashnet.org/experiments/cookie/")

timeout = 30  # [seconds]
time_check = time.time() + 5

# Collect the ids of the 8 "buy..." upgrade buttons in the store.
# NOTE(review): '//*[#id]' looks like a paste artifact for '//*[@id]' — verify.
ids = driver.find_elements_by_xpath('//*[#id]')  # find all ids within a webpage
ids_list = [i.get_attribute('id') for i in ids]
items_for_sale_ids = [i for i in ids_list if i[0] == "b" and i[1] == "u" and i[2] == "y"][:8]

# Parse "name - price" pairs out of the store text; prices carry commas.
items_data = [i.text.split("\n") for i in driver.find_elements_by_id("store")][0]
items_for_sale = [items_data[i].split(" - ") for i in range(len(items_data)) if i % 2 == 0]
price = [int(y[1].replace(",", "")) for y in items_for_sale]
items_pricelist = list(zip(items_for_sale_ids, price))
items_pricelist_dict = {i[1]: i[0] for i in items_pricelist}

start = timer()
cookie = driver.find_element_by_id("cookie")
while True:
    cookie.click()
    if time.time() > time_check:
        cookies_owned = driver.find_element_by_id("money")
        # BUG FIX: the counter shows numbers like "1,012"; int() on that
        # raises ValueError. Strip the thousands separator BEFORE converting.
        # (The old except-branch repeated the failing int() call and then
        # called .replace on an int, which raised again.)
        money = int(cookies_owned.text.replace(",", ""))
        affordable_upgrades = {}
        for cost, upgrade_id in items_pricelist_dict.items():
            if money > cost:
                affordable_upgrades[cost] = upgrade_id
        # Guard: max() on an empty dict raises ValueError when nothing
        # is affordable yet.
        if affordable_upgrades:
            max_upgrade = max(affordable_upgrades)
            print(max_upgrade)
            to_purchase_id = affordable_upgrades[max_upgrade]
            to_buy = driver.find_element_by_id(to_purchase_id)
            to_buy.click()
        time_check = time.time() + 5
see below.
(the problem is the , in value. once we remove it - it works)
# Demo: int() rejects a thousands separator, so strip it on retry.
value = '1,012'
try:
    int_value = int(value)          # fails: "1,012" is not a valid literal
except ValueError:
    print('failed - lets try again..')
    cleaned = value.replace(',', '')
    int_value = int(cleaned)        # succeeds once the comma is gone
print(int_value)
output
failed - lets try again..
1012
I got value error, even though I tried to handle it with exception by removing comma from the string :
try:
money = int(cookies_owned.text)
...
except ValueError:
money = int(cookies_owned.text)
...
Yes, you handle the exception, but then you trigger the same error again inside the handler ;)
Correct code is:
try:
money = int(cookies_owned.text)
...
except ValueError:
money = int(cookies_owned.text.replace(",", ""))
...
Or simpler:
try:
money = int(cookies_owned.text.replace(",", ""))
...
except ...
There is no problem with removing the comma up front — you don't need to wait for an exception. replace(",", "") is correct for any number (1, 100, 1,000, 1,000,000, ...).
This won't work: (see comments inserted in the code)
money = int(cookies_owned.text) # money is now an int
formatted_money = money.replace(",", "") # ... but you treat it as a string
In python3 and pandas I use requests to capture information from a public API. That way:
import requests
import pandas as pd
headers = {"Accept" : "application/json"}
#Example link
url = 'http://legis.senado.leg.br/dadosabertos/materia/votacoes/137178'
projetos_vot = []

# BUG FIX: start with an empty dict so a failed request does not leave
# `projects` undefined — the original called r.json() OUTSIDE the try
# block, which raised NameError when requests.get() itself failed.
projects = {}
try:
    r = requests.get(url, headers=headers)
    projects = r.json()  # parse inside the try: `r` only exists on success
except requests.exceptions.HTTPError as errh:
    print("Http Error:", errh)
except requests.exceptions.ConnectionError as errc:
    print("Error Connecting:", errc)
except requests.exceptions.Timeout as errt:
    print("Timeout Error:", errt)
except requests.exceptions.RequestException as err:
    print("OOps: Something Else", err)


def _projects_field(path):
    """Walk the key sequence *path* through the `projects` JSON payload.

    Returns str(value) for the value found, or None when any level of the
    nested structure is missing (KeyError) or not indexable (TypeError) —
    the same fallback each original per-field try/except produced.
    """
    value = projects
    try:
        for key in path:
            value = value[key]
    except (KeyError, TypeError):
        return None
    return str(value)


# All simple fields live under VotacaoMateria/Materia/IdentificacaoMateria.
_IDENT = ('VotacaoMateria', 'Materia', 'IdentificacaoMateria')

dicionario = {
    "CodigoMateria": _projects_field(_IDENT + ('CodigoMateria',)),
    "SiglaCasaIdentificacaoMateria": _projects_field(_IDENT + ('SiglaCasaIdentificacaoMateria',)),
    "NomeCasaIdentificacaoMateria": _projects_field(_IDENT + ('NomeCasaIdentificacaoMateria',)),
    "SiglaSubtipoMateria": _projects_field(_IDENT + ('SiglaSubtipoMateria',)),
    "DescricaoSubtipoMateria": _projects_field(_IDENT + ('DescricaoSubtipoMateria',)),
    "NumeroMateria": _projects_field(_IDENT + ('NumeroMateria',)),
    "AnoMateria": _projects_field(_IDENT + ('AnoMateria',)),
    "DescricaoObjetivoProcesso": _projects_field(_IDENT + ('DescricaoObjetivoProcesso',)),
    "DescricaoIdentificacaoMateria": _projects_field(_IDENT + ('DescricaoIdentificacaoMateria',)),
    "IndicadorTramitando": _projects_field(_IDENT + ('IndicadorTramitando',)),
    # This item (Votacoes) does not have the same number of sub-items on
    # every link, so everything under it is captured as one blob.
    "Votacoes": _projects_field(('VotacaoMateria', 'Materia', 'Votacoes')),
}
projetos_vot.append(dicionario)
df_projetos_vot = pd.DataFrame(projetos_vot)
df_projetos_vot.reset_index()
df_projetos_vot.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1 entries, 0 to 0
Data columns (total 11 columns):
CodigoMateria 1 non-null object
SiglaCasaIdentificacaoMateria 1 non-null object
NomeCasaIdentificacaoMateria 1 non-null object
SiglaSubtipoMateria 1 non-null object
DescricaoSubtipoMateria 1 non-null object
NumeroMateria 1 non-null object
AnoMateria 1 non-null object
DescricaoObjetivoProcesso 1 non-null object
DescricaoIdentificacaoMateria 1 non-null object
IndicadorTramitando 1 non-null object
Votacoes 1 non-null object
dtypes: object(11)
memory usage: 216.0+ bytes
Then item (Votacoes) needs to be parsed. It looks like this:
{'Votacao': [{'CodigoSessaoVotacao': '3768', 'SessaoPlenaria': {'CodigoSessao': '23', 'SiglaCasaSessao': 'SF', 'NomeCasaSessao': 'Senado Federal', 'CodigoSessaoLegislativa': '9', 'SiglaTipoSessao': 'ORD', 'NumeroSessao': '6', 'DataSessao': '1995-02-22', 'HoraInicioSessao': '14:30:00'}, 'Tramitacao': {'IdentificacaoTramitacao': {'CodigoTramitacao': '269445', 'NumeroAutuacao': '1', 'DataTramitacao': '1995-02-22', 'NumeroOrdemTramitacao': '1', 'TextoTramitacao': 'VOTAÇÃO APROVADO O PROJETO. \n ', 'IndicadorRecebimento': 'S', 'OrigemTramitacao': {'Local': {'CodigoLocal': '153', 'TipoLocal': 'A', 'SiglaCasaLocal': 'SF', 'NomeCasaLocal': 'Senado Federal', 'SiglaLocal': 'ATA-PLEN', 'NomeLocal': 'SUBSECRETARIA DE ATA - PLENÁRIO'}}, 'DestinoTramitacao': {'Local': {'CodigoLocal': '143', 'TipoLocal': 'A', 'SiglaCasaLocal': 'SF', 'NomeCasaLocal': 'Senado Federal', 'SiglaLocal': 'MESA', 'NomeLocal': 'MESA DIRETORA'}}}}, 'IndicadorVotacaoSecreta': 'Não', 'DescricaoVotacao': 'Projeto de Decreto Legislativo nº 39 de 1994', 'DescricaoResultado': 'Aprovado', 'Votos': {'VotoParlamentar': [{'IdentificacaoParlamentar': {'CodigoParlamentar': '59', 'NomeParlamentar': 'Marina Silva', 'NomeCompletoParlamentar': 'Maria Osmarina Marina Silva Vaz de Lima', 'SexoParlamentar': 'Feminino', 'FormaTratamento': 'Senadora ', 'UrlFotoParlamentar': 'http://www.senado.leg.br/senadores/img/fotos-oficiais/senador59.jpg', 'UrlPaginaParlamentar': 'http://www25.senado.leg.br/web/senadores/senador/-/perfil/59', 'EmailParlamentar': 'marinasi#senado.leg.br', 'SiglaPartidoParlamentar': 'PT', 'UfParlamentar': 'AC'}, 'SiglaVoto': 'Abstenção'},...
As I said above in the script, item (Votacoes) can have different structures on each link - number of columns or amount of data.
Please is there a more efficient way to parse this kind of information?
Also better to organize it in a dataframe? Or is it better to break into multiple dataframes, each with a unique key from each link?
Edited on 12/20/2019
More details of the item "Votacoes" to try to further explain this question.
It is information about parliamentary votes, with the votes of senators
If you open the link or link or link in a Chrome browser for example you will see more examples of how it is formed
They are very sublevel of information, with various keys and data.
Also the number of keys may vary from link to link
It is different from the items that are in 'IdentificacaoMateria', simpler and without sublevels, so it's easy to think of a dataframe structure.
1 - My question then is if there is a way to read all the keys that exist in "Votacoes" and automate the creation of a dataframe
2 - Or if I have to predict all possible key conditions to capture the information and then do the dataframe
3 - Also, as this is a complex data structure, I want an opinion as to whether the conventional dataframe strategy would really be the best or could use otherwise
For example, the current dataframe generated this file.
I thought I'd use the unique key of each poll, "CodigoMateria", to index the dataframe.
Then a search with the unique key would return the dictionary contained in "Votacoes"
And this dictionary would be used to show information in an application
Edited on 12/21/2019
I followed the directions below that #wowkin2 gave and did so:
import requests
import pandas as pd
import collections
# Function to read all keys
def get_by_key(key, value):
    """Resolve a dot-separated *key* path (e.g. 'a.b.c') inside the nested
    dict *value*.

    Returns None when any path segment is missing (KeyError) or when the
    current level cannot be indexed by a string (TypeError).
    """
    current = value
    try:
        for part in key.split('.'):
            current = current[part]
    except (KeyError, TypeError):
        return None
    return current
# Function to flatten nested dictionaries
def flatten(d, parent_key='', sep='_'):
    """Flatten nested mappings into a single-level dict whose keys join the
    nesting path with *sep*, e.g. {'a': {'b': 1}} -> {'a_b': 1}.

    Non-mapping values (including lists) are kept as-is.
    """
    # BUG FIX: the alias collections.MutableMapping was removed in
    # Python 3.10; the ABC lives in collections.abc.
    from collections.abc import MutableMapping

    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
# Ask the Senado open-data API for JSON instead of XML.
headers = {"Accept": "application/json"}
# This is a dataframe with multiple voting links
# This in column "url_votacoes_materia"
df_projetos_det.info()
# Marks the beginning of the iteration in df_projetos_det
conta = 0
for num, row in df_projetos_det.iterrows():
projetos_votos = []
# Default so `projects` exists even when the request below fails.
projects = {}
url = row['url_votacoes_materia']
print(url)
try:
r = requests.get(url, headers=headers)
projects = r.json()
except requests.exceptions.RequestException as e:
print("Requests exception: {}".format(e))
# One row per link; get_by_key returns None for any missing path segment.
dicionario = {
"CodigoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.CodigoMateria', projects),
"SiglaCasaIdentificacaoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.SiglaCasaIdentificacaoMateria', projects),
"NomeCasaIdentificacaoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.NomeCasaIdentificacaoMateria', projects),
"SiglaSubtipoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.SiglaSubtipoMateria', projects),
"DescricaoSubtipoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.DescricaoSubtipoMateria', projects),
"NumeroMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.NumeroMateria', projects),
"AnoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.AnoMateria', projects),
"DescricaoObjetivoProcesso": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.DescricaoObjetivoProcesso', projects),
"DescricaoIdentificacaoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.DescricaoIdentificacaoMateria', projects),
"IndicadorTramitando": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.IndicadorTramitando', projects),
"Votacoes": get_by_key('VotacaoMateria.Materia.Votacoes', projects),
}
projetos_votos.append(dicionario)
# NOTE(review): DataFrame.append was removed in pandas 2.x — collecting the
# dicts in one list and calling pd.DataFrame once (or pd.concat) is the
# modern replacement.
if conta == 0:
df_projetos_votos = pd.DataFrame(projetos_votos)
else:
df_projetos_votos_aux = pd.DataFrame(projetos_votos)
df_projetos_votos = df_projetos_votos.append(df_projetos_votos_aux)
conta = conta + 1
df_projetos_votos.info()
# Marks the beginning of the iteration in df_projetos_votos
conta = 0
for num, row in df_projetos_votos.iterrows():
# I capture the unique code of the proposition that was voted or not
CodigoMateria = row['CodigoMateria']
Votacoes = row['Votacoes']
# Tests if the proposition has already had a vote
if Votacoes is not None:
votos = flatten(Votacoes)
df = pd.DataFrame(votos)
# Add column with unique code
df['CodigoMateria'] = CodigoMateria
if conta == 0:
df_procura1 = df
else:
df_procura1 = df_procura1.append(df)
conta = conta + 1
# Created a dataframe with the voting dictionary and its unique proposition codes
df_procura1.info()
If you want to make dict structure flat and use in dataframe - you can use example from similar question about Flatten nested dictionaries. Result will be a dict that can be easily converted. If some fields are missing in few objects - dataframe will contain null values there.
import collections
def flatten(d, parent_key='', sep='_'):
    """Flatten arbitrarily nested mappings into one flat dict.

    Keys of nested levels are joined with *sep* ('a' -> 'a_b' -> 'a_b_c');
    non-mapping values (lists included) pass through unchanged.
    """
    # BUG FIX: collections.MutableMapping is gone since Python 3.10 —
    # the ABC must come from collections.abc.
    from collections.abc import MutableMapping

    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
>>> flatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]})
{'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}
.
Originally (before edit on 12/20/2019),
I thought that, you want manually extract some keys and build structure. So I thought that you can try to define your structure using dots like VotacaoMateria.Materia.IdentificacaoMateria.CodigoMateria to generate your dict for Pandas Dataframe
import requests
import pandas as pd
# Request JSON from the Senado open-data API.
headers = {"Accept": "application/json"}
# Example link
url = 'http://legis.senado.leg.br/dadosabertos/materia/votacoes/137178'
projetos_vot = []
# Default so `projects` is defined even when the request fails.
projects = {}
try:
r = requests.get(url, headers=headers)
# json() is parsed inside the try: `r` is only bound when the GET succeeded.
projects = r.json()
except requests.exceptions.RequestException as e:
print("Requests exception: {}".format(e))
def get_by_key(key, value):
    """Look up a dot-separated *key* path inside the nested dict *value*.

    Recurses one level per dot; returns None when a segment is absent
    (KeyError) or when the current level is not string-indexable (TypeError).
    """
    if '.' not in key:
        try:
            return value[key]
        except (KeyError, TypeError):
            return None
    head, tail = key.split('.', 1)
    try:
        return get_by_key(tail, value[head])
    except (KeyError, TypeError):
        return None
# One flat record per link: get_by_key walks each dotted path through the
# parsed JSON and yields None wherever the structure is missing a level.
dicionario = {
"CodigoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.CodigoMateria', projects),
"SiglaCasaIdentificacaoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.SiglaCasaIdentificacaoMateria', projects),
"NomeCasaIdentificacaoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.NomeCasaIdentificacaoMateria', projects),
"SiglaSubtipoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.SiglaSubtipoMateria', projects),
"DescricaoSubtipoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.DescricaoSubtipoMateria', projects),
"NumeroMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.NumeroMateria', projects),
"AnoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.AnoMateria', projects),
"DescricaoObjetivoProcesso": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.DescricaoObjetivoProcesso', projects),
"DescricaoIdentificacaoMateria": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.DescricaoIdentificacaoMateria', projects),
"IndicadorTramitando": get_by_key('VotacaoMateria.Materia.IdentificacaoMateria.IndicadorTramitando', projects),
"Votacoes": get_by_key('VotacaoMateria.Materia.Votacoes', projects),
}
projetos_vot.append(dicionario)
# Build the dataframe from the collected records; the .info() output below
# was produced by this run.
df_projetos_vot = pd.DataFrame(projetos_vot)
df_projetos_vot.reset_index()
df_projetos_vot.info()
# <class 'pandas.core.frame.DataFrame'>
# RangeIndex: 1 entries, 0 to 0
# Data columns (total 11 columns):
# AnoMateria 1 non-null object
# CodigoMateria 1 non-null object
# DescricaoIdentificacaoMateria 1 non-null object
# DescricaoObjetivoProcesso 1 non-null object
# DescricaoSubtipoMateria 1 non-null object
# IndicadorTramitando 1 non-null object
# NomeCasaIdentificacaoMateria 1 non-null object
# NumeroMateria 1 non-null object
# SiglaCasaIdentificacaoMateria 1 non-null object
# SiglaSubtipoMateria 1 non-null object
# Votacoes 1 non-null object
# dtypes: object(11)
# memory usage: 160.0+ bytes
#
# Process finished with exit code 0
print(df_projetos_vot.head())
# AnoMateria CodigoMateria DescricaoIdentificacaoMateria DescricaoObjetivoProcesso ... NumeroMateria SiglaCasaIdentificacaoMateria SiglaSubtipoMateria Votacoes
# 0 2019 137178 PEC 91/2019 Revisora ... 00091 SF PEC {u'Votacao': [{u'DescricaoVotacao': u'Proposta...
Code improvement problems:
For two weeks I have been trying to improve the code below. I managed to write the version above, but I still have problems and it is not working as intended.
There are 2 main problems
When I do j_list = str(hide[1]) I want to create a variable name, but instead I get the element's value, 1, which does not solve my problem.
Index out of range error at context[j_list[i]] = j_list[i]
# Attempted loop version of the repetitive view code shown further below.
context = {
'instance': project,
'user': user,
}
hide = [0,1]
for i in range(10):
# Build per-index names like "hide0" / "fp_list_0".
j_list = "hide" + str(i)
fp_list ="fp_list_" + str(i)
# NOTE(review): this overwrites the "hideN" name built above with the
# string "1" (str(hide[1])) — the name is lost.
j_list = str(hide[1])
# NOTE(review): j_list is now the 1-character string "1", so j_list[i]
# raises IndexError for every i >= 1.
context[j_list[i]] = j_list[i]
messages.add_message(request, messages.INFO, j_list)
messages.add_message(request, messages.INFO, fp_list)
try:
# NOTE(review): fp_list is a str; item assignment on a str raises
# TypeError — a dict keyed by the generated names is needed instead.
fp_list[i] = FP.objects.filter(id__in=group[i][1])
context[fp_list[i]] = fp_list[i]
j_list[i] = hide[0]
except IndexError:
# NOTE(review): '==' compares and discards the result; this line
# assigns nothing.
fp_list[i] == "null"
return render(request, 'projects_detail.html', context)
Old working code but it's too ugly and I am trying to improve myself as per above code:
hide0=1
hide1=1
hide2=1
hide3=1
hide4=1
hide5=1
hide6=1
hide7=1
hide8=1
hide9=1
try:
fp_list_0 = FP.objects.filter(id__in=group[0][1])
hide0 = 0
except IndexError:
fp_list_0 = "null"
try:
fp_list_1 = FP.objects.filter(id__in=group[1][1])
hide1 = 0
except IndexError:
fp_list_1 = "null"
try:
fp_list_2 = FP.objects.filter(id__in=group[2][1])
hide2 = 0
except IndexError:
fp_list_2 = "null"
try:
fp_list_3 = FP.objects.filter(id__in=group[3][1])
hide3 = 0
except IndexError:
fp_list_3 = "null"
try:
fp_list_4 = FP.objects.filter(id__in=group[4][1])
hide4 = 0
except IndexError:
fp_list_4 = "null"
try:
fp_list_5 = FP.objects.filter(id__in=group[5][1])
hide5 = 0
except IndexError:
fp_list_5 = "null"
try:
fp_list_6 = FP.objects.filter(id__in=group[6][1])
hide6 = 0
except IndexError:
fp_list_6 = "null"
try:
fp_list_7 = FP.objects.filter(id__in=group[7][1])
hide7 = 0
except IndexError:
fp_list_7 = "null"
try:
fp_list_8 = FP.objects.filter(id__in=group[8][1])
hide8 = 0
except IndexError:
fp_list_8 = "null"
try:
fp_list_9 = FP.objects.filter(id__in=group[9][1])
hide9 = 0
except IndexError:
fp_list_9 = "null"
context = {
'instance': project,
'user': user,
"fp_list_0": fp_list_0,"fp_list_1": fp_list_1,"fp_list_2": fp_list_2,
"fp_list_3": fp_list_3,"fp_list_4": fp_list_4,"fp_list_5": fp_list_5,
"fp_list_6": fp_list_6,"fp_list_7": fp_list_7,"fp_list_8": fp_list_8,
"fp_list_9": fp_list_9,
"hide0": hide0,"hide1": hide1,"hide2": hide2,"hide3": hide3,"hide4": hide4,
"hide5": hide5, "hide6": hide6, "hide7": hide7, "hide8": hide8, "hide9": hide9,
}
return render(request, 'projects_detail.html', context)
# Pre-seed the hide flags (1 = hidden until a list is found).
hide_dict = {}
for i in range(0, 10):
    hide_dict['hide' + str(i)] = 1
# Do the same with your Fp lists
fp_dict = {}
for i in range(0, 10):
    fp_dict['fp_list_' + str(i)] = ""
for key, value in fp_dict.items():
    try:
        # The numeric suffix of the key selects the matching group entry.
        fp_dict[key] = FP.objects.filter(id__in=group[int(key.replace('fp_list_', ''))][1])
        # BUG FIX: the original line had an unbalanced parenthesis —
        #   hide_dict['hide'+(key.replace('fp_list_','') ] = 0
        # which is a SyntaxError.
        hide_dict['hide' + key.replace('fp_list_', '')] = 0
    except IndexError:
        fp_dict[key] = "null"
context = {
    'instance': project,
    'user': user,
    'fp_dict': fp_dict,
    'hide_dict': hide_dict
}
You are better off using dictionaries here — I hope this helped.
How can I not need to query the database every time?
From the bellow snapshot:
I have five tabs, name as: 云主机,云硬盘,云主机快照,云硬盘快照,安全组:
And in the bottom of the list, there is <<, <, >,>>, and GO buttons that can calculate the page_num.
Then I can use the localhost:8000/app_admin/myServers-1-1-1-1-1 analogous link to query the data.
1-1-1-1-1 represents 云主机,云硬盘,云主机快照,云硬盘快照,安全组's page_num.
In the views.py, there are key codes:
# View: takes the five per-tab page numbers from the URL and renders the
# page with everything get_res_myserver() collected.
def myServers(request, server_nid,disk_nid,snapshot_server_nid, snapshot_block_nid,security_group_nid, tab_nid):
data = get_res_myserver(request, server_nid,disk_nid,snapshot_server_nid, snapshot_block_nid,security_group_nid, tab_nid)
return render(request, 'app_admin/my-server.html', {"data":data})
...
# Sketch of the helper; the full implementation appears in the EDIT below.
def get_res_myserver(request, server_nid,disk_nid,snapshot_server_nid, snapshot_block_nid,security_group_nid, tab_nid):
# query all the data, and paginator there
...
return data
But, my issue is, every time I query the localhost:8000/app_admin/myServers-x-x-x-x-x, it will take a long time, sometimes more than 8 seconds(the time can not be shorter), its a long time for user experience.
So, is there a way to query the data only once and then paginate it multiple times?
EDIT
this is the get_res_myserver method details:
# Collect every tab's data (VMs, disks, snapshots, security groups) from
# OpenStack, paginate each list, and return one dict for the template.
def get_res_myserver(request, server_nid,disk_nid,snapshot_server_nid, snapshot_block_nid,security_group_nid, tab_nid):
page_size = 5
# Extract the page numbers (fall back to page 1 when not given)
server_page_num = 1 if server_nid == None else server_nid
disk_page_num = 1 if disk_nid == None else disk_nid
ss_server_page_num = 1 if snapshot_server_nid == None else snapshot_server_nid # server snapshot
ss_block_page_num = 1 if snapshot_block_nid == None else snapshot_block_nid # block snapshot
sg_page_num = 1 if security_group_nid == None else security_group_nid # security_group
tab_nid_num = 1 if tab_nid == None else tab_nid
data = {}
# Cloud hosts (VMs)
# Use the connection to find the virtual machines
op_conn = OpenstackConn.OpenstackConn()
server_op_list = list(op_conn.conn.compute.servers())
import json
server_app_list = app_admin_models.Instance.objects.filter(user=get_user(request))
server_paginator = Paginator(server_op_list, page_size)
try:
server_op_page_list = server_paginator.page(server_page_num)
except PageNotAnInteger:
server_op_page_list = server_paginator.page(1)
server_page_num = 1
except EmptyPage:
server_op_page_list = server_paginator.page(server_paginator.num_pages)
server_page_num = server_paginator.num_pages
server_app_page_list = []
server_data_list = [] # pairs the OpenStack server with the app's server record; structure: [{"op_server": op_server_instance, "app_server": app_server_instance}]
for server_op_page in server_op_page_list:
for server_app in server_app_list:
if server_app.id == server_op_page.id:
server_app_page_list.append(server_app)
server_data_list.append({"op_server": server_op_page, "app_server": server_app})
# Cloud disks (volumes)
# TODO: server_disk (not installed yet)
server_disk_list = [] # list(op_conn.conn.block_store.volumes())
disk_paginator = Paginator(server_disk_list, page_size)
try:
server_disk_page_list = disk_paginator.page(disk_page_num)
except PageNotAnInteger:
server_disk_page_list = disk_paginator.page(1)
disk_page_num = 1
except EmptyPage:
server_disk_page_list = disk_paginator.page(disk_paginator.num_pages)
disk_page_num = disk_paginator.num_pages
# Snapshots
snapshot_server_generator_ori = op_conn.conn.compute.images()
snapshot_server_list_ori = list(snapshot_server_generator_ori)
import copy
# Iterate over a copy so removing from the original list is safe.
snapshot_server_list_ori_cp = copy.copy(snapshot_server_list_ori)
for snapshot_server in snapshot_server_list_ori_cp:
if "snapshot" not in snapshot_server.name:
snapshot_server_list_ori.remove(snapshot_server)
snapshot_server_filtered_list = snapshot_server_list_ori
snapshot_server_paginator = Paginator(snapshot_server_filtered_list, page_size)
try:
snapshot_server_page_list = snapshot_server_paginator.page(ss_server_page_num)
except PageNotAnInteger:
snapshot_server_page_list = snapshot_server_paginator.page(1)
ss_server_page_num = 1
except EmptyPage:
snapshot_server_page_list = snapshot_server_paginator.page(snapshot_server_paginator.num_pages)
ss_server_page_num = snapshot_server_paginator.num_pages
# TODO: (the SDK does not implement VM block-storage snapshots yet)
snapshot_block_list = [] # list(op_conn.conn.block_store.snapshots())
block_paginator = Paginator(snapshot_block_list, page_size)
try:
# NOTE(review): paging with disk_page_num here looks like a copy-paste
# slip — ss_block_page_num is the page number for this tab; verify.
snapshot_block_page_list = block_paginator.page(disk_page_num) # block storage
except PageNotAnInteger:
snapshot_block_page_list = block_paginator.page(1)
ss_block_page_num = 1
except EmptyPage:
snapshot_block_page_list = block_paginator.page(block_paginator.num_pages)
ss_block_page_num = block_paginator.num_pages
# Security groups
security_groups_list = list(op_conn.conn.network.security_groups())
security_groups_paginator = Paginator(security_groups_list, page_size)
try:
# NOTE(review): same copy-paste slip — sg_page_num is the page number
# for the security-groups tab, not disk_page_num; verify.
security_groups_page_list = security_groups_paginator.page(disk_page_num)
except PageNotAnInteger:
security_groups_page_list = security_groups_paginator.page(1)
sg_page_num = 1
except EmptyPage:
security_groups_page_list = security_groups_paginator.page(security_groups_paginator.num_pages)
sg_page_num = security_groups_paginator.num_pages
data['server_data_list'] = server_data_list # VM
data['server_disk_list'] = server_disk_page_list # cloud disks
data['snapshot_server_list'] = snapshot_server_page_list # VM snapshots (for all VMs)
data['snapshot_block_list'] = snapshot_block_page_list # block-storage snapshots
data['security_groups_list'] = security_groups_page_list # security groups
data['settings_data'] = settings.OPENSTACK_USER_NETWORK
data['tab_nid'] = tab_nid_num # which tab is currently selected
data['server_page_num'] = server_page_num
data['disk_page_num'] = disk_page_num
data['ss_server_page_num'] = ss_server_page_num
data['ss_block_page_num'] = ss_block_page_num
data['sg_page_num'] = sg_page_num # security groups
print ("myserver_data", data)
return data
EDIT-2
This is my op_conn get, and this is a singleton:
op_conn = OpenstackConn.OpenstackConn()
The maximum number of records in my input JSON is 100; however, there is a paging "next" link that provides the next 100 records. Below is what I have, but it returns a dict with only 100 entries — I know there are more. How should I modify this function to get all the records?
def process_comment_json(comment_json):
    """Collect every comment on a post into {commentor_id: info dict},
    following the paging 'next' links so that ALL pages are gathered,
    not just the first 100 records.

    Each value dict holds commentor_name, created_time, message (with
    non-ASCII stripped) and its sentiment score.
    """
    post_comment_dict = dict()

    def _ingest(records, skip_message):
        # Fold one page of comment records into post_comment_dict,
        # skipping (and reporting) malformed entries.
        for y in records:
            try:
                commentor_name = y['from']['name']
                commentor_id = y['from']['id']
                created_time = y['created_time']
                message = remove_non_ascii(y['message'])
                sentiment = return_sentiment_score(message)
                post_comment_dict[commentor_id] = {
                    'commentor_name': commentor_name,
                    'created_time': created_time,
                    'message': message,
                    'sentiment': sentiment,
                }
            except Exception:
                print(skip_message)

    if 'comments' not in comment_json.keys():
        return post_comment_dict

    _ingest(comment_json['comments']['data'],
            "malformed data, skipping this comment in round1")

    # BUG FIX: the original loop re-parsed comment_json['comments']['data']
    # (the first page) for every link it followed, and re-read 'paging'
    # from comment_json as well — so it never advanced past the first 100
    # records. Follow the 'next' link of the page most recently fetched.
    # (It also popped from next_links while iterating it, which skips
    # elements; a single "current link" variable avoids that entirely.)
    next_link = comment_json['comments'].get('paging', {}).get('next')
    while next_link:
        print("processing next_links")
        print("current len of post_comment_dict is:", len(post_comment_dict))
        nl_json = requests.get(next_link).json()
        _ingest(nl_json.get('data', []),
                "malformed data, skipping this comment from the next_links list")
        next_link = nl_json.get('paging', {}).get('next')
    return post_comment_dict