django index out of range - python

Code improvement problems:
For two weeks I have been trying to improve the code below. I managed to write it, but it still has problems and does not work as intended.
There are 2 main problems:
When I do this ( j_list = str(hide[1]) ) I want to set up a declaration,
but instead I get its value, 1, which does not solve my problem.
I get an index-out-of-range error at context[j_list[i]] = j_list[i].
context = {
    'instance': project,
    'user': user,
}
hide = [0, 1]
for i in range(10):
    j_list = "hide" + str(i)
    fp_list = "fp_list_" + str(i)
    j_list = str(hide[1])
    context[j_list[i]] = j_list[i]
    messages.add_message(request, messages.INFO, j_list)
    messages.add_message(request, messages.INFO, fp_list)
    try:
        fp_list[i] = FP.objects.filter(id__in=group[i][1])
        context[fp_list[i]] = fp_list[i]
        j_list[i] = hide[0]
    except IndexError:
        fp_list[i] == "null"
return render(request, 'projects_detail.html', context)
Here is the old working code; it is too ugly, and I am trying to improve on it with the code above:
hide0 = 1
hide1 = 1
hide2 = 1
hide3 = 1
hide4 = 1
hide5 = 1
hide6 = 1
hide7 = 1
hide8 = 1
hide9 = 1
try:
    fp_list_0 = FP.objects.filter(id__in=group[0][1])
    hide0 = 0
except IndexError:
    fp_list_0 = "null"
try:
    fp_list_1 = FP.objects.filter(id__in=group[1][1])
    hide1 = 0
except IndexError:
    fp_list_1 = "null"
try:
    fp_list_2 = FP.objects.filter(id__in=group[2][1])
    hide2 = 0
except IndexError:
    fp_list_2 = "null"
try:
    fp_list_3 = FP.objects.filter(id__in=group[3][1])
    hide3 = 0
except IndexError:
    fp_list_3 = "null"
try:
    fp_list_4 = FP.objects.filter(id__in=group[4][1])
    hide4 = 0
except IndexError:
    fp_list_4 = "null"
try:
    fp_list_5 = FP.objects.filter(id__in=group[5][1])
    hide5 = 0
except IndexError:
    fp_list_5 = "null"
try:
    fp_list_6 = FP.objects.filter(id__in=group[6][1])
    hide6 = 0
except IndexError:
    fp_list_6 = "null"
try:
    fp_list_7 = FP.objects.filter(id__in=group[7][1])
    hide7 = 0
except IndexError:
    fp_list_7 = "null"
try:
    fp_list_8 = FP.objects.filter(id__in=group[8][1])
    hide8 = 0
except IndexError:
    fp_list_8 = "null"
try:
    fp_list_9 = FP.objects.filter(id__in=group[9][1])
    hide9 = 0
except IndexError:
    fp_list_9 = "null"
context = {
    'instance': project,
    'user': user,
    "fp_list_0": fp_list_0, "fp_list_1": fp_list_1, "fp_list_2": fp_list_2,
    "fp_list_3": fp_list_3, "fp_list_4": fp_list_4, "fp_list_5": fp_list_5,
    "fp_list_6": fp_list_6, "fp_list_7": fp_list_7, "fp_list_8": fp_list_8,
    "fp_list_9": fp_list_9,
    "hide0": hide0, "hide1": hide1, "hide2": hide2, "hide3": hide3, "hide4": hide4,
    "hide5": hide5, "hide6": hide6, "hide7": hide7, "hide8": hide8, "hide9": hide9,
}
return render(request, 'projects_detail.html', context)

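The direct cause of the reported IndexError: after j_list = str(hide[1]), j_list is the one-character string "1", so j_list[i] fails for every i >= 1. A minimal repro of just that step:

hide = [0, 1]
j_list = str(hide[1])  # j_list is now the string "1", which has length 1
j_list[1]              # IndexError: string index out of range

Building variable names as strings does not create variables; use dictionaries instead, as below: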
hide_dict = {}
for i in range(0, 10):
    hide_dict['hide' + str(i)] = 1
# Do the same with your FP lists
fp_dict = {}
for i in range(0, 10):
    fp_dict['fp_list_' + str(i)] = ""
for key, value in fp_dict.items():
    try:
        fp_dict[key] = FP.objects.filter(id__in=group[int(key.replace('fp_list_', ''))][1])
        hide_dict['hide' + key.replace('fp_list_', '')] = 0
    except IndexError:
        fp_dict[key] = "null"
context = {
    'instance': project,
    'user': user,
    'fp_dict': fp_dict,
    'hide_dict': hide_dict
}
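For reference, the same lookup can be written with one index-based loop instead of the key round-trip (a sketch, assuming the group list and FP model from the question; group[i] raises IndexError eagerly, so the except branch still fires for missing groups):

fp_dict, hide_dict = {}, {}
for i in range(10):
    try:
        fp_dict[f'fp_list_{i}'] = FP.objects.filter(id__in=group[i][1])
        hide_dict[f'hide{i}'] = 0
    except IndexError:
        fp_dict[f'fp_list_{i}'] = "null"
        hide_dict[f'hide{i}'] = 1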
You would be better off using dictionaries. Hope this helps.

Related

Scraped youtube comments amount and real amount are different

I'm new to Python and I'm trying to code a comment scraper for YouTube that collects the most important information, which I put in a JSON file. But my count of comments and replies is not the same as on YouTube, and I don't know where my error is. I noticed that it doesn't write any data to the files if there are fewer than 20 comments, but I don't know where I have to change something...
Example:
https://youtu.be/Re1m9O7q-9U here I get 102, but it should be 107
https://youtu.be/Q9Y5m1fQ7Fk here I get 423, but it should be 486
https://youtu.be/cMhE5BfmFkM here I get 1315, but it should be 2052
Here is the code:
class YT_Comments:
    def __init__(self, api_key):
        self.api_key = api_key
        self.comment_int = 0

    def get_video_comments(self, video_id, limit):
        url = f"https://youtube.googleapis.com/youtube/v3/commentThreads?part=replies%2C%20snippet&order=relevance&videoId={video_id}&key={self.api_key}"
        vid_comments = []
        pc, npt = self._get_comments_per_page(url)
        if limit is not None and isinstance(limit, int):
            url += f"&maxResults={str(limit)}"
        while (npt is not None):
            nexturl = url + "&pageToken=" + npt
            pc, npt = self._get_comments_per_page(nexturl)
            vid_comments.append(pc)
        print(self.comment_int)
        print(len(vid_comments))
        return vid_comments

    def _get_comments_per_page(self, url):
        json_url = requests.get(url)
        data = json.loads(json_url.text)
        page_comments = []
        if "items" not in data:
            return page_comments, None
        item_data = data["items"]
        nextPageToken = data.get("nextPageToken", None)
        for item in tqdm.tqdm(item_data):
            try:
                kind = item["kind"]
                if kind == "youtube#comment" or "youtube#commentThread":
                    comment_text = item["snippet"]["topLevelComment"]["snippet"]["textOriginal"]
                    comment_author = item["snippet"]["topLevelComment"]["snippet"]["authorDisplayName"]
                    author_id = item["snippet"]["topLevelComment"]["snippet"]["authorChannelId"]["value"]
                    comment_like_count = item["snippet"]["topLevelComment"]["snippet"]["likeCount"]
                    comment_date = item["snippet"]["topLevelComment"]["snippet"]["publishedAt"]
                    comment = {"comment_text": comment_text,
                               "comment_author": comment_author,
                               "comment_author_id": author_id,
                               "comment_like_count": comment_like_count,
                               "comment_date": comment_date}
                    replies_l = []
                    self.comment_int += 1
                    try:
                        replies = item["replies"]["comments"]
                        for reply in replies:
                            reply_txt = reply["snippet"]["textOriginal"]
                            reply_author = reply["snippet"]["authorDisplayName"]
                            reply_author_id = reply["snippet"]["authorChannelId"]["value"]
                            reply_like_count = reply["snippet"]["likeCount"]
                            reply_date = reply["snippet"]["publishedAt"]
                            reply_dict = {"text": reply_txt,
                                          "author": reply_author,
                                          "author_id": reply_author_id,
                                          "likes": reply_like_count,
                                          "date": reply_date}
                            replies_l.append(reply_dict)
                            self.comment_int += 1
                    except KeyError:
                        replies_l.append(None)
                    comment_dict = {
                        "comment": comment,
                        "replies": replies_l,
                    }
                    page_comments.append(comment_dict)
            except KeyError:
                print("No Comments")
        return page_comments, nextPageToken
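One likely cause, reading the code above: the first page returned by _get_comments_per_page is never appended to vid_comments, and with the API's default page size of 20 a video with fewer than 20 comments has no nextPageToken, so the while loop never runs and nothing is collected at all. A minimal sketch of the corrected loop (names from the question):

pc, npt = self._get_comments_per_page(url)
vid_comments.append(pc)  # keep the first page as well
while npt is not None:
    pc, npt = self._get_comments_per_page(url + "&pageToken=" + npt)
    vid_comments.append(pc)

Separately, the check if kind == "youtube#comment" or "youtube#commentThread": is always true, because a non-empty string literal is truthy; if kind in ("youtube#comment", "youtube#commentThread"): is probably what was meant. Ordering by relevance may also omit some comments, which could account for part of the remaining gap.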

Simplifying Try, Except for yahooquery

I am trying to get about 10 stock attributes from yahooquery. When some data is not available (e.g. when the company is not making a profit, there is no PE ratio), it raises a KeyError. I want to return zero in that case. Is there any way to simplify my code and not put try/except around every attribute?
def data(ticker):  # pulling data about stock from Yahoo Finance API
    try:
        company_name = Ticker(ticker).quote_type[ticker]["shortName"]
    except KeyError:
        company_name = 0
    try:
        stock_price = Ticker(ticker).financial_data[ticker]["currentPrice"]
    except KeyError:
        stock_price = 0
    try:
        change = Ticker(ticker).history(interval='1mo', start=(datetime.datetime.today() - datetime.timedelta(days=90)), end=datetime.datetime.today())
        change = change["open"]
        growth_or_loose = ((change.iloc[-1] / change.iloc[0]) - 1)
    except:
        growth_or_loose = 0
    try:
        recommendation = Ticker(ticker).financial_data[ticker]["recommendationKey"]
    except KeyError:
        recommendation = 0
    try:
        market_cap = Ticker(ticker).summary_detail[ticker]["marketCap"]
    except KeyError:
        market_cap = 0
    try:
        pe = Ticker(ticker).summary_detail[ticker]["trailingPE"]
    except KeyError:
        pe = 0
    try:
        pb = Ticker(ticker).key_stats[ticker]["priceToBook"]
    except KeyError:
        pb = 0
    try:
        rev_growth = Ticker(ticker).financial_data[ticker]["revenueGrowth"]
    except KeyError:
        rev_growth = 0
    try:
        ern_growth = Ticker(ticker).financial_data[ticker]["earningsGrowth"]
    except KeyError:
        ern_growth = 0
    profit_margin = Ticker(ticker).financial_data[ticker]["profitMargins"]
    try:
        debt2equity = Ticker(ticker).financial_data[ticker]["debtToEquity"]
    except KeyError:
        debt2equity = 0
    data = company_name, stock_price, growth_or_loose, recommendation, market_cap, pe, pb, rev_growth, ern_growth, profit_margin, debt2equity
    return list(data)
In this case you could use the dictionary's get method instead, which returns None instead of raising a KeyError when the dictionary doesn't contain that key; if a default value (2nd argument) is supplied, it returns the default value.
my_dict = {
    "hello": "world"
}
try:
    hello = my_dict["NONEXISTING"]
except KeyError:
    hello = "greetings"
# the try/except block can be replaced with this, and since the key
# doesn't exist, the method returns "greetings" instead of raising a KeyError
hello = my_dict.get("NONEXISTING", "greetings")
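Applied to the question's function, a minimal sketch could look like this (it assumes each yahooquery accessor returns a plain dict keyed by the ticker; for unknown symbols some accessors return an error string instead, hence the type guard):

from yahooquery import Ticker

def data(ticker):
    t = Ticker(ticker)

    def section(modules):
        # return the per-ticker dict, or {} if yahooquery gave us something else
        v = modules.get(ticker, {}) if isinstance(modules, dict) else {}
        return v if isinstance(v, dict) else {}

    fin = section(t.financial_data)
    summary = section(t.summary_detail)
    return [
        section(t.quote_type).get("shortName", 0),
        fin.get("currentPrice", 0),
        fin.get("recommendationKey", 0),
        summary.get("marketCap", 0),
        summary.get("trailingPE", 0),
        section(t.key_stats).get("priceToBook", 0),
        fin.get("revenueGrowth", 0),
        fin.get("earningsGrowth", 0),
        fin.get("profitMargins", 0),
        fin.get("debtToEquity", 0),
    ]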
You can also use defaultdict from collections to supply a default value for any key that has no value.
First, convert your dictionary to a defaultdict:
# Python program to demonstrate defaultdict
from collections import defaultdict

# Function to return a default value for keys that are not present
def def_value():
    return "Not Present"

# Defining the dict
d = defaultdict(def_value)
d["a"] = 1
d["b"] = 2
print(d["a"])
print(d["b"])
print(d["c"])
Output:
1
2
Not Present
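Applied to the question, one line gives the same zero default (a sketch; it assumes financial_data[ticker] really is a dict for this symbol):

from collections import defaultdict
fin = defaultdict(lambda: 0, Ticker(ticker).financial_data[ticker])
stock_price = fin["currentPrice"]  # 0 whenever the key is absent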
from yahooquery import Ticker
import time

symbols = {
    'AAPL': 'B12', 'BABA': 'B13', 'MSFT': 'B14',
}
tickers = Ticker(list(symbols.keys()), asynchronous=True)
try:
    while True:
        prices = tickers.price
        try:
            for k, v in symbols.items():
                try:
                    a = str(prices[k]['regularMarketPrice'])
                    print("currentPrice : " + a)
                except Exception as e:
                    print(0)
                try:
                    b = str(prices[k]['marketCap'])
                    print("marketCap : " + b)
                except Exception as e:
                    print(0)
                try:
                    c = str(prices[k]['payoutRation'])
                    print("payoutRation : " + c)
                except Exception as e:
                    print(0)
        except Exception as e:
            print(e)
        time.sleep(2)
except Exception as e:
    print(e)
Also, you can export this data to Excel with:
import xlwings as xw

wb = xw.Book('Filename.xlsx')
sht1 = wb.sheets['Sheet1']
for k, v in symbols.items():
    try:
        sht1.range(v).value = str(prices[k]['regularMarketPrice'])
        v1 = v.replace("B", "C")
        sht1.range(v1).value = str(prices[k]['regularMarketDayHigh'])
        v2 = v1.replace("C", "D")
        sht1.range(v2).value = str(prices[k]['regularMarketDayLow'])
    except Exception as e:
        print(e)

Parsing logs to json Python

Folks,
I am trying to parse a log file into JSON format.
I have a lot of logs; here is one of them.
How can I parse this?
03:02:03.113 [info] ext_ref = BANK24AOS_cl_reqmarketcreditorderstate_6M8I1NT8JKYD_1591844522410384_4SGA08M8KIXQ reqid = 1253166 type = INREQ channel = BANK24AOS sid = msid_1591844511335516_KRRNBSLH2FS duration = 703.991 req_uri = marketcredit/order/state login = 77012221122 req_type = cl_req req_headers = {"accept-encoding":"gzip","connection":"close","host":"test-mobileapp-api.bank.kz","user-agent":"okhttp/4.4.1","x-forwarded-for":"212.154.169.134","x-real-ip":"212.154.169.134"} req_body = {"$sid":"msid_1591844511335516_KRRNBSLH2FS","$sid":"msid_1591844511335516_KRRNBSLH2FS","app":"bank","app_version":"2.3.2","channel":"aos","colvir_token":"GExPR0lOX1BBU1NXT1JEX0NMRUFSVEVYVFNzrzh4Thk1+MjDKWl/dDu1fQPsJ6gGLSanBp41yLRv","colvir_commercial_id":"-1","colvir_id":"000120.335980","openway_commercial_id":"6247520","openway_id":"6196360","$lang":"ru","ekb_id":"923243","inn":"990830221722","login":"77012221122","bank24_id":"262"} resp_body = {"task_id":"","status":"success","data":{"state":"init","applications":[{"status":"init","id":"123db561-34a3-4a8d-9fa7-03ed6377b44f","name":"Sulpak","amount":101000,"items":[{"name":"Switch CISCO x24","price":100000,"count":1,"amount":100000}]}],"segment":{"range":{"min":6,"max":36,"step":1},"payment_day":{"max":28,"min":1}}}}
Into this type of JSON, or any other format (but I guess JSON is the best one):
{
    "time": "03:02:03.113",
    "class_req": "info",
    "ext_ref": "BANK24AOS_cl_reqmarketcreditorderstate_6M8I1NT8JKYD_1591844522410384_4SGA08M8KIXQ",
    "reqid": "1253166",
    "type": "INREQ",
    "channel": "BANK24AOS",
    "sid": "msid_1591844511335516_KRRNBSLH2FS",
    "duration": "703.991",
    "req_uri": "marketcredit/order/state",
    "login": "77012221122",
    "req_type": "cl_req",
    "req_headers": {
        "accept-encoding": "gzip",
        "connection": "close",
        "host": "test-mobileapp-api.bank.kz",
        "user-agent": "okhttp/4.4.1",
        "x-forwarded-for": "212.154.169.134",
        "x-real-ip": "212.154.169.134"
    },
    "req_body": {
        "$sid": "msid_1591844511335516_KRRNBSLH2FS",
        "$sid": "msid_1591844511335516_KRRNBSLH2FS",
        "app": "bank",
        "app_version": "2.3.2",
        "channel": "aos",
        "colvir_token": "GExPR0lOX1BBU1NXT1JEX0NMRUFSVEVYVFNzrzh4Thk1+MjDKWl/dDu1fQPsJ6gGLSanBp41yLRv",
        "colvir_commercial_id": "-1",
        "colvir_id": "000120.335980",
        "openway_commercial_id": "6247520",
        "openway_id": "6196360",
        "$lang": "ru",
        "ekb_id": "923243",
        "inn": "990830221722",
        "login": "77012221122",
        "bank24_id": "262"
    },
    "resp_body": {
        "task_id": "",
        "status": "success",
        "data": {
            "state": "init",
            "applications": [
                {
                    "status": "init",
                    "id": "123db561-34a3-4a8d-9fa7-03ed6377b44f",
                    "name": "Sulpak",
                    "amount": 101000,
                    "items": [
                        {
                            "name": "Switch CISCO x24",
                            "price": 100000,
                            "count": 1,
                            "amount": 100000
                        }
                    ]
                }
            ],
            "segment": {
                "range": {
                    "min": 6,
                    "max": 36,
                    "step": 1
                },
                "payment_day": {
                    "max": 28,
                    "min": 1
                }
            }
        }
    }
}
I tried splitting the whole text first, but then I hit another problem: matching keys to values based on the '=' sign. Also there may be some keys with empty values. For example:
type = INREQ channel = sid = duration = 1.333 (to tell that there is an empty value, you need to pay attention to the number of spaces; usually there is 1 space between the previous value and the next key). So this example should look like this:
{
    "type": "INREQ",
    "channel": "",
    "sid": "",
    "duration": "1.333"
}
Thanks in advance!
One caveat here: the duplicate key "$sid":"msid_1591844511335516_KRRNBSLH2FS" does not survive parsing, since json.loads keeps only one occurrence.
import re
import json

text = """03:02:03.113 [info] ext_ref = reqid = 1253166 type = INREQ channel = BANK24AOS sid = msid_1591844511335516_KRRNBSLH2FS duration = 703.991 req_uri = marketcredit/order/state login = 77012221122 req_type = cl_req req_headers = {"accept-encoding":"gzip","connection":"close","host":"test-mobileapp-api.bank.kz","user-agent":"okhttp/4.4.1","x-forwarded-for":"212.154.169.134","x-real-ip":"212.154.169.134"} req_body = {"$sid":"msid_1591844511335516_KRRNBSLH2FS","$sid":"msid_1591844511335516_KRRNBSLH2FS","app":"bank","app_version":"2.3.2","channel":"aos","colvir_token":"GExPR0lOX1BBU1NXT1JEX0NMRUFSVEVYVFNzrzh4Thk1+MjDKWl/dDu1fQPsJ6gGLSanBp41yLRv","colvir_commercial_id":"-1","colvir_id":"000120.335980","openway_commercial_id":"6247520","openway_id":"6196360","$lang":"ru","ekb_id":"923243","inn":"990830221722","login":"77012221122","bank24_id":"262"} resp_body = {"task_id":"","status":"success","data":{"state":"init","applications":[{"status":"init","id":"123db561-34a3-4a8d-9fa7-03ed6377b44f","name":"Sulpak","amount":101000,"items":[{"name":"Switch CISCO x24","price":100000,"count":1,"amount":100000}]}],"segment":{"range":{"min":6,"max":36,"step":1},"payment_day":{"max":28,"min":1}}}}"""
index1 = text.index('[')
index2 = text.index(']')
new_text = 'time = ' + text[:index1-1] + ' class_req = ' + text[index1+1:index2] + text[index2+2:]
lst = re.findall(r'\S+? = |\S+? = \{.*?\} |\S+? = \{.*?\}$|\S+? = \S+? ', new_text)
res = {}
for item in lst:
    key, equal, value = item.partition('=')
    key, value = key.strip(), value.strip()
    if value.startswith('{'):
        try:
            value = json.loads(value)
        except:
            print(value)
    res[key] = value
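To check that the result matches the target shape, you can dump res back out with the standard library:

print(json.dumps(res, ensure_ascii=False, indent=2))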
You can try regular expressions in Python.
Here is what I wrote; it works for your problem.
For convenience I deleted the string before "ext_ref..."; you can truncate the raw string directly.
import re
import json

string = 'ext_ref = BANK24AOS_cl_reqmarketcreditorderstate_6M8I1NT8JKYD_1591844522410384_4SGA08M8KIXQ reqid = 1253166 type = INREQ channel = BANK24AOS sid = msid_1591844511335516_KRRNBSLH2FS duration = 703.991 req_uri = marketcredit/order/state login = 77012221122 req_type = cl_req req_headers = {"accept-encoding":"gzip","connection":"close","host":"test-mobileapp-api.bank.kz","user-agent":"okhttp/4.4.1","x-forwarded-for":"212.154.169.134","x-real-ip":"212.154.169.134"} req_body = {"$sid":"msid_1591844511335516_KRRNBSLH2FS","$sid":"msid_1591844511335516_KRRNBSLH2FS","app":"bank","app_version":"2.3.2","channel":"aos","colvir_token":"GExPR0lOX1BBU1NXT1JEX0NMRUFSVEVYVFNzrzh4Thk1+MjDKWl/dDu1fQPsJ6gGLSanBp41yLRv","colvir_commercial_id":"-1","colvir_id":"000120.335980","openway_commercial_id":"6247520","openway_id":"6196360","$lang":"ru","ekb_id":"923243","inn":"990830221722","login":"77012221122","bank24_id":"262"} resp_body = {"task_id":"","status":"success","data":{"state":"init","applications":[{"status":"init","id":"123db561-34a3-4a8d-9fa7-03ed6377b44f","name":"Sulpak","amount":101000,"items":[{"name":"Switch CISCO x24","price":100000,"count":1,"amount":100000}]}],"segment":{"range":{"min":6,"max":36,"step":1},"payment_day":{"max":28,"min":1}}}}'
position = re.search("req_headers", string)  # position of req_headers
resp_body_pos = re.search("resp_body", string)
resp_body = string[resp_body_pos.span()[0]:]
res1 = {}
res1.setdefault(resp_body.split("=")[0], resp_body.split("=")[1])
print(res1)
before = string[:position.span()[0]]
after = string[position.span()[0]:resp_body_pos.span()[0]]  # handle req_body separately
res2 = re.findall(r"(\S+) = (\S+)", before)
print(res2)
res3 = re.findall(r"(\S+) = ({.*?})", after)
print(res3)
# res1 type: dict {'resp_body': '...'} - content in resp_body
# res2 type: list [(), ()...] - content before req_headers
# res3 type: list [(), ()...] - the rest of the content
Now you can do what you want with the data (e.g. transform each part into JSON respectively).
Hope this is helpful.

How can I avoid querying the database every time?

How can I avoid querying the database every time?
From the screenshot below:
I have five tabs, named 云主机 (cloud servers), 云硬盘 (cloud disks), 云主机快照 (server snapshots), 云硬盘快照 (disk snapshots), and 安全组 (security groups).
At the bottom of the list there are <<, <, >, >> and GO buttons that calculate the page_num.
Then I can use a link like localhost:8000/app_admin/myServers-1-1-1-1-1 to query the data.
1-1-1-1-1 represents the page_num of each of the five tabs.
In views.py, the key code is:
def myServers(request, server_nid, disk_nid, snapshot_server_nid, snapshot_block_nid, security_group_nid, tab_nid):
    data = get_res_myserver(request, server_nid, disk_nid, snapshot_server_nid, snapshot_block_nid, security_group_nid, tab_nid)
    return render(request, 'app_admin/my-server.html', {"data": data})
...
def get_res_myserver(request, server_nid, disk_nid, snapshot_server_nid, snapshot_block_nid, security_group_nid, tab_nid):
    # query all the data, and paginate it here
    ...
    return data
But my issue is that every query to localhost:8000/app_admin/myServers-x-x-x-x-x takes a long time, sometimes more than 8 seconds (and the time cannot be made shorter), which is a long wait for the user.
So is there a way to query my data only once, and then paginate it multiple times?
EDIT
Here are the details of the get_res_myserver method:
def get_res_myserver(request, server_nid, disk_nid, snapshot_server_nid, snapshot_block_nid, security_group_nid, tab_nid):
    page_size = 5
    # extract the page numbers
    server_page_num = 1 if server_nid == None else server_nid
    disk_page_num = 1 if disk_nid == None else disk_nid
    ss_server_page_num = 1 if snapshot_server_nid == None else snapshot_server_nid  # server snapshot
    ss_block_page_num = 1 if snapshot_block_nid == None else snapshot_block_nid  # block snapshot
    sg_page_num = 1 if security_group_nid == None else security_group_nid  # security_group
    tab_nid_num = 1 if tab_nid == None else tab_nid
    data = {}
    # cloud servers (VMs)
    # find the VMs via the connection
    op_conn = OpenstackConn.OpenstackConn()
    server_op_list = list(op_conn.conn.compute.servers())
    import json
    server_app_list = app_admin_models.Instance.objects.filter(user=get_user(request))
    server_paginator = Paginator(server_op_list, page_size)
    try:
        server_op_page_list = server_paginator.page(server_page_num)
    except PageNotAnInteger:
        server_op_page_list = server_paginator.page(1)
        server_page_num = 1
    except EmptyPage:
        server_op_page_list = server_paginator.page(server_paginator.num_pages)
        server_page_num = server_paginator.num_pages
    server_app_page_list = []
    server_data_list = []  # wraps the op and app servers; structure: [{"op_server": op_server_instance, "app_server": app_server_instance}]
    for server_op_page in server_op_page_list:
        for server_app in server_app_list:
            if server_app.id == server_op_page.id:
                server_app_page_list.append(server_app)
                server_data_list.append({"op_server": server_op_page, "app_server": server_app})
    # cloud disks
    # TODO: server_disk (not installed yet)
    server_disk_list = []  # list(op_conn.conn.block_store.volumes())
    disk_paginator = Paginator(server_disk_list, page_size)
    try:
        server_disk_page_list = disk_paginator.page(disk_page_num)
    except PageNotAnInteger:
        server_disk_page_list = disk_paginator.page(1)
        disk_page_num = 1
    except EmptyPage:
        server_disk_page_list = disk_paginator.page(disk_paginator.num_pages)
        disk_page_num = disk_paginator.num_pages
    # snapshots
    snapshot_server_generator_ori = op_conn.conn.compute.images()
    snapshot_server_list_ori = list(snapshot_server_generator_ori)
    import copy
    snapshot_server_list_ori_cp = copy.copy(snapshot_server_list_ori)
    for snapshot_server in snapshot_server_list_ori_cp:
        if "snapshot" not in snapshot_server.name:
            snapshot_server_list_ori.remove(snapshot_server)
    snapshot_server_filtered_list = snapshot_server_list_ori
    snapshot_server_paginator = Paginator(snapshot_server_filtered_list, page_size)
    try:
        snapshot_server_page_list = snapshot_server_paginator.page(ss_server_page_num)
    except PageNotAnInteger:
        snapshot_server_page_list = snapshot_server_paginator.page(1)
        ss_server_page_num = 1
    except EmptyPage:
        snapshot_server_page_list = snapshot_server_paginator.page(snapshot_server_paginator.num_pages)
        ss_server_page_num = snapshot_server_paginator.num_pages
    # TODO: (the SDK for block-storage snapshots of VMs is not implemented yet)
    snapshot_block_list = []  # list(op_conn.conn.block_store.snapshots())
    block_paginator = Paginator(snapshot_block_list, page_size)
    try:
        snapshot_block_page_list = block_paginator.page(disk_page_num)  # block storage
    except PageNotAnInteger:
        snapshot_block_page_list = block_paginator.page(1)
        ss_block_page_num = 1
    except EmptyPage:
        snapshot_block_page_list = block_paginator.page(block_paginator.num_pages)
        ss_block_page_num = block_paginator.num_pages
    # security groups
    security_groups_list = list(op_conn.conn.network.security_groups())
    security_groups_paginator = Paginator(security_groups_list, page_size)
    try:
        security_groups_page_list = security_groups_paginator.page(disk_page_num)
    except PageNotAnInteger:
        security_groups_page_list = security_groups_paginator.page(1)
        sg_page_num = 1
    except EmptyPage:
        security_groups_page_list = security_groups_paginator.page(security_groups_paginator.num_pages)
        sg_page_num = security_groups_paginator.num_pages
    data['server_data_list'] = server_data_list  # VMs
    data['server_disk_list'] = server_disk_page_list  # cloud disks
    data['snapshot_server_list'] = snapshot_server_page_list  # VM snapshots (all of them)
    data['snapshot_block_list'] = snapshot_block_page_list  # block-storage snapshots
    data['security_groups_list'] = security_groups_page_list  # security groups
    data['settings_data'] = settings.OPENSTACK_USER_NETWORK
    data['tab_nid'] = tab_nid_num  # which tab is selected
    data['server_page_num'] = server_page_num
    data['disk_page_num'] = disk_page_num
    data['ss_server_page_num'] = ss_server_page_num
    data['ss_block_page_num'] = ss_block_page_num
    data['sg_page_num'] = sg_page_num  # security groups
    print("myserver_data", data)
    return data
EDIT-2
This is how I get op_conn, and it is a singleton:
op_conn = OpenstackConn.OpenstackConn()
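The slow part here is not really the database but the OpenStack SDK calls (compute.servers(), compute.images(), network.security_groups()), which run on every request. One way to fetch once and paginate many times is Django's low-level cache; a minimal sketch under that assumption (the cache key and 60-second timeout are illustrative, and depending on the cache backend the SDK objects may need converting to plain dicts before caching):

from django.core.cache import cache

def get_openstack_lists(op_conn):
    lists = cache.get('openstack_lists')
    if lists is None:
        lists = {
            'servers': list(op_conn.conn.compute.servers()),
            'images': list(op_conn.conn.compute.images()),
            'security_groups': list(op_conn.conn.network.security_groups()),
        }
        cache.set('openstack_lists', lists, timeout=60)  # refetch at most once a minute
    return lists

get_res_myserver could then build its Paginator objects from these cached lists on every page change instead of hitting the OpenStack API each time.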

M2m relation breaks when passing filter parameters

I have an m2m relation between properties and images in my model, like imageproperty = models.ManyToManyField(Property, blank=True). I'm having an issue trying to filter properties with their associated images: whenever I pass a parameter in my query, the images no longer show correctly. This is my code so far:
def filter_properties(request, prop, p):
    order = "creation_date"
    if p["sort"]: order = p["sort"]
    if p["asc_desc"] == "desc": order = '-' + order
    results = Property.objects.filter(status=True)
    for prop in results:
        prop.images = prop.image_set.all()[:1]  # Should I return this in results so it keeps the values when filtering?
    if p["name"]: results = results.filter(name__icontains=p["name"])
    if p["price_from"]: results = results.filter(price__gte=int(p["price_from"]))
    if p["price_to"]: results = results.filter(price__lte=int(p["price_to"]))
    if p["category"]:
        lst = p["category"]
        or_query = Q(categories=lst[0])
        for c in lst[1:]:
            or_query = or_query | Q(categories=c)
        results = results.filter(or_query).distinct()
    return results

def search_properties_view(request):
    try:
        page = int(request.GET.get("page", '1'))
    except ValueError:
        page = 1
    p = request.POST
    prop = defaultdict(dict)
    parameters = dict.fromkeys(
        ('name', 'price_from', 'price_to', 'currency_type', 'activity_type', 'sort', 'asc_desc'),
        '',
    )
    parameters["category"] = []
    for k, v in p.items():
        if k == "category":
            parameters[k] = [int(x) for x in p.getlist(k)]
        elif k in parameters:
            parameters[k] = v
        elif k.startswith("name") or k.startswith("curency_type") or k.startswith("activity_type"):
            k, pk = k.split('-')
            prop[pk][k] = v
        elif k.startswith("category"):
            pk = k.split('-')[1]
            prop[pk]["category"] = p.getlist(k)
    if page != 1 and "parameters" in request.session:
        parameters = request.session["parameters"]
    else:
        request.session["parameters"] = parameters
    results = filter_properties(request, prop, parameters)
    paginator = Paginator(results, 20)
    try:
        results = paginator.page(page)
    except (InvalidPage, EmptyPage):
        results = paginator.page(paginator.num_pages)
    return render(request, 'propiedades/propiedades.html', {
        'propiedades': request.POST,
        'media_url': settings.MEDIA_URL,
        'results': results,
        'params': parameters,
        'categories': PropertyCategory.objects.all()
    })
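One likely cause, reading filter_properties: prop.images is set on the instances of the first queryset, but each later .filter() call builds a new queryset, so the objects that reach the template never carry that attribute. A minimal sketch of a fix under that reading (names from the question): evaluate the queryset once, after all filters have run, and only then attach the preview image:

results = filter_properties(request, prop, parameters)
results = list(results)  # evaluate after all filtering is done
for obj in results:
    obj.images = obj.image_set.all()[:1]  # now the attribute survives to the template
paginator = Paginator(results, 20)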
