Grapehene doesn't execute query - python
I have the following Graphene implementation:
import graphene
import json
import psycopg2
import re
connection = psycopg2.connect(user='postgres', password='Steppen1!', host='127.0.0.1', port='5432', database='TCDigital')
cursor = connection.cursor()
paths = {}
class PathError(Exception):
def __init__(self, referencing, referenced):
self.message = "entity {} has no relation with entity {}".format(referencing, referenced)
def __str__(self):
return self.message
def get_columns(entity):
columns = {}
cursor.execute("SELECT ordinal_position, column_name FROM information_schema.columns WHERE table_name = '{}'".format(entity))
resultset = cursor.fetchall()
i = 1
for entry in resultset:
columns[entry[1]] = i
i = i + 1
return columns
def get_previous_annotate(name, entity, related_column, id):
columns = get_columns(entity)
related_position = columns[related_column]-1
entity_content = paths[name][entity]
entity_content_filtered = [entry for entry in entity_content if entry['entry'][related_position] == id]
annotate_to_return = sum(list(map(lambda entry: entry['annotate'], entity_content_filtered)))
return annotate_to_return
def calculate_annotate_operation(entity, entry, entity_columns, operation, operands):
operand1 = entity_columns[operands[0]]
operand2 = entity_columns[operands[1]]
if operation == '_sum':
return entry[operand1] + entry[operand2]
elif operation == '_mult':
return entry[operand1] * entry[operand2]
elif operation == '_div':
return entry[operand1] / entry[operand2]
elif operation == '_rest':
return entry[operand1] - entry[operand2]
else:
return None
def get_annotated_value(name, entity, entry, annotate, entity_columns):
if annotate[0] != '_':
column = entity_columns[annotate]
column_value = entity[column['ordinal_position']]
return column_value
elif annotate == '_count':
return 1
else:
operation = annotate.split('(')
if operation[0] in ['_sum', '_mult', '_div', '_rest']:
operands_base = operation[1].split(')')[0]
operands = operands_base.split(',')
return calculate_annotate_operation(operation[0], operands)
else:
raise "OperaciĆ³n no permitida: {}".format(annotate)
def get_annotate(name, entity, entry, entity_columns, previous_entity, related_column, annotate):
annotated_value = None
previous_entity_columns = get_columns(previous_entity)
if previous_entity:
annotated_value = get_previous_annotate(name, previous_entity, related_column, entry[entity_columns['id']-1])
else:
annotated_value = get_annotated_value(name, entity, entry, annotate, entity_columns)
#print({'name': name, 'entity': entity, 'entry': entry, 'annotated_value': annotated_value})
return annotated_value
def populate_entity(name, entity, entity_columns, previous_entity, previous_entity_relationship_column, annotate):
cursor.execute('SELECT * FROM {}'.format(entity))
resultset = cursor.fetchall()
paths[name][entity] = []
for entry in resultset:
if previous_entity:
entry_annotate = get_annotate(name, entity, entry, entity_columns, previous_entity, previous_entity_relationship_column, annotate)
else:
entry_annotate = get_annotate(name, entity, entry, entity_columns, previous_entity, None, annotate)
paths[name][entity].append({'entry': entry, 'entity_columns': entity_columns, 'annotate': entry_annotate, 'previos_entity': previous_entity, 'previous_entity_relationship_column': previous_entity_relationship_column})
def create_path(name, entities, annotate):
paths[name] = {}
previous_entity = None
for entity in reversed(entities):
previous_entity_relationship_column = None
if previous_entity:
previous_entity_relationships = get_foreign_relationships(previous_entity)
previous_entity_relationship = [relationship for relationship in previous_entity_relationships if relationship[5] == entity][0]
previous_entity_relationship_column = previous_entity_relationship[3]
entity_columns = get_columns(entity)
populate_entity(name, entity, entity_columns, previous_entity, previous_entity_relationship_column, annotate)
previous_entity = entity
def get_foreign_relationships(entity):
cursor.execute('''
SELECT
tc.table_schema, tc.constraint_name, tc.table_name, kcu.column_name, ccu.table_schema AS foreign_table_schema, ccu.table_name AS foreign_table_name, ccu.column_name AS foreign_column_name
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
JOIN information_schema.constraint_column_usage AS ccu ON ccu.constraint_name = tc.constraint_name
AND ccu.table_schema = tc.table_schema
WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name='{}';'''.format(entity))
result = cursor.fetchall()
result_array = []
for record in result:
new_entity = Entity(name=record[5])
result_array.append(new_entity)
return result
def is_relationship(referencing, referenced):
foreign_relationships = get_foreign_relationships(referencing)
if referenced in list(map(lambda relationship: relationship[5], foreign_relationships)):
return True
else:
return False
def traverse(entities, direction):
for i in range(len(entities)):
if i > 0 and i < len(entities)-1:
if not is_relationship(entities[i], entities[i-1]):
raise PathError(entities[i], entities[i-1])
return True
def validate_path(path):
entities = path.split('/')
traverse(entities, 'forward')
return entities
def get_path_step(name, step, key):
content = paths[name][step]
if key is None:
filtered_content = [{'entry': entry['entry'], 'annotate': entry['annotate']} for entry in content]
else:
if content['previous_entity_relationship_column'] is not None:
previous_entity_relationship_column = content['previous_entity_relationship_column']
relationship_column_index = content['entity_columns'][previous_entity_relationship_column]
filtered_content = [{'entry': entry['entry'], 'annotate': entry['annotate']} for entry in content if entry[relationship_column_index] == key]
return filtered_content
class Entity(graphene.ObjectType):
name = graphene.String()
annotate = graphene.Float()
content = graphene.Field(graphene.List(lambda: Entity))
class Query(graphene.ObjectType):
entity_relationships = graphene.List(Entity, entity=graphene.String())
postgresql_version = graphene.String
path = graphene.String(name=graphene.String(), path=graphene.String(), annotate=graphene.String(), current=graphene.String(), key=graphene.Int())
path_step = graphene.String(name=graphene.String(), step=graphene.String(), key=graphene.Int())
#staticmethod
def resolve_path_step(parent, info, name, step, key):
path_step = get_path_step(name, step, key)
print(name)
print(step)
print(key)
print(path_step)
return path_step
#staticmethod
def resolve_path(parent, info, name, path, annotate, current, key):
entities = validate_path(path)
create_path(name, entities, annotate)
content_to_return = get_path_step(name, entities[0], None)
return content_to_return
#staticmethod
def resolve_entity_relationships(parent, info, entity):
result_array = get_foreign_relationships(entity)
return result_array
#staticmethod
def resolve_postgresql_version(parent, info):
cursor.execute("SELECT version();")
record = cursor.fetchone()
return record
def execute_query(query_to_execute):
queries = {
'postgresqlVersion': '''
{
postgresqlVersion
}
''',
'entityRelationships': '''
{
entityRelationships (entity: "inventory_productitem") {
name
}
}
''',
'path': '''
{
path(name: "Ventas", path: "general_state/general_city/inventory_store/operations_sale", annotate: "_count", current: "inventory_product", key: 0)
}
''',
'path_step': '''
{
path_step(name: "Ventas", step: "inventory_store", key: 27)
}
'''
}
schema = graphene.Schema(query=Query)
result = schema.execute(queries[query_to_execute])
dict_result = dict(result.data.items())
print(json.dumps(dict_result, indent=2))
result2 = schema.execute(queries['path_step'])
dict_result2 = dict(result2.data.items())
print(json.dumps(dict_result2, indent=2))
execute_query('path')
Te first call to schema.execute() works with no problem, but the second one doesn't even enter the resolver, and the only error message I get is:
Traceback (most recent call last):
File "query.py", line 249, in <module>
execute_query('path')
File "query.py", line 246, in execute_query
dict_result2 = dict(result2.data.items())
AttributeError: 'NoneType' object has no attribute 'items'
I don't know what I am missing.
I have found that the problem was that I am making a pythonic pascal-cased call to Graphene query: path_step(name: "Ventas", step: "inventory_store", key: 27), but Graphene requieres queries to be called on a camel-cased fashion, even when the name of the resolvers and query variables are pascal-cased in the code.
So the call to the query must by camel-cased like this: pathStep(name: "Ventas", step: "inventory_store", key: 27)
Related
TypeError:string indices must be integers
i am getting following error- "Python: TypeError:string indices must be integers" and I can not see what's wrong. Am I being stupid and overlooking an obvious mistake here? class Order_ListAPIView(APIView): def get(self,request,format=None): totalData=[] if request.method == 'GET': cur,conn = connection() order_query = ''' SELECT * FROM orders''' order_detail_query = ''' SELECT * FROM order_details''' with conn.cursor(MySQLdb.cursors.DictCursor) as cursor: cursor.execute(order_detail_query) order_detail_result = cursor.fetchall() order_detail_data = list(order_detail_result) # print(order_detail_data) cursor.execute(order_query) order_result = cursor.fetchall() order_data = list(order_result) dic = {} for d in order_detail_query: if d['order_id'] not in dic: dic[d['order_id']] = [] dic[d['order_id']].append(d) return order_data.append(dic) totalData.append({"order_data":order_data, "order_detail_data":order_detail_data}) return Response({"totalData":totalData,},status=status.HTTP_200_OK) else: return Response(status=status.HTTP_400_BAD_REQUEST)
lass Order_ListAPIView(APIView): def get(self,request,format=None): totalData=[] if request.method == 'GET': cur,conn = connection() order_query = ''' SELECT * FROM orders''' order_detail_query = ''' SELECT * FROM order_details''' with conn.cursor(MySQLdb.cursors.DictCursor) as cursor: cursor.execute(order_detail_query) order_detail_result = cursor.fetchall() order_detail_data = list(order_detail_result) # print(order_detail_data) cursor.execute(order_query) order_result = cursor.fetchall() order_data = list(order_result) dic = {} for d in order_detail_data: if d['order_id'] not in dic: dic[d['order_id']] = [] dic[d['order_id']].append(d) return order_data.append(dic) totalData.append({"order_data":order_data, "order_detail_data":order_detail_data}) return Response({"totalData":totalData,},status=status.HTTP_200_OK) else: return Response(status=status.HTTP_400_BAD_REQUEST) This should work
Expected singleton error occurs when return more than one record in odoo?
This method to get the product price from the PO, and it works well if the PO have only one record otherwise I am getting this error. raise ValueError("Expected singleton: %s" % self) This is the method #api.multi def create_refund_invoice(self): inv_obj = self.env['account.invoice'] for pick in self.filtered(lambda x:x.return_type): type = 'in_refund' if pick.return_type == 'purchase' else 'out_refund' inv_lines = {'type':type, 'partner_id':pick.partner_id.id, 'invoice_line_ids':[]} account = pick.return_type == 'sale' and pick.partner_id.property_account_receivable_id.id or pick.partner_id.property_account_payable_id.id inv_lines['account_id'] = account inv_lines['origin'] = pick.name inv_lines['name'] = pick.origin for line in pick.move_lines: name = line.product_id.partner_ref for rec in self: rec.order_id = line.env['purchase.order'].search([('name', '=', line.origin)]).order_line rec.price = rec.order_id.price_unit inv_lines['invoice_line_ids'] += [(0, None, { 'product_id':line.product_id.id, 'name':name, 'quantity':line.quantity_done, 'price_unit': rec.price, 'account_id':line.product_id.product_tmpl_id.get_product_accounts()['income'].id})] if inv_lines['invoice_line_ids']: inv_id = inv_obj.create(inv_lines) pick.invoice_id = inv_id.id
It is necessary for odoo that when you are getting more than one record then you can not access it's field values directly. In your code you are trying to get purchase_order_line of purchase_order It may possible that many lines are available in a single order. def create_refund_invoice(self): purchase_order_obj = self.env['purchase.order'] inv_obj = self.env['account.invoice'] for pick in self.filtered(lambda x:x.return_type): type = 'in_refund' if pick.return_type == 'purchase' else 'out_refund' inv_lines = {'type':type, 'partner_id':pick.partner_id.id, 'invoice_line_ids':[]} account = pick.return_type == 'sale' and pick.partner_id.property_account_receivable_id.id or pick.partner_id.property_account_payable_id.id inv_lines['account_id'] = account inv_lines['origin'] = pick.name inv_lines['name'] = pick.origin for line in pick.move_lines: name = line.product_id.partner_ref for rec in self: order_lines = purchase_order_obj.search([('name', '=', line.origin)]).order_line for pol in order_lines: price = pol.order_id.price_unit inv_lines['invoice_line_ids'] += [(0, None, { 'product_id':line.product_id.id, 'name':name, 'quantity':line.quantity_done, 'price_unit': price, 'account_id':line.product_id.product_tmpl_id.get_product_accounts()['income'].id}) ] if inv_lines['invoice_line_ids']: inv_id = inv_obj.create(inv_lines) pick.invoice_id = inv_id.id I have updated code test above code and update it as per your requirement.
Save an object during a Celery task
I would like to save an object in my database during the Celery task. This object is an export file. The Celery task lets to create it and I would like to store it in my table. This is my models.py : class CeleryExportFile(models.Model): name = models.CharField(max_length=100, verbose_name=_('Name of export file'), default='') file = models.FileField(upload_to='media/', default='') creation_date = models.DateTimeField(verbose_name=_('Creation date'), auto_now_add=True) expiration_date = models.DateTimeField(verbose_name=_('Expiration date')) def __str__(self): return self.file class Meta: verbose_name = _('Celery Export File') verbose_name_plural = _('Celery Export Files') I have a Celery tasks.py file : def get_xls_export(self, model="", search_info="", query_params=None): app_label = 'app' its_fp_or_up_product = False query_params = query_params or {} obsolete = False if query_params.get('obsolete', '') == 'True': obsolete = True default_sorting_key = '' show_date_format = settings.USER_DATE_FORMAT if model == "finalproduct" or model == "upstreamproduct": its_fp_or_up_product = True default_sorting_key = 'manufacturer_name' if model == "finalproduct" else 'releasing_body__short_name' if model == "releasebodyinstitution": default_sorting_key = 'name' model = apps.get_model(app_label=app_label, model_name=model) # create a workbook in memory output = io.BytesIO() book = Workbook(output, {'constant_memory': True}) sheet = book.add_worksheet('Page 1') # Sheet header, first row row_num = 0 columns = model.export_field_excel() rows_width_max = {} bold_format = book.add_format({'bold': True}) max_col_width = [] for col_num in range(len(columns)): rows_width_max[col_num] = columns[col_num].__len__() sheet.write(row_num, col_num, columns[col_num], bold_format) max_col_width.append(len(columns[col_num]) if len(columns[col_num]) > 10 else 10) default_sorting = True sorting_key = '' # Define search get all object or sorted value. if search_info != '': create_filters = search_info.split(';') if create_filters.__len__() == 1: if 'sorting' in create_filters[0]: default_sorting = False sorting_key = create_filters[0].split('=')[1].replace('~', '-') search_info = '' else: for criter in create_filters: if 'sorting' in criter: default_sorting = False sorting_key = criter.split('=')[1].replace('~', '-') search_info = search_info.replace(criter, "") search_info = search_info[:-1] objects = model.objects.all() if not its_fp_or_up_product: if obsolete: objects = objects.obsolete() else: objects = objects.active() if sorting_key: objects = objects.order_by(sorting_key, 'pk') if default_sorting: objects = objects.order_by(default_sorting_key, 'pk') if search_info != '': create_filters = search_info.split(';') for search_filter in create_filters: search_filter = search_filter.split('=') try: if search_filter[1]: objects = objects.filter(**{search_filter[0]: search_filter[1]}) except: # Crud patch search if search_filter[0] == 'q': search_info = search_info.replace('q=', '') objects = objects.filter(get_query(search_info, model.get_xls_values_list())) rows = objects.values_list(*model.get_xls_values_list()) for row in rows: row_num += 1 for col_num in range(len(row)): # Patch True False for boolean field is_bool = False if type(row[col_num]) is bool: is_bool = True if col_num in model.get_date_field_number(): if row[col_num]: sheet.write(row_num, col_num, row[col_num].strftime(show_date_format)) else: if is_bool: sheet.write(row_num, col_num, 'True' if row[col_num] else 'False') else: sheet.write(row_num, col_num, row[col_num]) if len(str(row[col_num])) > max_col_width[col_num]: max_col_width[col_num] = len(str(row[col_num])) # AutoFit col for col_num in range(len(columns)): sheet.set_column(col_num, col_num, max_col_width[col_num] + 1) book.close() output.seek(0) name = str(name + "_" + str(datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%s")) + '.xlsx') CeleryExportFile.save(name=name, file=output, expiration_date=datetime.datetime.now()) # default_storage.save(name, output) try: self.send_email(name=name) except ConnectionRefusedError as e: return ['error_message', _('Error for sending email')] return ['success_message', _('Generation of export file is done')] I'm getting this issue : [2019-02-14 12:23:56,151: ERROR/ForkPoolWorker-4] Task app.tasks.get_xls_export[80e7ea2e-4192-4df7-ba05-83a14805225f] raised unexpected: TypeError("save() got an unexpected keyword argument 'name'",) Traceback (most recent call last): File "/home/.pyenv/versions/3.6.2/envs/app/lib/python3.6/site-packages/celery/app/trace.py", line 382, in trace_task R = retval = fun(*args, **kwargs) File "/home/.pyenv/versions/3.6.2/envs/app/lib/python3.6/site-packages/celery/app/trace.py", line 641, in __protected_call__ return self.run(*args, **kwargs) File "/home/Bureau/Projets/app/src/app/tasks.py", line 151, in get_xls_export CeleryExportFile.save(name='test', file=book, expiration_date=datetime.datetime.now()) TypeError: save() got an unexpected keyword argument 'name' How I can save my file into my database ? I have to set name, file, .. as kwargs and create a save() method in my models.py with kwargs.pop ?
Just do it as following: celery_export_file = CeleryExportFile(name=name, file=output, expiration_date=datetime.datetime.now()) celery_export_file.save() or you can call create() method like: CeleryExportFile.objects.create(name=name, file=output, expiration_date=datetime.datetime.now())
Python syntax error in class definition [closed]
Closed. This question needs debugging details. It is not currently accepting answers. Edit the question to include desired behavior, a specific problem or error, and the shortest code necessary to reproduce the problem. This will help others answer the question. Closed 6 years ago. Improve this question I am new to Python I got an error at class def line. I am not sure what mistake I have done. Please check it and let me know. class contactservices(): def GetDirectorySearchList(userId:int, searchKey,result:ContactResultSet): ret = RETURN_CODE.RECORD_NOT_EXISTS cursor = connections['default'].cursor() rows ="" invalid syntax (contactservices.py, line 15) Thanks in advance. Full Code: from django.db import connections from api.enums import RETURN_CODE from api.user.contactmodel import ContactModel from api.user.contactmodel import DirectoryModel from api.user.resultset import ContactResultSet from api.datalayer.dbservices import dbservices class contactservices(): """Get Directory Search of a specific user""" def GetDirectorySearchList(userId:int, searchKey, result:ContactResultSet): ret = RETURN_CODE.RECORD_NOT_EXISTS cursor = connections['default'].cursor() rows ="" try: #user triple quote for multiline string. msqlquery = """SELECT a.id, username, first_name, last_name, firm,email,extension, extpassword,start_date,expiry_date,status,presence_status ,aliasname,picturepath,statusupdatedate FROM ocktopi_login a where (first_name LIKE '%%""" + str(searchKey) + "%%' OR last_name LIKE '%%" + str(searchKey) + "%%' OR aliasname LIKE '%%" + str(searchKey) + "%%') AND id NOT IN (select contact from usercontactmapping where user = """ + str(userId) + ") and id <> " + str(userId) + ""; #cursor.execute(msqlquery) rows = dbservices.query_to_dicts(msqlquery) ret = RETURN_CODE.RECORD_EXISTS except Exception as e: ret = RETURN_CODE.RECORD_ERROR #We dont have a way to map with column name . So only solution is column index.Changed to dictory using a method now. directorylist = list() for row in rows: directory = DirectoryModel() directory.Id = row['id'] directory.Username = row['username'] directory.FirstName = row['first_name'] directory.LastName = row['last_name'] directory.Firm = row['firm'] directory.Email = row['email'] directory.Extension = row['extension'] directory.Status = row['status'] directory.PresenceStatus = row['presence_status'] directory.AliasName = row['aliasname'] directory.Picturepath = row['picturepath'] directorylist.append(directory) result.ReturnCode = int(ret) return directorylist """Get Contact Details of a specific user""" def GetContactList(userId:int, result:ContactResultSet): ret = RETURN_CODE.RECORD_NOT_EXISTS cursor = connections['default'].cursor() rows ="" try: #user triple quote for multiline string. msqlquery = """SELECT a.id, username, first_name, last_name, firm,email,extension, extpassword,start_date,expiry_date,status,presence_status ,aliasname,picturepath,statusupdatedate FROM ocktopi_login a inner join usercontactmapping b on a.id=b.contact and a.id <> """ + str(userId) + " and b.user= " + str(userId) + ""; rows = dbservices.query_to_dicts(msqlquery) ret = RETURN_CODE.RECORD_EXISTS except Exception as e: ret = RETURN_CODE.RECORD_ERROR #We dont have a way to map with column name . So only solution is column index.Changed to dictory using a method now. contactlist = list() for row in rows: contact = ContactModel() contact.Id = row['id'] contact.Username = row['username'] contact.FirstName = row['first_name'] contact.LastName = row['last_name'] contact.Firm = row['firm'] contact.Email = row['email'] contact.Extension = row['extension'] contact.Status = row['status'] contact.PresenceStatus = row['presence_status'] contact.AliasName = row['aliasname'] contact.Picturepath = row['picturepath'] contactlist.append(contact) result.ReturnCode = int(ret) return contactlist """Add user contact""" def AddUserContact(userId:int, contactId:int): ret = 0 cursor = connections['default'].cursor() rows ="" try: msqlquery = """insert into usercontactmapping (user,contact) values(%s, %s)"""; cursor.execute(msqlquery,(userId, contactId)) ret = cursor.rowcount return ret except Exception as e: ret = -1 finally: cursor.close() return ret """Remove user contact""" def RemoveUserContact(userId:int, contactId:int): ret = 0 cursor = connections['default'].cursor() rows ="" try: msqlquery = """delete from usercontactmapping where user=%s and contact=%s"""; cursor.execute(msqlquery,(userId, contactId)) ret = cursor.rowcount return ret except Exception as e: ret = -1 finally: cursor.close() return ret
Try: def GetDirectorySearchList(userId, searchKey,result): ret = RETURN_CODE.RECORD_NOT_EXISTS cursor = connections['default'].cursor() rows = "" Annotations such as userId:int don't work in Python 2.7. Also make sure the code is indented properly.
M2m relation breaks when passing filter parameters
I have a m2m relation between properties and images in my model like imageproperty = models.ManyToManyField(Property, blank = True). Im having an issue trying to filter properties with their associated images as whenever i pass a parameter in my query i get something like this and the images are not showing quiet good . This is my code so far def filter_properties(request, prop, p): order = "creation_date" if p["sort"]: order = p["sort"] if p["asc_desc"] == "desc": order = '-' + order results = Property.objects.filter(status = True) for prop in results: prop.images = prop.image_set.all()[:1] #Should i need to return in results so it brings values when filtering? if p["name"] : results = results.filter(name__icontains=p["name"]) if p["price_from"] : results = results.filter(price__gte=int(p["price_from"])) if p["price_to"] : results = results.filter(price__lte=int(p["price_to"])) if p["category"]: lst = p["category"] or_query = Q(categories = lst[0]) for c in lst[1:]: or_query = or_query | Q(categories = c) results = results.filter(or_query).distinct() return results def search_properties_view(request): try: page = int(request.GET.get("page", '1')) except ValueError: page = 1 p = request.POST prop = defaultdict(dict) parameters = dict.fromkeys( ('name', 'price_from', 'price_to', 'currency_type', 'activity_type', 'sort', 'asc_desc'), '', ) parameters["category"] = [] for k, v in p.items(): if k == "category": parameters[k] = [int(x) for x in p.getlist(k)] elif k in parameters: parameters[k] = v elif k.startswith("name") or k.startswith("curency_type") or k.startswith("activity_type"): k, pk = k.split('-') prop[pk][k] = v elif k.startswith("category"): pk = k.split('-')[1] prop[pk]["category"] = p.getlist(k) if page != 1 and "parameters" in request.session: parameters = request.session["parameters"] else: request.session["parameters"] = parameters results = filter_properties(request, prop, parameters) paginator = Paginator(results, 20) try: results = paginator.page(page) except (InvalidPage, EmptyPage): request = paginator.page(paginator.num_pages) return render(request, 'propiedades/propiedades.html', { 'propiedades': request.POST, 'media_url': settings.MEDIA_URL, 'results': results, 'params': parameters, 'categories': PropertyCategory.objects.all() })