I would like to save an object in my database during a Celery task. This object is an export file: the Celery task creates it and I would like to store it in my table.
This is my models.py:
class CeleryExportFile(models.Model):
    name = models.CharField(max_length=100, verbose_name=_('Name of export file'), default='')
    file = models.FileField(upload_to='media/', default='')
    creation_date = models.DateTimeField(verbose_name=_('Creation date'), auto_now_add=True)
    expiration_date = models.DateTimeField(verbose_name=_('Expiration date'))

    def __str__(self):
        return self.file

    class Meta:
        verbose_name = _('Celery Export File')
        verbose_name_plural = _('Celery Export Files')
I have a Celery tasks.py file:
def get_xls_export(self, model="", search_info="", query_params=None):
    app_label = 'app'
    its_fp_or_up_product = False
    query_params = query_params or {}
    obsolete = False
    if query_params.get('obsolete', '') == 'True':
        obsolete = True
    default_sorting_key = ''
    show_date_format = settings.USER_DATE_FORMAT
    if model == "finalproduct" or model == "upstreamproduct":
        its_fp_or_up_product = True
        default_sorting_key = 'manufacturer_name' if model == "finalproduct" else 'releasing_body__short_name'
    if model == "releasebodyinstitution":
        default_sorting_key = 'name'
    model = apps.get_model(app_label=app_label, model_name=model)
    # create a workbook in memory
    output = io.BytesIO()
    book = Workbook(output, {'constant_memory': True})
    sheet = book.add_worksheet('Page 1')
    # Sheet header, first row
    row_num = 0
    columns = model.export_field_excel()
    rows_width_max = {}
    bold_format = book.add_format({'bold': True})
    max_col_width = []
    for col_num in range(len(columns)):
        rows_width_max[col_num] = len(columns[col_num])
        sheet.write(row_num, col_num, columns[col_num], bold_format)
        max_col_width.append(len(columns[col_num]) if len(columns[col_num]) > 10 else 10)
    default_sorting = True
    sorting_key = ''
    # Define search: get all objects or a sorted subset.
    if search_info != '':
        create_filters = search_info.split(';')
        if len(create_filters) == 1:
            if 'sorting' in create_filters[0]:
                default_sorting = False
                sorting_key = create_filters[0].split('=')[1].replace('~', '-')
                search_info = ''
        else:
            for criter in create_filters:
                if 'sorting' in criter:
                    default_sorting = False
                    sorting_key = criter.split('=')[1].replace('~', '-')
                    search_info = search_info.replace(criter, "")
            search_info = search_info[:-1]
    objects = model.objects.all()
    if not its_fp_or_up_product:
        if obsolete:
            objects = objects.obsolete()
        else:
            objects = objects.active()
    if sorting_key:
        objects = objects.order_by(sorting_key, 'pk')
    if default_sorting:
        objects = objects.order_by(default_sorting_key, 'pk')
    if search_info != '':
        create_filters = search_info.split(';')
        for search_filter in create_filters:
            search_filter = search_filter.split('=')
            try:
                if search_filter[1]:
                    objects = objects.filter(**{search_filter[0]: search_filter[1]})
            except:
                # Crud patch search
                if search_filter[0] == 'q':
                    search_info = search_info.replace('q=', '')
                    objects = objects.filter(get_query(search_info, model.get_xls_values_list()))
    rows = objects.values_list(*model.get_xls_values_list())
    for row in rows:
        row_num += 1
        for col_num in range(len(row)):
            # Patch True/False for boolean fields
            is_bool = False
            if type(row[col_num]) is bool:
                is_bool = True
            if col_num in model.get_date_field_number():
                if row[col_num]:
                    sheet.write(row_num, col_num, row[col_num].strftime(show_date_format))
            else:
                if is_bool:
                    sheet.write(row_num, col_num, 'True' if row[col_num] else 'False')
                else:
                    sheet.write(row_num, col_num, row[col_num])
            if len(str(row[col_num])) > max_col_width[col_num]:
                max_col_width[col_num] = len(str(row[col_num]))
    # AutoFit columns
    for col_num in range(len(columns)):
        sheet.set_column(col_num, col_num, max_col_width[col_num] + 1)
    book.close()
    output.seek(0)
    name = str(name + "_" + str(datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%s")) + '.xlsx')
    CeleryExportFile.save(name=name, file=output, expiration_date=datetime.datetime.now())
    # default_storage.save(name, output)
    try:
        self.send_email(name=name)
    except ConnectionRefusedError as e:
        return ['error_message', _('Error for sending email')]
    return ['success_message', _('Generation of export file is done')]
I'm getting this issue:
[2019-02-14 12:23:56,151: ERROR/ForkPoolWorker-4] Task app.tasks.get_xls_export[80e7ea2e-4192-4df7-ba05-83a14805225f] raised unexpected: TypeError("save() got an unexpected keyword argument 'name'",)
Traceback (most recent call last):
  File "/home/.pyenv/versions/3.6.2/envs/app/lib/python3.6/site-packages/celery/app/trace.py", line 382, in trace_task
    R = retval = fun(*args, **kwargs)
  File "/home/.pyenv/versions/3.6.2/envs/app/lib/python3.6/site-packages/celery/app/trace.py", line 641, in __protected_call__
    return self.run(*args, **kwargs)
  File "/home/Bureau/Projets/app/src/app/tasks.py", line 151, in get_xls_export
    CeleryExportFile.save(name='test', file=book, expiration_date=datetime.datetime.now())
TypeError: save() got an unexpected keyword argument 'name'
How can I save my file into my database?
Do I have to pass name, file, etc. as kwargs and create a save() method in my models.py with kwargs.pop()?
Just do it as follows:
celery_export_file = CeleryExportFile(name=name, file=output, expiration_date=datetime.datetime.now())
celery_export_file.save()
or you can call the create() method like:
CeleryExportFile.objects.create(name=name, file=output, expiration_date=datetime.datetime.now())
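One more detail, since file is a FileField: handing it the raw BytesIO as file=output may not store the content the way you expect. The documented way to attach in-memory bytes is Django's ContentFile together with the field's own save(); a minimal sketch using the names from the question:

from django.core.files.base import ContentFile

celery_export_file = CeleryExportFile(name=name, expiration_date=datetime.datetime.now())
# FieldFile.save() writes the bytes through the configured storage backend
# and, with save=True, also saves the model instance in the same step.
celery_export_file.file.save(name, ContentFile(output.getvalue()), save=True)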
Related
I have this endpoint, which basically calls the function that creates the Excel file:
@router.post("/users/export", response_model=schemas.Mesage, status_code=201)
async def get_all_users_from_dashboard(
    *,
    db: Session = Depends(deps.get_db),
    s3: BaseClient = Depends(s3_auth),
    background_tasks: BackgroundTasks,
    current_user: models.User = Depends(deps.get_current_active_dashboard)
) -> Any:
    """
    Dashboard (Update user status all users from dashboard)
    """
    background_tasks.add_task(send_report_user, current_user)
    response = message("THE REPORT WILL be delivered", 201, {}, {})
    return response
And the function send_report_user is the one that collects the data from the database and then, with that data, creates the Excel file:
def send_report_user(current_user):
    s3: BaseClient = deps.s3_auth()
    if crud.user.is_only_superuser(user=current_user):
        query = (select(User).where(User.role_id != 4))
    elif current_user.department_id == 2:
        query = (select(User)).where(and_(User.role_id != 1, User.department_id != 1))
    else:
        query = (select(User)).where(User.department_id == current_user.department_id, User.role_id != 1, User.department_id != 1)
    users = query.order_by(User.last_name)
    users_enable = query.filter(User.is_active == True).order_by(User.last_name)
    users_disable = query.filter(User.is_active == False).order_by(User.last_name)
    save_path = 'C:\miProyecto'
    title_columns = ['ID', 'Apellido', 'Nombre', 'Departamento', 'Correo electrónico', 'Teléfono']
    excel_file_name = 'Nomina_AIG_' + datetime.datetime.now().date().strftime("%d-%m-%Y") + '.xlsx'  # payroll file name
    workbook = xlsxwriter.Workbook(excel_file_name)  # creates a workbook
    worksheet = workbook.add_worksheet(name='Usuarios activos')
    row = 1  # starts from row 1, column 0
    col = 0
    for id, name in enumerate(title_columns):
        worksheet.write(0, id, name)
        worksheet.set_column(0, id, 15)
    worksheet = workbook.add_worksheet(name='Usuarios inactivos')
    row = 1
    col = 0
    worksheet.write_row(0, 0, title_columns)
    for row_data in users_enable:
        data = [row, row_data.id, row_data.last_name, row_data.first_name, row_data.department_id, row_data.email,
                row_data.phone]
        worksheet.write_row(row, col, data)
        row += 1
    worksheet = workbook.add_worksheet(name='Usuarios totales')
    row = 1
    col = 0
    worksheet.write_row(0, 0, title_columns)
    for row_data in users:
        data = [row, row_data.id, row_data.last_name, row_data.first_name, row_data.department_id, row_data.email,
                row_data.phone]
        worksheet.write_row(row, col, data)
        row += 1
    workbook.close()
    entries = os.listdir('.')
    print(entries)
    # return create_response(data="hola")
The situation is that it isn't working, and I don't know what else I could change.
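One thing that stands out before the Excel part: the select() statements are built but never executed against a session, so the loops iterate over Select objects instead of user rows. A minimal sketch of executing them, assuming SQLAlchemy 1.4+ and a sessionmaker; SessionLocal is a hypothetical name, not one from the question:

from sqlalchemy import select

with SessionLocal() as db:  # SessionLocal: hypothetical sessionmaker
    base = select(User).order_by(User.last_name)
    users = db.execute(base).scalars().all()
    users_enable = db.execute(base.where(User.is_active == True)).scalars().all()
    users_disable = db.execute(base.where(User.is_active == False)).scalars().all()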
I have the following Graphene implementation:
import graphene
import json
import psycopg2
import re
connection = psycopg2.connect(user='postgres', password='Steppen1!', host='127.0.0.1', port='5432', database='TCDigital')
cursor = connection.cursor()
paths = {}
class PathError(Exception):
    def __init__(self, referencing, referenced):
        self.message = "entity {} has no relation with entity {}".format(referencing, referenced)

    def __str__(self):
        return self.message

def get_columns(entity):
    columns = {}
    cursor.execute("SELECT ordinal_position, column_name FROM information_schema.columns WHERE table_name = '{}'".format(entity))
    resultset = cursor.fetchall()
    i = 1
    for entry in resultset:
        columns[entry[1]] = i
        i = i + 1
    return columns

def get_previous_annotate(name, entity, related_column, id):
    columns = get_columns(entity)
    related_position = columns[related_column] - 1
    entity_content = paths[name][entity]
    entity_content_filtered = [entry for entry in entity_content if entry['entry'][related_position] == id]
    annotate_to_return = sum(list(map(lambda entry: entry['annotate'], entity_content_filtered)))
    return annotate_to_return

def calculate_annotate_operation(entity, entry, entity_columns, operation, operands):
    operand1 = entity_columns[operands[0]]
    operand2 = entity_columns[operands[1]]
    if operation == '_sum':
        return entry[operand1] + entry[operand2]
    elif operation == '_mult':
        return entry[operand1] * entry[operand2]
    elif operation == '_div':
        return entry[operand1] / entry[operand2]
    elif operation == '_rest':
        return entry[operand1] - entry[operand2]
    else:
        return None
def get_annotated_value(name, entity, entry, annotate, entity_columns):
    if annotate[0] != '_':
        column = entity_columns[annotate]
        column_value = entity[column['ordinal_position']]
        return column_value
    elif annotate == '_count':
        return 1
    else:
        operation = annotate.split('(')
        if operation[0] in ['_sum', '_mult', '_div', '_rest']:
            operands_base = operation[1].split(')')[0]
            operands = operands_base.split(',')
            return calculate_annotate_operation(entity, entry, entity_columns, operation[0], operands)
        else:
            raise Exception("Operation not allowed: {}".format(annotate))
def get_annotate(name, entity, entry, entity_columns, previous_entity, related_column, annotate):
    annotated_value = None
    previous_entity_columns = get_columns(previous_entity)
    if previous_entity:
        annotated_value = get_previous_annotate(name, previous_entity, related_column, entry[entity_columns['id'] - 1])
    else:
        annotated_value = get_annotated_value(name, entity, entry, annotate, entity_columns)
    # print({'name': name, 'entity': entity, 'entry': entry, 'annotated_value': annotated_value})
    return annotated_value

def populate_entity(name, entity, entity_columns, previous_entity, previous_entity_relationship_column, annotate):
    cursor.execute('SELECT * FROM {}'.format(entity))
    resultset = cursor.fetchall()
    paths[name][entity] = []
    for entry in resultset:
        if previous_entity:
            entry_annotate = get_annotate(name, entity, entry, entity_columns, previous_entity, previous_entity_relationship_column, annotate)
        else:
            entry_annotate = get_annotate(name, entity, entry, entity_columns, previous_entity, None, annotate)
        paths[name][entity].append({'entry': entry, 'entity_columns': entity_columns, 'annotate': entry_annotate, 'previos_entity': previous_entity, 'previous_entity_relationship_column': previous_entity_relationship_column})

def create_path(name, entities, annotate):
    paths[name] = {}
    previous_entity = None
    for entity in reversed(entities):
        previous_entity_relationship_column = None
        if previous_entity:
            previous_entity_relationships = get_foreign_relationships(previous_entity)
            previous_entity_relationship = [relationship for relationship in previous_entity_relationships if relationship[5] == entity][0]
            previous_entity_relationship_column = previous_entity_relationship[3]
        entity_columns = get_columns(entity)
        populate_entity(name, entity, entity_columns, previous_entity, previous_entity_relationship_column, annotate)
        previous_entity = entity

def get_foreign_relationships(entity):
    cursor.execute('''
        SELECT
            tc.table_schema, tc.constraint_name, tc.table_name, kcu.column_name, ccu.table_schema AS foreign_table_schema, ccu.table_name AS foreign_table_name, ccu.column_name AS foreign_column_name
        FROM information_schema.table_constraints AS tc
        JOIN information_schema.key_column_usage AS kcu ON tc.constraint_name = kcu.constraint_name
            AND tc.table_schema = kcu.table_schema
        JOIN information_schema.constraint_column_usage AS ccu ON ccu.constraint_name = tc.constraint_name
            AND ccu.table_schema = tc.table_schema
        WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name='{}';'''.format(entity))
    result = cursor.fetchall()
    result_array = []
    for record in result:
        new_entity = Entity(name=record[5])
        result_array.append(new_entity)
    return result

def is_relationship(referencing, referenced):
    foreign_relationships = get_foreign_relationships(referencing)
    if referenced in list(map(lambda relationship: relationship[5], foreign_relationships)):
        return True
    else:
        return False

def traverse(entities, direction):
    for i in range(len(entities)):
        if i > 0 and i < len(entities) - 1:
            if not is_relationship(entities[i], entities[i - 1]):
                raise PathError(entities[i], entities[i - 1])
    return True

def validate_path(path):
    entities = path.split('/')
    traverse(entities, 'forward')
    return entities

def get_path_step(name, step, key):
    content = paths[name][step]
    if key is None:
        filtered_content = [{'entry': entry['entry'], 'annotate': entry['annotate']} for entry in content]
    else:
        if content['previous_entity_relationship_column'] is not None:
            previous_entity_relationship_column = content['previous_entity_relationship_column']
            relationship_column_index = content['entity_columns'][previous_entity_relationship_column]
            filtered_content = [{'entry': entry['entry'], 'annotate': entry['annotate']} for entry in content if entry[relationship_column_index] == key]
    return filtered_content
class Entity(graphene.ObjectType):
    name = graphene.String()
    annotate = graphene.Float()
    content = graphene.Field(graphene.List(lambda: Entity))

class Query(graphene.ObjectType):
    entity_relationships = graphene.List(Entity, entity=graphene.String())
    postgresql_version = graphene.String()
    path = graphene.String(name=graphene.String(), path=graphene.String(), annotate=graphene.String(), current=graphene.String(), key=graphene.Int())
    path_step = graphene.String(name=graphene.String(), step=graphene.String(), key=graphene.Int())

    @staticmethod
    def resolve_path_step(parent, info, name, step, key):
        path_step = get_path_step(name, step, key)
        print(name)
        print(step)
        print(key)
        print(path_step)
        return path_step

    @staticmethod
    def resolve_path(parent, info, name, path, annotate, current, key):
        entities = validate_path(path)
        create_path(name, entities, annotate)
        content_to_return = get_path_step(name, entities[0], None)
        return content_to_return

    @staticmethod
    def resolve_entity_relationships(parent, info, entity):
        result_array = get_foreign_relationships(entity)
        return result_array

    @staticmethod
    def resolve_postgresql_version(parent, info):
        cursor.execute("SELECT version();")
        record = cursor.fetchone()
        return record
def execute_query(query_to_execute):
    queries = {
        'postgresqlVersion': '''
            {
                postgresqlVersion
            }
        ''',
        'entityRelationships': '''
            {
                entityRelationships (entity: "inventory_productitem") {
                    name
                }
            }
        ''',
        'path': '''
            {
                path(name: "Ventas", path: "general_state/general_city/inventory_store/operations_sale", annotate: "_count", current: "inventory_product", key: 0)
            }
        ''',
        'path_step': '''
            {
                path_step(name: "Ventas", step: "inventory_store", key: 27)
            }
        '''
    }
    schema = graphene.Schema(query=Query)
    result = schema.execute(queries[query_to_execute])
    dict_result = dict(result.data.items())
    print(json.dumps(dict_result, indent=2))
    result2 = schema.execute(queries['path_step'])
    dict_result2 = dict(result2.data.items())
    print(json.dumps(dict_result2, indent=2))

execute_query('path')
The first call to schema.execute() works with no problem, but the second one doesn't even enter the resolver, and the only error message I get is:
Traceback (most recent call last):
  File "query.py", line 249, in <module>
    execute_query('path')
  File "query.py", line 246, in execute_query
    dict_result2 = dict(result2.data.items())
AttributeError: 'NoneType' object has no attribute 'items'
I don't know what I am missing.
I have found that the problem was that I was making a Pythonic snake_case call to the Graphene query: path_step(name: "Ventas", step: "inventory_store", key: 27), but Graphene requires queries to be called in camelCase, even when the names of the resolvers and query variables are snake_case in the code.
So the call to the query must be camelCased, like this: pathStep(name: "Ventas", step: "inventory_store", key: 27)
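For reference, Graphene can also be told not to camelCase field names when the schema is built, in which case the snake_case spelling works as written; a small sketch against the same Query class:

schema = graphene.Schema(query=Query, auto_camelcase=False)
# With auto_camelcase disabled, the snake_case name is valid in the query document.
result = schema.execute('{ path_step(name: "Ventas", step: "inventory_store", key: 27) }')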
This method gets the product price from the PO, and it works well if the PO has only one record; otherwise I am getting this error:
raise ValueError("Expected singleton: %s" % self)
This is the method:
@api.multi
def create_refund_invoice(self):
    inv_obj = self.env['account.invoice']
    for pick in self.filtered(lambda x: x.return_type):
        type = 'in_refund' if pick.return_type == 'purchase' else 'out_refund'
        inv_lines = {'type': type, 'partner_id': pick.partner_id.id, 'invoice_line_ids': []}
        account = pick.return_type == 'sale' and pick.partner_id.property_account_receivable_id.id or pick.partner_id.property_account_payable_id.id
        inv_lines['account_id'] = account
        inv_lines['origin'] = pick.name
        inv_lines['name'] = pick.origin
        for line in pick.move_lines:
            name = line.product_id.partner_ref
            for rec in self:
                rec.order_id = line.env['purchase.order'].search([('name', '=', line.origin)]).order_line
                rec.price = rec.order_id.price_unit
                inv_lines['invoice_line_ids'] += [(0, None, {
                    'product_id': line.product_id.id,
                    'name': name,
                    'quantity': line.quantity_done,
                    'price_unit': rec.price,
                    'account_id': line.product_id.product_tmpl_id.get_product_accounts()['income'].id})]
        if inv_lines['invoice_line_ids']:
            inv_id = inv_obj.create(inv_lines)
            pick.invoice_id = inv_id.id
In Odoo, when you get more than one record in a recordset, you cannot access its field values directly.
In your code you are getting the purchase_order_line of a purchase_order, and it is quite possible that a single order contains many lines.
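To illustrate the rule with the recordset from the question (a sketch reusing the names from the code above):

order_lines = self.env['purchase.order'].search([('name', '=', line.origin)]).order_line
# order_lines.price_unit would raise "Expected singleton" once len(order_lines) > 1
prices = order_lines.mapped('price_unit')  # collect the field from every line at once
for pol in order_lines:                    # or iterate one record at a time
    price = pol.price_unit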
def create_refund_invoice(self):
    purchase_order_obj = self.env['purchase.order']
    inv_obj = self.env['account.invoice']
    for pick in self.filtered(lambda x: x.return_type):
        type = 'in_refund' if pick.return_type == 'purchase' else 'out_refund'
        inv_lines = {'type': type, 'partner_id': pick.partner_id.id, 'invoice_line_ids': []}
        account = pick.return_type == 'sale' and pick.partner_id.property_account_receivable_id.id or pick.partner_id.property_account_payable_id.id
        inv_lines['account_id'] = account
        inv_lines['origin'] = pick.name
        inv_lines['name'] = pick.origin
        for line in pick.move_lines:
            name = line.product_id.partner_ref
            for rec in self:
                order_lines = purchase_order_obj.search([('name', '=', line.origin)]).order_line
                for pol in order_lines:
                    price = pol.price_unit
                    inv_lines['invoice_line_ids'] += [(0, None, {
                        'product_id': line.product_id.id,
                        'name': name,
                        'quantity': line.quantity_done,
                        'price_unit': price,
                        'account_id': line.product_id.product_tmpl_id.get_product_accounts()['income'].id})
                    ]
        if inv_lines['invoice_line_ids']:
            inv_id = inv_obj.create(inv_lines)
            pick.invoice_id = inv_id.id
I have updated and tested the code above; adapt it as per your requirements.
I create new records with the create() method in the local database, using data pulled from a remote database. As we all know, there are four magic fields in Odoo: create_date, write_date, create_uid and write_uid. I want these fields to keep the remote database's values, but when I use create() to make the record, they end up with the values from the local creation instead of the remote data.
For example, in the remote database the create_date is '2019-10-11'. I can't change the value that is finally written to the local database, even if I pass the remote value in the dictionary: the create_date ends up as '2019-12-03' (today's date) rather than '2019-10-11'. The situation is similar for the other fields: write_date, create_uid and write_uid.
Please help me, and thanks to everyone who thinks about this question.
Following is my code.
The model class:
class ReportRentalIncomeFromProperty(models.Model):
    _name = 'report.rental.income.from.property'
    _description = 'The report about the income from property rental'
    _order = 'product_id, start_date'

    # create_date = fields.Datetime('Created on')
    create_uid = fields.Char('Created by')
    # write_date = fields.Datetime('Last Modified on')
    write_uid = fields.Char('Last Contributor')
    product_id = fields.Many2one('product.product', 'Property House')
    area_id = fields.Many2one('res.country.area', 'City')
    district_id = fields.Many2one('res.country.district', 'District')
    town_id = fields.Many2one('res.country.town', 'Town')
    road_name = fields.Char('Road')
    start_date = fields.Date('Start Date')
    end_date = fields.Date('End Date')
    should_pay = fields.Float('Should Pay')
    real_pay = fields.Float('Real Pay')
    balance_pay = fields.Float('Balance Pay')
    rental_compliance_rate = fields.Float('Rental Compliance Rate(%)', group_operator="avg")
    company_id = fields.Many2one('res.company', string='Company')
    parent_company_id = fields.Many2one('res.company', related='company_id.parent_id', store=True,
                                        string='Parent Company')
    actual_business = fields.Many2many(
        'rh.commercial.activities',
        'house_rental_rent_income_business_db',
        'actual_business_id',
        'commercial_activities_id',
        string='Actual business')
The function that pulls the remote data and creates the new records in the local database:
@api.multi
def synchronization_contract_performance_rate(self):
    self.env['report.rental.income.from.property'].search([]).unlink()
    product_dict = {}
    A_product = self.env['product.product'].search([])
    for a in A_product:
        product_dict[a.name] = a.id
    activities_dict = {}
    D_activities = self.env['rh.commercial.activities'].search([])
    for d in D_activities:
        activities_dict[d.name] = d.id
    address_dict = {}
    i = 0
    address_model_list = ['res.country.area', 'res.country.district', 'res.country.town']
    address_field_list = ['area_id', 'district_id', 'town_id']
    for addr in address_model_list:
        C_address = self.env[addr].search([])
        addr_dict = {}
        for c in C_address:
            addr_dict[c.name] = c.id
        address_dict[i] = addr_dict
        i += 1
    record_list_1 = self.company_recursive_func()
    for list_1 in record_list_1:
        database = list_1[0]
        link_url = list_1[1]
        if link_url.startswith('http://'):
            _uri = link_url.replace('http://', '')
            my_odoo = odoorpc.ODOO(_uri, port=48080)
        if link_url.startswith('https://'):
            _uri = link_url.replace('https://', '')
            my_odoo = odoorpc.ODOO(_uri, port=443, protocol='jsonrpc+ssl')
        username = list_1[2]
        password = list_1[3]
        my_odoo.login(database, username, password)
        company_id = list_1[4]
        company_code = list_1[5]
        product_actual_business_dict = {}
        A_product_actual_business_ids = my_odoo.env['product.product'].search([])
        A_product_actual_business = my_odoo.execute('product.product', 'read', A_product_actual_business_ids,
                                                    ['actual_business'])
        for a in A_product_actual_business:
            name_list = []
            for b in my_odoo.execute('rh.commercial.activities', 'read', a.get('actual_business'), ['name']):
                name_list.append(b.get('name'))
            product_actual_business_dict[a.get('id')] = name_list
        remote_ids = my_odoo.env['report.rental.income.from.property'].search([])
        remote_data_dict = my_odoo.execute('report.rental.income.from.property', 'read', remote_ids,
                                           ['product_id', 'start_date', 'create_date', 'create_uid',
                                            'write_date', 'write_uid', 'end_date', 'should_pay',
                                            'balance_pay', 'real_pay', 'rental_compliance_rate',
                                            'area_id', 'road_name', 'district_id', 'town_id'])
        for data in remote_data_dict:
            remote_product_name = data.get('product_id')[1]
            product_id = product_dict.get(remote_product_name + '(' + company_code + ')',
                                          None)
            if product_id:
                i = 0
                address_id_list = []
                for address_field in address_field_list:
                    if data.get(address_field):
                        remote_address_name = data.get(address_field)[1]
                        local_address_id = address_dict[i].get(remote_address_name, None)
                        address_id_list.append(local_address_id)
                    else:
                        address_id_list.append(None)
                    i += 1
                ids_list = []
                find_names = product_actual_business_dict.get(data.get('product_id')[0])
                for find_name in find_names:
                    id = activities_dict.get(find_name, None)
                    if id:
                        ids_list.append(id)
                value = {
                    'product_id': product_id,
                    'area_id': address_id_list[0],
                    'district_id': address_id_list[1],
                    'town_id': address_id_list[2],
                    'road_name': data['road_name'],
                    'start_date': data['start_date'],
                    'end_date': data['end_date'],
                    'should_pay': data['should_pay'],
                    'real_pay': data['real_pay'],
                    'create_date': data['create_date'],
                    'create_uid': data['create_uid'][1],
                    'write_date': data['write_date'],
                    'write_uid': data['write_uid'][1],
                    'balance_pay': data['balance_pay'],
                    'rental_compliance_rate': data['rental_compliance_rate'],
                    'company_id': company_id,
                    'actual_business': [(6, 0, ids_list)]
                }
                self.env['report.rental.income.from.property'].create(value)
        my_odoo.logout()
You can change the standard Odoo fields after you create your record, with an SQL query:
property_id = self.env['report.rental.income.from.property'].create(value)
self.env.cr.execute("UPDATE report_rental_income_from_property SET create_date='%s', create_uid=%s, write_date='%s', write_uid=%s WHERE id=%s" %
                    (value['create_date'], value['create_uid'], value['write_date'], value['write_uid'], property_id.id))
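As a side note, psycopg2 can bind the values itself, which avoids the quoting pitfalls of % interpolation. A sketch of the same update (create_uid and write_uid are integer user-id columns in the database, so whether value holds suitable ids here is an assumption):

self.env.cr.execute(
    "UPDATE report_rental_income_from_property "
    "SET create_date=%s, create_uid=%s, write_date=%s, write_uid=%s "
    "WHERE id=%s",
    (value['create_date'], value['create_uid'], value['write_date'], value['write_uid'], property_id.id),
)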
I keep getting random IndentationErrors using OS X and PyCharm (I tried switching tabs and spaces in the PyCharm settings, with no luck). If I run the project under Linux it works just fine. The code below attempts to upload the users from a CSV; I've also attempted to comment out different fields that don't exist in this group of users.
import csv
from django.contrib.auth import get_user_model

User = get_user_model()

members = open('Volunteers.csv', "rU")
data = csv.DictReader(members)
default_password = User.objects.make_random_password()

def generate_username(first_name, last_name):
    val = "{0}{1}".format(first_name[0], last_name[0]).lower()
    x = 0
    while True:
        if x == 0 and User.objects.filter(username=val).count() == 0:
            return val
        else:
            new_val = "{0}{1}".format(val, x)
            if User.objects.filter(username=new_val).count() == 0:
                return new_val
        x += 1
        if x > 1000000:
            raise Exception("Name is super popular!")

for row in data:
    email = row['Email']
    first_name = row['First Name'],
    last_name = row['Last Name'],
    username = generate_username(first_name, last_name)
    user = User.objects.create_user(username, email, default_password)
    user.is_staff = False
    user.volunteer = True
    user.active = row['Active']
    user.first_name = row['First Name']
    user.last_name = row['Last Name']
    user.organization = row['Organization']
    user.interview = row['Interview']
    user.house_number_street_name = row['Address']
    user.state = row['State']
    user.city = row['City']
    user.zip_code = row['Zip Code']
    user.daytime_phone = row['Daytime Phone']
    user.home_phone = row['Home Phone']
    user.cell_phone = row['Cell Phone']
    user.organization = row['Organization']
    user.emergency_contact = row['Emergency Contact']
    user.days_available = row['Days Available']
    user.food_pantry = row['Food Pantry']
    user.interview = row['Interview']
    user.bi_lingual = bool(row['Bilingual'])
    user.fund_raising = row['Fund Raising']
    user.board_member = row['Board Member']
    user.sunshine_committee = row['Sunshine Committe']
    user.solicit_donations = row['Solicit Donations']
    user.record_keeping = row['Record Keeping']
    user.truck_or_van = row['Truck or Van']
    user.pick_up_food = row['Pick up Food']
    user.deliver_food = row['Deliver Food']
    user.save()
    # user.comments = row['Comments']
    # user.drivers_licence = row['Drivers Licence Number']
    # user.unemployment = row['Unemployment']
    # user.food_stamps = row['Food Stamps']
    # user.disability = row['Disability']
    # user.salary = row['Salary']
    # user.pension = row['Pension']
    # user.ss_ssi = row['Social and Supplemental Income']
The logs:
IndentationError: unexpected indent
>>> user.record_keeping = row['Record Keeping']
File "<console>", line 1
user.record_keeping = row['Record Keeping']
^
IndentationError: unexpected indent
>>> user.truck_or_v user.truck_or_v ]
File "<console>", line 1
user.truck_or_v user.truck_or_v ]
^
IndentationError: unexpected indent
>>> user.pick_up_food = row['Pick up Food']
File "<console>", line 1
user.pick_up_food = row['Pick up Food']
^
IndentationError: unexpected indent
>>> user.deliver_food = row['Deliver Food']
File "<console>", line 1
user.deliver_food = row['Deliver Food']
^
IndentationError: unexpected indent
>>> user.save()
File "<console>", line 1
user.save()
^
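For what it's worth, the File "<console>", line 1 frames in these logs mean the code is being parsed line by line by the interactive shell, where pasted blank lines, stray leading whitespace, or a mangled paste (like the user.truck_or_v line above) produce exactly this IndentationError. Running the file non-interactively sidesteps the shell's parser; the file name below is hypothetical:

python manage.py shell < import_volunteers.py

or, from inside the shell:

exec(open('import_volunteers.py').read())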
Updated code that will run, in reference to the comment below asking me to comment out the lines with errors:
import csv
from django.contrib.auth import get_user_model

User = get_user_model()

members = open('Volunteers.csv', "rU")
data = csv.DictReader(members)
default_password = User.objects.make_random_password()

def generate_username(first_name, last_name):
    val = "{0}{1}".format(first_name[0], last_name[0]).lower()
    x = 0
    while True:
        if x == 0 and User.objects.filter(username=val).count() == 0:
            return val
        else:
            new_val = "{0}{1}".format(val, x)
            if User.objects.filter(username=new_val).count() == 0:
                return new_val
        x += 1
        if x > 1000000:
            raise Exception("Name is super popular!")

for row in data:
    email = row['Email']
    first_name = row['First Name'],
    last_name = row['Last Name'],
    username = generate_username(first_name, last_name)
    user = User.objects.create_user(username, email, default_password)
    user.is_staff = False
    user.volunteer = True
    user.active = row['Active']
    user.first_name = row['First Name']
    user.last_name = row['Last Name']
    user.organization = row['Organization']
    user.interview = row['Interview']
    user.house_number_street_name = row['Address']
    user.state = row['State']
    user.city = row['City']
    user.zip_code = row['Zip Code']
    user.daytime_phone = row['Daytime Phone']
    user.save()
    # user.home_phone = row['Home Phone']
    # user.cell_phone = row['Cell Phone']
    # user.organization = row['Organization']
    # user.emergency_contact = row['Emergency Contact']
    # user.days_available = row['Days Available']
    # user.food_pantry = row['Food Pantry']
    # user.interview = row['Interview']
    # user.bi_lingual = bool(row['Bilingual'])
    # user.fund_raising = row['Fund Raising']
    # user.board_member = row['Board Member']
    # user.sunshine_committee = row['Sunshine Committe']
    # user.solicit_donations = row['Solicit Donations']
    # user.record_keeping = row['Record Keeping']
    # user.truck_or_van = row['Truck or Van']
    # user.pick_up_food = row['Pick up Food']
    # user.deliver_food = row['Deliver Food']
    # user.save()
    # user.comments = row['Comments']
    # user.drivers_licence = row['Drivers Licence Number']
    # user.unemployment = row['Unemployment']
    # user.food_stamps = row['Food Stamps']
    # user.disability = row['Disability']
    # user.salary = row['Salary']
    # user.pension = row['Pension']
    # user.ss_ssi = row['Social and Supplemental Income']