Paginate ExtJS grid with Django - Python

I'm trying to use ExtJS with Django; I started out using ExtJS with PHP. To create a paginated grid I would get the total count of the data, then the start and limit values. In Django, the pagination does not work. What have I forgotten? Is it my query? I use PostgreSQL. This is my code:
if request.POST['task'] == 'OK':
    pers = Plante.objects.all().values('id','name','year')
    nbrows = len(pers)
    if request.POST['start']:
        start = request.POST['start']
    else:
        start = request.GET['start']
    if request.POST['limit']:
        end = request.POST['limit']
    else:
        end = request.GET['limit']
    pers = Plante.objects.all().values('id','name','year')[start:end]

The values pulled from request.POST / request.GET are strings, and ExtJS sends limit as a page size rather than an end index, so cast them to int and slice with start:start + limit:

start = int(request.POST.get('start') or request.GET.get('start'))
limit = int(request.POST.get('limit') or request.GET.get('limit'))
pers = Plante.objects.all().values('id','name','year')[start:start + limit]
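For the grid's paging toolbar to work, the response also has to carry the total row count alongside the page of rows. A minimal sketch of a complete view, assuming the store's JSON reader is configured with root: 'rows' and totalProperty: 'total' (both names are illustrative, not from the question):

import json
from django.http import HttpResponse

def plantes(request):
    params = request.POST or request.GET
    start = int(params.get('start', 0))
    limit = int(params.get('limit', 25))
    qs = Plante.objects.all().values('id', 'name', 'year')
    total = qs.count()                    # total count for the paging toolbar
    rows = list(qs[start:start + limit])  # one page of rows
    return HttpResponse(json.dumps({'total': total, 'rows': rows}),
                        content_type='application/json')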

I know it's quite late, but here's a way to achieve it using the "start" & "limit" pagination params sent by ExtJS.
from datetime import datetime
from django.core.paginator import Paginator
from django.db.models import Q

def fetchRecords(self, params):
    totalCount = 0
    pageNumber = 1
    ids = []
    # Instantiate your query object
    query = Q()
    # Not really relevant for this case, but if you have any filter criteria params, put them here
    if params.get("searchStartDate"):
        startDate = datetime.strptime(params.get("searchStartDate"), '%Y-%m-%d').date()
        query &= Q(date_created__gte=startDate)
    if params.get("searchEndDate"):
        endDate = datetime.strptime(params.get("searchEndDate"), '%Y-%m-%d').date()
        query &= Q(date_created__lte=endDate)
    # Get the total count; ExtJS grids need the total count value to be able to paginate
    totalCount = YourModel.objects.filter(query).count()
    # Get only the primary keys so we don't load all the objects into memory. The paginator
    # doesn't optimize the fetched data; if your table has millions of records and you load
    # all the record objects into memory, execution might be quite slow.
    your_model_ids_list = YourModel.objects.filter(query).order_by("-id").only('id')
    # Compute the page number based on the pagination "start" & "limit" params sent by the ExtJS grid
    if int(params.get("start")) != 0:
        pageNumber = (int(params.get("start")) // int(params.get("limit"))) + 1
    # Instantiate the paginator with the ids matching your filter criteria and the page size
    paginator = Paginator(your_model_ids_list, int(params.get("limit")))
    # Get the records that fall on the page number computed above
    recordIds = paginator.page(pageNumber)
    # Collect the ids of the records on that page
    for recordId in recordIds.object_list:
        ids.append(recordId.id)
    # Now fetch the full records based on the unique ids that fall on that page
    result = YourModel.objects.filter(Q(pk__in=ids)).order_by("-id")
    # Formulate your response object and return the data
    return {'totalCount': totalCount, 'records': result}


Shopify Python - Set inventory quantity

I'm trying to set the inventory quantity of a product in Shopify using the Shopify Python API.
As I understand it, I need to set the 'inventory_level' of the 'inventory_item' that belongs to the product, but after a few days of searching and testing I still have no luck.
Where I'm at
I have my products showing up in my store with all the data but the inventory quantity.
I'm not sure how to proceed as there's not a whole lot of documentation.
Here's my code for creating a product
def CreateShopifyProduct(data):
    # CREATE PRODUCT
    product = shopify.Product()
    # Add stuff to product, variant and inventoryItem here
    product.title = data['title']
    #product.status = ""
    #product.tags = data['tags']
    product.body_html = data['description']
    if 'category' in data:
        product.product_type = data['category']
    if 'vendor' in data:
        product.vendor = data['vendor']
    if 'image_url' in data:
        image_path = data['image_url']
        image = shopify.Image()
        image.src = image_path
        product.images = [image]
    else:
        try:
            image = GetLocalImageFiles(data['id'])
            product.images = [image]
        except Exception:
            print("No local images found")
    success = product.save()  # returns False if the record is invalid
    # CREATE VARIANT
    variant = shopify.Variant()
    if 'ean' in data:
        variant.barcode = data['ean']
    variant.price = data['gross_price']
    variant.weight = data['weight']
    #variant.count = data['inventory']
    variant.inventory_management = 'shopify'
    product.variants = [variant]
    variant.product_id = product.id
    s = variant.save()
    success = product.save()  # returns False if the record is invalid
    # CREATE INVENTORYITEM
    inventoryItem = shopify.InventoryItem()
    #inventoryItem = variant.inventory_item
    inventoryItem.tracked = True
    inventoryItem.id = product.id
    variant.inventory_quantity = data['inventory']
    inventoryItem.inventory_quantity = data['inventory']
    variant.inventory_item = inventoryItem
    s = variant.save()
    success = product.save()
    #ii = inventoryItem.save()  # this returns 406
    #inv_level = shopify.InventoryLevel.find(inventory_item_ids=6792364982390, location_ids=61763518582)
    #quantity = inv_level[0].__dict__['attributes']['available']
    #shopify.InventoryLevel.set(location_id=61763518582, inventory_item_id=variant.inventory_item.id, available=data['inventory'])
    #shopify.InventoryLevel.connect(61763518582, variant.inventory_item.id)
    if product.errors:
        # something went wrong, see product.errors.full_messages() for details
        print("error")
        print(product.errors.full_messages())
If I try to set the InventoryLevel with
shopify.InventoryLevel.set(61527654518, inventoryItem.id, 42)
# or
shopify.InventoryLevel.set(location_id=61527654518, inventory_item_id=inventoryItem.id, available=17)
I receive a
pyactiveresource.connection.ResourceNotFound: Not Found: https://domain.myshopify.com/admin/api/2022-07/inventory_levels/set.json
You need three things to update an inventory level. One, a valid location ID. Two, the inventory item ID. Finally, the amount to set the inventory to at that location.
You should really play at the command line and ensure you can quickly get the information you need, then try your updates. In other words, make sure you are getting a good location ID and inventory item ID, and that you know the amount of inventory already in Shopify. Since you have to calculate a delta change, these are the minimum steps most people take.
Note that once you get good at doing one item, you'll realize Shopify also accepts up to 100 at a time, making your updates a lot faster.
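As a concrete illustration (a sketch, not the poster's exact code): one likely cause of the Not Found error above is that the inventory item ID comes from the saved variant's inventory_item_id, not from product.id. Assuming an already-activated Shopify session, the three steps might look like:

import shopify

# Look up a valid location ID (the first location is used here for illustration).
location = shopify.Location.find()[0]

# Fetch the product and read the inventory item ID off its variant.
product = shopify.Product.find(1234567890)  # hypothetical product ID
inventory_item_id = product.variants[0].inventory_item_id

# Set the available quantity for that item at that location.
shopify.InventoryLevel.set(
    location_id=location.id,
    inventory_item_id=inventory_item_id,
    available=17,
)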

Python - Using a While Loop, how can I update a variable string to be used in a function

Background: I'm attempting to create a dataframe using data called from Twitch's API. They only allow 100 records per call, so with each pull a new pagination cursor is offered in order to move on to the next page. I'm using the following code to try to pull this data efficiently rather than manually adjusting the after=(pagination value) in the GET request. Right now the variable I'm trying to make dynamic is the 'Pagination' variable, but it only gets updated once the loop finishes - not helpful! Take a look below and see if you notice anything I can change to achieve this goal. Any help is appreciated!
TwitchTopGamesDataFrame = [] #This is our Data List
BaseURL = 'https://api.twitch.tv/helix/games/top?first=100'
Headers = {'client-id':'lqctse0orgdbs5gdf5faz665api03r','Authorization': 'Bearer a1yl09mwmnwetp6ovocilheias8pzt'}
Indent = 2
Pagination = ''
FullURL = BaseURL + Pagination
Response = requests.get(FullURL, headers=Headers)
iterations = 1 # Data records returned are equivalent to iterations x100
#Loop: Response, Convert JSON data, Append to Data List, Get Pagination & Replace String in Variable - Iterate until 300 records
while count <= 3:
    #Grab JSON Data, Convert, & Append
    ResponseJSONData = Response.json()
    #print(pgn) - Debug
    pd.set_option('display.max_rows', None)
    TopGamesDF = pd.DataFrame(ResponseJSONData['data'])
    TopGamesDF = TopGamesDF[['id','name']]
    TopGamesDF = TopGamesDF.rename(columns={'id':'GameID','name':'GameName'})
    TopGamesDF['Rank'] = TopGamesDF.index + 1
    TwitchTopGamesDataFrame.append(TopGamesDF)
    #print(FullURL) - Debug
    #Grab & Replace Pagination Value
    RPagination = pd.DataFrame(ResponseJSONData['pagination'], index=[0])
    pgn = str('&after=' + RPagination.to_string(index=False, header=False).strip())
    Pagination = pgn
    #print(FullURL) - Debug
    iterations += 1

TwitchTopGamesDataFrame
Figured it out:
import requests
import pandas as pd

def top_games(page_count):
    from time import gmtime, strftime
    print("Time of Execution:", strftime("%Y-%m-%d %H:%M:%S", gmtime()))
    #In order to condense the code above and be more efficient, a while loop works great.
    #Goal: Run a while loop to build a larger DataFrame through pagination, as the Twitch API only allows 100 records per call.
    baseURL = 'https://api.twitch.tv/helix/games/top?first=100' #Base URL
    Headers = {'client-id':'lqctse0orgdbs5gdf5faz665api03r','Authorization': 'Bearer a1yl09mwmnwetp6ovocilheias8pzt'}
    Pagination = ''
    start_count = 0
    count = 0 # Data records returned are equivalent to iterations x100
    max_count = page_count
    #Loop: request, convert JSON data, extend the list, grab the pagination value and rebuild the URL
    while count <= max_count:
        FullURL = baseURL + Pagination
        Response = requests.get(FullURL, headers=Headers)
        ResponseJSONData = Response.json()
        pd.set_option('display.max_rows', None)
        if count == start_count:
            TopGamesDFL = ResponseJSONData['data']
        if count > start_count:
            i = ResponseJSONData['data']
            TopGamesDFL.extend(i)
        #Grab & replace the pagination value
        RPagination = pd.DataFrame(ResponseJSONData['pagination'], index=[0])
        pgn = str('&after=' + RPagination.to_string(index=False, header=False).strip())
        Pagination = pgn
        count += 1
        if count == max_count:
            FinalDataFrame = pd.DataFrame(TopGamesDFL)
            FinalDataFrame = FinalDataFrame[['id','name']]
            FinalDataFrame = FinalDataFrame.rename(columns={'id':'GameID','name':'GameName'})
            FinalDataFrame['Rank'] = FinalDataFrame.index + 1
            return FinalDataFrame
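For reference, a tighter version of the same loop (a sketch with placeholder credentials) reads the cursor straight from the JSON instead of round-tripping it through a DataFrame; Helix responses carry it under pagination.cursor:

import requests
import pandas as pd

def top_games(page_count):
    base_url = 'https://api.twitch.tv/helix/games/top?first=100'
    headers = {'client-id': '<CLIENT_ID>', 'Authorization': 'Bearer <TOKEN>'}
    records = []
    cursor = None
    for _ in range(page_count):
        url = base_url + (f'&after={cursor}' if cursor else '')
        payload = requests.get(url, headers=headers).json()
        records.extend(payload['data'])
        cursor = payload.get('pagination', {}).get('cursor')
        if not cursor:  # no further pages
            break
    df = pd.DataFrame(records)[['id', 'name']].rename(
        columns={'id': 'GameID', 'name': 'GameName'})
    df['Rank'] = df.index + 1
    return df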

How to Handle When Request Returns None

I have a list of IDs which corresponds to a set of records (opportunities) in a database. I then pass this list as a parameter in a RESTful API request where I am filtering the results (tickets) by ID. For each match, the query returns JSON data pertaining to the individual record. However, I want to handle when the query does not find a match. I would like to assign some value for this case such as the string "None", because not every opportunity has a ticket. How can I make sure there exists some value in presales_tickets for every ID in opportunity_list? Could I provide a default value in the request for this case?
views.py
opportunities = cwObj.get_opportunities()
temp = []
opportunity_list = []
cw_presales_engineers = []
for opportunity in opportunities:
    temp.append(str(opportunity['id']))
opportunity_list = ','.join(temp)
presales_tickets = cwObj.get_tickets_by_opportunity(opportunity_list)
for opportunity in opportunities:
    try:
        if opportunity['id'] == presales_tickets[0]['opportunity']['id']:
            try:
                for presales_ticket in presales_tickets:
                    cw_engineer = presales_ticket['owner']['name']
                    cw_presales_engineers.append(cw_engineer)
            except:
                pass
        else:
            cw_engineer = 'None'
            cw_presales_engineers.append(cw_engineer)
    except AttributeError:
        cw_engineer = ''
        cw_presales_engineers.append(cw_engineer)
So, let's say you have a Ticket model and an Opportunity model, connected via a foreign key.
class Opportunity(models.Model):
    ... some fields here ...

class Ticket(models.Model):
    opportunity = models.ForeignKey(Opportunity)
and in your view, you get a list of opportunity ids
def some_view(request):
    ids = request.GET['ids']
It sounds like what you want is to fetch all the tickets for the supplied opportunities and add some default processing for the opportunities that do not have tickets. If that is the case, why not do something like:
def some_view(request):
    ids = request.GET['ids']
    tickets = Ticket.objects.filter(opportunity__id__in=ids)
    results = []
    for ticket in tickets:
        result = ... do your thing here ...
        results.append(result)
    # now handle missing opportunities
    good_ids = tickets.values_list('opportunity__id', flat=True).distinct()
    for id in ids:
        if id not in good_ids:
            result = ... do your default processing ...
            results.append(result)
Is that what you are trying to do?
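Applied to the asker's JSON data rather than ORM models, the same idea works with a dict lookup (a sketch assuming the presales_tickets shape shown in the question):

# Map opportunity id -> engineer name for the tickets that do exist.
tickets_by_opportunity = {
    ticket['opportunity']['id']: ticket['owner']['name']
    for ticket in presales_tickets
}

# Every opportunity gets a value; ones without a ticket fall back to 'None'.
cw_presales_engineers = [
    tickets_by_opportunity.get(opportunity['id'], 'None')
    for opportunity in opportunities
]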

Get info from a table on a website where XPATH varies on each site, Python

If you take this website as an example:
http://gbgfotboll.se/information/?scr=table&ftid=51168
I am using this code to get information from the second table:
for url in urlList:
    request = net.Request(url)
    response = net.urlopen(request)
    data = response.read()
    dom = lxml.html.parse(BytesIO(data))
    # all table rows
    xpatheval = etree.XPathDocumentEvaluator(dom)
    rows = xpatheval('//div[@id="content-primary"]/table[2]/tbody/tr')
    divName = xpatheval('//*[@id="content-primary"]/h1//text()')[0]
    trash, divisionName = divName.rsplit("- ")
    dict[divisionName] = {}
    for id, row in enumerate(rows):
        columns = row.findall("td")
        teamName = columns[0].find("a").text  # Lag
        print teamName
        playedGames = columns[1].text  # S
        wins = columns[2].text
        draw = columns[3].text
        lost = columns[4].text
        dif = columns[6].text  # GM-IM
        points = columns[7].text  # P - last column
        dict[divisionName].update({id: {"teamName": teamName, "playedGames": playedGames, "wins": wins, "draw": draw, "lost": lost, "dif": dif, "points": points}})
For that website, the rows come from table[2].
For this website:
http://gbgfotboll.se/serier/?scr=table&ftid=57108
the rows would need to look like this:
rows = xpatheval('//div[@id="content-primary"]/table[1]/tbody/tr')
So what I am asking is: is there a way to get the information I need regardless of which table index the table is at?
One way to do it would be to select by its class attribute (all 3 classes are required):
xpatheval('//div[@id="content-primary"]/table[@class="clCommonGrid clTblStandings clTblWithFullToggle"]/tbody/tr')
An alternative would be to select a child element in that table that you know is only present in that specific type of table. For example, the GM-IM header could be quite specific to that type of table, so I navigate to it and then work my way up the tree to end up with the same rows as you:
xpatheval('//div[@id="content-primary"]//tr[th="GM-IM"]/../../tbody/tr')

What is an efficient way of inserting thousands of records into an SQLite table using Django?

I have to insert 8000+ records into a SQLite database using Django's ORM. This operation needs to be run as a cronjob about once per minute.
At the moment I'm using a for loop to iterate through all the items and then insert them one by one.
Example:
for item in items:
    entry = Entry(a1=item.a1, a2=item.a2)
    entry.save()
What is an efficient way of doing this?
Edit: A little comparison between the two insertion methods.
Without the commit_manually decorator (11245 records):
[nox@noxdevel marinetraffic]$ time python manage.py insrec
real 1m50.288s
user 0m6.710s
sys 0m23.445s
Using the commit_manually decorator (11245 records):
[nox@noxdevel marinetraffic]$ time python manage.py insrec
real 0m18.464s
user 0m5.433s
sys 0m10.163s
Note: The test script also does some other operations besides inserting into the database (downloads a ZIP file, extracts an XML file from the ZIP archive, parses the XML file) so the time needed for execution does not necessarily represent the time needed to insert the records.
You want to check out django.db.transaction.commit_manually.
http://docs.djangoproject.com/en/dev/topics/db/transactions/#django-db-transaction-commit-manually
So it would be something like:
from django.db import transaction

@transaction.commit_manually
def viewfunc(request):
    ...
    for item in items:
        entry = Entry(a1=item.a1, a2=item.a2)
        entry.save()
    transaction.commit()
which will only commit once, instead of at each save().
In Django 1.3, context managers were introduced.
So now you can use transaction.commit_on_success() in a similar way:
from django.db import transaction

def viewfunc(request):
    ...
    with transaction.commit_on_success():
        for item in items:
            entry = Entry(a1=item.a1, a2=item.a2)
            entry.save()
In Django 1.4, bulk_create was added, allowing you to create lists of your model objects and then commit them all at once.
Note that the model's save() method will not be called when using bulk_create.
>>> Entry.objects.bulk_create([
... Entry(headline="Django 1.0 Released"),
... Entry(headline="Django 1.1 Announced"),
... Entry(headline="Breaking: Django is awesome")
... ])
In Django 1.6, transaction.atomic was introduced, intended to replace the now-legacy functions commit_on_success and commit_manually.
From the Django documentation on atomic:
atomic is usable both as a decorator:
from django.db import transaction

@transaction.atomic
def viewfunc(request):
    # This code executes inside a transaction.
    do_stuff()
and as a context manager:
from django.db import transaction

def viewfunc(request):
    # This code executes in autocommit mode (Django's default).
    do_stuff()
    with transaction.atomic():
        # This code executes inside a transaction.
        do_more_stuff()
Bulk creation is available in Django 1.4:
https://django.readthedocs.io/en/1.4/ref/models/querysets.html#bulk-create
Have a look at this. It's meant for use out-of-the-box with MySQL only, but there are pointers on what to do for other databases.
You might be better off bulk-loading the items: prepare a file and use a bulk load tool. This will be vastly more efficient than 8000 individual inserts.
To answer the question specifically with regard to SQLite, as asked: while I have just now confirmed that bulk_create does provide a tremendous speedup, there is a limitation with SQLite: "The default is to create all objects in one batch, except for SQLite where the default is such that at maximum 999 variables per query is used."
The quoted text is from the docs; A-IV provided a link.
What I have to add is that this djangosnippets entry by alpar also seems to be working for me. It's a little wrapper that breaks the big batch you want to process into smaller batches, managing the 999-variable limit.
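The wrapper's idea is simple enough to sketch inline (the batch size of 500 is illustrative; recent Django versions also accept a batch_size argument to bulk_create that does the same thing):

def bulk_create_chunked(model, objs, batch_size=500):
    # Insert in slices so each INSERT stays under SQLite's
    # per-query variable limit.
    for i in range(0, len(objs), batch_size):
        model.objects.bulk_create(objs[i:i + batch_size])

bulk_create_chunked(Entry, [Entry(a1=item.a1, a2=item.a2) for item in items])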
You should check out DSE. I wrote DSE to solve these kinds of problems (massive inserts or updates). Using the Django ORM for this is a dead end; you have to do it in plain SQL, and DSE takes care of much of that for you.
Thomas
I recommend using plain SQL (not the ORM); you can insert multiple rows with a single insert:
insert into A select ... from B;
The select ... from B portion of your SQL can be as complicated as you want, as long as the results match the columns in table A and there are no constraint conflicts.
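Run through Django's database connection, that might look like (a sketch; the table and column names are placeholders):

from django.db import connection

with connection.cursor() as cursor:
    # A single statement copies every matching row from B into A.
    cursor.execute("INSERT INTO A (a1, a2) SELECT a1, a2 FROM B")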
def order(request):
    if request.method == "GET":
        # get the values from the HTML page
        cust_name = request.GET.get('cust_name', '')
        cust_cont = request.GET.get('cust_cont', '')
        pincode = request.GET.get('pincode', '')
        city_name = request.GET.get('city_name', '')
        state = request.GET.get('state', '')
        contry = request.GET.get('contry', '')
        gender = request.GET.get('gender', '')
        paid_amt = request.GET.get('paid_amt', '')
        due_amt = request.GET.get('due_amt', '')
        order_date = request.GET.get('order_date', '')
        prod_name = request.GET.getlist('prod_name[]', '')
        prod_qty = request.GET.getlist('prod_qty[]', '')
        prod_price = request.GET.getlist('prod_price[]', '')
        # insert customer information into the customer table
        try:
            # Insert data into the customer table
            cust_tab = Customer(customer_name=cust_name, customer_contact=cust_cont, gender=gender, city_name=city_name, pincode=pincode, state_name=state, contry_name=contry)
            cust_tab.save()
            # Retrieve the id from the customer table (values_list returns a tuple per row)
            custo_id = Customer.objects.values_list('customer_id').last()
            custo_id = int(custo_id[0])  # convert the tuple to an int
            # Insert data into the orders table
            order_tab = Orders(order_date=order_date, paid_amt=paid_amt, due_amt=due_amt, customer_id=custo_id)
            order_tab.save()
            # Insert multiple rows into the products table using a while loop
            i = 0
            while i < len(prod_name):
                p_n = prod_name[i]
                p_q = prod_qty[i]
                p_p = prod_price[i]
                # only save the row when all three fields are non-empty
                if p_n != "" and p_q != "" and p_p != "":
                    prod_tab = Products(product_name=p_n, product_qty=p_q, product_price=p_p, customer_id=custo_id)
                    prod_tab.save()
                i = i + 1
            return HttpResponse('Your Record Has been Saved')
        except Exception as e:
            return HttpResponse(e)
    return render(request, 'invoice_system/order.html')
