I want custom sorting in an ag-grid using justpy, but when I add a 'comparator' to columnDefs and run the code, it doesn't work. The console shows an error referring to l.column.getColDef(...).comparator.
Can anyone help me?
import justpy as jp

grid_options = {
    'getDataPath': '''function(data) { return data.orgHierarchy; }''',
    'treeData': True,
    'defaultColDef': {
        'filter': True,
        'sortable': True,
        'resizable': True,
    },
    'columnDefs': [
        {'headerName': "job title", 'field': "jobTitle"},
        {'headerName': "employment type",
         'field': "employmentType",
         'comparator': '''function(valueA, valueB) {
             console.log('valuea', valueA)
             if (valueA == valueB) return 0;
             return (valueA > valueB) ? 1 : -1;
         }'''
        },
    ],
    'rowData': [
        {'orgHierarchy': ['Erica'], 'jobTitle': "CEO", 'employmentType': "1"},
        {'orgHierarchy': ['Erica', 'Malcolm'], 'jobTitle': "VP", 'employmentType': "2"},
        {'orgHierarchy': ['Erica', 'Bob'], 'jobTitle': "SVP", 'employmentType': "3"},
        {'orgHierarchy': ['Erica', 'Bob', 'jo'], 'jobTitle': "eVP", 'employmentType': "4"}
    ]
}

def grid_test():
    wp = jp.WebPage()
    grid = jp.AgGrid(a=wp, options=grid_options)
    print(grid)
    grid.evaluate = ['getDataPath']
    return wp

jp.justpy(grid_test)
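In justpy, a string-valued option is only turned into a real JavaScript function on the client if its name is listed in the grid's evaluate attribute, and the snippet above only lists 'getDataPath'. A minimal sketch of the likely fix, assuming justpy also evaluates option names nested inside columnDefs:

# assumption: any option named in grid.evaluate is evaluated as a JS
# function string, including 'comparator' entries inside columnDefs
grid.evaluate = ['getDataPath', 'comparator']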
I'm pulling data from an API call. The JSON output has some specific keys that I want to get rid of; for the most part I've used pop('id', None), but I have a circumstance where the item has a nested list, i.e.:
[
    {
        "enabled":true,
        "is_local":true,
        "name":"no_squash",
        "policy_type":"nfs-export",
        "rules":[
            {
                "access":"no-squash",
                "anongid":"None",
                "anonuid":"None",
                "atime":true,
                "client":"*",
                "fileid_32bit":false,
                "id":"6680ab71-1823-48fc-bc84-920059d218c5",
                "index":1,
                "name":"no_squash.1",
                "permission":"rw",
                "policy":{
                    "id":"e399c67e-595a-8b21-18dc-678164e360bd",
                    "name":"no_squash",
                    "resource_type":"nfs-export-policies"
                },
                "policy_version":"ffffffff-d747-c55b-0000-0000411034c5",
                "secure":false,
                "security":[
                    "krb5",
                    "krb5i",
                    "krb5p",
                    "sys"
                ]
            }
        ],
        "version":"ffffffff-d747-c55b-0000-0000411034c5"
    }
]
Specifically for the rules section: how do I target ['rules']['id'] and remove it?
check = requests.get(standard_url+api_version+"/nfs-export-policies", headers=auth_headers, verify=False)
centry = check.json()['items']
for item in centry:
    item.pop('id', None)
    item.pop('location', None)
The above has already removed the id key from the initial entry but not the subelement.
I've worked out that I can achieve the desired results with nested for loops, but is there a better way?
Raw JSON as received from the API get call:
{'continuation_token': None, 'total_item_count': 1, 'items': [{'name': 'no_squash', 'id': 'e399c67e-595a-8b21-18dc-678164e360bd', 'enabled': True, 'is_local': True, 'location': {'name': 'fbstaines03', 'id': 'a7c7d4ad-b515-4f6b-a396-562cdad2063d', 'resource_type': 'arrays'}, 'version': 'ffffffff-d747-c55b-0000-0000411034c5', 'rules': [{'name': 'no_squash.1', 'id': '6680ab71-1823-48fc-bc84-920059d218c5', 'policy': {'name': 'no_squash', 'id': 'e399c67e-595a-8b21-18dc-678164e360bd', 'resource_type': 'nfs-export-policies'}, 'access': 'no-squash', 'anongid': None, 'anonuid': None, 'atime': True, 'client': '*', 'fileid_32bit': False, 'index': 1, 'permission': 'rw', 'secure': False, 'security': ['krb5', 'krb5i', 'krb5p', 'sys'], 'policy_version': 'ffffffff-d747-c55b-0000-0000411034c5'}], 'policy_type': 'nfs-export'}]}
Any help gratefully received.
IIUC it looks as though any 'id' key in any dictionary needs to be removed. If so, this is best dealt with recursively:
import json

J = """[
    {
        "enabled":true,
        "is_local":true,
        "name":"no_squash",
        "policy_type":"nfs-export",
        "rules":[
            {
                "access":"no-squash",
                "anongid":"None",
                "anonuid":"None",
                "atime":true,
                "client":"*",
                "fileid_32bit":false,
                "id":"6680ab71-1823-48fc-bc84-920059d218c5",
                "index":1,
                "name":"no_squash.1",
                "permission":"rw",
                "policy":{
                    "id":"e399c67e-595a-8b21-18dc-678164e360bd",
                    "name":"no_squash",
                    "resource_type":"nfs-export-policies"
                },
                "policy_version":"ffffffff-d747-c55b-0000-0000411034c5",
                "secure":false,
                "security":[
                    "krb5",
                    "krb5i",
                    "krb5p",
                    "sys"
                ]
            }
        ],
        "version":"ffffffff-d747-c55b-0000-0000411034c5"
    }
]"""

def rmid(e):
    if isinstance(e, dict):
        e.pop('id', None)
        for v in e.values():
            rmid(v)
    elif isinstance(e, list):
        for v in e:
            rmid(v)

data = json.loads(J)
for d in data:
    rmid(d)
print(json.dumps(data, indent=2))
Output:
[
  {
    "enabled": true,
    "is_local": true,
    "name": "no_squash",
    "policy_type": "nfs-export",
    "rules": [
      {
        "access": "no-squash",
        "anongid": "None",
        "anonuid": "None",
        "atime": true,
        "client": "*",
        "fileid_32bit": false,
        "index": 1,
        "name": "no_squash.1",
        "permission": "rw",
        "policy": {
          "name": "no_squash",
          "resource_type": "nfs-export-policies"
        },
        "policy_version": "ffffffff-d747-c55b-0000-0000411034c5",
        "secure": false,
        "security": [
          "krb5",
          "krb5i",
          "krb5p",
          "sys"
        ]
      }
    ],
    "version": "ffffffff-d747-c55b-0000-0000411034c5"
  }
]
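Since rmid recurses into both dicts and lists, it can be applied directly to the parsed API response instead of writing nested loops (a sketch, reusing the check response object from the question):

centry = check.json()['items']
rmid(centry)  # removes every 'id' key at any depth, including inside rules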
I have this sample list of dictionaries:
[
    {
        "name": "like father",
        "director": "Ajun kun",
        "edited": "2014-12-20T21:23:49.867000Z",
        "similar_movies": [
            "http://movies.dev/api/films/1/",
            "http://movies.dev/api/films/3/",
        ],
        "rating": "2.0",
    },
    {
        "name": "be like her",
        "director": "tuned ku",
        "edited": "2014-12-20T21:23:49.870000Z",
        "similar_movies": [
            "http://movies.dev/api/films/1/"
        ]
    }, .......
]
Some of the dictionaries in the list contain ratings while others do not. I want to generate a new dictionary like the one below, sorted by the ratings:
{
    "movies": [
        {"name": "movie_4", "rating": 0.1},
        {"name": "movie_1", "rating": 0.3},
        {"name": "movies_5", "rating": 0.5}
    ],
    "movies_without_rating": [
        {"name": "movie_8"},
        {"name": "movie_3"}
    ]
}
Here is my sample code:
from flask import Flask, jsonify, request
import requests
from collections import ChainMap

app = Flask(__name__)

@app.route('/movies', methods=['GET'])
def returnAll():
    # note: the URL is just a demo url
    response = requests.get("https://movies.dev/api/movies/")
    results = response.json()['results']
    general_dic = {}
    for result in result:
        for key, val in result:
            if (key == 'rating'):
                general_dic['movies']
            else:
                general_dic['movies_with_rating']
        return general_dic
    return jsonify(results)

if __name__ == "__main__":
    app.run(debug=True)
I got stuck and I couldn't continue, I will greatly appreciate your help.
You can use this example to integrate into your code:
lst = [
    {
        "movies": "like father",
        "rating": "2.0",
    },
    {
        "movies": "other movie",
        "rating": "2.5",
    },
    {
        "movies": "be like her",
    },
    {
        "movies": "other movie 2",
        "rating": "5.5",
    },
    {
        "movies": "other movie 3",
    },
]

out = {'movies': [], 'movies_without_rating': []}
for movie in lst:
    if 'rating' in movie:
        out['movies'].append({'name': movie['movies'], 'rating': float(movie['rating'])})
    else:
        out['movies_without_rating'].append({'name': movie['movies']})

# sort it
out['movies'] = sorted(out['movies'], key=lambda k: k['rating'])

# pretty print on screen:
from pprint import pprint
pprint(out)
Prints:
{'movies': [{'name': 'like father', 'rating': 2.0},
            {'name': 'other movie', 'rating': 2.5},
            {'name': 'other movie 2', 'rating': 5.5}],
 'movies_without_rating': [{'name': 'be like her'}, {'name': 'other movie 3'}]}
It seems something like this is what you'd like:
def separate_movies(movies_list):
    movies_with_rating = []
    movies_without_rating = []
    for movie in movies_list:
        name = movie["movies"]
        if "rating" in movie:
            movies_with_rating.append({
                "name": name,
                "rating": movie["rating"]
            })
        else:
            movies_without_rating.append({
                "name": name
            })
    movies_with_rating.sort(key=lambda movie: movie["rating"])
    return {
        "movies": movies_with_rating,
        "movies_without_rating": movies_without_rating
    }
The key here is using the in keyword to check whether a movie has a rating.
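For example, applied to the lst sample from the previous answer (a small usage sketch; note that this version keeps ratings as strings, so they sort lexicographically unless you convert them with float() first):

result = separate_movies(lst)
print(result["movies"][0])              # {'name': 'like father', 'rating': '2.0'}
print(result["movies_without_rating"])  # [{'name': 'be like her'}, {'name': 'other movie 3'}]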
I'm trying to deep-merge lists to get a specific JSON. What I want to achieve is this format (the order of the elements is irrelevant):
{
    "report": {
        "context": [{
            "report_id": [
                "Report ID 30"
            ],
            "status": [
                "Status 7"
            ],
            "fallzahl": [
                "Fallzahl 52"
            ],
            "izahl": [
                "IZahl 20"
            ]
        }],
        "körpergewicht": [{
            "any_event_en": [{
                "gewicht": [{
                    "|magnitude": 185.44,
                    "|unit": "kg"
                }],
                "kommentar": [
                    "Kommentar 94"
                ],
                "bekleidung": [{
                    "|code": "at0011"
                }]
            }]
        }]
    }
}
I try to deep-merge dicts and lists to achieve this specific format. My baseline is a set of dicts:
{'körpergewicht': [{'any_event_en': [{'gewicht': [{'|magnitude': '100', '|unit': 'kg'}]}]}]}
{'körpergewicht': [{'any_event_en': [{'bekleidung': [{'|code': 'at0013'}]}]}]}
{'körpergewicht': [{'any_event_en': [{'kommentar': ['none']}]}]}
{'context': [{'status': ['fatty']}]}
{'context': [{'fallzahl': ['123']}]}
{'context': [{'report_id': ['123']}]}
{'context': [{'izahl': ['123']}]}
What I tried is the following: I have a dict called tmp_dict in which I hold a baseline dict as I loop through. The so-called collect_dict is the dict into which I try to merge my baseline dicts; element holds my current baseline dict.
if (index == (len(element)-1)):  # when the baseline dict is traversed completely
    if tmp_dict:
        first_key_of_tmp_dict = list(tmp_dict.keys())[0]
        if not (first_key_of_tmp_dict in collect_dict):
            collect_dict.update(tmp_dict)
        else:
            merge(tmp_dict, collect_dict)
    else:
        collect_dict.update(tmp_dict)
and I also wrote a merge method:
def merge(tmp_dict, collect_dict):
    first_common_key_of_dicts = list(tmp_dict.keys())[0]
    second_depth_key_of_tmp_dict = list(tmp_dict[first_common_key_of_dicts][0].keys())[0]
    second_depth_tmp_dict = tmp_dict[first_common_key_of_dicts][0]
    second_depth_key_of_coll_dict = collect_dict[first_common_key_of_dicts][0]
    if not second_depth_key_of_tmp_dict in second_depth_key_of_coll_dict:
        collect_dict.update(second_depth_tmp_dict)
    else:
        merge(second_depth_tmp_dict, second_depth_key_of_coll_dict)
What I'm coming up with goes in the right direction but is far from being my desired output:
{"report": {
"k\u00f6rpergewicht": [{
"any_event_en": [{
"kommentar": ["none"]
}
],
"bekleidung": [{
"|code": "at0013"
}
],
"gewicht": [{
"|magnitude": "100",
"|unit": "kg"
}
]
}
],
"context": [{
"fallzahl": ["234"]
}
],
"report_id": ["234"],
"status": ["s"],
"izahl": ["234"]
}
}
With another set of inputs:
{'atemfrequenz': {'context': [{'status': [{'|code': 'at0012'}]}]}},
{'atemfrequenz': {'context': [{'kategorie': ['Kategorie']}]}},
{'atemfrequenz': {'atemfrequenz': [{'messwert': [{'|magnitude': '123', '|unit': 'min'}]}]}}
I would like to achieve the following output:
"atemfrequenz": {
"context": [
{
"status": [
{
"|code": "at0012"
}
],
"kategorie": [
"Kategorie"
]
}
],
"atemfrequenz": [
{
"messwert": [
{
"|magnitude": 123,
"|unit": "/min"
}
]
}
]
}
This code should get the correct answer. I removed the special character (ö) to prevent errors.
dd = [
    {'korpergewicht': [{'any_event_en': [{'gewicht': [{'|magnitude': '100', '|unit': 'kg'}]}]}]},
    {'korpergewicht': [{'any_event_en': [{'bekleidung': [{'|code': 'at0013'}]}]}]},
    {'korpergewicht': [{'any_event_en': [{'kommentar': ['none']}]}]},
    {'context': [{'status': ['fatty']}]},
    {'context': [{'fallzahl': ['123']}]},
    {'context': [{'report_id': ['123']}]},
    {'context': [{'izahl': ['123']}]}
]

def merge(d):
    if (type(d) != type([])): return d                       # not a list: nothing to merge
    if (type(list(d[0].values())[0])) == type(""): return d  # leaf level reached
    keys = list(set(list(k.keys())[0] for k in d))           # unique top-level keys
    lst = [{k: []} for k in keys]
    for e in lst:                                            # collect all values per key
        for k in d:
            if (list(e.keys())[0] == list(k.keys())[0]):
                e[list(e.keys())[0]] += k[list(k.keys())[0]]
    for e in lst:                                            # recurse into nested dicts
        if (type(e[list(e.keys())[0]][0]) == type({})):
            e[list(e.keys())[0]] = merge(e[list(e.keys())[0]])
    for i in lst[1:]: lst[0].update(i)                       # collapse into a single dict
    lst2 = []  # return list of single dictionary
    lst2.append(lst[0])
    return lst2

dx = merge(dd)
dx = {'report': dx[0]}  # no list at lowest level
print(dx)
Output (formatted)
{'report': {
    'korpergewicht': [{
        'any_event_en': [{
            'kommentar': ['none'],
            'bekleidung': [{'|code': 'at0013'}],
            'gewicht': [{'|magnitude': '100', '|unit': 'kg'}]}]}],
    'context': [{
        'report_id': ['123'],
        'izahl': ['123'],
        'fallzahl': ['123'],
        'status': ['fatty']}]}}
Concerning the second data set provided, the data needs to be structured to match the previous data set.
This data set works correctly:
dd = [
    {'atemfrequenz': [{'context': [{'status': [{'|code': 'at0012'}]}]}]},
    {'atemfrequenz': [{'context': [{'kategorie': ['Kategorie']}]}]},
    {'atemfrequenz': [{'atemfrequenz': [{'messwert': [{'|magnitude': '123', '|unit': 'min'}]}]}]}
]
Output (formatted)
{'report': {
    'atemfrequenz': [{
        'atemfrequenz': [{
            'messwert': [{'|magnitude': '123', '|unit': 'min'}]}],
        'context': [{
            'kategorie': ['Kategorie'],
            'status': [{'|code': 'at0012'}]}]}]}}
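If you'd rather not rewrite the second data set by hand, here is a minimal sketch for converting it into the required shape, assuming each baseline dict has exactly one top-level key whose value is a plain dict (original_dd is a hypothetical name for the unconverted list):

# wrap each top-level value in a one-element list, so that
# {'atemfrequenz': {...}} becomes {'atemfrequenz': [{...}]}
dd = [{k: [v] for k, v in d.items()} for d in original_dd]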
I'm trying to get some original data with a geo-point mapping. I need to get satname and timestamp along with "geo". I get the data from a RESTful API using the Python Elasticsearch client.
settings = { "settings": {
"number_of_shards":1,
'number_of_replicas':0
},
"mappings" : {
"document" : {
"properties":{
"geo": {
"type": "geo_point"
}
}
}
}
}
es.indices.create(index="new", body=settings)

def collect_data():
    data = requests.get(url=URL).json()
    del data['positions'][1]
    new_data = ({'geo': {'lat': data['positions'][0]['satlatitude'],
                         'lon': data['positions'][0]['satlongitude']}},
                {data['info'][0]['satname']},
                {data['positions'][0]['timestamp']})
    es.index(index='new', doc_type='document', body=new_data)

schedule.every(10).seconds.do(collect_data)

while True:
    schedule.run_pending()
    time.sleep(1)
Error received:
SerializationError: (({'geo': {'lat': 37.43662067, 'lon': -26.09384821}}, {1591391688}),
TypeError("Unable to serialize {1591391688} (type: <class 'set'>)"))
RESTful JSON data sample:
{'info': {'satname': 'SPACE STATION', 'satid': 25544, 'transactionscount': 0}, 'positions': [{'satlatitude': 28.89539607, 'satlongitude': 90.44547739, 'sataltitude': 420.36, 'azimuth': 12.46, 'elevation': -52.81, 'ra': 215.55022984, 'dec': -5.00234017, 'timestamp': 1591196844, 'eclipsed': True}]}
I need to have "geo", "satname" and "timestamp". I'm wondering how I could obtain correct results.
Looks like you were setting the timestamp and satname without a key. Try this to process the data:
import json
from datetime import datetime

response_json = '''
{
    "info": {
        "satname": "SPACE STATION",
        "satid": 25544,
        "transactionscount": 0
    },
    "positions": [
        {
            "satlatitude": 28.89539607,
            "satlongitude": 90.44547739,
            "sataltitude": 420.36,
            "azimuth": 12.46,
            "elevation": -52.81,
            "ra": 215.55022984,
            "dec": -5.00234017,
            "timestamp": 1591196844,
            "eclipsed": true
        }
    ]
}
'''

response_data = json.loads(response_json)

def process_data(data):
    return {
        'satname': data['info']['satname'],
        # convert unix timestamp to iso time
        'timestamp': datetime.fromtimestamp(data['positions'][0]['timestamp']).isoformat(),
        'geo': {
            'lat': data['positions'][0]['satlatitude'],
            'lon': data['positions'][0]['satlongitude']
        }
    }

print(process_data(response_data))
Output:
{'satname': 'SPACE STATION', 'timestamp': '2020-06-03T15:07:24', 'geo': {'lat': 28.89539607, 'lon': 90.44547739}}
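To index the processed document, something along these lines should work with the snippet from the question (a sketch, assuming the same es client, index name and doc type):

def collect_data():
    data = requests.get(url=URL).json()
    es.index(index='new', doc_type='document', body=process_data(data))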
I am using the compute client to create a VM (using create_or_update) and I want the VM to have a standard HDD, not a premium SSD, as its OS disk. I should be able to specify that in the managed disk parameters, but when I do, the VM still creates with a premium SSD.
Here are my VM parameters.
vm_parameters = {
    'location': vm_location,
    'os_profile': {
        'computer_name': vm_name,
        'admin_username': vm_name,
        'admin_password': vm_password,
        'custom_data': startup_script
    },
    'hardware_profile': {
        'vm_size': 'Standard_B1ls'
    },
    'storage_profile': {
        'image_reference': {
            'publisher': 'Canonical',
            'offer': 'UbuntuServer',
            'sku': '16.04.0-LTS',
            'version': 'latest'
        },
        'os_disk': {
            'caching': 'None',
            'create_option': 'FromImage',
            'disk_size_gb': 30,
            'managed_disk_parameters': {
                'storage_account_type': 'Standard_LRS'
            }
        }
    },
    'network_profile': {
        'network_interfaces': [{
            'id': nic_info.id
        }]
    },
    'tags': {
        'expiration_date': 'expirationdatehere'
    }
}
Just specifying the storage account type as Standard_LRS isn't changing anything. What should I do to make the VM create with a standard HDD as its OS disk instead of a premium SSD?
According to my test, you are using the wrong parameter in vm_parameters. Please update managed_disk_parameters to managed_disk. For more details, please refer to https://learn.microsoft.com/en-us/python/api/azure-mgmt-compute/azure.mgmt.compute.v2019_03_01.models.osdisk?view=azure-python.
For example:
import os
import traceback
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.compute.models import DiskCreateOption
from msrestazure.azure_exceptions import CloudError
from haikunator import Haikunator

haikunator = Haikunator()

AZURE_TENANT_ID = ''
AZURE_CLIENT_ID = ''
AZURE_CLIENT_SECRET = ''
AZURE_SUBSCRIPTION_ID = ''

credentials = ServicePrincipalCredentials(client_id=AZURE_CLIENT_ID, secret=AZURE_CLIENT_SECRET, tenant=AZURE_TENANT_ID)
resource_client = ResourceManagementClient(credentials, AZURE_SUBSCRIPTION_ID)
compute_client = ComputeManagementClient(credentials, AZURE_SUBSCRIPTION_ID)
network_client = NetworkManagementClient(credentials, AZURE_SUBSCRIPTION_ID)

GROUP_NAME = 'allenR'
LOCATION = 'eastus'

# Network
VNET_NAME = 'azure-sample-vnet'
SUBNET_NAME = 'azure-sample-subnet'

print('\nCreate Vnet')
async_vnet_creation = network_client.virtual_networks.create_or_update(
    GROUP_NAME,
    VNET_NAME,
    {
        'location': LOCATION,
        'address_space': {
            'address_prefixes': ['10.0.0.0/16']
        }
    }
)
async_vnet_creation.wait()

# Create Subnet
print('\nCreate Subnet')
async_subnet_creation = network_client.subnets.create_or_update(
    GROUP_NAME,
    VNET_NAME,
    SUBNET_NAME,
    {'address_prefix': '10.0.0.0/24'}
)
subnet_info = async_subnet_creation.result()

# Create NIC
print('\nCreate NIC')
async_nic_creation = network_client.network_interfaces.create_or_update(
    GROUP_NAME,
    'test19191',
    {
        'location': LOCATION,
        'ip_configurations': [{
            'name': 'test19191-ip',
            'subnet': {
                'id': subnet_info.id
            }
        }]
    }
)
nic = async_nic_creation.result()
print(nic.id)
vm_parameters = {
    'location': 'eastus',
    'os_profile': {
        'computer_name': 'jimtest120yue',
        'admin_username': 'jimtest',
        'admin_password': 'Password0123!',
        # 'custom_data': startup_script
    },
    'hardware_profile': {
        'vm_size': 'Standard_B1ls'
    },
    'storage_profile': {
        'image_reference': {
            'publisher': 'Canonical',
            'offer': 'UbuntuServer',
            'sku': '16.04.0-LTS',
            'version': 'latest'
        },
        'os_disk': {
            'caching': 'ReadWrite',
            'name': 'jimtest120yue_disk',
            'create_option': 'FromImage',
            'disk_size_gb': 30,
            'os_type': 'Linux',
            'managed_disk': {
                'storage_account_type': 'Standard_LRS'
            }
        }
    },
    'network_profile': {
        'network_interfaces': [{
            'id': nic.id
        }]
    },
    'tags': {
        'expiration_date': 'expirationdatehere'
    }
}

async_vm_creation = compute_client.virtual_machines.create_or_update('allenR', 'jimtest120yue', vm_parameters)
print(async_vm_creation.result())

disk = compute_client.disks.get('allenR', 'jimtest120yue_disk')
print(disk.sku)
If you are using the REST API to create the VM, then here is a sample JSON request for creating the VM:
{
    "location": "westus",
    "properties": {
        "hardwareProfile": {
            "vmSize": "Standard_D1_v2"
        },
        "storageProfile": {
            "imageReference": {
                "id": "/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/{existing-custom-image-name}"
            },
            "osDisk": {
                "caching": "ReadWrite",
                "managedDisk": {
                    "storageAccountType": "Standard_LRS"
                },
                "name": "myVMosdisk",
                "createOption": "FromImage"
            }
        },
        "osProfile": {
            "adminUsername": "{your-username}",
            "computerName": "myVM",
            "adminPassword": "{your-password}"
        },
        "networkProfile": {
            "networkInterfaces": [
                {
                    "id": "/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/{existing-nic-name}",
                    "properties": {
                        "primary": true
                    }
                }
            ]
        }
    }
}
Here is the API for the same:
PUT https://management.azure.com/subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM?api-version=2019-03-01
If you are looking for a way to create a Virtual Machine, you can follow the code sample below:
https://github.com/Azure-Samples/Hybrid-Compute-Python-Manage-VM/blob/master/example.py
Hope it helps.