What am I missing when populating table rows that causes empty rows to be added? For example, if two items are inserted, one additional empty row is added at the top, and so on:
def search_watchlist_category(self) -> None:
    """ Get details watchlist based on the search word matches
    :return None
    """
    search_word = ErrorMessaging.remove_spaces(self.searchText.text())
    if search_word == '':
        self.watchlist_results.watchlistsTable.setRowCount(0)
        self.watchlistButton.setText("Watchlist")
        self.watchlistButton.setStyleSheet('color: #FFFFFF; background-color: #312624; border: 0px')
    else:
        search_query = search_word.split(' ')[0]
        response = utils.search_watchlists(search_query)
        self.watchlist_results.watchlistsTable.setRowCount(0)
        matched_results = response['watchlist_details']
        num_rows = len(matched_results)
        self.watchlist_results.watchlistsTable.setRowCount(num_rows)
        for watchlist_category in response['watchlist_details']:
            self.watchlist_results.watchlistsTable.verticalHeader().setVisible(False)
            self.watchlist_results.watchlistsTable.setShowGrid(False)
            self.watchlist_results.watchlistsTable.horizontalHeader().setStyleSheet(
                'background-color: transparent;')
            self.watchlist_results.watchlistsTable.horizontalHeader().setSectionResizeMode(
                QtWidgets.QHeaderView.ResizeToContents)
            self.watchlist_results.watchlistsTable.horizontalHeader().setVisible(True)
            checkbox_item = QtWidgets.QTableWidgetItem('')
            checkbox_item.setFlags(checkbox_item.flags() | Qt.ItemIsUserCheckable |
                                   QtCore.Qt.ItemIsSelectable)
            checkbox_item.setCheckState(QtCore.Qt.Unchecked)
            checkbox_item.setForeground(QtGui.QColor('#FFFFFF'))
            watchlist_targets = utils.get_persons_of_interest_in_category(watchlist_category.id)
            target_item = QtWidgets.QTableWidgetItem(str(len(watchlist_targets)))
            target_item.setForeground(QtGui.QColor('#FFFFFF'))
            watchlist_item = QtWidgets.QTableWidgetItem(f"{watchlist_category.name}")
            watchlist_item.setForeground(QtGui.QColor('#FFFFFF'))
            watchlist_item.setData(Qt.UserRole, watchlist_category.id)
            watchlist_item.setData(Qt.UserRole + 1, watchlist_category.date_created)
            watchlist_item.setData(Qt.UserRole + 2, watchlist_category.description)
            watchlist_item.setData(Qt.UserRole + 3, watchlist_category.color)
            row_position = self.watchlist_results.watchlistsTable.rowCount()
            self.watchlist_results.watchlistsTable.insertRow(row_position)
            self.watchlist_results.watchlistsTable.setItem(row_position - 1, 0, checkbox_item)
            self.watchlist_results.watchlistsTable.setItem(row_position - 1, 1, watchlist_item)
            self.watchlist_results.watchlistsTable.setItem(row_position - 1, 2, target_item)
        self.watchlistButton.setText("Watchlist(" + str(num_rows) + ")")
I need to understand whether, with the use of np.delete below, we remove a whole block of 2 columns and len(pk)-1 rows, OR whether we just delete the last line (composed of the 2 columns kh and pk) at index len(pk)-1.
# Save all the fiducial power spectrums.
aaa = len(zrange)
while aaa >= 0:
    if aaa == len(zrange):
        kh, pk = np.loadtxt(
            "test_matterpower_" + str(aaa) + ".dat",
            usecols=(0, 1),
            unpack=True,
        )
    elif aaa > 0:
        kh1, pk1 = np.loadtxt(
            "test_matterpower_" + str(aaa) + ".dat",
            usecols=(0, 1),
            unpack=True,
        )
        kh = np.vstack((kh, kh1))
        pk = np.vstack((pk, pk1))
    else:
        kh1, pk1 = np.loadtxt(
            "test_matterpower_" + str(len(zrange) + 1) + ".dat",
            usecols=(0, 1),
            unpack=True,
        )
        kh = np.vstack((kh, kh1))
        pk = np.vstack((pk, pk1))
    aaa = aaa - 1
kh = np.delete(kh, len(kh) - 1, axis=0)
pk = np.delete(pk, len(pk) - 1, axis=0)
with open("pkkh_seq", "w") as f:
    f.write(str(pk) + str(kh))
outP = open(fold_path_fid[0] + "/Pks8sqRatio_ist_LogSplineInterpPk.dat", "w")
Indeed, I would like to remove the whole last block, i.e. remove the last len(pk)-1 rows (each composed of 2 columns).
Is the syntax above correct for doing that?
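For reference, np.delete with a single index removes exactly one row along axis=0; deleting a whole block of rows takes a list or slice of indices. A small generic check (not tied to the power-spectrum files above):
import numpy as np

a = np.arange(12).reshape(4, 3)     # 4 rows, 3 columns

one_row = np.delete(a, len(a) - 1, axis=0)
print(one_row.shape)                # (3, 3): a single index removes exactly one row

block = np.delete(a, np.s_[1:], axis=0)
print(block.shape)                  # (1, 3): a slice removes a whole block of rows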
I am a newbie in programming. I am writing a Python script that extracts data from a PDF. I am having trouble with a namedtuple: I am not able to provide its arguments properly. I think my logic is wrong somewhere, whether it is the indentation, the sequence, or something else.
I am hoping to get some explanation of why I am getting the error.
This is a sample of my PDF (I had to block out some sensitive info).
This is what I am trying to achieve.
I am getting this error:
Traceback (most recent call last):
File "/Users/jeff/PycharmProjects/extractFreightInvoice/main.py", line 79, in <module>
lines.append(Line('invDate, invNumber, poNumber, contactName, jobNumber, '
TypeError: <lambda>() missing 11 required positional arguments: 'invNumber', 'poNumber', 'contactName', 'jobNumber', 'jobName', 'invDescription', 'siteAddress', 'invItemsDesc', 'invItemsQty', 'invItemsUnitPrice', and 'invItemsAmount'
My code is as per below:
# This is a pdf extractor
import re
import pdfplumber
import pandas as pd
from collections import namedtuple

Line = namedtuple('Line', 'invDate, invNumber, poNumber, contactName, jobNumber, '
                          'jobName, invDescription, siteAddress, invItemsDesc, invItemsQty, invItemsUnitPrice, '
                          'invItemsAmount')

invDate_re = re.compile(r'(Clever Core NZ Limited\s)(\d{1,2}/\d{1,2}/\d{4})(.+)')
invNumber_re = re.compile(r'(IN\d{6})')
poNumber_re = re.compile(r'\d{4}')
contactNameJordan_re = re.compile(r'(Jordan\s.+)')
contactNameLorna_re = re.compile(r'(Lorna\s.+)')
jobNumber_re = re.compile(r'(J[\d]{6})')
jobName_re = re.compile(r'(Job Name)')
invDescription_re = re.compile(r'(Invoice Description)')
siteAddress_re = re.compile(r'(Site address.*)')
colHeading_re = re.compile(r'((Description)(.* Quantity.* Unit Price.*))')
invItems_re = re.compile(
    r'(.+) (([0-9]*[.])?[0-9]+) (([0-9]*[.])?[0-9]+) (\d*\?\d+|\d{1,3}(,\d{3})*(\.\d+)?)')
# quoteLines_re = re.compile(r'(.+)(:\s*)(.+)')
# clevercorePriceLine_re = re.compile(r'(.* First .*\s?)(-\s?.*\$)(\s*)(.+)')

file = 'CombinedInvoicePdf.pdf'
lines = []
with pdfplumber.open(file) as myPdf:
    for page in myPdf.pages:
        text = page.extract_text()
        lines = text.split('\n')
        index = 0
        for i in range(len(lines)):
            line = lines[i]
            invDateLine = invDate_re.search(line)
            invNumberLine = invNumber_re.search(line)
            poNumberLine = poNumber_re.search(line)
            contactNameJordanLine = contactNameJordan_re.search(line)
            contactNameLornaLine = contactNameLorna_re.search(line)
            jobNumberLine = jobNumber_re.search(line)
            jobNameLine = jobName_re.search(line)
            invDescriptionLine = invDescription_re.search(line)
            colHeadingLine = colHeading_re.search(line)
            siteAddressLine = siteAddress_re.search(line)
            invItemsLine = invItems_re.search(line)
            if invDateLine:
                invDate = invDateLine.group(2)
            if invNumberLine:
                invNumber = invNumberLine.group(1)
            if poNumberLine and len(line) == 4:
                poNumber = poNumberLine.group(0)
            if contactNameJordanLine:
                contactName = 'Jordan Michael'
            if contactNameLornaLine:
                contactName = 'Lorna Tolentin'
            if jobNumberLine:
                jobNumber = lines[i]
            if jobNameLine:
                jobName = lines[i + 1]
            if invDescriptionLine:
                invDescription = lines[i + 1]
            if siteAddressLine:
                if len(lines[i + 1]) > 0 and len(lines[i + 2]) == 0:
                    siteAddress = lines[i + 1]
                elif len(lines[i + 1]) > 0 and len(lines[i + 2]) > 0:
                    siteAddress = lines[i + 1] + ' ' + lines[i + 2]
                else:
                    siteAddress = 'check invoice'
            if invItemsLine and invItemsLine[2] != '06':
                invItemsDesc = invItemsLine.group(1)
                invItemsQty = invItemsLine.group(2)
                invItemsUnitPrice = invItemsLine.group(4)
                invItemsAmount = invItemsLine.group(6)
                lines.append(Line('invDate, invNumber, poNumber, contactName, jobNumber, '
                                  'jobName, invDescription, siteAddress, invItemsDesc, invItemsQty, invItemsUnitPrice, '
                                  'inItemsAmount'))

df = pd.DataFrame(lines)
print(df)
print(df.head())
df.to_csv('freightCharges.csv')
Line is a namedtuple subclass with named fields.
You need to fill them with separate arguments (the values you collected above), not a single string:
lines.append(Line(invDate, invNumber, poNumber, contactName, jobNumber, jobName, invDescription,
                  siteAddress, invItemsDesc, invItemsQty, invItemsUnitPrice, invItemsAmount))
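As a quick self-contained illustration of the difference (generic namedtuple behaviour, not tied to the invoice fields):
from collections import namedtuple

Point = namedtuple('Point', 'x, y')

p = Point(3, 4)           # one argument per field
print(p.x, p.y)           # 3 4

# Point('3, 4')           # a single string fills only 'x' and raises a TypeError
#                         # about missing positional arguments, as in the question

q = Point._make([5, 6])   # an iterable of values can be expanded with _make()
print(q)                  # Point(x=5, y=6)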
So I have a Python script that first aggregates and standardizes data into a file I call the "tripFile". The code then tries to identify the differences between this most recent tripFile and a previous one.
If I export the tripFile from the first part of the code and import it again for the second part, the second part takes around 5 minutes to run and says it is looping over a bit more than 4,000 objects.
newTripFile = pd.read_csv(PATH + today + ' Trip File v6.csv')
However, if I do not export and re-import the data (just keeping the dataframe from the first part of the code), it takes a bit less than 24 hours (!!) and says it is looping over a bit more than 951,691 objects.
newTripFile = tripFile
My data is a dataframe, and I checked its shape: it is identical to the file I export.
Any idea what could be causing that?
Here is the second part of my code:
oldTripFile = pd.read_excel(PATH + OLDTRIPFILE)
oldTripFile.drop(['id'], axis=1, inplace=True)
oldTripFile['status'] = 'old'

# New version of trip file
newTripFile = pd.read_csv(PATH + today + ' Trip File v6.csv')
newTripFile.drop(['id'], axis=1, inplace=True)
newTripFile['status'] = 'new'

db_trips = pd.concat([oldTripFile, newTripFile])  # concatenation of the two dataframes
db_trips = db_trips.reset_index(drop=True)
db_trips.drop_duplicates(keep=False, subset=[column for column in db_trips.columns[:-1]], inplace=True)
db_trips = db_trips.reset_index(drop=True)
db_trips.head()

update_details = []

# Get the duplicates: only consider ['fromCode', 'toCode', 'mode'] for identifying duplicates
# Create a dataframe that contains only the trips that were deleted or recently added
db_trips_delete_new = db_trips.drop_duplicates(keep=False, subset=['fromCode', 'toCode', 'mode'])
db_trips_delete_new = db_trips_delete_new.reset_index(drop=True)

# New trips
new_trips = db_trips_delete_new[db_trips_delete_new['status'] == 'new'].values.tolist()
for trip in new_trips:
    trip.append('new trip added')
update_details = update_details + new_trips

# Deleted trips
old_trips = db_trips_delete_new[db_trips_delete_new['status'] == 'old'].values.tolist()
for trip in old_trips:
    trip.append('trip deleted')
update_details = update_details + old_trips

db_trips_delete_new.head()

# Updated trips
# Ocean: no need to check the transit time column
sea_trips = db_trips.loc[db_trips['mode'].isin(['sea', 'cfs'])]
sea_trips = sea_trips.reset_index(drop=True)
list_trips_sea_update = sea_trips[sea_trips.duplicated(subset=['fromCode', 'toCode', 'mode'], keep=False)].values.tolist()
if len(list_trips_sea_update) != 0:
    for i in tqdm(range(0, len(list_trips_sea_update) - 1)):
        for j in range(i + 1, len(list_trips_sea_update)):
            if list_trips_sea_update[i][2] == list_trips_sea_update[j][2] and list_trips_sea_update[i][9] == list_trips_sea_update[j][9] and list_trips_sea_update[i][14] == list_trips_sea_update[j][14]:
                update_comment = ''
                # Check display from / to
                if list_trips_sea_update[i][5] != list_trips_sea_update[j][5]:
                    update_comment = update_comment + 'fromDisplayLocation was updated.'
                if list_trips_sea_update[i][12] != list_trips_sea_update[j][12]:
                    update_comment = update_comment + 'toDisplayLocation was updated.'
                # Get the updated trip (the row with status new)
                if list_trips_sea_update[i][17] == 'new' and list_trips_sea_update[j][17] != 'new':
                    list_trips_sea_update[i].append(update_comment)
                    update_details = update_details + [list_trips_sea_update[i]]
                else:
                    if list_trips_sea_update[j][17] == 'new' and list_trips_sea_update[i][17] != 'new':
                        list_trips_sea_update[j].append(update_comment)
                        update_details = update_details + [list_trips_sea_update[j]]
                    else:
                        print('excel files are not organized')

# Ground: transit time column needs to be checked
ground_trips = db_trips[~db_trips['mode'].isin(['sea', 'cfs'])]
ground_trips = ground_trips.reset_index(drop=True)
list_trips_ground_update = ground_trips[ground_trips.duplicated(subset=['fromCode', 'toCode', 'mode'], keep=False)].values.tolist()
if len(list_trips_ground_update) != 0:
    for i in tqdm(range(0, len(list_trips_ground_update) - 1)):
        for j in range(i + 1, len(list_trips_ground_update)):
            if list_trips_ground_update[i][2] == list_trips_ground_update[j][2] and list_trips_ground_update[i][9] == list_trips_ground_update[j][9] and list_trips_ground_update[i][14] == list_trips_ground_update[j][14]:
                update_comment = ''
                # Check display from / to
                if list_trips_ground_update[i][5] != list_trips_ground_update[j][5]:
                    update_comment = update_comment + 'fromDisplayLocation was updated.'
                if list_trips_ground_update[i][12] != list_trips_ground_update[j][12]:
                    update_comment = update_comment + 'toDisplayLocation was updated.'
                # Check transit time
                if list_trips_ground_update[i][15] != list_trips_ground_update[j][15]:
                    update_comment = update_comment + 'transit time was updated.'
                # Get the updated trip (the row with status new)
                if list_trips_ground_update[i][17] == 'new' and list_trips_ground_update[j][17] != 'new':
                    list_trips_ground_update[i].append(update_comment)
                    update_details = update_details + [list_trips_ground_update[i]]
                else:
                    if list_trips_ground_update[j][17] == 'new' and list_trips_ground_update[i][17] != 'new':
                        list_trips_ground_update[j].append(update_comment)
                        update_details = update_details + [list_trips_ground_update[j]]
                    else:
                        print('excel files are not organized')
And here is an example of what my trip file looks like:
Any help is appreciated :)
In case it is useful to someone else: the issue was coming from the data types. When keeping my tripFile in memory, one of my columns held, for example, "10.0", whereas when imported the same column held "10".
Since I am comparing against another imported tripFile, when both files are imported the column has the same type in both. But when one of them is kept in memory, the column has a different type in each file, so every row is considered updated. That is why it takes so much longer when kept in memory.
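A small illustration of that effect (the column names are made up; the exact dtypes depend on how the tripFile is built in the first part of the code): if a column is held as strings like "10.0" in memory but comes back from read_csv as integers, drop_duplicates(keep=False) never matches the old and new rows, so every trip survives into the nested comparison loops.
import io
import pandas as pd

# Hypothetical: in memory the column ended up as strings such as "10.0" ...
in_memory = pd.DataFrame({'fromCode': ['AKL', 'SYD'], 'transitTime': ['10.0', '12.0']})
# ... while a CSV round trip parses the same values back as integers.
reimported = pd.read_csv(io.StringIO('fromCode,transitTime\nAKL,10\nSYD,12\n'))

both = pd.concat([in_memory, reimported]).reset_index(drop=True)
print(len(both.drop_duplicates(keep=False)))   # 4: "10.0" != 10, nothing is dropped

# Normalising the dtype before comparing restores the matching:
in_memory['transitTime'] = in_memory['transitTime'].astype(float)
reimported['transitTime'] = reimported['transitTime'].astype(float)
both = pd.concat([in_memory, reimported]).reset_index(drop=True)
print(len(both.drop_duplicates(keep=False)))   # 0: every old row matches a new row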
I am trying to scrape the "PRINCIPAL STOCKHOLDERS" table from the linked text file and convert it to a CSV file. Right now I am only half successful: I can locate the table and parse it, but somehow I cannot convert the text table to a standard one. My code is attached. Can someone help me with it?
import requests
import pandas as pd

url = r'https://www.sec.gov/Archives/edgar/data/1034239/0000950124-97-003372.txt'

# Different approach, the first approach does not work
filing_url = requests.get(url)
content = filing_url.text
splited_data = content.split('\n')
table_title = 'PRINCIPAL STOCKHOLDERS'
END_TABLE_LINE = '- ------------------------'

def find_no_line_start_table(table_title, splited_data):
    found_no_lines = []
    for index, line in enumerate(splited_data):
        if table_title in line:
            found_no_lines.append(index)
    return found_no_lines

table_start = find_no_line_start_table(table_title, splited_data)
# I need help with locating the table. If I locate it using the above function,
# it returns two locations and I have to manually choose the correct one.
table_start = table_start[1]

def get_start_data_table(table_start, splited_data):
    for index, row in enumerate(splited_data[table_start:]):
        if '<C>' in row:
            return table_start + index

def get_end_table(start_table_data, splited_data):
    for index, row in enumerate(splited_data[start_table_data:]):
        if END_TABLE_LINE in row:
            return start_table_data + index

def row(l):
    l = l.split()
    number_columns = 8
    if len(l) >= number_columns:
        data_row = [''] * number_columns
        first_column_done = False
        index = 0
        for w in l:
            if not first_column_done:
                data_row[0] = ' '.join([data_row[0], w])
                if ':' in w:
                    first_column_done = True
            else:
                index += 1
                data_row[index] = w
        return data_row

start_line = get_start_data_table(table_start, splited_data)
end_line = get_end_table(start_line, splited_data)
table = splited_data[start_line:end_line]

# I also need help with converting the text table to a CSV file; somehow the following
# function does not recognize my columns.
def take_table(table):
    owner = []
    Num_share = []
    middle = []
    middle_1 = []
    middle_2 = []
    middle_3 = []
    prior_offering = []
    after_offering = []
    for r in table:
        data_row = row(r)
        if data_row:
            col_1, col_2, col_3, col_4, col_5, col_6, col_7, col_8 = data_row
            owner.append(col_1)
            Num_share.append(col_2)
            middle.append(col_3)
            middle_1.append(col_4)
            middle_2.append(col_5)
            middle_3.append(col_6)
            prior_offering.append(col_7)
            after_offering.append(col_8)
    table_data = {'owner': owner, 'Num_share': Num_share, 'middle': middle, 'middle_1': middle_1,
                  'middle_2': middle_2, 'middle_3': middle_3, 'prior_offering': prior_offering,
                  'after_offering': after_offering}
    return table_data

# print(table)
dict_table = take_table(table)
a = pd.DataFrame(dict_table)
a.to_csv('trail.csv')
I think what you need to do is
pd.DataFrame.from_dict(dict_table)
instead of
pd.DataFrame(dict_table)
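If the real problem is that the columns coming out of take_table end up with different lengths, plain pd.DataFrame (and from_dict with its default orient) refuses with a ValueError; one way around that, sketched here on made-up values, is to build the frame row-wise with orient='index' and transpose:
import pandas as pd

# Made-up parse result where one column came out short.
dict_table = {'owner': ['Smith, John:', 'Doe, Jane:'],
              'Num_share': ['1,000,000', '500,000'],
              'prior_offering': ['10.5%']}

# pd.DataFrame(dict_table)   # would raise: all arrays must be of the same length

df = pd.DataFrame.from_dict(dict_table, orient='index').transpose()
print(df.columns.tolist())   # ['owner', 'Num_share', 'prior_offering']
print(df.shape)              # (2, 3) -- the missing cell is filled with NaN
df.to_csv('trail.csv', index=False)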
I have a string with multiple lines, shown below. For each line, I want to split the string and add the pieces to a JSON output file. I did this using get_text().split and a regular expression; however, I am not sure this is the best way to do it.
Input file:
Server:prod01
Available memory: 20480 Disk:200 CPU:4
Used memory:12438 Disk:120 CPU:3
Unused memory:8042 Disk:80 CPU:1
Server:prod02
Available memory: 40960 Disk:500 CPU:8
Used memory:20888 Disk:320 CPU:3
Unused memory:20072 Disk:180 CPU:5
Expected output JSON:
{"prod01_available_memory":20480}
{"prod01_used_memory":12438}
{"prod01_unused_memory":8042}
{"prod01_available_disk":200}
{"prod01_used_disk":120}
{"prod01_unused_disk":80}
{"prod01_available_cpu":4}
{"prod01_used_cpu":3}
{"prod01_unused_cpu":1}
{"prod02_available_memory":40960}
{"prod02_used_memory":20888}
{"prod02_unused_memory":20072"}
{"prod02_available_disk":500"}
{"prod02_used_disk":380}
{"prod02_unused_disk":120}
{"prod02_available_cpu":8}
{"prod02_used_cpu":3}
{"prod02_unused_cpu":5}
Thanks,
Rinku
Below is my code -
def tsplit(string, *delimiters):
    pattern = '|'.join(map(re.escape, delimiters))
    return re.split(pattern, string)

prelist = pre.get_text().splitlines()
server_name = re.split('server|:', prelist[0])[2].strip()
if server_name == 'prod01':
    # print prelist[1]
    prod01_memory_actv = int(re.split('Activated memory|:|Disk|:|CPU|:', prelist[1])[2])
    prod01_Disk_actv = int(re.split('Activated memory|:|Disk|:|CPU|:', prelist[1])[4])
    prod01_CPU_actv = int(re.split('Activated memory|:|Disk|:|CPU|:', prelist[1])[6])
    # print prelist[2]
    prod01_memory_cons = int(re.split('memory consumed|:|Disk|:|CPU|:', prelist[2])[2])
    prod01_Disk_cons = int(re.split('memory consumed|:|Disk|:|CPU|:', prelist[2])[4])
    prod01_CPU_cons = int(re.split('memory consumed|:|Disk|:|CPU|:', prelist[2])[6])
    # print prelist[4]
    prod01_memory_unused = int(re.split('memory unused|:|Disk|:|CPU|:', prelist[4])[2])
    prod01_Disk_unused = int(re.split('memory unused|:|Disk|:|CPU|:', prelist[4])[4])
    prod01_CPU_unused = int(re.split('memory unused|:|Disk|:|CPU|:', prelist[4])[6])
elif server_name == 'prod02':
    # print prelist[1]
    prod02memory_actv = int(re.split('Activated memory|:|Disk|:|CPU|:', prelist[1])[2])
    prod02Disk_actv = int(re.split('Activated memory|:|Disk|:|CPU|:', prelist[1])[4])
    prod02CPU_actv = int(re.split('Activated memory|:|Disk|:|CPU|:', prelist[1])[6])
    # print prelist[2]
    prod02memory_cons = int(re.split('memory consumed|:|Disk|:|CPU|:', prelist[2])[2])
    prod02Disk_cons = int(re.split('memory consumed|:|Disk|:|CPU|:', prelist[2])[4])
    prod02CPU_cons = int(re.split('memory consumed|:|Disk|:|CPU|:', prelist[2])[6])
    # print prelist[4]
    prod02memory_unused = int(re.split('memory unused|:|Disk|:|CPU|:', prelist[4])[2])
    prod02Disk_unused = int(re.split('memory unused|:|Disk|:|CPU|:', prelist[4])[4])
    prod02CPU_unused = int(re.split('memory unused|:|Disk|:|CPU|:', prelist[4])[6])
else:
    # assign all variables 0
    .....

proc_item["logtime"] = str(t1)
proc_item["prod01_memory_actv"] = prod01_memory_actv
proc_item["prod01_Disk_actv"] = prod01_Disk_actv
proc_item["prod01_CPU_actv"] = prod01_CPU_actv
......
# for all other variables...
proc_data.append(proc_item)

with open("./proc_" + str(date.today()) + ".txt", 'a+') as f:
    json.dump(proc_data, f)
    f.write("\n")
I have only some basic knowledge of Python, but here is an approach just using string splitting and array indices:
hostmtrcs = "Server:prod01 Available memory:20480 Disk:200 CPU:4 Used memory:12438 Disk:120 CPU:3 Unused memory:8042 " \
"Disk:80 CPU:1 Server:prod02 Available memory: 40960 Disk:500 CPU:8 Used memory:20888 Disk:320 CPU:3 Unused " \
"memory:20072 Disk:180 CPU:5 "
datasplt = hostmtrcs.split(":")
hstname = ''
attrkey = ''
attrvalue = ''
for word in range(0, datasplt.__len__()):
if not datasplt[word].__contains__("Server"):
elmnt = datasplt[word].split(" ")
if datasplt[word].__contains__('prod'):
hstname = elmnt[0].lower()
if elmnt.__len__() == 3:
attrkey = elmnt[1].lower() + "_" + elmnt[2].lower() # attrkey
else:
attrkey = elmnt[1]
# retreive the value from the next element in the 1st attry datasplit
if word != datasplt.__len__() - 1:
nxtelmnt = datasplt[word + 1].split(" ")
attrvalue = nxtelmnt[0] # sattrvalue frm next element
finalfrmt = '{' + '"' +hstname + "_" + attrkey + '"' + ":" + attrvalue + '}'
print(finalfrmt)
I think you can do it with a dict and then just dump it with json. (In your case the expected output is not valid JSON as a whole, but since that is what you asked for, I dump the dict via json.) I have not validated the keys; I am assuming you already get the dictionary data correctly.
d = {'Server': 'prod01',
     'Available memory': 20480,
     'Disk': 200,
     'CPU': 4}
import json
s = json.dumps({str(d['Server'] + "_" + key).replace(' ', '_'): value for key, value in d.items()})
print(json.loads(s))
>>> {'prod01_Server': 'prod01', 'prod01_Available_memory': 20480, 'prod01_Disk': 200, 'prod01_CPU': 4}
You should split the input text, section by section, according to what you're looking for.
data = '''Server:prod01
Available memory: 20480 Disk:200 CPU:4
Used memory:12438 Disk:120 CPU:3
Unused memory:8042 Disk:80 CPU:1
Server:prod02
Available memory: 40960 Disk:500 CPU:8
Used memory:20888 Disk:320 CPU:3
Unused memory:20072 Disk:180 CPU:5'''
import re
import json
print(json.dumps({
    '_'.join((s, l.split(' ', 1)[0], k)).lower(): int(v)
    for s, d in [i.split('\n', 1) for i in data.split('Server:') if i]
    for l in d.split('\n')
    for k, v in re.findall(r'(\w+):\s*(\d+)', l)
}))
This outputs:
{"prod01_available_memory": 20480, "prod01_available_disk": 200, "prod01_available_cpu": 4, "prod01_used_memory": 12438, "prod01_used_disk": 120, "prod01_used_cpu": 3, "prod01_unused_memory": 8042, "prod01_unused_disk": 80, "prod01_unused_cpu": 1, "prod02_available_memory": 40960, "prod02_available_disk": 500, "prod02_available_cpu": 8, "prod02_used_memory": 20888, "prod02_used_disk": 320, "prod02_used_cpu": 3, "prod02_unused_memory": 20072, "prod02_unused_disk": 180, "prod02_unused_cpu": 5}