How to visualize training and loss from a JSON file? - python

I have a JSON file where the data is organized in **dictionary** format. I am writing a small script to read this JSON file and convert it to a specific dict format (in other words, rearrange the data structure and extract some specific fields) so I can make a plot or visualization later.
The input JSON file (trainer_state) can be found here: trainer_state.json
This file is produced by the Trainer class from the Hugging Face library.
Script.py
import pandas as pd
import json

filename = '/checkpoint-2000/trainer_state.json'
df = pd.read_json(filename)
column_names = list(df.columns.values)
print('column_names\n', column_names)

# Opening JSON file
with open(filename) as json_file:
    data = json.load(json_file)
# print('show log_history', data['log_history'])
log_history = data['log_history']
# print('\nlog_history\n', log_history[0]['epoch'])

odd_dict, even_dict = {}, {}
log_history_dict = {}
for count, value in enumerate(log_history):
    log_history_dict[count] = value
print('\nlog_history_dict\n', log_history_dict)

for k, v in log_history_dict.items():
    if k % 2 == 0:
        even_dict[k] = v
    else:
        odd_dict[k] = v
# print('\n even_dict', even_dict, '\nodd_dict', odd_dict)

# log_history_clean = {}
# for v in odd_dict.values():
#     log_history_clean['epoch'] = v['epoch']
#     log_history_clean['learning_rate'] = v['learning_rate']
#     log_history_clean['loss'] = v['loss']
#     log_history_clean['step'] = v['step']
#     # for key, value in v.items():
#     #     log_history_clean[key] = value
#     #     print(key, value)
# print(log_history_clean)
Sample: this is the data structure inside the JSON file (trainer_state.json):
{
    "best_metric": null,
    "best_model_checkpoint": null,
    "epoch": 1.4265335235378032,
    "global_step": 2000,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
    "log_history": [
        {
            "epoch": 0.36,
            "learning_rate": 3.94339514978602e-05,
            "loss": 0.5516,
            "step": 500
        },
        {
            "epoch": 0.36,
            "eval_cer": 4.407666576772222,
            "eval_loss": 0.25193867087364197,
            "eval_runtime": 1338.5651,
            "eval_samples_per_second": 13.973,
            "eval_steps_per_second": 0.583,
            "eval_wer": 17.79562559983836,
            "step": 500
        }
    ]
}
The expected dict output I am trying to produce (each index holding the merged train/eval pair) is:
# Goal:
# {0: {'epoch': 0.36,
#      'learning_rate': 3.94339514978602e-05,
#      'loss': 0.5516,
#      'step': 500,
#      'eval_cer': 4.407666576772222,
#      'eval_loss': 0.25193867087364,
#      'eval_runtime': 1338.5651,
#      'eval_samples_per_second': 13.973,
#      'eval_steps_per_second': 0.583,
#      'eval_wer': 17.79562559983836},
#  1: {...},
#  ...
# }
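A sketch of one way to get both the goal dict and a quick plot (assuming adjacent train/eval entries in log_history belong together, as in the sample above): filter records by their keys instead of by even/odd position, merge each pair, and plot with pandas and matplotlib.

import json

import matplotlib.pyplot as plt
import pandas as pd

with open('/checkpoint-2000/trainer_state.json') as f:
    log_history = json.load(f)['log_history']

# Split records by content rather than by position in the list.
train_logs = [e for e in log_history if 'loss' in e]
eval_logs = [e for e in log_history if 'eval_loss' in e]

# Merge each train/eval pair under one index, as in the goal above.
merged = {i: {**t, **v} for i, (t, v) in enumerate(zip(train_logs, eval_logs))}
print(merged[0])

# Visualize training vs. evaluation loss over steps.
train_df = pd.DataFrame(train_logs)
eval_df = pd.DataFrame(eval_logs)
plt.plot(train_df['step'], train_df['loss'], label='training loss')
plt.plot(eval_df['step'], eval_df['eval_loss'], label='eval loss')
plt.xlabel('step')
plt.ylabel('loss')
plt.legend()
plt.show()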

Related

Stopping the pipette with user input (opentrons)

Working on a biotech research project where a robot is doing the dilution, I'm having trouble configuring the code so that the pipette stops at a specific column. Ideally we want the code to ask the user which column the pipette should stop at.
More info on the API: https://protocols.opentrons.com/protocol/customizable_serial_dilution_ot2
def get_values(*names):
    import json
    _all_values = json.loads("""{"pipette_type":"p300_single_gen2","mount_side":"right","tip_type":"standard","trough_type":"nest_12_reservoir_15ml","plate_type":"nest_96_wellplate_200ul_flat","dilution_factor":3,"num_of_dilutions":10,"total_mixing_volume":150,"blank_on":true,"tip_use_strategy":"never","air_gap_volume":10}""")
    return [_all_values[n] for n in names]


"""DETAILS."""

metadata = {
    'protocolName': 'Customizable Serial Dilution',
    'author': 'Opentrons <protocols#opentrons.com>',
    'source': 'Protocol Library',
    'apiLevel': '2.11'
}


def run(protocol_context):
    """PROTOCOL BODY."""
    [pipette_type, mount_side, tip_type, trough_type, plate_type,
     dilution_factor, num_of_dilutions, total_mixing_volume,
     blank_on, tip_use_strategy, air_gap_volume] = get_values(  # noqa: F821
        'pipette_type', 'mount_side', 'tip_type', 'trough_type',
        'plate_type', 'dilution_factor', 'num_of_dilutions',
        'total_mixing_volume', 'blank_on',
        'tip_use_strategy', 'air_gap_volume'
    )

    # check for bad setup here
    if not 1 <= num_of_dilutions <= 11:
        raise Exception('Enter a number of dilutions between 1 and 11')
    if num_of_dilutions == 11 and blank_on == 1:
        raise Exception('No room for blank with 11 dilutions')

    pip_range = pipette_type.split('_')[0].lower()
    tiprack_map = {
        'p10': {
            'standard': 'opentrons_96_tiprack_10ul',
            'filter': 'opentrons_96_filtertiprack_20ul'
        },
        'p20': {
            'standard': 'opentrons_96_tiprack_20ul',
            'filter': 'opentrons_96_filtertiprack_20ul'
        },
        'p50': {
            'standard': 'opentrons_96_tiprack_300ul',
            'filter': 'opentrons_96_filtertiprack_200ul'
        },
        'p300': {
            'standard': 'opentrons_96_tiprack_300ul',
            'filter': 'opentrons_96_filtertiprack_200ul'
        },
        'p1000': {
            'standard': 'opentrons_96_tiprack_1000ul',
            'filter': 'opentrons_96_filtertiprack_1000ul'
        }
    }

    # labware
    trough = protocol_context.load_labware(trough_type, '2')
    plate = protocol_context.load_labware(plate_type, '3')
    tip_name = tiprack_map[pip_range][tip_type]
    tipracks = [
        protocol_context.load_labware(tip_name, slot)
        for slot in ['1', '4']
    ]
    print(mount_side)

    # pipette
    pipette = protocol_context.load_instrument(
        pipette_type, mount_side, tipracks)

    # reagents
    diluent = trough.wells()[0]

    transfer_volume = total_mixing_volume / dilution_factor
    diluent_volume = total_mixing_volume - transfer_volume

    if 'multi' in pipette_type:
        dilution_destination_sets = [
            [row] for row in plate.rows()[0][1:num_of_dilutions]]
        dilution_source_sets = [
            [row] for row in plate.rows()[0][:num_of_dilutions - 1]]
        blank_set = [plate.rows()[0][num_of_dilutions + 1]]
    else:
        dilution_destination_sets = plate.columns()[1:num_of_dilutions]
        dilution_source_sets = plate.columns()[:num_of_dilutions - 1]
        blank_set = plate.columns()[num_of_dilutions + 1]

    all_diluent_destinations = [
        well for set in dilution_destination_sets for well in set]

    pipette.pick_up_tip()
    for dest in all_diluent_destinations:
        # Distribute diluent across the plate to the number of samples
        # and add diluent to one column after the number of samples for a blank
        pipette.transfer(
            diluent_volume,
            diluent,
            dest,
            air_gap=air_gap_volume,
            new_tip='never')
    pipette.drop_tip()

    # Dilution of samples across the 96-well flat bottom plate
    if tip_use_strategy == 'never':
        pipette.pick_up_tip()
    for source_set, dest_set in zip(dilution_source_sets,
                                    dilution_destination_sets):
        for s, d in zip(source_set, dest_set):
            pipette.transfer(
                transfer_volume,
                s,
                d,
                air_gap=air_gap_volume,
                mix_after=(5, total_mixing_volume / 2),
                new_tip=tip_use_strategy)
    if tip_use_strategy == 'never':
        pipette.drop_tip()

    if blank_on:
        pipette.pick_up_tip()
        for blank_well in blank_set:
            pipette.transfer(
                diluent_volume,
                diluent,
                blank_well,
                air_gap=air_gap_volume,
                new_tip='never')
        pipette.drop_tip()
Any help is very much appreciated. Thank you!
Currently the robot just goes through all the columns, but we want to find a way to have it stop at a specific column.
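One pattern worth sketching (hedged; input() is not available while a protocol runs on the robot): pass the stop column through the same get_values JSON that already supplies num_of_dilutions, then cap the dilution count with it. The stop_column field below is hypothetical.

# Inside run(), right after get_values(...) unpacks the parameters:
stop_column = 6  # hypothetical user-supplied value, e.g. another get_values field
num_of_dilutions = min(num_of_dilutions, stop_column)
# Everything downstream (dilution_source_sets, dilution_destination_sets,
# blank_set) is already derived from num_of_dilutions, so the pipette
# stops at the chosen column.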

Simulations taking too long and failing with a simple model in Abaqus

I'm trying to simulate the compression of a tensegrity structure in Abaqus, but I can't reach a solution: the time increment becomes too small and the simulations keep failing.
The model simulates a regular truncated cuboctahedron tensegrity being compressed by 0.15 of the radius of the circumsphere surrounding the structure (the sphere is not present in the model, as it is only used to size it).
The model is fixed at the bottom while the upper part is free to rotate, and a displacement BC is used to compress the structure. I've added some constraints to the nodes in the middle to force them to stay in the same plane (this should be typical behaviour for tensegrity structures, and the BCs are okay since they don't produce significant reaction forces).
My goal is to run this and many other tensegrity simulations, adding bendability (using quadratic beams) and other internal structures, but all the simulations fail in more or less the same way: at some point the job aborts due to excessive rotation.
BCs can be seen here
The model has few elements and nodes, so it should be easy to run in Abaqus, but if I don't force mass scaling I end up with very small increments, and that's not feasible for my model.
The material properties are taken from the literature, as the model simulates the compression of the cytoskeleton of a cell. I've tried everything from increasing or decreasing the seed number on the beams to adjusting the boundary conditions, and I don't know what to do anymore.
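For a sense of scale, the stable increment in an explicit analysis is roughly the smallest element length divided by the material wave speed sqrt(E/rho). A back-of-the-envelope sketch with this model's microtubule values (the characteristic element length is an assumption) shows why the increments are so small without mass scaling:

import math

# Rough stable-increment estimate (values mirror the script below;
# the characteristic element length is an assumption).
E = 1200.0             # microtubule Young's modulus, MPa (N/mm^2)
rho = 1.43e-09         # density, ton/mm^3
L_min = 0.015 / 10     # assumed element length: cell radius / 10, in mm

c = math.sqrt(E / rho)  # dilatational wave speed, mm/s (~9e5)
dt = L_min / c          # stable increment, s (~1.6e-9)
print(c, dt)
# A 9 s step at ~1.6e-9 s per increment needs billions of increments,
# which is why mass scaling (the increment grows with the square root
# of the mass factor) is forced here.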
I'm attaching the Python 3 code that I wrote to create this model; in the initial part it is possible to adjust the mass scaling, the seeding of the beams, and the geometrical and mechanical parameters of the model.
Thanks to anyone who'll help me understand what I'm doing wrong!
# Script aimed at generating the necessary geometries and material properties
# of the Tensegrity cuboctahedron with 1 layer (no nucleoskeleton).
## Units
# Length - mm
# Force - N
# Stress - MPa
# Mass - Ton
# Density - Ton/mm3
# Time - s
# # # # # # # # VARIABLES INITIALIZATION # # # # # # # #
# General
import copy
import math
# -*- coding: mbcs -*-
import mesh
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from optimization import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
import numpy as np
# import matplotlib.pyplot as plt
# General
MassScale = 1e-4
cpus = 1
seedmt = 2
# Type of experiment
Bendable = True
Compression = True
Time = 9 # seconds
Displace = 0.15 # Relative displacement (Percentage of cell radius)
# Microtubules
Emt = 1200 # Young's modulus in MPa
BSmt = 1 # Bending Stiffness
CAmt = 0.00000000019 # Cross sectional Area in mm2
vmt = 0.3 # Poisson's ratio
Rmt = math.sqrt(CAmt / math.pi) # Radius of the cross sectional area
Densmt = 1.43e-09 # Density
# Microfilaments
Emf = 2600 # Young's modulus in MPa
CAmf = 0.000000000019 # Cross sectional Area in nm2
vmf = 0.3 # Poisson's ratio
Rmf = math.sqrt(CAmf / math.pi) # Radius of the cross sectional area
Densmf = 1.43e-09 # Density
# Almost fixed variables
cell_radius = 0.015
prestress = 0
model_name = ''
Displace = cell_radius*Displace # Total displacement in nm (if Stress Relaxation)
# Compute model name
if Bendable:
    model_name = model_name + 'B_'
if Compression:
    model_name = model_name + 'Comp'
else:
    model_name = model_name + 'Trac'
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # CALCULATIONS FOR NODES # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Input points
x = np.array([-1.5, -1.5, -1, -1, -1, -1, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1.5, 1.5])
y = np.array([-0.5, 0.5, -1, -1, 1, 1, -1.5, -0.5, -0.5, 0.5, 0.5, 1.5, -1.5, -0.5, -0.5, 0.5, 0.5, 1.5, -1, -1, 1, 1, -0.5, 0.5])
z = np.array([0, 0, -(1/math.sqrt(2)), 1/math.sqrt(2), -(1/math.sqrt(2)), 1/math.sqrt(2), 0, -(math.sqrt(2)), math.sqrt(2), -(math.sqrt(2)), math.sqrt(2), 0, 0, -(math.sqrt(2)), math.sqrt(2), -(math.sqrt(2)), math.sqrt(2), 0,-(1/math.sqrt(2)), 1/math.sqrt(2), -(1/math.sqrt(2)), 1/math.sqrt(2), 0, 0])
# Adjust to the cell radius
factor = (1/math.sqrt(5/2))*cell_radius
x1 = np.multiply(x, factor)
y1 = np.multiply(y, factor)
z1 = np.multiply(z, factor)
p1 = np.array([x1[0], y1[0], z1[0]])
p2 = np.array([x1[1], y1[1], z1[1]])
p3 = np.array([x1[2], y1[2], z1[2]])
p4 = np.array([x1[3], y1[3], z1[3]])
p5 = np.array([x1[4], y1[4], z1[4]])
p6 = np.array([x1[5], y1[5], z1[5]])
p7 = np.array([x1[6], y1[6], z1[6]])
p8 = np.array([x1[7], y1[7], z1[7]])
p9 = np.array([x1[8], y1[8], z1[8]])
p10 = np.array([x1[9], y1[9], z1[9]])
p11 = np.array([x1[10], y1[10], z1[10]])
p12 = np.array([x1[11], y1[11], z1[11]])
p13 = np.array([x1[12], y1[12], z1[12]])
p14 = np.array([x1[13], y1[13], z1[13]])
p15 = np.array([x1[14], y1[14], z1[14]])
p16 = np.array([x1[15], y1[15], z1[15]])
p17 = np.array([x1[16], y1[16], z1[16]])
p18 = np.array([x1[17], y1[17], z1[17]])
p19 = np.array([x1[18], y1[18], z1[18]])
p20 = np.array([x1[19], y1[19], z1[19]])
p21 = np.array([x1[20], y1[20], z1[20]])
p22 = np.array([x1[21], y1[21], z1[21]])
p23 = np.array([x1[22], y1[22], z1[22]])
p24 = np.array([x1[23], y1[23], z1[23]])
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # PART CREATION # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
profile_name = 'CytoskeletonProfile'
Model = mdb.Model(modelType=STANDARD_EXPLICIT, name=model_name)
# Microtubuli
CskPart = Model.Part(dimensionality=THREE_D, name='Csk', type=DEFORMABLE_BODY)
CskPart.ReferencePoint(point=(p17[0], p17[1], p17[2]))
CskPart.DatumPointByCoordinate(coords=(p13[0], p13[1], p13[2]))
CskPart.DatumPointByCoordinate(coords=(p19[0], p19[1], p19[2]))
CskPart.DatumPointByCoordinate(coords=(p4[0], p4[1], p4[2]))
CskPart.DatumPointByCoordinate(coords=(p20[0], p20[1], p20[2]))
CskPart.DatumPointByCoordinate(coords=(p21[0], p21[1], p21[2]))
CskPart.DatumPointByCoordinate(coords=(p8[0], p8[1], p8[2]))
CskPart.DatumPointByCoordinate(coords=(p23[0], p23[1], p23[2]))
CskPart.DatumPointByCoordinate(coords=(p7[0], p7[1], p7[2]))
CskPart.DatumPointByCoordinate(coords=(p10[0], p10[1], p10[2]))
CskPart.DatumPointByCoordinate(coords=(p3[0], p3[1], p3[2]))
CskPart.DatumPointByCoordinate(coords=(p6[0], p6[1], p6[2]))
CskPart.DatumPointByCoordinate(coords=(p15[0], p15[1], p15[2]))
CskPart.DatumPointByCoordinate(coords=(p1[0], p1[1], p1[2]))
CskPart.DatumPointByCoordinate(coords=(p16[0], p16[1], p16[2]))
CskPart.DatumPointByCoordinate(coords=(p2[0], p2[1], p2[2]))
CskPart.DatumPointByCoordinate(coords=(p22[0], p22[1], p22[2]))
CskPart.DatumPointByCoordinate(coords=(p5[0], p5[1], p5[2]))
CskPart.DatumPointByCoordinate(coords=(p9[0], p9[1], p9[2]))
CskPart.DatumPointByCoordinate(coords=(p12[0], p12[1], p12[2]))
CskPart.DatumPointByCoordinate(coords=(p18[0], p18[1], p18[2]))
CskPart.DatumPointByCoordinate(coords=(p14[0], p14[1], p14[2]))
CskPart.DatumPointByCoordinate(coords=(p24[0], p24[1], p24[2]))
CskPart.DatumPointByCoordinate(coords=(p11[0], p11[1], p11[2]))
Microtubules = CskPart.WirePolyLine(meshable=ON, points=((CskPart.referencePoints[1], CskPart.datums[2]),
(CskPart.datums[3], CskPart.datums[4]),
(CskPart.datums[5], CskPart.datums[6]),
(CskPart.datums[7], CskPart.datums[8]),
(CskPart.datums[9], CskPart.datums[10]),
(CskPart.datums[11], CskPart.datums[12]),
(CskPart.datums[13], CskPart.datums[14]),
(CskPart.datums[15], CskPart.datums[16]),
(CskPart.datums[17], CskPart.datums[18]),
(CskPart.datums[19], CskPart.datums[20]),
(CskPart.datums[21], CskPart.datums[22]),
(CskPart.datums[23], CskPart.datums[24]))) # IMPRINT
Microfilaments = CskPart.WirePolyLine(meshable=ON, points=((CskPart.referencePoints[1], CskPart.datums[24]),
(CskPart.referencePoints[1], CskPart.datums[17]),
(CskPart.referencePoints[1], CskPart.datums[13]),
(CskPart.datums[2], CskPart.datums[5]),
(CskPart.datums[2], CskPart.datums[9]),
(CskPart.datums[2], CskPart.datums[3]),
(CskPart.datums[3], CskPart.datums[8]),
(CskPart.datums[3], CskPart.datums[22]),
(CskPart.datums[4], CskPart.datums[19]),
(CskPart.datums[4], CskPart.datums[14]),
(CskPart.datums[4], CskPart.datums[9]),
(CskPart.datums[5], CskPart.datums[13]),
(CskPart.datums[5], CskPart.datums[8]),
(CskPart.datums[6], CskPart.datums[15]),
(CskPart.datums[6], CskPart.datums[23]),
(CskPart.datums[6], CskPart.datums[21]),
(CskPart.datums[7], CskPart.datums[11]),
(CskPart.datums[7], CskPart.datums[10]),
(CskPart.datums[7], CskPart.datums[22]),
(CskPart.datums[8], CskPart.datums[23]),
(CskPart.datums[9], CskPart.datums[11]),
(CskPart.datums[10], CskPart.datums[18]),
(CskPart.datums[10], CskPart.datums[15]),
(CskPart.datums[11], CskPart.datums[14]),
(CskPart.datums[12], CskPart.datums[16]),
(CskPart.datums[12], CskPart.datums[20]),
(CskPart.datums[12], CskPart.datums[24]),
(CskPart.datums[13], CskPart.datums[19]),
(CskPart.datums[14], CskPart.datums[16]),
(CskPart.datums[15], CskPart.datums[22]),
(CskPart.datums[16], CskPart.datums[18]),
(CskPart.datums[17], CskPart.datums[23]),
(CskPart.datums[17], CskPart.datums[21]),
(CskPart.datums[18], CskPart.datums[20]),
(CskPart.datums[19], CskPart.datums[24]),
(CskPart.datums[20], CskPart.datums[21]))) # IMPRINT
# Sets
MtSet = CskPart.Set(edges=CskPart.getFeatureEdges(Microtubules.name), name='MtSet')
MfSet = CskPart.Set(edges=CskPart.getFeatureEdges(Microfilaments.name), name='MfSet')
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # PROPERTIES # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
## Material properties
# Microfilaments
MfMat = Model.Material(name='Microfilaments')
MfMat.Density(table=((Densmf,),))
MfMat.Elastic(noCompression=OFF, table=((Emf, vmf),)) # Table contains E and v
# Microtubules
MtMat = Model.Material(name='Microtubules')
MtMat.Density(table=((Densmt,),))
MtMat.Elastic(noCompression=OFF, table=((Emt, vmt),)) # Table contains E and v
## Section assignment
if Bendable:
# Microtubules
MtProfile = Model.CircularProfile(name='MicrotubulesProfile', r=Rmt)
MtSection = Model.BeamSection(consistentMassMatrix=False, integration=DURING_ANALYSIS, material=MtMat.name, name='MtSection', poissonRatio=0.3, profile=MtProfile.name, temperatureVar=LINEAR)
CskPart.SectionAssignment(offset=0.0, offsetField='', offsetType=MIDDLE_SURFACE, region=MtSet, sectionName=MtSection.name, thicknessAssignment=FROM_SECTION)
CskPart.assignBeamSectionOrientation(method=N1_COSINES, n1=(0.0, 1.0, -1.0), region=MtSet)
else:
# Microtubules
MtSection = Model.TrussSection(area=CAmt, material=MtMat.name, name='MtSection')
CskPart.SectionAssignment(offset=0.0, offsetField='', offsetType=MIDDLE_SURFACE, region=MtSet, sectionName=MtSection.name, thicknessAssignment=FROM_SECTION)
# Microfilaments
MfSection = Model.TrussSection(area=CAmf, material=MfMat.name, name='MfSection')
CskPart.SectionAssignment(offset=0.0, offsetField='', offsetType=MIDDLE_SURFACE, region=MfSet, sectionName=MfSection.name, thicknessAssignment=FROM_SECTION)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # ASSEMBLY # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Generate Assembly
Assembly = Model.rootAssembly
Assembly.DatumCsysByDefault(CARTESIAN)
# Create Cytoskeleton
CskInstance = Assembly.Instance(dependent=ON, name='Csk', part=CskPart)
Centroid = np.array([(p14[0] + p24[0]) / 2, (p14[1] + p24[1]) / 2, (p14[2] + p24[2]) / 2])
norm = Centroid / np.linalg.norm(Centroid)
zaxis = np.array([0,0,1])
direct = np.cross(norm,zaxis)
angle = math.acos(norm[2])
angle = angle * 180 / math.pi
Assembly.rotate(angle=angle, axisDirection=direct, axisPoint=(0.0, 0.0, 0.0), instanceList=[CskInstance.name])
Model.rootAssembly.regenerate()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # STEPS # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Preparing steps for explicit solver
Model.ExplicitDynamicsStep(improvedDtMethod=ON, timePeriod=Time,
massScaling=((SEMI_AUTOMATIC, MODEL, THROUGHOUT_STEP, 0.0, MassScale, BELOW_MIN, 1, 0, 0.0,
0.0, 0, None),),
name='Loading', previous='Initial')
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # LOADS # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Create Set of fixed nodes
minz = 0
maxz = 0
for i in range(24):
    tempz = CskInstance.vertices[i].pointOn[0][2]
    if tempz < minz:
        minz = tempz
    elif tempz > maxz:
        maxz = tempz
LoadNodes = []
FixedNodes = []
InternalNodes = []
for i in range(24):
    node = CskInstance.vertices[i]
    tempz = node.pointOn[0][2]
    if tempz == minz:
        FixedNodes.append(node)
    elif tempz == maxz:
        LoadNodes.append(node)
    else:
        InternalNodes.append(node)
# Reverse Force and displacement if traction
if not Compression:
    Displace = -Displace
Model.TabularAmplitude(data=((0.0, 0.0), (Time/float(2), 0.5), (Time, 1)), name='LinearLoading', smooth=SOLVER_DEFAULT, timeSpan=STEP)
# Create Loads and boundary conditions
Model.DisplacementBC(name='Displacement', createStepName='Loading', region=LoadNodes, amplitude='LinearLoading', u3=-Displace, ur3=0)
Model.EncastreBC(createStepName='Initial', localCsys=None, name='FixedNodes', region=FixedNodes)
Model.DisplacementBC(name='InternalPlanes', createStepName='Loading', region=InternalNodes, amplitude='LinearLoading', ur1=0, ur2=0)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # MESH # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if not Bendable:
    seedmt = 1
# Assign number of elements per microfilaments and Microtubules
CskPart.seedEdgeByNumber(edges=MtSet.edges[:], number=seedmt, constraint=FINER)
CskPart.seedEdgeByNumber(edges=MfSet.edges[:], number=1, constraint=FINER)
TrussMeshMf = mesh.ElemType(elemCode=T3D2, elemLibrary=EXPLICIT) # Microfilaments are always not bendable
if Bendable:
    TrussMeshMt = mesh.ElemType(elemCode=B32, elemLibrary=EXPLICIT)  # B31 linear beam, B32 quadratic beam
else:
    TrussMeshMt = mesh.ElemType(elemCode=T3D2, elemLibrary=EXPLICIT)
# Assign Element type
CskPart.setElementType(regions=MfSet, elemTypes=(TrussMeshMf,))
CskPart.setElementType(regions=MtSet, elemTypes=(TrussMeshMt,))
# Generate the mesh
CskPart.generateMesh()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # JOB # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Build job name
JobName = 'Cubo'
if Bendable:
    JobName = JobName + '_Bend'
if Compression:
    JobName = JobName + '_Comp'
else:
    JobName = JobName + '_Tract'
# Build Job
mdb.Job(name=JobName, model=model_name, description='A description',
type=ANALYSIS, atTime=None, waitMinutes=0, waitHours=0, queue=None,
memory=90, memoryUnits=PERCENTAGE, explicitPrecision=DOUBLE_PLUS_PACK,
nodalOutputPrecision=FULL, echoPrint=OFF, modelPrint=OFF,
contactPrint=OFF, historyPrint=OFF, userSubroutine='', scratch='',
resultsFormat=ODB, parallelizationMethodExplicit=DOMAIN, numDomains=cpus,
activateLoadBalancing=False, multiprocessingMode=DEFAULT, numCpus=cpus)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # HISTORY # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
Model.FieldOutputRequest(createStepName='Loading',
name='Loading', numIntervals=300, variables=PRESELECT)
Model.HistoryOutputRequest(createStepName='Loading',
name='Loading', variables=PRESELECT)
Model.rootAssembly.regenerate()

@pytest.mark.parametrize in similar tests

I'm planning to write a test for a database query, and I wonder what the best way to write it is. Can I use @pytest.mark.parametrize for this type of test? I need four tests that look very similar.
def test_get_number(self):
    # first create test data in the database
    # then make queries
    number = 'test_number'
    data_base_records_number = self.get_records_query(number)
    # data_base_records_number =
    # [{'id': 1,
    #   'number': "test_number",
    #   'company': "test_company1",
    #   'warehouse': "test_warehouse1",
    #   'product': "test_product1"},
    #  {'id': 2,
    #   'number': "test_number",
    #   'company': "test_company2",
    #   'warehouse': "test_warehouse2",
    #   'product': "test_product2"},
    #  {'id': 3,
    #   'number': "test_number",
    #   'company': "test_company3",
    #   'warehouse': "test_warehouse3",
    #   'product': "test_product3"},
    #  {'id': 4,
    #   'number': "test_number",
    #   'company': "test_company4",
    #   'warehouse': "test_warehouse4",
    #   'product': "test_product4"}]
    assert len(data_base_records_number) == 4
    for record in data_base_records_number:
        assert record['number'] == number

def test_get_number_and_company(self):
    # first create test data in the database
    # then make queries
    number = 'test_number'
    company = 'test_company1'
    data_base_records_number_company = self.get_records_query(number, company)
    # Output
    # data_base_records_number_company =
    # [{'id': 1,
    #   'number': "test_number",
    #   'company': "test_company1",
    #   'warehouse': "test_warehouse1",
    #   'product': "test_product1"}]
    assert len(data_base_records_number_company) == 1
    assert data_base_records_number_company[0]['number'] == number
    assert data_base_records_number_company[0]['company'] == company

def test_get_number_and_warehouse(self):
    ...

def test_get_number_and_product(self):
    ...
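For reference, a parametrized version might look like this (a sketch, inside the same test class as above; it assumes get_records_query accepts the extra field as an optional keyword filter, so adjust to the real signature):

import pytest

@pytest.mark.parametrize('extra_filter, expected_count', [
    ({}, 4),                                  # number only
    ({'company': 'test_company1'}, 1),
    ({'warehouse': 'test_warehouse1'}, 1),
    ({'product': 'test_product1'}, 1),
])
def test_get_records(self, extra_filter, expected_count):
    number = 'test_number'
    records = self.get_records_query(number, **extra_filter)
    assert len(records) == expected_count
    for record in records:
        assert record['number'] == number
        for field, expected in extra_filter.items():
            assert record[field] == expected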

Python: convert multidimensional list to JSON data

I'm struggling to bring my array into the right format for the JSON file! It might be an easy question for people with more knowledge.
I pull the data from different CSV files in this format:
[['Dec', '196610'], ['Oct', '196699'], ['Sep', '131073'], ['Jul', '122050']]
I need to bring it into this format:
{
"Year_2021": [
{
"Dec": "196610",
"oct": "196699",
"Sep": "131073",
"Jul": "122050"
}
]
}
I have tried different possibilities and got the craziest constellations, just not the right one.
My current code:
import csv
import datetime
import time
import os.path
import json
from collections import defaultdict

sys_time = datetime.datetime.now()
format_sys_time = sys_time.strftime('[%Y-%m-%d] %H:%M:%S')  # other datetime format
act_month = int(sys_time.strftime('%m'))
# print act_month
output_json_file = "/var/www/json/EXP_MONTHLY_POWER_" + sys_time.strftime('%Y') + ".json"
start_time = time.time()  # measure how long a program run takes
columns = defaultdict(list)  # each value in each column is appended to a list

# Helper function to build a dword
def word_to_dword(val_1, val_2):
    result = val_1
    result |= val_2 << 16
    return result

# Build the list of CSV files for the months of the current year so far
dates = []
for act_month in range(act_month + 1, 1, -1):
    act_month -= 1
    if act_month <= 9:
        INPUTFILE = "/var/www/csv/EXP_POWER_" + sys_time.strftime('%Y-0') + str(act_month) + ".csv"
    else:
        INPUTFILE = "/var/www/csv/EXP_POWER_" + sys_time.strftime('%Y-') + str(act_month) + ".csv"
    dates.append(INPUTFILE)
# print(dates)

outputfields = []

# Delete the old file to avoid duplicate entries
if os.path.exists(output_json_file):
    os.remove(output_json_file)

for dayfile in dates:
    if os.path.exists(dayfile):
        with open(dayfile) as csvdatei:
            mvg_reader = csv.DictReader(csvdatei, delimiter=';')  # read rows into a dictionary format
            for row in mvg_reader:  # read a row as {column1: value1, column2: value2, ...}
                for (fieldname, value) in row.items():  # go over each column name and value
                    columns[fieldname].append(value)  # append the value to the list for that column
        outfields1 = []
        for value in columns['Power_Value(kwh)']:
            outfields1.append(int(value))
        # Take the power value from the last entry of the list
        if len(outfields1) != 0:
            DL_Actual_Power_Value = outfields1[-1]
        else:
            print("Keine Daten gefunden!")  # "No data found!"
        compString = dayfile[28:30]  # build compString from the filename (month digits)
        switcher = {
            "01": "Jan",
            "02": "Feb",
            "03": "Mar",
            "04": "Apr",
            "05": "May",
            "06": "Jun",
            "07": "Jul",
            "08": "Aug",
            "09": "Sep",
            "10": "Oct",
            "11": "Nov",
            "12": "Dec"
        }
        outputfields.append([switcher.get(compString, "Invalid month"), str(DL_Actual_Power_Value)])
print(outputfields)

export_json_main = {}
export_KEY = 'Year_' + sys_time.strftime('%Y')  # set the name of the key
export_json_main[export_KEY] = []
export_json_main[export_KEY].append(outputfields)
# jsonStr = json.dumps(outputfields)
# print(jsonStr)
# for keys in (outputfields):
#     print(keys[0])
# export_json_main[export_KEY].append({
#     json.dumps(outputfields)
# })

with open(output_json_file, 'w') as outfile:
    json.dump(export_json_main, outfile, indent=4)
This gives me the following output:
{
"Year_2021": [
[
[
"Dec",
"196610"
],
[
"Oct",
"196699"
],
[
"Sep",
"131073"
],
[
"Jul",
"122050"
]
]
]
}
Not sure what your code does. However, you mean something like this
arr = [['Dec', '196610'], ['Oct', '196699'], ['Sep', '131073'], ['Jul', '122050']]
d = {}
for a in arr:
    d[a[0]] = a[1]
result = {"Year_2021": d}
?
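To match the target shape exactly (a list holding one month-to-value dict under the year key), the [month, value] pairs can be converted with dict() and wrapped in a list; a sketch reusing the names from the question:

# Sketch: build the JSON structure from the [month, value] pairs directly,
# replacing the export_json_main lines at the end of the script.
export_json_main = {
    'Year_' + sys_time.strftime('%Y'): [dict(outputfields)]
}
with open(output_json_file, 'w') as outfile:
    json.dump(export_json_main, outfile, indent=4)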

Running threads in parallel takes more time than sequential execution in Python

I have two ONNX deep learning models.
I want to run both models in parallel.
I am using threads from Python, but surprisingly it takes more time than running both models sequentially.
Tasks to be done:
Make a class for the models.
Load both models in the __init__ of that class.
Run both models in parallel for inference on a given input.
Is this normal behavior? Please suggest a workaround.
import json
import os
import queue
import threading

import onnxruntime
# `tokenization`, `trim`, `sequence`, and `tokenizer_from_json` are assumed to
# come from the project's helpers (e.g. BERT's tokenization module and Keras
# preprocessing utilities); they are used below but were not shown.

class ModelImp:
    def __init__(self):
        print('loading model...')
        # Load your model here
        curr_dir = os.getcwd()
        model_path = os.path.join(curr_dir, "model", "hatev5.onnx")
        self.hate_sess = onnxruntime.InferenceSession(model_path)
        self.hate_input_name = self.hate_sess.get_inputs()[0].name
        self.hate_seq_len = 15
        self.corona_seq_len = 16
        print('Hate model loaded.')
        model_path = os.path.join(curr_dir, "model", "corona.onnx")
        self.corona_sess = onnxruntime.InferenceSession(model_path)
        self.corona_input_name = self.corona_sess.get_inputs()[0].name
        # self.model = keras.models.load_model(model_path, custom_objects={"gelu": gelu})
        # print(self.model.summary())
        print('Corona model loaded.')
        print('MODEL.py: loading tokenizer')
        vocab_path = os.path.join(curr_dir, "model", "vocab.txt")
        self.wordpiece_tokenizer = tokenization.FullTokenizer(vocab_path, do_lower_case=True)
        tokenizer_path = os.path.join(curr_dir, "model", "hate_tokenizer.json")
        with open(tokenizer_path) as f:
            data = json.load(f)
            self.hate_tokenizer = tokenizer_from_json(data)
        print('HATE MODEL.py: tokenizer loaded')
        tokenizer_path = os.path.join(curr_dir, "model", "corona_tokenizer.json")
        with open(tokenizer_path) as f:
            data = json.load(f)
            self.corona_tokenizer = tokenizer_from_json(data)
        print('CORONA MODEL.py: tokenizer loaded')

    # string version of Eval; data is a string
    def thread_eval(self, data, q):
        # Corona model: tokenize, pad, run inference, put the probability on the queue
        corona_lines = []
        corona_line = ' '.join(trim(self.wordpiece_tokenizer.tokenize(data.strip()), self.corona_seq_len))
        corona_lines.append(corona_line)
        corona_line_1 = self.corona_tokenizer.texts_to_sequences(corona_lines)
        corona_line_2 = sequence.pad_sequences(corona_line_1, padding='post', maxlen=self.corona_seq_len)
        corona_pred = self.corona_sess.run(None, {self.corona_input_name: corona_line_2})
        corona_prob = corona_pred[0][0][1]
        q.put(corona_prob)

    def Eval(self, data):
        try:
            d = json.loads(data)
            out_json = {}
            if not (("query" in d) or ("Query" in d)):
                score = -2 * 10000  # new_change
                output = {"Output": [[score]]}  # {"score": score, "Succ": False}
                output_str = json.dumps(output)
                return output_str
            if "query" in d:
                query = d["query"][0]  # new_change
            elif "Query" in d:
                query = d["Query"][0]  # new_change
            if len(query.strip()) == 0:
                query = "good"
            # Run the corona model in a background thread
            que = queue.Queue()
            x = threading.Thread(target=self.thread_eval, args=(query, que), daemon=True)
            x.start()
            # HATE MODEL input preprocess (runs on the main thread)
            hate_lines = []
            hate_line = ' '.join(trim(self.wordpiece_tokenizer.tokenize(query.strip()), self.hate_seq_len))
            hate_lines.append(hate_line)
            hate_line_1 = self.hate_tokenizer.texts_to_sequences(hate_lines)
            hate_line_2 = sequence.pad_sequences(hate_line_1, padding='post', maxlen=self.hate_seq_len)
            hate_pred = self.hate_sess.run(None, {self.hate_input_name: hate_line_2})
            hate_prob = hate_pred[0][0][1]
            # Wait for the corona thread and combine the scores
            x.join()
            corona_prob = que.get()
            output_prob = max(corona_prob, hate_prob)
            output_score = int(output_prob * 10000)  # new_change
            output = {"Output": [[output_score]]}  # {"score": score, "Succ": True}
            output_str = json.dumps(output)
            return output_str
        except Exception as e:
            print("Exception: ", data)
            score = -3 * 10000  # new_change
            output = {"Output": [[score]]}  # {"score": score, "Succ": False}
            output_str = json.dumps(output)
            print(e)
            return output_str
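One likely cause (hedged): onnxruntime already parallelizes each run() call across the CPU cores, so two sessions launched from two Python threads compete for the same cores, and the Python-side tokenizing is serialized by the GIL anyway. A common mitigation is to cap each session's intra-op thread pool; a minimal sketch, where the thread counts and paths are assumptions to tune for the actual machine:

import onnxruntime

# Cap each session's internal thread pool so two sessions running from two
# Python threads don't oversubscribe the same CPU cores.
so = onnxruntime.SessionOptions()
so.intra_op_num_threads = 2  # per-session threads; tune for your CPU

hate_sess = onnxruntime.InferenceSession("model/hatev5.onnx", so)
corona_sess = onnxruntime.InferenceSession("model/corona.onnx", so)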
