I am trying to read the World Coordinate System (WCS) from a FITS file using astropy and this code:
from astropy.wcs import WCS
from astropy.io import fits
data = 'file.fits'
hdu = fits.open(data)
w = WCS(hdu[0].header)
I get the following warning:
WARNING: FITSFixedWarning: RADECSYS= 'ICRS '
RADECSYS is non-standard, use RADESYSa. [astropy.wcs.wcs]
The FITS header is:
SIMPLE = T
BITPIX = -32
NAXIS = 2
NAXIS1 = 2048
NAXIS2 = 1489
RADECSYS= 'ICRS '
CTYPE1 = 'DEC--TAN'
CTYPE2 = 'RA---TAN'
CUNIT1 = 'deg '
CUNIT2 = 'deg '
CRPIX1 = 1.02500000000000E+03
CRPIX2 = 7.45000000000000E+02
CRVAL1 = 7.34210000000000E-01
CRVAL2 = 2.49604300000000E+01
CD1_1 = 1.09999999400000E-04
CD2_2 = 1.09999999400000E-04
CD1_2 = 0.00000000000000E+00
CD2_1 = 0.00000000000000E+00
COADD_0 = 'fpCs-002570-i5-0112.resamp.fits'
COADD_1 = 'fpCs-002570-i5-0113.resamp.fits'
COADD_2 = 'fpCs-002650-i5-0142.resamp.fits'
COADD_3 = 'fpCs-002650-i5-0143.resamp.fits'
COADD_4 = 'fpCs-002677-i5-0142.resamp.fits'
COADD_5 = 'fpCs-002677-i5-0143.resamp.fits'
COADD_6 = 'fpCs-002700-i5-0032.resamp.fits'
COADD_7 = 'fpCs-002700-i5-0033.resamp.fits'
COADD_8 = 'fpCs-002728-i5-0579.resamp.fits'
COADD_9 = 'fpCs-002728-i5-0580.resamp.fits'
COADD_10= 'fpCs-002738-i5-0084.resamp.fits'
COADD_11= 'fpCs-002738-i5-0085.resamp.fits'
COADD_12= 'fpCs-002820-i5-0032.resamp.fits'
COADD_13= 'fpCs-002820-i5-0033.resamp.fits'
COADD_14= 'fpCs-002855-i5-0038.resamp.fits'
COADD_15= 'fpCs-002855-i5-0039.resamp.fits'
COADD_16= 'fpCs-002873-i5-0075.resamp.fits'
COADD_17= 'fpCs-002873-i5-0076.resamp.fits'
COADD_18= 'fpCs-003362-i5-0033.resamp.fits'
COADD_19= 'fpCs-003362-i5-0034.resamp.fits'
COADD_20= 'fpCs-003362-i5-0035.resamp.fits'
COADD_21= 'fpCs-003384-i5-0535.resamp.fits'
COADD_22= 'fpCs-003384-i5-0536.resamp.fits'
COADD_23= 'fpCs-004128-i5-0289.resamp.fits'
COADD_24= 'fpCs-004128-i5-0290.resamp.fits'
COADD_25= 'fpCs-004157-i5-0042.resamp.fits'
COADD_26= 'fpCs-004157-i5-0043.resamp.fits'
COADD_27= 'fpCs-004198-i5-0528.resamp.fits'
COADD_28= 'fpCs-004198-i5-0529.resamp.fits'
COADD_29= 'fpCs-004207-i5-0538.resamp.fits'
COADD_30= 'fpCs-004207-i5-0539.resamp.fits'
COADD_31= 'fpCs-004868-i5-0374.resamp.fits'
COADD_32= 'fpCs-004868-i5-0375.resamp.fits'
COADD_33= 'fpCs-004874-i5-0587.resamp.fits'
COADD_34= 'fpCs-004874-i5-0588.resamp.fits'
COADD_35= 'fpCs-004895-i5-0202.resamp.fits'
COADD_36= 'fpCs-004895-i5-0203.resamp.fits'
COADD_37= 'fpCs-004905-i5-0168.resamp.fits'
COADD_38= 'fpCs-004905-i5-0169.resamp.fits'
COADD_39= 'fpCs-004933-i5-0529.resamp.fits'
COADD_40= 'fpCs-004933-i5-0530.resamp.fits'
COADD_41= 'fpCs-004948-i5-0109.resamp.fits'
COADD_42= 'fpCs-004948-i5-0110.resamp.fits'
COADD_43= 'fpCs-005566-i5-0395.resamp.fits'
COADD_44= 'fpCs-005566-i5-0396.resamp.fits'
COADD_45= 'fpCs-005603-i5-0614.resamp.fits'
COADD_46= 'fpCs-005603-i5-0615.resamp.fits'
COADD_47= 'fpCs-005633-i5-0582.resamp.fits'
COADD_48= 'fpCs-005633-i5-0583.resamp.fits'
COADD_49= 'fpCs-005642-i5-0242.resamp.fits'
COADD_50= 'fpCs-005642-i5-0243.resamp.fits'
COADD_51= 'fpCs-005658-i5-0069.resamp.fits'
COADD_52= 'fpCs-005658-i5-0070.resamp.fits'
COADD_53= 'fpCs-005765-i5-0161.resamp.fits'
COADD_54= 'fpCs-005765-i5-0162.resamp.fits'
COADD_55= 'fpCs-005770-i5-0548.resamp.fits'
COADD_56= 'fpCs-005770-i5-0549.resamp.fits'
COADD_57= 'fpCs-005777-i5-0013.resamp.fits'
COADD_58= 'fpCs-005777-i5-0014.resamp.fits'
COADD_59= 'fpCs-005781-i5-0546.resamp.fits'
COADD_60= 'fpCs-005781-i5-0547.resamp.fits'
COADD_61= 'fpCs-005792-i5-0587.resamp.fits'
COADD_62= 'fpCs-005792-i5-0588.resamp.fits'
COADD_63= 'fpCs-005792-i5-0589.resamp.fits'
COADD_64= 'fpCs-005800-i5-0568.resamp.fits'
COADD_65= 'fpCs-005800-i5-0569.resamp.fits'
COADD_66= 'fpCs-005813-i5-0605.resamp.fits'
COADD_67= 'fpCs-005813-i5-0606.resamp.fits'
COADD_68= 'fpCs-005823-i5-0572.resamp.fits'
COADD_69= 'fpCs-005823-i5-0573.resamp.fits'
COADD_70= 'fpCs-005898-i5-0610.resamp.fits'
COADD_71= 'fpCs-005898-i5-0611.resamp.fits'
COADD_72= 'fpCs-005918-i5-0587.resamp.fits'
COADD_73= 'fpCs-005918-i5-0588.resamp.fits'
I have compared this to the WCS in other header files and it does look different, but I am not sure how to fix it. I am also unsure how to use RADESYSa rather than RADECSYS, and I cannot find any documentation on it. Any help would be appreciated.
The issue is that the header keyword should be RADESYS, not RADECSYS, according to the FITS standard (please report this to the people who made this FITS file). To avoid the warning, you can rename the keyword before constructing the WCS:
from astropy.wcs import WCS
from astropy.io import fits
data = 'file.fits'
hdu = fits.open(data)
hdu[0].header.rename_keyword('RADECSYS', 'RADESYS')
w = WCS(hdu[0].header)
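Alternatively, if you would rather leave the header untouched, you can silence just this class of warning; here is a minimal sketch using FITSFixedWarning, which astropy uses for exactly these fix-up messages:
import warnings

from astropy.io import fits
from astropy.wcs import WCS, FITSFixedWarning

with warnings.catch_warnings():
    # ignore only the WCS fix-up warnings, leave everything else visible
    warnings.simplefilter('ignore', FITSFixedWarning)
    hdu = fits.open('file.fits')
    w = WCS(hdu[0].header)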
Related
I am trying to generate a 3D point cloud from 4 RGB-D images. I am able to do that with Open3D, but I am unable to maintain the positions of the images. You can find the camera_parameters.json here.
import open3d as o3d
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
import json
def load_json(path):
with open(path) as f:
return json.load(f)
def parse_camera_params(camera):
param = {}
param['camera'] = camera['camera']
param['depth'] = {}
param['depth']['fx'] = camera['K_depth'][0][0]
param['depth']['fy'] = camera['K_depth'][1][1]
param['depth']['cx'] = camera['K_depth'][0][2]
param['depth']['cy'] = camera['K_depth'][1][2]
param['depth']['K'] = camera['K_depth']
# ignore distCoeffs_depth's 5th (1000) and 6th (0) element
# since they are strange
param['depth']['distCoeffs'] = np.array(camera['distCoeffs_depth'][:5])
param['depth_width'] = camera['depth_width']
param['depth_height'] = camera['depth_height']
param['color'] = {}
param['color']['fx'] = camera['K_color'][0][0]
param['color']['fy'] = camera['K_color'][1][1]
param['color']['cx'] = camera['K_color'][0][2]
param['color']['cy'] = camera['K_color'][1][2]
param['color']['K'] = camera['K_color']
# ignore distCoeffs_color's 5th (1000) and 6th (0) element
# since they are strange
param['color']['distCoeffs'] = np.array(camera['distCoeffs_color'][:5])
param['color_width'] = camera['color_width']
param['color_height'] = camera['color_height']
# world to depth
w2d_T = np.array(camera['M_world2sensor'])
param['w2d_R'] = w2d_T[0:3, 0:3]
param['w2d_t'] = w2d_T[0:3, 3]
param['w2d_T'] = camera['M_world2sensor']
d2w_T = np.linalg.inv(w2d_T)
param['d2w_R'] = d2w_T[0:3, 0:3]
param['d2w_t'] = d2w_T[0:3, 3]
param['d2w_T'] = d2w_T
return param
if __name__ == '__main__':
data_dir = "data/"
camera_params = load_json(os.path.join(data_dir,
'camera_parameters.json'))
SVC_NUM = 4
pcd_combined = o3d.geometry.PointCloud()
for i in range(SVC_NUM):
param = parse_camera_params(camera_params['sensors'][i])
color = cv2.imread(os.path.join(data_dir, 'color_{:05d}.png'.format(i)))
color = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
depth = cv2.imread(os.path.join(data_dir, 'depth_{:05d}.png'.format(i)), -1)
# depth = depth * 0.001 # factor to scale the depth image from carla
o3d_color = o3d.geometry.Image(color)
o3d_depth = o3d.geometry.Image(depth)
rgbd_image = o3d.geometry.RGBDImage.create_from_tum_format(o3d_color, o3d_depth, False)
h, w = depth.shape
dfx, dfy, dcx, dcy = param['depth']['fx'], param['depth']['fy'], param['depth']['cx'], param['depth']['cy']
intrinsic = o3d.camera.PinholeCameraIntrinsic(w, h, dfx,dfy, dcx, dcy)
intrinsic.intrinsic_matrix = param['depth']['K']
cam = o3d.camera.PinholeCameraParameters()
cam.intrinsic = intrinsic
cam.extrinsic = np.array(param['w2d_T'])
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, cam.intrinsic, cam.extrinsic)
o3d.io.write_point_cloud("svc_{:05d}_v13.pcd".format(i), pcd)
pcd_combined += pcd
o3d.io.write_point_cloud("svc_global_v13.pcd", pcd_combined)
With the above code I am getting output for svc_global_v13.pcd like the one below.
As you can see, all the images are projected into the center. As indicated in the JSON file, I would like the images to be positioned as left, right, front and rear in the 3D point cloud.
May I know what I am missing here?
I'm trying to create a network connection graph with networkx (import networkx as nx) and pyvis (from pyvis.network import Network). The code is as follows:
rules = pd.read_csv("EDMV.conexiones_neg.20220613.apriori.base2.txt", sep = "|")
G = nx.from_pandas_edgelist(rules, source = "desc.x", target = "desc.y", edge_attr = "lift")
net = Network(notebook = True, width=1000, height=600)
net.from_nx(G)
When I run the final line I get an AssertionError.
Any ideas what is causing the error?
All you need to do is make sure that your node labels are either integers or strings (in your case, the 'desc.x' and 'desc.y' columns).
Try the following:
import pandas as pd
import networkx as nx
from pyvis.network import Network

rules = pd.read_csv("EDMV.conexiones_neg.20220613.apriori.base2.txt", sep = "|")
rules["desc.x"] = rules["desc.x"].astype(str)
rules["desc.y"] = rules["desc.y"].astype(str)
G = nx.from_pandas_edgelist(rules, source = "desc.x", target = "desc.y", edge_attr = "lift")
net = Network(notebook = True, width=1000, height=600)
net.from_nx(G)
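Once the from_nx call succeeds, you can render the result to an HTML file (the filename here is just an example):
net.show("graph.html")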
I have the following code, which I'm working on with the ElementTree and pandas modules in Python:
import xml.etree.ElementTree as ET
import pandas as pd
file_xml = ET.parse('example1.xml')
rootXML = file_xml.getroot()
def transfor_data_atri(rootXML):
file_xml = ET.parse(rootXML)
data_XML = [
{"Name": signal.attrib["Name"],
# "Value": signal.attrib["Value"]
"Value": int(signal.attrib["Value"].split(' ')[0])
} for signal in file_xml.findall(".//Signal")
]
signals_df = pd.DataFrame(data_XML)
extract_name_value(signals_df)
def extract_name_value(signals_df):
#print(signals_df)
signal_ig_st = signals_df[signals_df.Name.isin(["Status"])]
row_values_ig_st = signal_ig_st.T
vector_ig_st = row_values_ig_st.iloc[[1]]
signal_nav_DSP_rq = signals_df[signals_df.Name.isin(["SetDSP"])]
row_values_nav_DSP_rq = signal_nav_DSP_rq.T
vector_nav_DSP_rq = row_values_nav_DSP_rq.iloc[[1]]
signal_HMI_st = signals_df[signals_df.Name.isin(["HMI"])]
row_values_HMI_st = signal_HMI_st.T
vector_HMI_st = row_values_HMI_st.iloc[[1]]
signal_delay_ac = signals_df[signals_df.Name.isin(["Delay"])]
row_values_delay_ac = signal_delay_ac.T
vector_delay_ac = row_values_delay_ac.iloc[[1]]
signal_AutoConfigO_Rear = signals_df[signals_df.Name.isin(["AutoConfigO_Rear"])]
row_values_AutoConfigO_Rear = signal_AutoConfigO_Rear.T
vector_AutoConfigO_Rear = row_values_AutoConfigO_Rear.iloc[[1]]
signal_ACO_Front = signals_df[signals_df.Name.isin(["AutoConfigO_Front"])]
row_values_ACO_Front = signal_ACO_Front.T
vertor_ACO_Front = row_values_ACO_Front.iloc[[1]]
signal_ACO_Drvr = signals_df[signals_df.Name.isin(["AutoConfigO_Drvr"])]
row_values_ACO_Drvr = signal_ACO_Drvr.T
vector_ACO_Drvr = row_values_ACO_Drvr.iloc[[1]]
signal_ACO_Allst = signals_df[signals_df.Name.isin(["AutoConfigO_Allst"])]
row_values_ACO_Allst = signal_ACO_Allst.T
vector_ACO_Allst = row_values_ACO_Allst.iloc[[1]]
signal_RURRq_st = signals_df[signals_df.Name.isin(["RUResReqstStat"])]
row_values_RURRq_st = signal_RURRq_st.T
vector_RURRq_st = row_values_RURRq_st.iloc[[1]]
signal_RURqSy_st = signals_df[signals_df.Name.isin(["RUReqstrSystem"])]
row_values_RURqSy_st = signal_RURqSy_st.T
vector_RURqSy_st = row_values_RURqSy_st.iloc[[1]]
signal_RUAudS_st = signals_df[signals_df.Name.isin(["RUSource"])]
row_values_RUAudS_st = signal_RUAudS_st.T
vector_RUAudS_st = row_values_RUAudS_st.iloc[[1]]
signal_DSP_st = signals_df[signals_df.Name.isin(["DSP"])]
    row_values_DSP = signal_DSP_st.T
vector_DSP = row_values_DSP.iloc[[1]]
print('1: ', vector_ig_st)
print('2: ', vector_nav_DSP_rq)
print('3: ', vector_HMI_st)
print('4: ', vector_delay_ac)
The output of the above is the following; only the first 4 prints are shown, and it is fine, because it is what is wanted. However, I have to simplify the code so that any XML file with the same structure as example.xml can be read, not only example1.xml.
The simplified code should produce the data for the signals listed in the names_list variable below, but without using this variable, since it is hard-coded:
names_list = [
'Status', 'SetDSP', 'HMI', 'Delay', 'AutoConfigO_Rear',
'AutoConfigO_Front', 'AutoConfigO_Drvr','AutoConfigO_Allst',
'RUResReqstStat', 'RUReqstrSystem', 'RUSource', 'DSP'
]
That way, when the client provides another XML file with the same structure but with signal names that are not in the code, it can still be read without problems. Thank you very much in advance.
I hope I'm understanding the question correctly. My understanding is that you want to dynamically produce the extract_name_value() function and make it less bulky in your code.
I'm sorry, but I failed to understand the "for i in signal_name: print(i)" part of the question; perhaps you can rephrase it and help me understand.
My solution to the extract_name_value() part would be to use the exec() function, which is a built-in mechanism for dynamic execution.
name_list = ['Status', 'SetDSP', 'HMI', 'Delay', 'AutoConfigO_Rear',
'AutoConfigO_Front', 'AutoConfigO_Drvr', 'AutoConfigO_Allst',
'RUResReqstStat', 'RUReqstrSystem', 'RUSource', 'DSP']
def _build_extract_name_value_func(name_list):
    extract_name_value_func = ""
    for name in name_list:
        # note: the generated lines are flush left so that exec() does not raise an IndentationError
        holder_func = f"""
signal_{name} = signals_df[signals_df.Name.isin(['{name}'])]
row_values_{name} = signal_{name}.T
vector_{name} = row_values_{name}.iloc[[1]]
vector_list.append(vector_{name})
"""
        extract_name_value_func += holder_func
    return extract_name_value_func

def extract_name_value(signals_df, name_list):
    vector_list = []
    extract_name_value_func = _build_extract_name_value_func(name_list)
    exec(extract_name_value_func)
    return vector_list
The code was not tested with actual data, because I am not familiar with handling XML structures, but I hope the Python part can be of some help to you.
I was able to solve it: I used a for loop and iterated over the dataframe itself:
for i in signals_df.Name:
    signal = signals_df[signals_df.Name.isin([i])]
    row_values = signal.T
    vector = row_values.iloc[[1]]
    print(vector)
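If you need to keep the vectors around instead of just printing them, a small variation of the same loop (a sketch that assumes the signals_df dataframe built above, with 'Status' used purely as an example name) collects them into a dictionary keyed by signal name:
vectors = {}
for name in signals_df.Name.unique():
    signal = signals_df[signals_df.Name.isin([name])]
    # keep the transposed value row, exactly as in the loop above
    vectors[name] = signal.T.iloc[[1]]
print(vectors['Status'])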
I'm trying to extract the energy at each integration point in Abaqus. I can do it for stresses or strains, but I can't do it for the energy quantities. The error I get is "KeyError: 'ELEN'", even though in Abaqus it is the correct keyword. Below is my code to extract it:
from odbAccess import *
import numpy as np
odb = openOdb(path='C:/Desktop/Fish1.odb')
# lastFrame = odb.steps['Step-2'].frames[-1]
lastFrame = odb.steps['Step-1'].frames[-1]
topCenter = \
odb.rootAssembly.instances['PART-1-1']
stressField = lastFrame.fieldOutputs['ELEN']
field = stressField.getSubset(region=topCenter,
position=INTEGRATION_POINT, elementType = 'CPS3')
fieldValues = field.values
sortie = open('C:/Users/tests.txt', 'w')
sortie.write('Eleme \t Integ \t\t PE11 \t\t\t PE22 \t\t\t PE12 \n')
for v in fieldValues:
sortie.write('%-10.2f'% ( v.elementLabel))
if v.integrationPoint:
sortie.write('%-10.2f'% (v.integrationPoint))
sortie.write('%-10.3f\t\t %-10.3f\t\t %-10.3f\t\t %-10.3f\t\t \n'% (v.data[0], v.data[1], v.data[2], v.data[3]))
sortie.close()
I guess you have already checked in the Abaqus Viewer whether the field output ELEN is available there.
ELEN is a whole-element variable, so you can't extract it at integration points; it simply is not available there. You can, however, retrieve it at the element level by dropping the position argument:
from odbAccess import *
import numpy as np
odb = openOdb(path='C:/Desktop/Fish1.odb')
lastFrame = odb.steps['Step-1'].frames[-1]
topCenter = odb.rootAssembly.instances['PART-1-1']
stressField = lastFrame.fieldOutputs['ELEN']
field = stressField.getSubset(region=topCenter, elementType = 'CPS3')
fieldValues = field.values
Even though it is not really the solution you asked for, I hope this will help.
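If you also want to write these whole-element values to a text file, a sketch along the lines of the original loop (the output path and formatting are placeholders) could be:
sortie = open('C:/Users/tests_elen.txt', 'w')
sortie.write('Element\tELEN value(s)\n')
for v in fieldValues:
    # v.data holds the energy value(s) reported for this element
    sortie.write('%d\t%s\n' % (v.elementLabel, v.data))
sortie.close()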
Here is my code:
import h5py

n = 100000  # This is what makes it tricky - lots of files going into this hdf5 file
with h5py.File('image1.h5','w') as f:
dset_X = f.create_dataset('X',(1,960,224,224),maxshape=(None,960,224,224),chunks=True,compression='gzip')
dset_y = f.create_dataset('y',(1,112,224*224),maxshape=(None,112,224*224),chunks=True,compression='gzip')
n_images = 0
for fl in files[:n]:
X_chunk,y_chunk = get_arrays(fl)
dset_X.resize(n_images+1,axis=0)
dset_y.resize(n_images+1,axis=0)
        print(dset_X.shape, dset_y.shape)
dset_X[n_images:n_images+1,:,:,:]=X_chunk
dset_y[n_images:n_images+1,:,:]=y_chunk
n_images+=1
This works fine and dandy. However, with 1 file the size of the HDF5 file is 6.7 MB. With 2 files it's 37 MB (should be 12 MB, right?). With 10 it's all the way up to 388 MB (should be 67 MB, right?).
So clearly adding the compression flag to the end of the second and third lines isn't working as intended. How can I achieve something like this?
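For reference, gzip in HDF5 is applied chunk by chunk, so the chunk shape h5py auto-selects with chunks=True is worth inspecting (a sketch against the image1.h5 file created above):
import h5py

with h5py.File('image1.h5', 'r') as f:
    for name in ('X', 'y'):
        dset = f[name]
        # .chunks is the auto-selected chunk shape; .compression should report 'gzip'
        print(name, 'shape:', dset.shape, 'chunks:', dset.chunks, 'compression:', dset.compression)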
I ended up doing this successfully using pytables.
import numpy as np
import tables
from skimage import color, io

# model, extract_hypercolumn and KNN (a fitted classifier) are defined elsewhere in the original script
def get_arrays(each_file):
lab = color.rgb2lab(io.imread(each_file))
X = lab[:,:,:1]
y = lab[:,:,1:]
X_rows,X_columns,X_channels=X.shape
y_rows,y_columns,y_channels=y.shape
X_channels_first = np.transpose(X,(2,0,1))
X_sample = np.expand_dims(X_channels_first,axis=0)
X_3d = np.tile(X_sample,(1,3,1,1))
X_3d_scaled = X_3d * 255.0/X_3d.max()
hc = extract_hypercolumn(model,[3,8,15,22],X_3d_scaled)
hc_scaled = (hc -hc.min())/(hc.max()-hc.min())
    print(hc_scaled.max(), hc_scaled.min())
hc_expand_dims = np.expand_dims(hc_scaled,axis=0)
y_reshaped = np.reshape(y,(y_rows*y_columns,y_channels))
classed_pixels_first = KNN.predict_proba(y_reshaped)
classed_classes_first = np.transpose(classed_pixels_first,(1,0))
classed_expand_dims = np.expand_dims(classed_classes_first,axis=0)
print "hypercolumn shape: ",hc_expand_dims.shape,"classified output color shape: ",classed_expand_dims.shape
return hc_expand_dims,classed_expand_dims
filters = tables.Filters(complevel=5, complib='zlib')
with tables.open_file('raw.h5', 'w') as f:
    # filters = tables.Filters(complib='blosc', complevel=5)
    dset_X = f.create_earray(f.root, 'X', tables.Atom.from_dtype(np.dtype('float64')), (0, 960, 224, 224), filters=filters)
    dset_y = f.create_earray(f.root, 'y', tables.Atom.from_dtype(np.dtype('float64')), (0, 112, 224*224), filters=filters)
for fl in files[0:12000]:
X_chunk,y_chunk=get_arrays(fl)
dset_X.append(X_chunk)
dset_y.append(y_chunk)
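For completeness, reading a sample back out of the resulting file is straightforward (a sketch, assuming raw.h5 was written as above):
import tables

with tables.open_file('raw.h5', 'r') as f:
    # EArrays support numpy-style indexing, so this reads back just the first sample
    X0 = f.root.X[0]
    y0 = f.root.y[0]
    print(X0.shape, y0.shape)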