pandas data-frame continuously update - python

Please see the pandas-based pattern scanner below; I am using a CSV file as the data source and loading it into a DataFrame.
Since the data is loaded from a CSV file, I have to rerun the script every 5 minutes to read the updated file, which regenerates the plot each time.
Is there any way to use df.update to avoid rerunning the script and prevent the plot from being reloaded again and again?
# Harmonic pattern scanner: reads OHLC data from temp.csv, walks forward
# through the close-price series, and logs/plots every harmonic pattern found.
import pandas as pd
import numpy as np
from scipy.signal import argrelextrema
import matplotlib.pyplot as plt
from harmonic_functions import *
import uuid
from csv import DictReader

data = pd.read_csv('temp.csv')
data.time = pd.to_datetime(data.time, format='%d.%m.%Y %H:%M:%S.%f')
data.index = data['time']
# data = data.drop_duplicates(keep=False)

price = data.close.copy()
err_allowed = 10.0 / 100  # 10% tolerance on the pattern ratio checks

pnl = []
trade_dates = []
correct_pats = 0
pats = 0
# plt.ion()

# Walk forward bar by bar so each detection only sees data available at bar i.
for i in range(100, len(price)):
    current_idx, current_pat, start, end = peak_detect(price.values[:i], order=7)
    # Legs of the candidate XABCD pattern.
    XA = current_pat[1] - current_pat[0]
    AB = current_pat[2] - current_pat[1]
    BC = current_pat[3] - current_pat[2]
    CD = current_pat[4] - current_pat[3]
    moves = [XA, AB, BC, CD]
    # Each is_* helper comes from harmonic_functions; judging by the checks
    # below it returns 1 (bullish), -1 (bearish) or something else for
    # "no match" — assumed, verify against harmonic_functions.
    gart = is_gartley(moves, err_allowed)
    butt = is_butterfly(moves, err_allowed)
    bat = is_bat(moves, err_allowed)
    crab = is_crab(moves, err_allowed)
    shark = is_shark(moves, err_allowed)
    trio = is_trio(moves, err_allowed)
    cyph = is_cyph(moves, err_allowed)
    three_dives = is_3dives(moves, err_allowed)
    fivezero = is_50(moves, err_allowed)
    altbat = is_altbat(moves, err_allowed)
    deepcrab = is_deepcrab(moves, err_allowed)
    dragon = is_dragon(moves, err_allowed)
    snorm = is_snorm(moves, err_allowed)
    harmonics = np.array([gart,butt,bat,crab,shark,trio,cyph,three_dives,fivezero,altbat,deepcrab,dragon,snorm])
    labels = ['Garterly','Butterfly','Bat','Crab','Shark','Trio','Cypher','3Dives','5Zero','AltBat','DeepCrab','Dragon','Snorm']
    if np.any(harmonics == 1) or np.any(harmonics == -1):
        for j in range(0, len(harmonics)):
            if harmonics[j] == 1 or harmonics[j] == -1:
                pats += 1
                sense = 'Bearish ' if harmonics[j] == -1 else 'Bullish '
                label = sense + labels[j] + ' found'
                print(label)
                print(price.values[start])
                # Plot the pattern window plus a few bars of follow-through.
                plt.title(label)
                plt.plot(np.arange(start, i + 5), price.values[start:i + 5])
                plt.scatter(current_idx, current_pat, c='r')
                filename = str(uuid.uuid1())[:8]  # short unique name per hit
                print(current_pat)
                print(current_idx)
                write1 = str(current_idx)
                write2 = str(current_pat)
                write = write1 + ',' + write2
                print(write)
                # Append the record to the 'datadb' file only if it is not
                # already present (for/else: else runs when no line matched).
                with open("datadb", "r+") as file:
                    for line in file:
                        if write in line:
                            break
                    else:  # not found, we are at the eof
                        file.write(f"{write}\n")  # append missing data
                print(filename)
                plt.savefig(filename)
                plt.close(filename)
                # plt.show()
                plt.clf()

Related

Data from three .txt files - loop together to one, and make a plot?

I want to end up with a scatterplot that differentiates color between different values.
First I need to analyze my data. The problem is that I have an FE model that exports element numbers coupled with 4 nodes. These 4 nodes have 4 coordinate sets, but if 4 elements share 1 node, it will only give 1 coordinate set for those 4 nodes.
I end up with three .txt files.
.txt with element number (and data i am analyzing for the plot)
.txt with the element number and node numbers.
.txt with node coordinates.
Is it possible to make a loop that connects these datapoints?
I would like to include an example, but i have not yet made one.
I have tried something like this
# Attempted approach: load the three exported .txt files with numpy.loadtxt
# and pull the relevant columns out by index.
from numpy import loadtxt
from fpdf import FPDF
Sek = ['Sektion_100']  # section names to process (only one in this attempt)
I = 0
# Column headers for the report tables (Y and X directions).
HeaderY =['nr.','LC','Element nr.','Myy','Nyy','MRd_y','URy']
HeaderX =['nr.','LC','Element nr.','Mxx','Nxx','MRd_x','URx']
#load the excel file
#header = loadtxt"Loads\Area_233.txt", unpack=False, skiprows=1)
pdf = FPDF(orientation = 'P', unit = 'mm', format = 'A4')  # A4 portrait report
MaxURx =[]
MaxURy =[]
# Load table: column meanings below are assumed from the variable names
# (moments Mxx/Myy, axial forces Nxx/Nyy) — verify against the export format.
data = loadtxt("Loads/Sektion_150.txt", unpack=False, skiprows=1)
nr = data[:,1]
Mxx = data[:,2]
Nxx = -data[:,4]  # sign flipped: export convention is presumably opposite — confirm
Myy = data[:,3]
Nyy = -data[:,5]
# Topology table: element number plus its four node numbers.
topologi = loadtxt("Loads/Sektion_150_topologi.txt", unpack=False, skiprows=1)
nr1 = topologi[:,0]
node1 = topologi[:,1]
node2 = topologi[:,2]
node3 = topologi[:,3]
node4 = topologi[:,4]
# Node table: node number and its x/y/z coordinates.
knuder = loadtxt("Loads/Sektion_150_knuder.txt", unpack=False, skiprows=1)
nr2 = knuder[:,0]
x = knuder[:,1]
y = knuder[:,2]
z = knuder[:,3]
Picture of dataset
I have included a picture of my dataset here. In "Sektion_150_Knuder" NR = Node number.
I hope anyone have some pointers in the right direction to solve this problem.
I found the answer.
# Answer: map element/node topology to coordinates and 3-D scatter-plot the
# over-utilised elements (red) against the rest (blue).
import xlwings as xw
import matplotlib.pyplot as plt
from math import pi
from numpy import loadtxt
import numpy as np
from fpdf import FPDF
import matplotlib as mpl
from matplotlib.ticker import ScalarFormatter

Sek = ['Sektion_100','Sektion_110','Sektion_120','Sektion_130','Sektion_140','Sektion_150']
I = 0
HeaderY =['nr.','LC','Element nr.','Myy','Nyy','MRd_y','URy']
HeaderX =['nr.','LC','Element nr.','Mxx','Nxx','MRd_x','URx']
#load the excel file
#header = loadtxt"Loads\Area_233.txt", unpack=False, skiprows=1)
pdf = FPDF(orientation = 'P', unit = 'mm', format = 'A4')
MaxURx =[]
MaxURy =[]
# Element table (element nr + 4 node nrs) and node table (node nr + x/y/z).
Elem = np.array(loadtxt("Element/Sektion_100_elements.txt", unpack=False, skiprows=1))
Node = np.array(loadtxt("Element/Sektion_100_nodes.txt", unpack=False, skiprows=1))
#Elem = np.array(loadtxt("Element/"+Sek+"_elements.txt", unpack=False, skiprows=1))
#Node = np.array(loadtxt("Element/"+Sek+"_nodes.txt", unpack=False, skiprows=1))
data = loadtxt("Loads/Sektion_100.txt", unpack=False, skiprows=1)
Mxx = data[:,2]
Nxx = -data[:,4]
Myy = data[:,3]
Nyy = -data[:,5]
R1x = []
R2x = []
MRdx = []
URx = []
R1y = []
R2y = []
MRdy = []
URy = []
# Axial-force sampling range, rounded to tens and padded at both ends.
min_nx = int(round(max(min(Nxx),-300),-1)-10)
max_nx = int(round(max(Nxx),-1)+10)
min_ny = int(round(min(Nyy),-1)-10)
max_ny = int(round(max(Nyy),-1)+10)
xrange = range(min_nx, max_nx+50, round((max_nx+50-min_nx)/20))  # ~20 sample points
yrange = range(min_ny, max_ny+50, round((max_ny+50-min_ny)/20))
x2 = range(0, len(data), 1)
# Capacity spreadsheet: write an axial force into N25 and read the two
# resulting moment capacities back.
wbx = xw.Book("Capacity\\Sektion_100_L.xlsm", None, True)
sht_x1 = wbx.sheets["Beregning"]
for i in xrange:
    kx = sht_x1.range("N25").value = i  # chained assignment: sets the cell AND kx to i
    Q1x = sht_x1["AV24"].value
    Q2x = sht_x1["BC24"].value
    R1x += [Q1x]
    R2x += [Q2x]
# Interpolate the capacity at each element's axial force and compute the
# utilisation ratio UR = M / MRd.
for i in x2:
    if Myy[i] <= 0:
        mrdx = np.interp(Nxx[i], xrange, R1x)
        urx = Mxx[i] / mrdx
    else:
        mrdx = np.interp(Nxx[i], xrange, R2x)
        urx = Mxx[i] / mrdx
    MRdx += [round(mrdx,2)]
    URx += [round(urx,2)]
TabelX = np.c_[data[:,[0,1,2,4]], MRdx, URx]
sort_tabelX = np.flipud(TabelX[TabelX[:,5].argsort()])  # sort by UR, descending
# Keep at least 25 rows, or everything with UR > 1.05.
LimX = 25
for i in x2:
    if sort_tabelX[i,5] > 1.05:
        LimX = i+2
    else:
        break
LimX = max(25, LimX)
TABx = np.c_[list(range(1, LimX+1)), sort_tabelX[0:LimX,:]]
TABx2 = np.unique(TABx[:,2])  # distinct element numbers in the top table
print(TABx2)
print(len(TABx2))
#OUE=np.array(TABx2)
#np.savetxt("array1.txt", TABx2)
# %%
NumOUE = len(TABx2)
NumElem = len(Elem)
EleRange = range(0, NumElem)
OUERange = range(0, NumOUE)
EO = np.searchsorted(Elem[:,0], TABx2)  # row indices of the flagged elements
print(EO)
EleCorOx =[]
EleCorOy =[]
EleCorOz =[]
EleCorUx =[]
EleCorUy =[]
EleCorUz =[]
# Element centre = average of its four node coordinates; split into the
# over-utilised (O*) and remaining (U*) groups.
for i in EleRange:
    Na = np.searchsorted(Node[:,0], Elem[i,1])
    Nb = np.searchsorted(Node[:,0], Elem[i,2])
    Nc = np.searchsorted(Node[:,0], Elem[i,3])
    Nd = np.searchsorted(Node[:,0], Elem[i,4])
    print(Na,Nb,Nc,Nd)
    if i in EO:
        EleCorOx += [(Node[Na,1] + Node[Nb,1] + Node[Nc,1] + Node[Nd,1])/4]
        EleCorOy += [(Node[Na,2] + Node[Nb,2] + Node[Nc,2] + Node[Nd,2])/4]
        EleCorOz += [(Node[Na,3] + Node[Nb,3] + Node[Nc,3] + Node[Nd,3])/4]
    else:
        EleCorUx += [(Node[Na,1] + Node[Nb,1] + Node[Nc,1] + Node[Nd,1])/4]
        EleCorUy += [(Node[Na,2] + Node[Nb,2] + Node[Nc,2] + Node[Nd,2])/4]
        EleCorUz += [(Node[Na,3] + Node[Nb,3] + Node[Nc,3] + Node[Nd,3])/4]
fig = plt.figure()
fig.set_size_inches(20,10)
ax = fig.add_subplot(projection='3d')
ax.scatter3D(EleCorUx, EleCorUy, EleCorUz, color='Blue')
ax.scatter3D(EleCorOx, EleCorOy, EleCorOz, color='red')
ax.set_zlim(0, 27000)
plt.show()
This code is for showing Sektion 100. Small changes gives me the plots for 110, 120, 130 and so on.
If anyone can use it.

Add one more value to csv from text file in python

I am converting multiple text files to a csv file. My text file looks like this:
ITEM: TIMESTEP
55000
ITEM: NUMBER OF ATOMS
4365
ITEM: BOX BOUNDS ff ff ff
-0.3 0.3
-0.6 0.6
-0.6 0.6
ITEM: ATOMS id type x y z vx vy vz fx fy fz omegax omegay omegaz radius
4356 1 -0.0885288 -0.0101421 -0.48871 -0.000941682 0.778688 -0.0153902 -0.00720861 -0.0533703 0.0104717 0.35581 -0.0601358 -0.436049 0.01
4227 1 0.0157977 0.00542603 -0.488429 -0.00996111 0.784119 0.00813807 -0.000491847 0.0144889 -0.0120111 1.08208 -0.0671177 0.369492 0.01
3973 1 0.0179724 0.0256167 -0.48799 -0.00582994 0.772455 0.0394544 0.0109589 -0.0187232 -0.00111718 -0.0586513 -0.162943 1.12784 0.01
4300 1 0.0900919 0.0248592 -0.488025 -0.000455483 0.769978 0.0388239 -0.00364509 0.0409803 -0.00269227 3.94355 -0.0249566 -0.223111 0.01
4200 1 -0.0230223 0.0329911 -0.483108 -0.00238 0.778547 0.0500186 0.0421189 -0.021588 0.05607 0.112989 -0.0813771 -1.09981 0.015
4339 1 0.00143577 0.0368542 -0.488107 0.000587848 0.784672 0.0593572 0.00385562 -0.00475113 -0.00710483 -0.201196 0.158512 -5.63826 0.01
4106 1 0.0648392 0.0269728 -0.483248 -0.00365836 0.766081 0.0395827 0.0418642 0.1802 0.0547313 -0.0578358 0.124205 -0.96464 0.015
4104 1 -0.084453 0.0507114 -0.482726 -0.000596577 0.75636 0.0806599 0.000817826 0.0119286 -0.0150014 -0.0864852 -0.103877 0.198773 0.015
Right now my CSV file contains the values after line 9 (line 8 in the Python code).
I want to include line 2 (header: TIMESTEP) in the CSV as well, along with all the values after line 9.
I tried to edit my code but couldn't succeed. Can I get some help?
My code is here:
import numpy as np
import pandas as pd
import csv
import glob
import time

def main():
    """Concatenate every ./all/*dump*.data file into all.csv.

    Takes the column names from line 9 of one sample file and appends the
    data rows (lines 10+) of every file, space-separated -> comma-separated.
    """
    start = time.time()
    data_folder = "./all/"  # folder name
    files = glob.glob(data_folder + '*dump*.data')
    print("Total files:", len(files))
    # Get the header from one of the files.
    with open('all/dump46000.data', 'r') as f:
        for _ in range(8):
            next(f)  # skip first 8 lines
        # Line 9 is "ITEM: ATOMS id type ..." -> keep only the column names.
        header = ','.join(f.readline().split()[2:]) + '\n'
        # NOTE(review): this reads the first DATA row, and the result is
        # never used -- presumably a leftover; confirm before deleting.
        headers = ','.join(f.readline().split()[2:])
    print(header)
    for file in files:
        with open(file, 'r') as f, open(f'all.csv', 'a') as g:  # note the 'a'
            g.write(header)  # header is re-written once per input file
            for _ in range(9):
                next(f)  # skip first 9 lines
            for line in f:
                g.write(line.rstrip().replace(' ', ',') + '\n')
    print(time.time() - start)

if __name__ == "__main__":
    main()
My folder all contains more than 600 files:
['./all/dump501000.data',
'./all/dump307000.data',
'./all/dump612000.data',
'./all/dump369000.data',
'./all/dump23000.data',
'./all/dump470000.data',
'./all/dump235000.data',
'./all/dump6000.data',
'./all/dump568000.data',
'./all/dump506000.data',
'./all/dump623000.data',
'./all/dump329000.data',
'./all/dump220000.data',
.....................
....................
I want this csv file from text file:
id type x y z vx vy vz fx fy fz omegax omegay omegaz radius TIMESTEP
But I am getting this csv
id type x y z vx vy vz fx fy fz omegax omegay omegaz radius
Thank you
Here is something you can try to add TIMESTEP to your data in the CSV. I am just wondering whether you need to print the header for each file. My understanding is that you can print the header once at the top; if you want to print it for each file, move it into the for loop.
import numpy as np
import pandas as pd
import csv
import glob
import time

def main():
    """Convert every ./all/*dump*.data file to CSV rows in all.csv.

    The header is TIMESTEP followed by the column names from line 9; each
    data row is prefixed with the file's timestep value (line 2).
    """
    start = time.time()
    data_folder = "./all/"  # folder name
    files = glob.glob(data_folder + '*dump*.data')
    print("Total files:", len(files))
    # Build the header from one sample file.
    header = []
    with open('all/dump46000.data', 'r') as f:
        # Line 1 is "ITEM: TIMESTEP" -> contributes the 'TIMESTEP' column.
        header.extend(f.readline().split()[1:])
        timeStep = f.readline().split()  # line 2: the timestep value
        for _ in range(6):
            next(f)  # skip lines 3-8
        # Line 9 is "ITEM: ATOMS id type ..." -> the data column names.
        header.extend(f.readline().split()[2:])
    print(header)
    headerString = ','.join(header)
    for file in files:
        with open(file, 'r') as f, open(f'all.csv', 'a') as g:  # note the 'a'
            next(f)  # skip "ITEM: TIMESTEP"
            g.write(headerString + '\n')  # header re-written once per file
            timeStep = f.readline().split()  # this file's timestep value
            for _ in range(7):
                next(f)  # skip lines 3-9
            for line in f:
                file_line = line.split()
                file_line.insert(0, timeStep[0])  # prepend the timestep
                data = ','.join(file_line)
                g.write(data + '\n')
    print(time.time() - start)

if __name__ == "__main__":
    main()
Based on what you want, here's what should work
import numpy as np
import pandas as pd
import csv
import glob
import time

def main():
    """Convert each ./all/*dump*.data file to CSV rows in all.csv,
    keeping the TIMESTEP header and its value at the top of each section."""
    start = time.perf_counter()
    data_folder = "./all/"  # folder name
    files = glob.glob(data_folder + '*dump*.data')
    print("Total files:", len(files))
    for file in files:
        with open(file, 'r') as f, open(f'all.csv', 'a') as g:  # note the 'a'
            # Line 1 is "ITEM: TIMESTEP"; keep the part after "ITEM: ".
            # readline() already keeps the trailing '\n', so do NOT append
            # another one (the original did, producing a spurious blank line).
            header = f.readline().split("ITEM: ")[1]
            headers = f.readline()  # line 2: the timestep value (newline kept)
            print(header)
            g.write(header)
            g.write(headers)
            for _ in range(6):
                next(f)  # skip lines 3-8
            for line in f:
                g.write(line.rstrip().replace(' ', ',') + '\n')
    print(time.perf_counter() - start)

if __name__ == "__main__":
    main()
Let me know if you need any other syntax or something else in the final CSV.
Also to time something always use time.perf_counter it's more accurate.

National Instruments USB X-SERIES with Python-nidaqmx

I am trying to drive 4 independent channels using the python nidaqmx module and the NI X-series 6341 (PN: 781438-01). I have 2 analogue outputs and two digital outputs and I would like all these streams independent of each other. For some reason when I execute the code only my 2 analogue outs and digital out on line 0 fire. I do not see the digital stream on line 1. Does anyone know what may be going on here? I've tried it with another box and get the same behaviour so I don't think its hardware related. Here is the code:
import nidaqmx
from numpy import array
from nidaqmx import stream_writers
import numpy as np
from tkinter import filedialog
from numpy import genfromtxt
import pandas as pd
from nidaqmx.constants import LineGrouping

# Enumerate the NI devices visible to the local driver and keep their names.
Devs = []
system = nidaqmx.system.System.local()
print(system.driver_version)
for device in system.devices:
    # str(device) contains "...=<name>)" -- extract the bare device name.
    dev = str(device).split('=')[1].split(')')[0]
    Devs.append(dev)
    print(device)
    print(dev)
def detectDelimiter(csvFile):
    """Guess the field delimiter of *csvFile* from its first line.

    Checks ';', then ',', then tab, in that order of precedence, and
    returns the first one found. Returns None when the first line
    contains none of the three (callers must handle that case).
    """
    with open(csvFile, 'r') as myCsvfile:
        header = myCsvfile.readline()
    if header.find(";") != -1:
        return ";"
    if header.find(",") != -1:
        return ","
    if header.find("\t") != -1:
        return "\t"
    return None  # explicit: no recognised delimiter (was implicit before)
# Module-level buffers filled by load_data().
My_Data = []
My_Data_unscaled = []

def load_data():
    """Ask the user for a data file, parse it, and store it in the buffers.

    Replaces the previous contents of My_Data. NOTE(review):
    My_Data_unscaled is only appended to, never cleared, so it grows
    across calls -- confirm whether that is intended.
    """
    file_path = filedialog.askopenfilename()
    delim = detectDelimiter(file_path)
    my_data = genfromtxt(file_path, delimiter=delim)
    if len(My_Data) > 0:
        print('Deleting Data in the buffer...')
        My_Data.clear()
        My_Data.append(my_data)
        My_Data_unscaled.append(my_data)
    else:
        #original_data = my_data
        #My_Data = []
        My_Data.append(my_data)
        My_Data_unscaled.append(my_data)
load_data()  # pops the file dialog and fills My_Data
look = My_Data[0]
e_dataframe = pd.DataFrame(look)
v_step = 20/2**16  # volts per count: +/-10 V span over 16 bits -- assumed, confirm hardware range
# Scale the first two (analogue) columns from raw counts to volts.
e_dataframe[0] = e_dataframe[0]*v_step
e_dataframe[1] = e_dataframe[1]*v_step
# Analogue sample matrix: column 1 first, then column 0 (row order maps to ao0/ao1).
samples_new = [e_dataframe[1].T,e_dataframe[0].T]
samples_new = array(samples_new)
# Digital sample matrix: column 2 duplicated for the two digital lines.
dig_samples_new = [e_dataframe[2].T,e_dataframe[2].T]
dig_samples_new = array(dig_samples_new)
# Force the first sample of each digital line high.
dig_samples_new[0,0] = 1
dig_samples_new[0,0] = 1  # NOTE(review): exact duplicate of the previous line -- possibly a typo for another index; verify
dig_samples_new[1,0] = 1
def fire_galvos(dev, rate, dig_las):
    """Drive two analogue (ao0/ao1) and two digital (line0/line1) outputs
    on NI device *dev* at *rate* samples/s.

    The digital task is clocked from the analogue task's sample clock so
    all four streams run in lockstep. Reads samples_new from module scope.
    """
    #define channels
    channel1 = dev + '/' + 'ao0'  # laser trigger
    channel2 = dev + '/' + 'ao1'  # this is the auxillary trigger signal
    channel3 = dev + '/line0'
    channel4 = dev + '/line1'
    #define clock
    sample_clock = '/' + dev + '/ao/SampleClock'
    with nidaqmx.Task() as analog_output, nidaqmx.Task() as digital_output:
        dig_las = np.uint32(dig_las)  # port writes require uint32 samples
        #add channels
        analog_output.ao_channels.add_ao_voltage_chan(channel1, 'mychannel1', -10, 10)
        analog_output.ao_channels.add_ao_voltage_chan(channel2, 'mychannel2', -10, 10)
        digital_output.do_channels.add_do_chan(channel3, 'mychannel3')
        digital_output.do_channels.add_do_chan(channel4, 'mychannel4')
        #digital_output.do_channels.add_do_chan(channel4, 'mychannel4',line_grouping=LineGrouping.CHAN_PER_LINE)
        #digital_output.ao_load_impedance = 50
        #define clock timings
        analog_output.timing.cfg_samp_clk_timing(rate=rate, sample_mode=nidaqmx.constants.AcquisitionType.FINITE, samps_per_chan=len(samples_new[0]))
        digital_output.timing.cfg_samp_clk_timing(rate=rate, source=sample_clock, sample_mode=nidaqmx.constants.AcquisitionType.FINITE, samps_per_chan=len(dig_samples_new[0]))  #source=sample_clock,
        #writing commands
        writer_ana = stream_writers.AnalogMultiChannelWriter(analog_output.out_stream, auto_start=False)
        writer_dig = stream_writers.DigitalMultiChannelWriter(digital_output.out_stream, auto_start=False)
        #writer_dig = stream_writers.DigitalSingleChannelWriter(digital_output.out_stream,auto_start=False)
        writer_ana.write_many_sample(samples_new)
        writer_dig.write_many_sample_port_uint32(dig_las)
        # Start the digital task first: it waits on the analogue sample
        # clock, which only runs once the analogue task starts.
        digital_output.start()
        analog_output.start()
        digital_output.wait_until_done(timeout=60)
        analog_output.wait_until_done(timeout=60)

fire_galvos(dev, 3000, dig_samples_new)

Extra Comma in specific data field using pandas

I am combining very large data sets using python. The script works completely fine. However there is one specific row that may or may not have a comma in side of it. Does anyone know how to remove the comma? FYI this is how the data is collected it cannot be removed on collection. The field that it is in is the ["NAME'] field.
I have tried to implement a sep=r',(?!\s)' look ahead and that screws my data up even more
THANKS!
import csv
import shutil
import os
import pandas as pd
from os import path
def combinecsv(source_folder):
    """Merge every CSV in *source_folder* into <source_folder>/output/output.csv.

    Keeps a fixed subset of columns and normalises some fields via the
    helper functions check_for_none / convert_period / combine_bowstern.
    """
    all_files = os.listdir(source_folder)
    master_df = None
    for anyfile in all_files:
        if anyfile.lower().endswith(".csv"):
            file_path = path.join(source_folder, anyfile)
            print("opening file path: {}".format(file_path))
            df = pd.read_csv(file_path)
            if master_df is None:
                master_df = df
            else:
                # DataFrame.append was deprecated and removed in pandas 2.0;
                # pd.concat is the equivalent replacement.
                master_df = pd.concat([master_df, df])
    new_df = pd.DataFrame()
    new_df["MSG_TYPE"] = master_df["MSG_TYPE"]
    new_df["MMSI"] = master_df["MMSI"]
    new_df["NAME"] = master_df.apply(lambda row: check_for_none(row["NAME"]), axis = 1)
    new_df["LAT_AVG"] = master_df["LAT_AVG"]
    new_df["LON_AVG"] = master_df["LON_AVG"]
    new_df["PERIOD"] = master_df.apply(lambda row: convert_period(row["PERIOD"]),axis = 1)
    new_df["SPEED_KNOTS"] = master_df.apply(lambda row: check_for_none(row["SPEED_KNOTS"]), axis = 1)
    new_df["COG_DEG"] = master_df.apply(lambda row: check_for_none(row["COG_DEG"]), axis = 1)
    new_df["SHIP_AND_CARGO_TYPE"] = master_df.apply(lambda row: check_for_none(row["SHIP_AND_CARGO_TYPE"]), axis = 1)
    new_df["DRAUGHT"] = master_df.apply(lambda row: check_for_none(row["DRAUGHT"]), axis = 1)
    new_df["LEN"] = master_df.apply(lambda row: combine_bowstern(row["DIM_BOW"],row["DIM_STERN"]), axis = 1)
    # axis traverses rows not columns
    new_folder = path.join(source_folder, "output")
    if not path.exists(new_folder):
        os.mkdir(new_folder)
    new_csvpath = path.join(new_folder, "output.csv")
    print("saving csv to {}".format(new_csvpath))
    new_df.to_csv(new_csvpath, index=False, quoting = csv.QUOTE_NONNUMERIC)
def check_for_none(df):
    """Map the literal string 'None' to ""; pass every other value through."""
    return "" if df == 'None' else df
def convert_period(period):
    """Reformat a 'YYYY-MM-DD hh:mm:...' timestamp as 'DD/MM/YY hh:mm'."""
    yy = period[2:4]
    mm = period[5:7]
    dd = period[8:10]
    hhmm = period[11:16]
    return f"{dd}/{mm}/{yy} {hhmm}"
def combine_bowstern(bow, stern):
    """Return bow + stern dimensions as an int, treating 'None' as 0."""
    total = 0
    if bow != "None":
        total += int(bow)
    if stern != "None":
        total += int(stern)
    return total
if __name__ == "__main__":
    # Hard-coded input directory; run this file directly to merge the CSVs there.
    source_folder = r'C:\Users\MTTA Standalone\Desktop\Code\csvcombine'
    combinecsv(source_folder)
Here is a sample of with and without comma data set:
MSG_TYPE,MMSI,NAME,IMO_NUMBER,CALL_SIGN,LAT_AVG,LON_AVG,PERIOD,SPEED_KNOTS,COG_DEG,HEADING_DEG,NAV_STATUS,NAV_SENSOR,SHIP_AND_CARGO_TYPE,DRAUGHT,DIM_BOW,DIM_STERN,DIM_PORT,DIM_STARBOARD,MMSI_COUNTRY_CD,RECEIVER
1,249830000,ZIM LUANDA,9403229,9HA2029,37.825850,-74.340755,2018-08-01 00:00:00.000,11.5,196.4,198,0,1,71,10.9,197,63,21,11,MT,D05MN-HR-CHIBS1
1,256819000,IOLCOS, DESTINY,9486049,9HA2936,36.833089,-75.672449,2018-08-01 00:00:00.000,9.7,93.1,95,0,1,70,14.4,199,30,13,24,MT,D05MN-NC-MAMBS1

mayavi surf() how do I show colorbar?

I cannot figure out how to get the colorbar to show. I don't have much experience using Mayavi, so I'm not sure what steps I need to take to figure this out.
Has anyone else had similar problems?
My code is shown below:
from tkFileDialog import askopenfilename
from StringIO import StringIO
import numpy as np
from mayavi import mlab
#getting the data from a txt file
filename = askopenfilename()
type(filename)
fileAsStr =''
data = []
count= 0
atData=False
with open(filename,'r') as f:
for line in f:
if line.startswith("Note"):
title = line
title = title.strip("Note:")
title = title.strip()
print title
if atData and not line.startswith('Total'): #after the second one begin reading file
line = line.replace(' ','')
#data.append(line)
fileAsStr = fileAsStr + line
if line.startswith('-----'):
count = count +1
if count == 2:
atData = True
dataStrIO = StringIO(fileAsStr)
dataArray = np.loadtxt(dataStrIO, delimiter=',')
dataDim = dataArray.shape
dx = dataArray[0:dataDim[0], 3]
dy = dataArray[0:dataDim[0], 4]
dz = dataArray[0:dataDim[0], 5]
bTotal = dataArray[0:dataDim[0],9]
firstNum = dy[0]
count = 0
while firstNum == dy[count]:
count = count + 1
print 'count=' + str(count)
#arranging the arrays into an acceptable format
from itertools import islice
def solve(lis, n):
it = iter(lis)
return [list(islice(it,n)) for _ in xrange(len(lis)/n)]
dx = np.transpose(solve(dx, count))
dy = np.transpose(solve(dy, count))
dz = solve(dz, count)
bTotal = solve(bTotal, count)
bTotal = np.log10(bTotal)
#making the plot
mlab.options.backend = 'envisage'
surf = mlab.surf(dx,dy, bTotal,warp_scale=2)
mlab.axes(surf, x_axis_visibility= True, y_axis_visibility = True,
z_axis_visibility = True, xlabel='x axis (mm)', ylabel='y axis (mm)',
zlabel ='z axis (mm)', nb_labels=10)
mlab.axes.label_text_property.font_size = 5
mlab.title(title, line_width = .5, height = 1)
mlab.colorbar( title = "magnetic field (Tesla)")
I believe you are missing an mlab.show() command at the end of your script.

Categories