Looping back to the next column in a CSV - Python

I'm trying to get a script to run on each individual column of a CSV file. I've figured out how to tell Python which column to run the script on, but I want it to analyze column one, output the results, then move to column two, and continue on through the file. What I want is an "if etc goto etc" style command. I've found how to do this with simple one-liners, but I have a larger script. Any help would be great, as I'm sure I'm just missing something. For example, if I could loop back to where I define my data (h=data) but tell it to choose the next column. Here is my script.
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import pylab
from scipy import linalg
import sys
import scipy.interpolate as interpolate
import scipy.optimize as optimize

a = raw_input("Data file name? ") #Name of the data file, including the directory; must be .csv
datafile = open(a, 'r')
data = []
for row in datafile:
    data.append(row.strip().split(',')) #opening and organizing the csv file
print('Data points= ', len(data))
print data
c = raw_input("Is there a header row? y/n?") #Remove header line if present
if c is ('y'):
    del data[0]
    data2 = data
    print('Raw data= ', data2)
else:
    print('Raw data= ', data)
'''
#if I wanted to select a column
b = input("What column to analyze?") #Asks what column depth data is in
if b is 1:
    h = [[rowa[i] for rowa in data] for i in range(1)] #first column
'''
h = data # all columns
g = reduce(lambda x, y: x + y, h) #prepares data for calculations
a = map(float, g)
a.sort()
print ('Organized data= ', a)
def GRLC(values):
    '''
    Calculate Gini index, Gini coefficient, Robin Hood index, and points of
    Lorenz curve based on the instructions given in
    www.peterrosenmai.com/lorenz-curve-graphing-tool-and-gini-coefficient-calculator
    Lorenz curve values are given as lists of x & y points [[x1, x2], [y1, y2]]
    #param values: List of values
    #return: [Gini index, Gini coefficient, Robin Hood index, [Lorenz curve]]
    '''
    n = len(values)
    assert(n > 0), 'Empty list of values'
    sortedValues = sorted(values) #Sort smallest to largest
    #Find cumulative totals
    cumm = [0]
    for i in range(n):
        cumm.append(sum(sortedValues[0:(i + 1)]))
    #Calculate Lorenz points
    LorenzPoints = [[], []]
    sumYs = 0 #Sum of all y values
    robinHoodIdx = -1 #Robin Hood index max(x_i - y_i)
    for i in range(1, n + 2):
        x = 100.0 * (i - 1)/n
        y = 100.0 * (cumm[i - 1]/float(cumm[n]))
        LorenzPoints[0].append(x)
        LorenzPoints[1].append(y)
        sumYs += y
        maxX_Y = x - y
        if maxX_Y > robinHoodIdx: robinHoodIdx = maxX_Y
    giniIdx = 100 + (100 - 2 * sumYs)/n #Gini index
    return [giniIdx, giniIdx/100, robinHoodIdx, LorenzPoints]
result = GRLC(a)
print 'Gini Index', result[0]
print 'Gini Coefficient', result[1]
print 'Robin Hood Index', result[2]

I'm ignoring all of that GRLC function and just solving the looping question. Give this a try. It uses while True: to loop forever (you can break out by ending the program; Ctrl+C on Windows, though it depends on your OS). Load the data from the CSV once, then rebuild the relevant variables each time through the loop. If you have questions, please ask. Also, I didn't test it, as I don't have all the NumPy packages installed :)
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import pylab
from scipy import linalg
import sys
import scipy.interpolate as interpolate
import scipy.optimize as optimize

def GRLC(values):
    '''
    Calculate Gini index, Gini coefficient, Robin Hood index, and points of
    Lorenz curve based on the instructions given in
    www.peterrosenmai.com/lorenz-curve-graphing-tool-and-gini-coefficient-calculator
    Lorenz curve values are given as lists of x & y points [[x1, x2], [y1, y2]]
    #param values: List of values
    #return: [Gini index, Gini coefficient, Robin Hood index, [Lorenz curve]]
    '''
    n = len(values)
    assert(n > 0), 'Empty list of values'
    sortedValues = sorted(values) #Sort smallest to largest
    #Find cumulative totals
    cumm = [0]
    for i in range(n):
        cumm.append(sum(sortedValues[0:(i + 1)]))
    #Calculate Lorenz points
    LorenzPoints = [[], []]
    sumYs = 0 #Sum of all y values
    robinHoodIdx = -1 #Robin Hood index max(x_i - y_i)
    for i in range(1, n + 2):
        x = 100.0 * (i - 1)/n
        y = 100.0 * (cumm[i - 1]/float(cumm[n]))
        LorenzPoints[0].append(x)
        LorenzPoints[1].append(y)
        sumYs += y
        maxX_Y = x - y
        if maxX_Y > robinHoodIdx: robinHoodIdx = maxX_Y
    giniIdx = 100 + (100 - 2 * sumYs)/n #Gini index
    return [giniIdx, giniIdx/100, robinHoodIdx, LorenzPoints]
#Name of the data file including the directory, must be .csv
a = raw_input("Data file name? ")
datafile = open(a.strip(), 'r')
data = []
#opening and organizing the csv file
for row in datafile:
    data.append(row.strip().split(','))
#Remove header line if present
c = raw_input("Is there a header row? y/n?")
if c.strip().lower() == ('y'):
    del data[0]
while True:
    #if I want the first column, that's index 0.
    b = raw_input("What column to analyze?")
    #Validate the column input here. Otherwise it might be out of range, etc.
    #Maybe try this. You might want more smarts in there, depending on your intent:
    b = int(b.strip())
    #If you expect the user to input "2" to mean the second column, use index 1 (list indexes are 0-based)
    h = [[rowa[b - 1] for rowa in data] for i in range(1)]
    #prepares data for calculations
    g = reduce(lambda x, y: x + y, h)
    a = map(float, g)
    a.sort()
    print ('Organized data= ', a)
    result = GRLC(a)
    print 'Gini Index', result[0]
    print 'Gini Coefficient', result[1]
    print 'Robin Hood Index', result[2]
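If the goal is really to visit every column in sequence automatically, rather than prompting for a column on each pass, you can also just loop over the column indices. This is an untested sketch in the same Python 2 style, assuming the data list and GRLC function defined above:
num_cols = len(data[0]) #number of columns, taken from the first row
for col in range(num_cols):
    #pull out one column, convert to floats, and sort
    vals = sorted(float(rowa[col]) for rowa in data)
    result = GRLC(vals)
    print 'Column', col
    print 'Gini Index', result[0]
    print 'Gini Coefficient', result[1]
    print 'Robin Hood Index', result[2]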


IndexError: too many indices for array: array is 1-dimensional, but 2 were indexed. Works for first two loops

Let me start by saying that I know this error message has posts about it, but I'm not sure what's wrong with my code. The block of code works just fine for the first two loops, but then fails. I've even tried removing the first two loops from the data to rule out issues in the 3rd loop, but no luck. I did have it set to print out the unsorted temporary list, and it just prints an empty array for the 3rd loop.
Sorry for the wall of comments in my code, but I'd rather have each line commented than cause confusion over what I'm trying to accomplish.
TL;DR: I'm trying to find and remove outliers from a list of data, but only for groups of entries that have the same number in column 0.
Pastebin with data
import numpy as np, csv, multiprocessing as mp, mysql.connector as msc, pandas as pd
import datetime

#Declare unsorted data array
d_us = []
#Declare temporary array for use in loop
tmp = []
#Declare sorted data array
d = []
#Declare Sum variable
tot = 0
#Declare Mean variable
m = 0
#Declare sorted final array
sort = []
#Declare number of STDs
t = 1
#Declare Standard Deviation variable
std = 0
#Declare z-score variable
z_score = 0
#Timestamp for output files
nts = datetime.datetime.now().timestamp()
#Create output file
with open(f"calib_temp-{nts}.csv", 'w') as ctw:
    pass
#Read data from CSV
with open("test.csv", 'r', newline='') as drh:
    fr_rh = csv.reader(drh, delimiter=',')
    for row in fr_rh:
        #append data to unsorted array
        d_us.append([float(row[0]), float(row[1])])
#Sort array by first column
d = np.sort(d_us)
#Calculate the range of the data
l = round((d[-1][0] - d[0][0]) * 10)
#Declare the starting value
s = d[0][0]
#Declare the ending value
e = d[-1][0]
#Set the while loop counter
n = d[0][0]
#Iterate through data
while n <= e:
    #Create array with difference column
    for row in d:
        if row[0] == n:
            diff = round(row[0] - row[1], 1)
            tmp.append([row[0], row[1], diff])
    #Convert to numpy array
    tmp = np.array(tmp)
    #Sort numpy array
    sort = tmp[np.argsort(tmp[:,2])]
    #Calculate sum of differences
    for row in tmp:
        tot = tot + row[2]
    #Calculate mean
    m = np.mean(tot)
    #Calculate Standard Deviation
    std = np.std(tmp[:,2])
    #Calculate outliers and write to output file
    for y in tmp:
        z_score = (y[2] - m)/std
        if np.abs(z_score) > t:
            with open(f"calib_temp-{nts}.csv", 'a', newline='') as ct:
                c = csv.writer(ct, delimiter=',')
                c.writerow([y[0], y[1]])
    #Reset Variables
    tot = 0
    m = 0
    n = n + 0.1
    tmp = []
    std = 0
    z_score = 0
Do this before the loop:
#Create output file
ct = open(f"calib_temp-{nts}.csv", 'w')
c = csv.writer(ct, delimiter = ',')
Then change the loop to this. Note that I have moved your initializations to the top of the loop, so you don't need to initialize them twice. Note the if tmp: line, which solves the numpy exception.
#Iterate through data
while n <= e:
    tot = 0
    m = 0
    tmp = []
    std = 0
    z_score = 0
    #Create array with difference column
    for row in d:
        if row[0] == n:
            diff = round(row[0] - row[1], 1)
            tmp.append([row[0], row[1], diff])
    #Sort numpy array
    if tmp:
        #Convert to numpy array
        tmp = np.array(tmp)
        sort = tmp[np.argsort(tmp[:,2])]
        #Calculate sum of differences
        for row in tmp:
            tot = tot + row[2]
        #Calculate mean
        m = np.mean(tot)
        #Calculate Standard Deviation
        std = np.std(tmp[:,2])
        #Calculate outliers and write to output file
        for y in tmp:
            z_score = (y[2] - m)/std
            if np.abs(z_score) > t:
                c.writerow([y[0], y[1]])
    #Reset Variables
    n = n + 0.1
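One more caution, separate from the exception: row[0] == n with n = n + 0.1 relies on exact float equality, and repeated 0.1 increments accumulate rounding error, so groups can be skipped silently. Here is a sketch of an alternative that avoids float stepping altogether by grouping rows on their (rounded) column-0 value, assuming the same d array as above:
from collections import defaultdict

groups = defaultdict(list)
for row in d:
    #round to one decimal so keys land on the same 0.1 grid without drift
    groups[round(row[0], 1)].append(row)
for key in sorted(groups):
    rows = groups[key]
    #...apply the same outlier logic as above to `rows`...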

Pandas: If statements with multiple criteria

I am trying to figure out a way to write an if statement based on a couple of criteria. I have a large CSV file that I have already cleaned and organized. There are a couple of things I need to do:
I first need a way to check whether the machine is "on" for more than 3 rows. If that is true, then I need to get its corresponding pressure for that cycle and find its average. For example, in the df above, in rows 14-19 the machine is on for more than 3 rows, so I need to get the average pressure across all instances in this period.
This data contains 40,000 rows, so I will then need this to cycle through the entire sheet to keep track of the total number of times the machine is on, and each corresponding average pressure.
import pandas as pd
import numpy as np

header_list = ['Time']
df = pd.read_csv('S8-1.csv', skiprows=6, names=header_list)
#splits the data into proper columns
df[['Date/Time','Pressure']] = df.Time.str.split(",,", expand=True)
#deletes original messy column
df.pop('Time')
#convert Pressure from object to numeric
df['Pressure'] = pd.to_numeric(df['Pressure'], errors='coerce')
#converts to a time
df['Date/Time'] = pd.to_datetime(df['Date/Time'], format='%m/%d/%y %H:%M:%S.%f', errors='coerce')
df['Moving Average'] = df['Pressure'].rolling(window=5).mean()
df['Rolling Average Center'] = df['Pressure'].rolling(window=5, center=True).mean()
df['Machine On/Off'] = ['1' if x >= 115 else '0' for x in df['Rolling Average Center']]
arr = df['Machine On/Off']

def find_runs(x):
    x = np.asanyarray(x)
    if x.ndim != 1:
        raise ValueError('Only 1D array supported')
    n = x.shape[0]
    if n == 0:
        return np.array([]), np.array([]), np.array([])
    else:
        loc_run_start = np.empty(n, dtype=bool)
        loc_run_start[0] = True
        np.not_equal(x[:-1], x[1:], out=loc_run_start[1:])
        run_starts = np.nonzero(loc_run_start)[0]
        # find run values
        run_values = x[loc_run_start]
        # find run lengths
        run_lengths = np.diff(np.append(run_starts, n))
        return run_values, run_starts, run_lengths

run = find_runs(arr)
df.iloc[_start:run_length]['whatever column']
Suggested first step: make a new column with ones and zeros... 1 for on, 0 for off.
df['newcolumnname'] = 0
df['newcolumnname'][df['machine on/off'] == 'on'] = 1
Grab that column as a numpy array:
arr = df['newcolumnname'].to_numpy()
Then use the following code (credit: https://gist.github.com/alimanfoo/c5977e87111abe8127453b21204c1065):
import numpy as np

def find_runs(x):
    """Find runs of consecutive items in an array."""
    # ensure array
    x = np.asanyarray(x)
    if x.ndim != 1:
        raise ValueError('only 1D array supported')
    n = x.shape[0]
    # handle empty array
    if n == 0:
        return np.array([]), np.array([]), np.array([])
    else:
        # find run starts
        loc_run_start = np.empty(n, dtype=bool)
        loc_run_start[0] = True
        np.not_equal(x[:-1], x[1:], out=loc_run_start[1:])
        run_starts = np.nonzero(loc_run_start)[0]
        # find run values
        run_values = x[loc_run_start]
        # find run lengths
        run_lengths = np.diff(np.append(run_starts, n))
        return run_values, run_starts, run_lengths
Get the "runs"... run_values should contain only zeros and ones.
run_values, run_starts, run_lengths = find_runs(arr)
print(run_values) #just to see what order they are in
print(run_starts.shape)#get an idea of the shapes
print(run_lengths.shape)
_on = run_values == 1 #boolean mask selecting the runs where the machine is on
To get data from your pandas DataFrame:
for _start, _run_length in zip(run_starts[_on], run_lengths[_on]):
    tmp_df = df.iloc[_start:_start + _run_length]
    #do what you want
    print(tmp_df)
    break #remove this...I just don't know how big your data is
You're correct when you are thinking "this answer does not run". But it should give you enough to get going and solve your problem.
An incomplete question gets an incomplete response.
TODO: make your machine on/off column an integer... not sure if it is absolutely needed, but if it's a numerical numpy array then more options are available to you.
df['Machine On/Off'] = [1 if x >= 115 else 0 for x in df['Rolling Average Center'] ]
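To tie this back to the original goal (counting "on" cycles longer than 3 rows and averaging their pressure), here is an untested sketch combining the integer column above with find_runs; the column names are assumed from the question's DataFrame:
run_values, run_starts, run_lengths = find_runs(df['Machine On/Off'].to_numpy())
#keep only the "on" runs that last more than 3 rows
on_runs = (run_values == 1) & (run_lengths > 3)
avg_pressures = []
for start, length in zip(run_starts[on_runs], run_lengths[on_runs]):
    avg_pressures.append(df['Pressure'].iloc[start:start + length].mean())
print('machine turned on', len(avg_pressures), 'times')
print('average pressure per cycle:', avg_pressures)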

Grouping pairs of combination data based on given condition

Suppose I have a huge array of data, and a sample of it is:
x= [ 511.31, 512.24, 571.77, 588.35, 657.08, 665.49, -1043.45, -1036.56,-969.39, -955.33]
I used the following code to generate all possible pairs:
Pairs = [(x[i], x[j]) for i in range(len(x)) for j in range(i+1, len(x))]
This gave me all possible pairs. Now I would like to group these pairs if the two values are within a threshold of ±25 of each other, and label them accordingly.
Any idea or advice on how to do this? Thanks in advance.
If I understood your problem correctly, the code below should do the trick. The idea is to build a dictionary whose keys are the rounded mean of each pair, and keep appending matching pairs onto it:
import numpy as np #I use numpy for the mean.

#Your threshold
threshold = 25
#A dictionary will hold the relevant pairs
mylist = {}
for i in Pairs:
    #Check for the threshold and discard otherwise
    diff = abs(i[1] - i[0])
    if(diff < threshold):
        #Name of the entry in the dictionary
        entry = str('%d' % int(np.mean(i)))
        #If the entry already exists, append. Otherwise, create a container list
        if(entry in mylist):
            mylist[entry].append(i)
        else:
            mylist[entry] = [i]
which results in the following output:
{'-1040': [(-1043.45, -1036.56)],
'-962': [(-969.39, -955.33)],
'511': [(511.1, 511.31),
(511.1, 512.24),
(511.1, 512.35),
(511.31, 512.24),
(511.31, 512.35)],
'512': [(511.1, 513.35),
(511.31, 513.35),
(512.24, 512.35),
(512.24, 513.35),
(512.35, 513.35)],
'580': [(571.77, 588.35)],
'661': [(657.08, 665.49)]}
This should be a fast way to do that:
import numpy as np
from scipy.spatial.distance import pdist

# Input data
x = np.array([511.31, 512.24, 571.77, 588.35, 657.08,
              665.49, -1043.45, -1036.56, -969.39, -955.33])
thres = 25.0
# Compute pairwise distances
# (the default distance metric is 'euclidean', which
# would be equivalent but more expensive to compute)
d = pdist(x[:, np.newaxis], 'cityblock')
# Find distances within threshold
d_idx = np.where(d <= thres)[0]
# Convert "condensed" distance indices to pairs of indices
r = np.arange(len(x))
c = np.zeros_like(r, dtype=np.int32)
np.cumsum(r[:0:-1], out=c[1:])
i = np.searchsorted(c[1:], d_idx, side='right')
j = d_idx - c[i] + r[i] + 1
# Get pairs of values
v_i = x[i]
v_j = x[j]
# Find means
m = np.round((v_i + v_j) / 2).astype(np.int32)
# Print result
for idx in range(len(m)):
    print(f'{m[idx]}: ({v_i[idx]}, {v_j[idx]})')
Output
512: (511.31, 512.24)
580: (571.77, 588.35)
661: (657.08, 665.49)
-1040: (-1043.45, -1036.56)
-962: (-969.39, -955.33)
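For comparison, the condensed-index arithmetic can be sidestepped with np.triu_indices, which materializes all index pairs up front; simpler to read, at the cost of more memory for very large inputs. A minimal sketch on the same data:
import numpy as np

x = np.array([511.31, 512.24, 571.77, 588.35, 657.08,
              665.49, -1043.45, -1036.56, -969.39, -955.33])
# all index pairs (i, j) with i < j
i, j = np.triu_indices(len(x), k=1)
# keep only the pairs whose values are within the threshold
mask = np.abs(x[i] - x[j]) <= 25.0
for a, b in zip(x[i][mask], x[j][mask]):
    print(round((a + b) / 2), ':', (a, b))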

Cannot combine lists from output

I have the following program. It seems that amp and period print out as a list of lists at the end (see below), and I am unable to plot them (I want to plot period against amp).
I have tried the methods in How to make a flat list out of list of lists? to combine the output of amp and period so that they are plottable, but nothing worked.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp

def derivatives(t, y, q, F):
    return [y[1], -np.sin(y[0]) - q*y[1] + F*np.sin((2/3)*t)]

t = np.linspace(0.0, 100, 10000)
#initial conditions
theta0 = np.linspace(0.0, np.pi, 100)
q = 0.0 #alpha / (mass*g), resistive term
F = 0.0 #G*np.sin(2*t/3)
for i in range(0, 100):
    sol = solve_ivp(derivatives, (0.0, 100.0), (theta0[i], 0.0), method='RK45', t_eval=t, args=(q, F))
    velocity = sol.y[1]
    time = sol.t
    zero_cross = 0
    value = []
    amp = []
    period = []
    for k in range(len(velocity) - 1):
        if (velocity[k+1] * velocity[k]) < 0:
            zero_cross += 1
            value.append(k)
        else:
            zero_cross += 0
    zero_cross = zero_cross - zero_cross % 2 # makes the total number of zero-crossings even
    if zero_cross != 0:
        amp.append(theta0[i])
        # period calculated using the time evolved between the first and last zero-crossing detected
        period.append((2*(time[value[zero_cross - 1]] - time[value[0]])) / (zero_cross - 1))
If I print out amp inside the loop, it displays as follows:
[0.03173325912716963]
[0.06346651825433926]
[0.0951997773815089]
[0.12693303650867852]
[0.15866629563584814]
[0.1903995547630178]
[0.2221328138901874]
[0.25386607301735703]
[0.28559933214452665]
[0.3173325912716963]
[0.3490658503988659]
[0.3807991095260356]
[0.4125323686532052]
[0.4442656277803748]
[0.47599888690754444]
[0.5077321460347141]
[0.5394654051618837]
[0.5711986642890533]
[0.6029319234162229]
[0.6346651825433925]
[0.6663984416705622]
[0.6981317007977318]
[0.7298649599249014]
[0.7615982190520711]
[0.7933314781792408]
[0.8250647373064104]
[0.85679799643358]
[0.8885312555607496]
[0.9202645146879193]
[0.9519977738150889]
[0.9837310329422585]
[1.0154642920694281]
[1.0471975511965979]
[1.0789308103237674]
[1.110664069450937]
[1.1423973285781066]
[1.1741305877052763]
[1.2058638468324459]
[1.2375971059596156]
[1.269330365086785]
[1.3010636242139548]
[1.3327968833411243]
[1.364530142468294]
[1.3962634015954636]
[1.4279966607226333]
[1.4597299198498028]
[1.4914631789769726]
[1.5231964381041423]
[1.5549296972313118]
[1.5866629563584815]
[1.618396215485651]
[1.6501294746128208]
[1.6818627337399903]
[1.71359599286716]
[1.7453292519943295]
[1.7770625111214993]
[1.8087957702486688]
[1.8405290293758385]
[1.872262288503008]
[1.9039955476301778]
[1.9357288067573473]
[1.967462065884517]
[1.9991953250116865]
[2.0309285841388562]
[2.0626618432660258]
[2.0943951023931957]
[2.126128361520365]
[2.1578616206475347]
[2.1895948797747042]
[2.221328138901874]
[2.2530613980290437]
[2.284794657156213]
[2.3165279162833827]
[2.3482611754105527]
[2.379994434537722]
[2.4117276936648917]
[2.443460952792061]
[2.475194211919231]
[2.5069274710464007]
[2.53866073017357]
[2.57039398930074]
[2.6021272484279097]
[2.633860507555079]
[2.6655937666822487]
[2.6973270258094186]
[2.729060284936588]
[2.7607935440637577]
[2.792526803190927]
[2.824260062318097]
[2.8559933214452666]
[2.887726580572436]
[2.9194598396996057]
[2.9511930988267756]
[2.982926357953945]
[3.0146596170811146]
[3.141592653589793]
[Finished in 3.822s]
I am not sure what type of output that is or how to handle it; any help would be appreciated!
You are declaring the lists inside the loop, which means they are reset to empty at every iteration. Consider declaring amp, period, and any other list that should start empty only once, before the loop, like so:
#initialize arrays, executes only once before the loop
amp = []
period = []
for i in range(0, 100):
    ... #your logic here, plus appending values to `amp` and `period`
#now `amp` and `period` should contain all desired values
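Once amp and period are flat lists of numbers, plotting one against the other is straightforward with the matplotlib import already in the question, for example:
plt.plot(amp, period, 'o')
plt.xlabel('initial amplitude theta0 (rad)')
plt.ylabel('period')
plt.show()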

Python increase performance of random.sample

I am writing a function to randomly select elements stored in a dictionary:
import random
from liblas import file as lasfile
from collections import defaultdict

def point_random_selection(list, k):
    try:
        sample_point = random.sample(list, k)
    except ValueError:
        sample_point = list
    return(sample_point)

def world2Pixel_Id(x, y, X_Min, Y_Max, xDist, yDist):
    col = int((x - X_Min)/xDist)
    row = int((Y_Max - y)/yDist)
    return("{0}_{1}".format(col, row))

def point_GridGroups(inFile, X_Min, Y_Max, xDist, yDist):
    Groups = defaultdict(list)
    for p in lasfile.File(inFile, None, 'r'):
        id = world2Pixel_Id(p.x, p.y, X_Min, Y_Max, xDist, yDist)
        Groups[id].append(p)
    return(Groups)
where k is the number of elements to select, and Groups is the dictionary:
file_out = lasfile.File("outPut", mode='w', header=h)
for m in Groups.iteritems():
    # select k points for each dictionary key
    point_selected = point_random_selection(m[1], k)
    for l in xrange(len(point_selected)):
        # save the data
        file_out.write(point_selected[l])
file_out.close()
My problem is that this approach is extremely slow (around 4 days for a file of ~800 MB).
You could try updating your samples as you read the coordinates. This at least saves you from having to store everything in memory before drawing your sample, though it is not guaranteed to make things faster.
The following is based off of BlkKnght's excellent answer for building a random sample from file input without retaining all the lines; it just expands that idea to keep multiple samples instead.
import random
from liblas import file as lasfile
from collections import defaultdict

def world2Pixel_Id(x, y, X_Min, Y_Max, xDist, yDist):
    col = int((x - X_Min) / xDist)
    row = int((Y_Max - y) / yDist)
    return (col, row)

def random_grouped_samples(infile, n, X_Min, Y_Max, xDist, yDist):
    """Select up to n points *per group* from infile"""
    groupcounts = defaultdict(int)
    samples = defaultdict(list)
    for p in lasfile.File(infile, None, 'r'):
        id = world2Pixel_Id(p.x, p.y, X_Min, Y_Max, xDist, yDist)
        i = groupcounts[id]
        r = random.randint(0, i)
        if r < n:
            if i < n:
                samples[id].insert(r, p)  # add first n items in random order
            else:
                samples[id][r] = p  # at a decreasing rate, replace random items
        groupcounts[id] += 1
    return samples
The above function takes infile and your boundary coordinates, as well as the sample size n, and returns grouped samples that have at most n items in each group, picked uniformly.
Because the id is only used as a group key, I reduced it to the (col, row) tuple; there is no need to make it a string.
You can write these out to a file with:
file_out = lasfile.File("outPut", mode='w', header=h)
for group in samples.itervalues():
    for p in group:
        file_out.write(p)
file_out.close()
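To see that the insert/replace logic draws a uniform sample per group, here is a minimal self-contained sketch of the same reservoir technique on plain integers (no liblas required):
import random

def reservoir_sample(stream, n):
    """Keep a uniform random sample of up to n items from an iterable of unknown length."""
    sample = []
    for i, item in enumerate(stream):
        r = random.randint(0, i)
        if r < n:
            if i < n:
                sample.insert(r, item)  # fill the reservoir in random order
            else:
                sample[r] = item  # replace items with decreasing probability
    return sample

print(reservoir_sample(xrange(1000), 5))  # five integers drawn uniformly from 0..999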
