I am plotting EEG data using MNE, and I want to exit each plot and continue the loop whenever a button is pressed. Below is some example code from my program.
import os
import pickle
import matplotlib.pyplot as plt

os.chdir('M:\\')
# Create a dictionary containing all files by participant, including cleaned files
root_folder = 'M:\\'
for folder in os.listdir(root_folder):
    for filename in os.listdir(folder):
        # Create the file path by joining the directory path and the filename
        file_path = os.path.join(folder, filename)
        # Load the pickled file
        with open(file_path, 'rb') as g:
            raw = pickle.load(g)
        # Plot EEG data and select bads
        raw.plot(duration=10, start=0, n_channels=64, scalings='auto', block=False)
        # Interpolate bad channels
        raw_interp = raw.interpolate_bads(reset_bads=False)
        if plt.waitforbuttonpress(100000) == 'q':
            plt.close()
        # Pickle the data file
        nufilename = filename.split("_HighPassed", maxsplit=1)[0]
        nufilename = nufilename + "_Interp_Chans.pkl"
        Dir1 = 'M:\\'
        savepath = os.path.join(Dir1, folder)
        FullSavePath = os.path.join(savepath, nufilename)
        with open(FullSavePath, "wb") as f:
            pickle.dump(raw_interp, f)
I want to use the button-press function after the raw data has been plotted, and then continue the loop of plotting EEG data. I have looked at plt.waitforbuttonpress, but haven't gotten it to work yet. Any suggestions?
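One untested approach, sketched below: instead of plt.waitforbuttonpress, MNE's Raw.plot accepts block=True, which halts the script until the plot window is closed, so the loop resumes on its own with the next file (the saving code stays as in your example):

        # Sketch only: block=True pauses here until the browser window is closed
        raw.plot(duration=10, start=0, n_channels=64, scalings='auto', block=True)
        # Channels marked interactively end up in raw.info['bads'],
        # so interpolation can run before the loop moves on.
        raw_interp = raw.interpolate_bads(reset_bads=False)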
I am executing a loop and saving data to a CSV file. To store the data, I create a path and directory and append to the CSV file. As is, the code executes fine and generates one CSV file upon completion. I would like the file name to increment each time I run the code, so I don't have to delete or overwrite the previous one: file_0 for run 1, file_1 for run 2, and so on. I inserted `fileName = "{}/{}_{}.csv".format(file_path[0], self.file_name, file_path[1])`, but this saves each point in a separate file. Any suggestions welcome. Thanks.
import csv
import time
import os
from datetime import datetime
from random import random

# Create folder, timestamped
class CreateFile():
    def __init__(self, procedure):
        self.procedure = procedure  # Get the procedure name.
        self.createfile()  # Call create file function.

    def createfile(self):
        date = datetime.now().date()
        PARENT_DIR = "C:\test/".format(date)  # Get the path.
        DIR = '{}'.format(self.procedure)  # Get procedure name.
        self.PATH = os.path.join(PARENT_DIR, DIR)  # Form a full path.
        try:
            if not os.path.exists(self.PATH):  # If the given path does not exist.
                os.makedirs(self.PATH)  # Make a directory.
        except OSError:  # OSError occurred, don't make the directory.
            print("Creation of the directory [%s] failed." % DIR)  # Print message.
        else:  # Successfully created the directory, print the message.
            print("Successfully created the directory %s " % DIR)

    def get_file(self):
        file_list = os.listdir(self.PATH)  # Load path into list directory.
        file_count = len(file_list)  # Check the number of file(s) under the given path.
        return [self.PATH, file_count]  # Return full path and file count under this folder.

# Initialization and setpoints list
startpoint = 300
setpoint = 310
step = 10
temp_list = []
for x in range(startpoint, setpoint + 1, step):
    temp_list.append(x)
print(temp_list)

class save_data1():
    def __init__(self, file_name):
        self.file_name = file_name
        file_path_count = CreateFile(file_name).get_file()
        self.fileName = "{}/{}.csv".format(file_path_count[0], file_name)

    def record_csv(self, fileName, now, ep1):
        with open(fileName, 'a', newline='') as csvfile:
            header = ["Timestamp", 'Temp', "ep1"]
            writer = csv.DictWriter(csvfile, fieldnames=header)
            if csvfile.tell() == 0:
                writer.writeheader()
            writer.writerow(
                {
                    "Timestamp": now,
                    'Temp': temp,
                    "ep1": ep1
                }
            )

    def test(self):
        file_path = CreateFile(self.file_name).get_file()
        # fileName = "{}/{}_{}.csv".format(file_path[0], self.file_name, file_path[1])
        fileName = "{}/{}.csv".format(file_path[0], self.file_name)
        now = datetime.now()
        ep1 = random()  # here just a random number instead of an instrument reading
        self.record_csv(fileName, now, ep1)

# Set setpoint in Temp list
for temp in temp_list:
    # Print a header
    print('')
    hdr = '______ T ______ __________ H __________\t______ Ext Param ______'
    print(hdr)
    time.sleep(0.5)
    print('setpoint:', temp)
    if temp == 300:
        save_data1('meas1').test()
    else:
        print('Waiting ')
        save_data1('meas1').test()
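One untested way to get file_0 for run 1, file_1 for run 2, and so on, reusing the CreateFile helper and record_csv above: build the numbered name once in __init__ (get_file() already reports the current file count) and let test() reuse it, creating one instance per run instead of one per data point:

class save_data2(save_data1):
    # Sketch: the numbered file name is fixed once per run, so every call
    # to test() appends to the same CSV instead of opening a new one.
    def __init__(self, file_name):
        self.file_name = file_name
        path, count = CreateFile(file_name).get_file()
        # count files already exist: run 1 -> meas1_0.csv, run 2 -> meas1_1.csv
        self.fileName = "{}/{}_{}.csv".format(path, file_name, count)

    def test(self):
        now = datetime.now()
        ep1 = random()  # placeholder reading, as in the original
        self.record_csv(self.fileName, now, ep1)

# One instance per run, reused for every setpoint:
logger = save_data2('meas1')
for temp in temp_list:
    logger.test()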
I have written code that reads an Excel file and, after running the required processing function, writes the result to another Excel file. I have done this for one Excel file. My question: when I want to do this for multiple Excel files, reading each one and producing a separate output Excel file for each input file, how do I apply a for loop here?
Following is my code:
from ParallelP import *
import time, json
import pandas as pd

if __name__ == '__main__':
    __ip__ = "ip/"
    __op__ = "op/"
    __excel_file_name__ = __ip__ + '80chars.xlsx'
    __prediction_op__ = __op__ + basename(__excel_file_name__) + "_processed.xlsx"
    df = pd.read_excel(__excel_file_name__)
    start_time = time.time()
    df_preprocessed = run(df)
    print("Time Needed to execute all data is {0} seconds".format((time.time() - start_time)))
    print("Done...")
    df_preprocessed.to_excel(__prediction_op__)
I tried to stick to your example and just expand it as I would. The example below is untested and is not necessarily the best way to do it!
from ParallelP import *
import time, json
import pandas as pd
import os
from pathlib import Path  # Handles directory paths -> less error prone than manually sticking together paths

if __name__ == '__main__':
    __ip__ = "ip/"
    __op__ = "op/"

    # Get a list of all excel files in a given directory
    excel_file_list = [f for f in os.listdir(__ip__) if f.endswith('.xlsx')]

    # Loop over the list and process each excel file separately
    for excel_file in excel_file_list:
        excel_file_path = Path(__ip__, excel_file)  # Create the file path
        df = pd.read_excel(str(excel_file_path))  # Read the excel file into a data frame
        start_time = time.time()
        df_preprocessed = run(df)  # Run your routine
        print("Time Needed to execute all data is {0} seconds".format((time.time() - start_time)))
        print("Done...")

        # Create the output file name
        prediction_output_file_name = '{}__processed.xlsx'.format(excel_file_path.stem)
        # Create the output file path
        prediction_output_file_path = str(Path(__op__, prediction_output_file_name))
        # Write the output to the excel file
        df_preprocessed.to_excel(prediction_output_file_path)
Sidenote: I have to mention that your variable names feel like a misuse of the double underscore. These 'dunder' names are special and indicate that they have a meaning for Python (see for example here). Please just name your variables input_dir and output_dir instead of __ip__ and __op__, respectively.
I do have some code I wrote. Maybe you can alter it for your requirements.
import os

# This is where your input files should be
in_folder = 'input/xls/file/folder'
# This will be your output folder
out_folder = 'output/xls/file/folder'

if not os.path.exists(out_folder):
    os.makedirs(out_folder)

file_exist = False
dir_list = os.listdir(in_folder)

for xlfile in dir_list:
    if xlfile.endswith('.xlsx') or xlfile.endswith('.xls'):
        file_exist = True
        str_file = os.path.join(in_folder, xlfile)
        #work_book = load_workbook(filename=str_file)
        #work_sheet = work_book['qa']
        # Do your work here with the excel file
        #out_Path = os.path.join(out_folder,)
        # and output it to out_Path

if not file_exist:
    print('cannot find any valid excel file in the folder ' + in_folder)
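If you want to fill in the commented-out Excel part, here is a minimal untested sketch with openpyxl that could replace the commented lines inside the loop (the sheet name 'qa' comes from the original comment, and the A1 edit is a placeholder):

from openpyxl import load_workbook  # assumes openpyxl is installed

work_book = load_workbook(filename=str_file)
work_sheet = work_book['qa']      # sheet name taken from the comment above
work_sheet['A1'] = 'processed'    # placeholder edit; do your real work here
out_path = os.path.join(out_folder, xlfile)
work_book.save(out_path)          # write the processed copy to the output folder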
I am trying to create multiple feature classes from data with a .txt extension. My code runs but only produces one .shp file. The variable xyTable, when checked, does contain all the file paths. Each of these should then run through both ArcPy functions and produce the relevant feature class files, named in accordance with their .txt files.
import arcpy
import os
import tempfile
import shutil

shpFileArray = []
print "\n"
arcpy.env.overwriteOutput = True
newFolder = "destinationpath"

if os.path.exists(newFolder):
    tmp = tempfile.mktemp(dir=os.path.dirname(newFolder))
    shutil.move(newFolder, tmp)
    shutil.rmtree(tmp)
os.makedirs(newFolder)
arcpy.env.workspace = newFolder

for file in os.listdir("sourcepath"):
    layerName = file[:-4]
    fileSHP = layerName + ".shp"

for file in os.listdir("sourcepath"):
    if file.endswith(".txt"):
        xyTable = (os.path.join("destinationpath", file))
        arcpy.MakeXYEventLayer_management(table=xyTable, in_x_field="EastingM", in_y_field="NorthingM", out_layer="layerName", ...continues...
        arcpy.FeatureClassToFeatureClass_conversion(in_features="layerName", out_path="destinationpath", out_name=fileSHP, ...continues...
Looks like you are not giving the FeatureClassToFeatureClass tool unique shapefile names: after the first for loop finishes, fileSHP doesn't change. You have shpFileArray set up to hold the list of fileSHPs, so perhaps try something like this to save your set of fileSHPs in the first for loop and refer to them in the second. My Python might not be exactly right, but I think the idea is sound.
import arcpy
import os
import tempfile
import shutil

shpFileArray = []
print "\n"
arcpy.env.overwriteOutput = True
newFolder = "destinationpath"

if os.path.exists(newFolder):
    tmp = tempfile.mktemp(dir=os.path.dirname(newFolder))
    shutil.move(newFolder, tmp)
    shutil.rmtree(tmp)
os.makedirs(newFolder)
arcpy.env.workspace = newFolder

for file in os.listdir("sourcepath"):
    layerName = file[:-4]
    fileSHP = layerName + ".shp"
    shpFileArray.append(fileSHP)

for idx, file in enumerate(os.listdir("sourcepath")):
    if file.endswith(".txt"):
        xyTable = (os.path.join("destinationpath", file))
        outShape = shpFileArray[idx]
        arcpy.MakeXYEventLayer_management(table=xyTable, in_x_field="EastingM", in_y_field="NorthingM", out_layer="layerName", ...continues...
        arcpy.FeatureClassToFeatureClass_conversion(in_features="layerName", out_path="destinationpath", out_name=outShape, ...continues...
Let's say I have n files in a directory with filenames file_1.txt, file_2.txt, file_3.txt ... file_n.txt. I would like to import them into Python individually, do some computation on them, and then store the results in n corresponding output files: file_1_o.txt, file_2_o.txt, ... file_n_o.txt.
I've figured out how to import multiple files:
import glob
import numpy as np

path = r'home\...\CurrentDirectory'
allFiles = glob.glob(path + '/*.txt')
for file in allFiles:
    # do something to file
    ...
    ...
    np.savetxt(file, ) ???
I'm not quite sure how to insert the _o (or any string, for that matter) before the extension so that the output file is file_1_o.txt.
Can you use the following snippet to build the output filename?
parts = in_filename.split(".")
out_filename = parts[0] + "_o." + parts[1]
where I assumed in_filename is of the form "file_1.txt".
Of course, it would probably be better to put "_o." (the suffix before the extension) in a variable, so that you can change it at will in just one place.
In your case it means
import glob
import numpy as np

path = r'home\...\CurrentDirectory'
allFiles = glob.glob(path + '/*.txt')
for file in allFiles:
    # do something to file
    ...
    parts = file.split(".")
    out_filename = parts[0] + "_o." + parts[1]
    np.savetxt(out_filename, ) ???
but you need to be careful: before you pass out_filename to np.savetxt, you may need to build the full path, so you might need something like
np.savetxt(os.path.join(path, out_filename), )
or something along those lines.
If you would like to combine the change in basically one line and define your "suffix in a variable" as I mentioned before you could have something like
hh = "_o." # variable suffix
..........
# inside your loop now
for file in allFiles:
out_filename = hh.join(file.split("."))
which does the same thing another way, using join on the split list, as mentioned by @NathanAck in his answer.
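For what it's worth, os.path.splitext from the standard library does the same split more safely, since it only splits on the last dot; a minimal sketch:

import os

root, ext = os.path.splitext("file_1.txt")  # -> ("file_1", ".txt")
out_filename = root + "_o" + ext            # -> "file_1_o.txt"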
import os

# put the path to the files here
filePath = "C:/stack/codes/"
theFiles = os.listdir(filePath)

for file in theFiles:
    # add path name before the file
    file = filePath + str(file)
    fileToRead = open(file, 'r')
    fileData = fileToRead.read()

    # DO WORK ON SPECIFIC FILE HERE
    # access the file through the fileData variable
    fileData = fileData + "\nAdd text or do some other operations"

    # change the file name to add _o
    fileVar = file.split(".")
    newFileName = "_o.".join(fileVar)

    # write the file with _o added, using the modified data in fileData
    fileToWrite = open(newFileName, 'w')
    fileToWrite.write(fileData)

    # close open files
    fileToWrite.close()
    fileToRead.close()
I am trying to add multiple files to an MXD file using some of the code found here:
How do I add a shapefile in ArcGIS via python scripting?
The code below does not return any errors; however, none of the shapefiles seem to get added to the blank MXD document.
Any help as to why this is not working would be appreciated.
import arcpy
import arcpy.mapping
from shutil import copyfile
from os import listdir
from os.path import isfile, join

def AddAllShapeFilesToNewMXD(source_directory):
    # Source file is the template that will be copied to the directory with
    # all the shape files in it.
    source_file = 'M:\Ops Field Map\Blank Map.mxd'
    # Output file is the name of the file that will have the shape files added to it
    output_file = 'GPS_Map'

    rev_count = 0
    while isfile(join(source_directory, output_file + '.mxd')):
        # Make sure a unique file is created
        print ('File ' + output_file + '.mxd exists.'),
        rev_count += 1
        output_file = output_file + '_rev' + str(rev_count)
        print ('Trying ' + output_file + '.mxd ...')

    # Create the destination file. This is the file the shape files are added to
    destination_file = join(source_directory, output_file + '.mxd')
    copyfile(source_file, destination_file)
    print 'MXD file created: ' + destination_file

    # Get the map document
    mxd = arcpy.mapping.MapDocument(destination_file)
    # Get the data frame
    data_frame = arcpy.mapping.ListDataFrames(mxd, "*")[0]
    # Get a list of all the shape files
    shp_files = [f for f in listdir(source_directory) if isfile(join(source_directory, f)) and f.endswith('.shp')]

    # Add all the shapefiles to the mxd file
    for s in shp_files:
        new_layer_full_path = join(source_directory, s)
        new_layer = arcpy.mapping.Layer(new_layer_full_path)
        arcpy.mapping.AddLayer(data_frame, new_layer, "BOTTOM")
        print 'Layer added ' + new_layer_full_path
        del new_layer

    return True

directory = 'C:\Users\gps\Desktop\dd test'
AddAllShapeFilesToNewMXD(directory)
It is hard to know without files to play with, but one reason the code above may run without errors and still not display anything is that, for many ArcGIS map display operations, you have to make sure the geoprocessing option 'Add results of geoprocessing operations to the display' (under Geoprocessing > Geoprocessing Options) is turned on.
It could be that you are missing these two important lines:
arcpy.RefreshActiveView()
arcpy.RefreshTOC()
Looks like you're almost there, and both Lucas and BelowZero are offering good suggestions if your code is running within an active session. If it's creating an *.mxd for later use, I don't see where the results are saved. Here's some simpler sample code; note the last line:
mxd = arcpy.mapping.MapDocument(srcdir+'/data_bin/Untitled.mxd')
data_frame = arcpy.mapping.ListDataFrames(mxd)[0]
mxd.activeView = data_frame.name
flowlinesLyr=arcpy.mapping.Layer('..\\NHDPlus\\nhdflowline_en')
flowlinesLyr.name='NHDPlus Flowlines'
arcpy.mapping.AddLayer (data_frame, flowlinesLyr,'TOP')
gagesEventLyr=arcpy.mapping.Layer('..\\NHDPlus\\StreamGageEvent')
gagesEventLyr.name='Original stream gage locations'
arcpy.mapping.AddLayer (data_frame, gagesEventLyr,'TOP')
mxd.saveACopy(datadir+'\NHDPlus'+Region+'_Gage_QAQC.mxd')
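Applied to the question's function, the minimal (untested) change would be to persist the edits before returning, since changes to an arcpy.mapping.MapDocument live only in memory until saved:

    # At the end of AddAllShapeFilesToNewMXD, after the layer loop:
    mxd.save()  # write the added layers back into the copied .mxd file
    del mxd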