CSV File not showing up at file location - python

So I am writing a script that runs alongside a simulation in AirSim. It is continuously collecting data (theoretically) and writing it to a csv file. However, when I go to the location where it should be saved, there is no file. Even if there were an error in my method of saving the data, the file itself should still exist, but it doesn't.
import setup_path
import airsim
import numpy as np
import os
import os.path
import string
import csv
import tempfile
import pprint
import datetime

client = airsim.MultirotorClient()
client.confirmConnection()

'''
First create a directory for all the csv files to store into.
'''
dirmain = r"C:\AirSimData"
if not os.path.exists(dirmain):
    os.mkdir(dirmain)

'''
Create format for file names
'''
run_date_and_time = datetime.datetime.now()
run_date_and_time_string = str(run_date_and_time)
extension = ".csv"
file_name_base = run_date_and_time_string + extension
imu = "imu"
gps = "gps"
magnetometer = "magnetometer"
barometer = "barometer"
gps_file_name = gps + file_name_base

'''Create csv files
'''
gps_file = open(r"gps_file_name",'w')
gps_header = ['lat','lon','alt']
with open(r"gps_file_name",'w') as gpscsvfile:
    gpscsvwriter = csv.writer(gpscsvfile)
    gpscsvwriter = gpscsvwriter.writerow(gps_header)
    gpscsvfile.close()

while True:
    #state = client.getMultirotorState()
    #s = pprint.pformat(state)
    #print("state: %s" % s)
    #imu_data = client.getImuData()
    #s = pprint.pformat(imu_data)
    #print("imu_data: %s" % s)
    #barometer_data = client.getBarometerData()
    #s = pprint.pformat(barometer_data)
    #print("barometer_data: %s" % s)
    #magnetometer_data = client.getMagnetometerData()
    #s = pprint.pformat(magnetometer_data)
    #print("magnetometer_data: %s" % s)
    gps_data = client.getGpsData().gnss.geo_point
    alt = gps_data.altitude
    lat = gps_data.latitude
    lon = gps_data.longitude
    gps_data_struct = [lat,lon,alt]
    with open(r"gps_file_name",'w') as gpscsvfile:
        gpscsvwriter = csv.writer(gpscsvfile)
        gpscsvwriter = gpscsvwriter.writerow(gps_data_struct)
        gpscsvfile.close()
    #print("Altitude: %s\nLatitude %s\nLongitude %s" %(alt,lat,lon) )
    if False:
        break

Here you are creating a file literally named "gps_file_name":
with open(r"gps_file_name",'w') as gpscsvfile:
    gpscsvwriter = csv.writer(gpscsvfile)
    gpscsvwriter = gpscsvwriter.writerow(gps_header)
You should instead use the variables holding the name elements that you created. os.path.join() is a safe way to join file names with path names.
gps_file_name = gps + file_name_base
output_file = os.path.join(dirmain, gps_file_name)
# Should read something like this "C:\AirSimData\gps2021-01-21 13:37:39.867152.csv"
Then you can use it here.
with open(output_file,'w') as gpscsvfile:
    gpscsvwriter = csv.writer(gpscsvfile)
    gpscsvwriter = gpscsvwriter.writerow(gps_header)
    gpscsvfile.close()
The next problem is that your datetime string contains characters that are invalid in file names: colons (:) cannot be used in Windows file names, so you need to rethink that part.
One option would be to drop the colons and format the time like this:
run_date_and_time_string = run_date_and_time.strftime('%y-%m-%d_%H%M%S')
# 'C:\\AirSimData\\gps21-01-21_134531.csv'
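Putting both fixes together, a minimal sketch of the logging loop might look like the following. One more thing worth flagging that the question code trips over: reopening the file with mode 'w' inside the loop truncates it on every pass, so even with a valid path at most one row would survive; appending with 'a' keeps every sample. The AirSim calls are the same ones used in the question.

import csv
import datetime
import os

import airsim

client = airsim.MultirotorClient()
client.confirmConnection()

dirmain = r"C:\AirSimData"
os.makedirs(dirmain, exist_ok=True)

# Colon-free timestamp so the name is legal on Windows
stamp = datetime.datetime.now().strftime('%y-%m-%d_%H%M%S')
output_file = os.path.join(dirmain, "gps" + stamp + ".csv")

# Write the header once; newline='' prevents blank rows on Windows
with open(output_file, 'w', newline='') as f:
    csv.writer(f).writerow(['lat', 'lon', 'alt'])

while True:
    gps_data = client.getGpsData().gnss.geo_point
    # Append each sample; 'w' here would truncate the file every pass
    with open(output_file, 'a', newline='') as f:
        csv.writer(f).writerow([gps_data.latitude, gps_data.longitude, gps_data.altitude])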

Related

While obtaining hash files, some folders and files from the directory are not showing up

My code was working just fine before adding the hash function. I was getting the list of all folders and files in my directory in the PrettyTable. Once I added the hash function, I got maybe 5 of the files in that directory with hashes in the table. I am not sure where I have gone wrong. Please forgive me, I am new to this. We are not learning to code from scratch, but have to modify existing code to function the way we need it to.
# Python Standard Libraries
import os       #file system methods
import hashlib  #hashing functions
import sys      #system methods
import time     #time conversions

# Python 3rd Party Libraries
from prettytable import PrettyTable # pip install prettytable

# Local Functions
def GetFileMetaData(fileName):
    #obtain file system metadata
    try:
        metaData = os.stat(fileName)        # Use the stat method to obtain meta data
        fileSize = metaData.st_size         # Extract fileSize and MAC Times
        timeLastAccess = metaData.st_atime
        timeLastModified = metaData.st_mtime
        timeCreated = metaData.st_ctime
        macTimeList = [timeLastModified, timeCreated, timeLastAccess] # Group the MAC Times in a List
        return True, None, fileSize, macTimeList
    except Exception as err:
        return False, str(err), None, None

# Pseudo Constants

# Start of the Script
tbl = PrettyTable(['FilePath','FileSize','UTC-Modified', 'UTC-Accessed', 'UTC-Created', 'SHA-256 HASH'])

#file check
while True:
    targetFolder = input("Enter Target Folder: ")
    if os.path.isdir(targetFolder):
        break
    else:
        print("\nInvalid Folder ... Please Try Again")

print("Walking: ", targetFolder, "\n")
print()

for currentRoot, dirList, fileList in os.walk(targetFolder):
    for nextFile in fileList:
        fullPath = os.path.join(currentRoot, nextFile)
        absPath = os.path.abspath(fullPath)
        fileSize = os.path.getsize(absPath)
        success, errInfo, fileSize, macList = GetFileMetaData(absPath)
        if success:
            #convert to readable Greenwich time
            modTime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(macList[0]))
            accTime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(macList[1]))
            creTime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(macList[2]))
            #hashing function
            with open(absPath, 'rb') as target:
                fileContents = target.read()
                sha256Obj = hashlib.sha256()
                sha256Obj.update(fileContents)
                hexDigest = sha256Obj.hexdigest()
                tbl.add_row( [ absPath, fileSize, modTime, accTime, creTime, hexDigest] )

tbl.align = "l" # align the columns left justified

# display the table
print(tbl.get_string(sortby="FileSize", reversesort=True))
print("\nScript-End\n")

list showing index out of range even though there are entries present in the file

I am trying to print 'value', but after a while it throws an index-out-of-range error. I have checked the location and there are many more entries present there.
import pandas as pd
import os
import time
import datetime as datetime

path = "G:\ML\Investing\intraQuarter"

def Key_Stats(gather="Total Debt/Equity (mrq)"):
    statspath = path+'/_KeyStats'
    stock_list = [x[0] for x in os.walk(statspath)]
    #print('stock_list : ',stock_list[1:10])
    for each_dir in stock_list[1:]:
        each_file = os.listdir(each_dir)
        ticker = each_dir.split("_KeyStats\\")[1]
        if len(each_file) > 0:
            #parsing time from the html file
            for file in each_file:
                date_stamp = time.strptime(file, '%Y%m%d%H%M%S.html')
                unix_time = time.mktime(date_stamp)
                #print(date_stamp, unix_time)
                full_file_path = each_dir+'/'+file
                source = open(full_file_path, 'r').read()
                value = source.split(gather+':</td><td class="yfnc_tabledata1">')[1].split('</td>')[0]
                print(ticker+":", value)
                time.sleep(0.1)

Key_Stats()
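For what it's worth, str.split() returns a single-element list when the separator never occurs, so source.split(...)[1] raises IndexError for any saved page that lacks the exact gather string (the markup likely varies between the saved files). A guarded version of the parsing step, as a sketch:

parts = source.split(gather+':</td><td class="yfnc_tabledata1">')
if len(parts) > 1:
    value = parts[1].split('</td>')[0]
    print(ticker+":", value)
else:
    # separator missing in this file; skip it instead of crashing
    print(ticker+": pattern not found, skipping")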

python zip extract with timestamp under Windows [duplicate]

I'm trying to extract files from a zip file using Python 2.7.1 (on Windows, fyi) and each of my attempts shows extracted files with Modified Date = time of extraction (which is incorrect).
import os,zipfile

outDirectory = 'C:\\_TEMP\\'
inFile = 'test.zip'

fh = open(os.path.join(outDirectory,inFile),'rb')
z = zipfile.ZipFile(fh)
for name in z.namelist():
    z.extract(name,outDirectory)
fh.close()
I also tried using the .extractall method, with the same results.
import os,zipfile

outDirectory = 'C:\\_TEMP\\'
inFile = 'test.zip'

zFile = zipfile.ZipFile(os.path.join(outDirectory,inFile))
zFile.extractall(outDirectory)
Can anyone tell me what I'm doing wrong?
I'd like to think this is possible without having to post-correct the modified time per How do I change the file creation date of a Windows file?.
Well, it does take a little post-processing, but it's not that bad:
import os
import zipfile
import time

outDirectory = 'C:\\TEMP\\'
inFile = 'test.zip'

fh = open(os.path.join(outDirectory,inFile),'rb')
z = zipfile.ZipFile(fh)
for f in z.infolist():
    name, date_time = f.filename, f.date_time
    name = os.path.join(outDirectory, name)
    with open(name, 'wb') as outFile:
        outFile.write(z.open(f).read())
    date_time = time.mktime(date_time + (0, 0, -1))
    os.utime(name, (date_time, date_time))
Okay, maybe it is that bad.
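A note on the (0, 0, -1) idiom above: ZipInfo.date_time is a six-tuple (year, month, day, hours, minutes, seconds), while time.mktime() expects the nine-field struct_time layout, so the extra tuple pads the weekday and day-of-year slots (mktime ignores them) and sets the DST flag to -1, meaning "unknown, let the C library decide". A tiny standalone example:

import time

# ZipInfo.date_time layout: (year, month, day, hour, minute, second)
date_time = (2021, 1, 21, 13, 37, 39)
# pad wday and yday (ignored by mktime), isdst = -1 (DST unknown)
epoch = time.mktime(date_time + (0, 0, -1))
print(epoch)  # seconds since the epoch, interpreted as local time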
Based on Jia103's answer, I have developed a function (using Python 2.7.14) which preserves directory and file dates AFTER everything has been extracted. This isolates any ugliness in the function, and you can also use zipfile.ZipFile.extractall() or whatever zip extract method you want:
import time
import zipfile
import os

# Restores the timestamps of zipfile contents.
def RestoreTimestampsOfZipContents(zipname, extract_dir):
    for f in zipfile.ZipFile(zipname, 'r').infolist():
        # path to this extracted f-item
        fullpath = os.path.join(extract_dir, f.filename)
        # still need to adjust the dt o/w item will have the current dt
        date_time = time.mktime(f.date_time + (0, 0, -1))
        # update dt
        os.utime(fullpath, (date_time, date_time))
To preserve dates, just call this function after your extract is done.
Here's an example, from a script I wrote to zip/unzip game save directories:
z = zipfile.ZipFile(zipname, 'r')
print 'I have opened zipfile %s, ready to extract into %s' \
    % (zipname, gamedir)
try: os.makedirs(gamedir)
except: pass # Most of the time dir already exists
z.extractall(gamedir)
RestoreTimestampsOfZipContents(zipname, gamedir) #<-- USED
print '%s zip extract done' % GameName[game]
Thanks everyone for your previous answers!
Based on Ethan Fuman's answer, I have developed this version (using Python 2.6.6) which is a little more concise:
zf = ZipFile('archive.zip', 'r')
for zi in zf.infolist():
    zf.extract(zi)
    date_time = time.mktime(zi.date_time + (0, 0, -1))
    os.utime(zi.filename, (date_time, date_time))
zf.close()
This extracts to the current working directory and uses the ZipFile.extract() method to write the data instead of creating the file itself.
Based on Ber's answer, I have developed this version (using Python 2.7.11), which also accounts for directory mod dates.
from os import path, utime
from sys import exit
from time import mktime
from zipfile import ZipFile

def unzip(zipfile, outDirectory):
    dirs = {}
    with ZipFile(zipfile, 'r') as z:
        for f in z.infolist():
            name, date_time = f.filename, f.date_time
            name = path.join(outDirectory, name)
            z.extract(f, outDirectory)
            # still need to adjust the dt o/w item will have the current dt
            date_time = mktime(f.date_time + (0, 0, -1))
            if path.isdir(name):
                # changes to dir dt will have no effect right now since files are
                # being created inside of it; hold the dt and apply it later
                dirs[name] = date_time
            else:
                utime(name, (date_time, date_time))
    # done creating files, now update dir dt
    for name in dirs:
        date_time = dirs[name]
        utime(name, (date_time, date_time))

if __name__ == "__main__":
    unzip('archive.zip', 'out')
    exit(0)
Since directories are being modified as the extracted files are being created inside them, there appears to be no point in setting their dates with os.utime until after the extraction has completed, so this version caches the directory names and their timestamps till the very end.

no output generated from this simple python code?

Am I missing anything here? Why is this code not outputting data to the file I opened? Any ideas?
The following is the essential part of the entire code; it runs without errors but does not output data to the file.
#! /usr/bin/python

#Basic imports
import sys
from time import sleep
from datetime import datetime,date,time
import numpy as np

#Create Bridge objects
try:
    bridge_1 = Bridge()
    outfile = open("prototype.csv", "a")
    # Initialize all sensors and discard the readings
    lc1_ini = bridge_1.getBridgeValue(0) * 2674.0 - 210.7
    sleep(1)
    lc1 = bridge_1.getBridgeValue(0) * 2674.0 - 210.7
    # create empty array to store the converted digital data
    readings_lc1 = np.empty([])
    avg_lc1 = np.empty([])
    max_samples = 3
    readings_lc1 = np.append(readings_lc1 , lc1 )
    if len(readings_lc1) == max_samples:
        avg_lc1 = np.mean(readings_lc1[1:])
        #Write the data to the text file
        outfile.write(str(datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')) + "," + str(round(lc1,2)) + "\n")
    outfile.close()
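A guess, since only an excerpt is shown, but two things stand out. np.empty([]) builds a zero-dimensional array holding one uninitialized value, not an empty list; after one np.append the array already has two elements, so with a single sample in this excerpt len(readings_lc1) is 2 and never equals max_samples of 3, which means the outfile.write line never executes. And if the process dies before outfile.close() runs, anything still buffered never reaches disk. A small demonstration of the array pitfall:

import numpy as np

readings = np.empty([])              # 0-d array holding one garbage value
readings = np.append(readings, 1.0)
print(len(readings))                 # 2: the garbage value plus the sample

readings = np.array([])              # a genuinely empty 1-d array instead
readings = np.append(readings, 1.0)
print(len(readings))                 # 1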

Using python script to search in multiple files and outputting an individual file for each one

I am trying to get a program up and running that takes astronomical data files with the extension .fits, takes all of the files with that extension in a folder, searches them for specific header information, and subsequently places it into a text file corresponding to each file. I am using a while loop, and please forgive me if this code is badly formatted, it is my first time using Python! My main problem is that I can only get the program to read one file before it closes itself.
#!/usr/bin/env python
#This code properly imports all '.fits' files in a specified directory and
#outputs them into a .txt format that allows several headers and their contained
#data to be read.
import copy
import sys
import pyfits
import string
import glob
import os.path
import fnmatch
import numpy as np

DIR = raw_input("Please input a valid directory : ") #-----> This prompts for input from the user to find the '.fits' files
os.chdir(DIR)

initialcheck = 0 #Initiates the global counter for the number of '.fits' files in the specified directory
targetcheck = 0 #Initiates the global counter for the amount of files that have been processed

def checkinitial(TD):
    #This counts the number of '.fits' files in your directory
    for files in glob.iglob('*.fits'):
        check = len(glob.glob1(TD,"*.fits"))
        global initialcheck
        initialcheck = check
    if initialcheck == 0:
        print 'There are no .FITS files in this directory! Try Again...'
        sys.exit()
    return initialcheck

def sorter(TD, targcheck, inicheck):
    #This function will call the two counters and compare them until the number of
    #processed files is greater than the files in the directory, thereby finishing the loop
    global initialcheck
    inicheck = initialcheck
    global targetcheck
    targcheck = targetcheck
    while targcheck <= inicheck:
        os.walk(TD)
        for allfiles in glob.iglob('*.fits'):
            print allfiles #This prints out the filenames the program is currently processing
            with pyfits.open(allfiles) as HDU:
                #This block outlines all of the search terms in their respective headers; you will
                #need to set the indices below to search in the correct header for the specified
                #term you are looking for, however no alterations to the header definitions should
                #be made.
                HDU_HD_0 = HDU[0].header
                HDU_HD_1 = HDU[1].header
                #HDU_HD_2 = HDU[2].header -----> Not usually needed, can be activated if data from this header is required
                #HDU_HD_3 = HDU[3].header -----> Use this if the '.fits' file contains a third header (unlikely but possible)
                KeplerIDIndex = HDU_HD_0.index('KEPLERID')
                ChannelIndex = HDU_HD_0.index('SKYGROUP')
                TTYPE1Index = HDU_HD_1.index('TTYPE1')
                TTYPE8Index = HDU_HD_1.index('TTYPE8')
                TTYPE9Index = HDU_HD_1.index('TTYPE9')
                TTYPE11Index = HDU_HD_1.index('TTYPE11')
                TTYPE12Index = HDU_HD_1.index('TTYPE12')
                TTYPE13Index = HDU_HD_1.index('TTYPE13')
                TTYPE14Index = HDU_HD_1.index('TTYPE14')
                TUNIT1Index = HDU_HD_1.index('TUNIT1')
                TUNIT8Index = HDU_HD_1.index('TUNIT8')
                TUNIT9Index = HDU_HD_1.index('TUNIT9')
                TUNIT11Index = HDU_HD_1.index('TUNIT11')
                TUNIT12Index = HDU_HD_1.index('TUNIT12')
                TUNIT13Index = HDU_HD_1.index('TUNIT13')
                TUNIT14Index = HDU_HD_1.index('TUNIT14')
                #The below variables are an index search for the data found in the specified indices
                #above, allowing the data to be found in the numpy array that '.fits' files use
                File_Data_KID = list( HDU_HD_0[i] for i in [KeplerIDIndex])
                File_Data_CHAN = list( HDU_HD_0[i] for i in [ChannelIndex])
                Astro_Data_1 = list( HDU_HD_1[i] for i in [TTYPE1Index])
                Astro_Data_8 = list( HDU_HD_1[i] for i in [TTYPE8Index])
                Astro_Data_9 = list( HDU_HD_1[i] for i in [TTYPE9Index])
                Astro_Data_11 = list( HDU_HD_1[i] for i in [TTYPE11Index])
                Astro_Data_12 = list( HDU_HD_1[i] for i in [TTYPE12Index])
                Astro_Data_13 = list( HDU_HD_1[i] for i in [TTYPE13Index])
                Astro_Data_14 = list( HDU_HD_1[i] for i in [TTYPE14Index])
                Astro_Data_Unit_1 = list( HDU_HD_1[i] for i in [TUNIT1Index])
                Astro_Data_Unit_8 = list( HDU_HD_1[i] for i in [TUNIT8Index])
                Astro_Data_Unit_9 = list( HDU_HD_1[i] for i in [TUNIT9Index])
                Astro_Data_Unit_11 = list( HDU_HD_1[i] for i in [TUNIT11Index])
                Astro_Data_Unit_12 = list( HDU_HD_1[i] for i in [TUNIT12Index])
                Astro_Data_Unit_13 = list( HDU_HD_1[i] for i in [TUNIT13Index])
                Astro_Data_Unit_14 = list( HDU_HD_1[i] for i in [TUNIT14Index])
                HDU.close()
            with open('Processed ' + allfiles + ".txt", "w") as copy:
                targetcheck += 1
                Title1_Format = '{0}-----{1}'.format('Kepler I.D.','Channel')
                Title2_Format = '-{0}--------{1}------------{2}------------{3}------------{4}------------{5}-------------{6}-'.format('TTYPE1','TTYPE8','TTYPE9','TTYPE11','TTYPE12','TTYPE13','TTYPE14')
                File_Format = '{0}--------{1}'.format(File_Data_KID, File_Data_CHAN)
                Astro_Format = '{0}---{1}---{2}---{3}---{4}---{5}---{6}'.format(Astro_Data_1, Astro_Data_8, Astro_Data_9, Astro_Data_11, Astro_Data_12, Astro_Data_13, Astro_Data_14)
                Astro_Format_Units = '{0} {1} {2} {3} {4} {5} {6}'.format(Astro_Data_Unit_1, Astro_Data_Unit_8, Astro_Data_Unit_9, Astro_Data_Unit_11, Astro_Data_Unit_12, Astro_Data_Unit_13, Astro_Data_Unit_14)
                copy.writelines("%s\n" % Title1_Format)
                copy.writelines("%s\n" % File_Format)
                copy.writelines('\n')
                copy.writelines("%s\n" % Title2_Format)
                copy.writelines("%s\n" % Astro_Format)
                copy.writelines('\n')
                copy.writelines("%s\n" % Astro_Format_Units)
                Results = copy
            return Results

checkinitial(DIR)
sorter(DIR, targetcheck, initialcheck)
I think you keep getting confused between a single file and a list of files. Try something like this:
def checkinitial(TD):
    #This counts the number of '.fits' files in your directory
    check = len(glob.glob1(TD,"*.fits"))
    if not check:
        print 'There are no .FITS files in this directory! Try Again...'
        sys.exit()
    return check

def sorter(TD, targcheck, inicheck):
    """This function will call the two counters and compare them until the number of processed
    files is greater than the files in the directory, thereby finishing the loop
    """
    for in_file in glob.iglob(os.path.join(TD,'*.fits')):
        print in_file # This prints out the filenames the program is currently processing
        with pyfits.open(in_file) as HDU:
            # <Process input file HDU here>
            out_file_name = 'Processed_' + os.path.basename(in_file) + ".txt"
            with open(os.path.join(TD, out_file_name), "w") as copy:
                # <Write stuff to your output file copy here>
                pass
