Do I miss anything here? Why does this code not outputting data to the file I opened? Any ideas?
The following is the essential part of the entire code that has passed the complier without errors, but not outputting data to the file.
#! /usr/bin/python
#Basic imports
import sys
from time import sleep
from datetime import datetime,date,time
import numpy as np
#Create Bridge objects
try:
bridge_1 = Bridge()
outfile = open("prototype.csv", "a")
# Initialize all sensors and discard the readings
lc1_ini = bridge_1.getBridgeValue(0) * 2674.0 - 210.7
sleep(1)
lc1 = bridge_1.getBridgeValue(0) * 2674.0 - 210.7
# create empty array to store the converted digital data
readings_lc1 = np.empty([])
avg_lc1 = np.empty([])
max_samples = 3
readings_lc1 = np.append(readings_lc1 , lc1 )
if len(readings_lc1) == max_samples:
avg_lc1 = np.mean(readings_lc1[1:])
#Write the data to the text file
outfile.write(str(datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')) + "," + str(round(lc1,2)) + "\n")
outfile.close()
Related
I don't have much programming skills, but I need to send a output of a command into .csv table. I managed to create this, but it prints only 1st line of the table instead whole table, and I don't know how to procceed futher with turning it into csv.
Any help would be much appreciated.
from __future__ import print_function
from datetime import date
import sys
import os
import time
today1 = date.today().strftime('%Y_%m_%d')
strTime = time.strftime('%Y_%m_%d')
command = 'My command here'
cmd = session.command()
response = cmd.execute(command)
element_group = response.get_output()
table = element_group.groups()[0]
for cell in table[0]:
print(cell.labels()[0] + ' , ' + '\t', end='')
print('\n')
for cell in table[5]:
print(cell.value() + ' , ', end='')
print('\n')
I have tried script in description. I was expecting to print whole table and turning it into .csv file.
I have figured it out. Here is the script I wanted.
from __future__ import print_function
import csv
from datetime import date
import sys
import os
import time
today1 = date.today().strftime('%Y_%m_%d')
strTime = time.strftime('%Y_%m_%d')
command = 'My command here'
cmd = session.command()
response = cmd.execute(command)
element_group = response.get_output()
table = element_group.groups()[0]
header = [cell.labels()[0] for cell in table[0]]
rows = [[cell.value() for cell in row] for row in table]
directory = 'location'
filename = directory + 'filename' + today1 + '.csv'
with open(filename, mode='w') as file:
writer = csv.writer(file)
writer.writerow(header)
writer.writerows(rows)
I am trying to read Blob-Data which is stored in a csv-file. I can import it in string format but if I want to use numpy.frombuffer with dtype='<f4' (fixed to get correct output), I get the error:
line 52, in <module>
data = np.frombuffer(blob_data, dtype='<f4') #<f4
ValueError: buffer size must be a multiple of element size
The code is the following:
import numpy as np
import datetime
import math
import csv
import pandas
from binascii import unhexlify
#import mysql.connector
# from pylab import figure, plot, show, legend, xlabel, ylabel
from matplotlib import pyplot as plt
def read_CSV(dataid):
with open('spectrometer2.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
result = bytes('1',encoding='UTF-8')
for row in csv_reader:
if line_count > 0:
#print(row[0])
if str(dataid) == str(row[0]):
result = row[3][2:-1]
print('FOUND####################################')
print(str(row[3])[2:-1])
break
line_count += 1
print(str(type(result)))
return result
####### MAIN PROGRAM #######
#spectrumRange = np.arange(10, 11011, 200)
spectrumRange = np.arange(8000, 9000, 200) # Auszug zur Leistungsoptimierung im Test
query_init = "SELECT * FROM `spectrometer2` WHERE data_id="
plt.figure()
for spectrum_id in spectrumRange:
spectrometerquery = query_init + str(spectrum_id) + ";"
print("Current data ID: " + str(spectrum_id))
#y_stream = interact_with_MySQL("database_name", spectrometerquery)
blob_data = read_CSV(spectrum_id)
#print(binary2int(blob_data))
if(blob_data != 0):
blob_data = bytes(blob_data,encoding = 'UTF-8')
print(blob_data)
data = np.frombuffer(blob_data, dtype='<f4') #<f4
print(data)
plt.plot(data)
plt.title('Spectrometer 2 data')
legend = []
for x in spectrumRange:
legend.append(('data id:', x))
plt.legend(legend)
plt.show()
I don't know much about the csv-file but it should contain the output of a optical sensor. The sql-statements are commented out because I have to replace them with the csv-file.
Extract of the file (shown in excel):
So I am writing a script that runs when a simulation running in AirSim runs. It is continuously collecting data (theoretically) and writing it to the csv file. However, when I go to the location that it should be saved, there is no file. Like even if there is an error in my method of saving the data, the file itself is not there.
import setup_path
import airsim
import numpy as np
import os
import os.path
import string
import csv
import tempfile
import pprint
import csv
import datetime
client = airsim.MultirotorClient()
client.confirmConnection()
'''
First create a directory for all the csv files to store into.
'''
dirmain = r"C:\AirSimData"
if not os.path.exists(dirmain):
os.mkdir(dirmain)
'''
Create format for file names
'''
run_date_and_time = datetime.datetime.now()
run_date_and_time_string = str(run_date_and_time)
extension = ".csv"
file_name_base = run_date_and_time_string + extension
imu = "imu"
gps = "gps"
magnetometer = "magnetometer"
barometer = "barometer"
gps_file_name = gps + file_name_base
'''Create csv files
'''
gps_file = open(r"gps_file_name",'w')
gps_header = ['lat','lon','alt']
with open(r"gps_file_name",'w') as gpscsvfile:
gpscsvwriter = csv.writer(gpscsvfile)
gpscsvwriter = gpscsvwriter.writerow(gps_header)
gpscsvfile.close()
while True:
#state = client.getMultirotorState()
#s = pprint.pformat(state)
#print("state: %s" % s)
#imu_data = client.getImuData()
#s = pprint.pformat(imu_data)
#print("imu_data: %s" % s)
#barometer_data = client.getBarometerData()
#s = pprint.pformat(barometer_data)
#print("barometer_data: %s" % s)
#magnetometer_data = client.getMagnetometerData()
#s = pprint.pformat(magnetometer_data)
#print("magnetometer_data: %s" % s)
gps_data = client.getGpsData().gnss.geo_point
alt = (gps_data.altitude)
lat = (gps_data.latitude)
lon = (gps_data.longitude)
gps_data_struct = [lat,lon,alt]
with open(r"gps_file_name",'w') as gpscsvfile:
gpscsvwriter = csv.writer(gpscsvfile)
gpscsvwriter = gpscsvwriter.writerow(gps_data_struct)
gpscsvfile.close()
#print("Altitude: %s\nLatitude %s\nLongitude %s" %(alt,lat,lon) )
if False:
break
Here you are creating a file name literally "gps_file_name"
with open(r"gps_file_name",'w') as gpscsvfile:
gpscsvwriter = csv.writer(gpscsvfile)
gpscsvwriter = gpscsvwriter.writerow(gps_header)
You should instead use the variables with the name elements that you created. os.path.join() is a safe way to join filenames with path names.
gps_file_name = gps + file_name_base
output_file = os.path.join(dirmain, gps_file_name)
# Should read something like this "C:\AirSimData\gps2021-01-21 13:37:39.867152.csv"
Then you can use it here.
with open(output_file,'w') as gpscsvfile:
gpscsvwriter = csv.writer(gpscsvfile)
gpscsvwriter = gpscsvwriter.writerow(gps_header)
gpscsvfile.close()
The next problem is that your datetime string contains invalid characters for filename colons (:) can not be used in file names. so you need to re-think that part.
One option could be to use no colons and have your time look like this.
run_date_and_time_string = run_date_and_time.strftime('%y-%m-%d_%H%M%S')
# 'C:\\AirSimData\\gps21-01-21_134531.csv'
I'm having some issues with updating my code. I can run my code and it works fine. But when I make adjustments to the code the output stays the same as when I originally ran the file. If I create a new .py file and just copy and paste the updated code it produces the desired output with the updates. Why is my original file not reflecting the changes in the output?
My specific example is in the code below. The code ran and produced outputs as expected. Then I updated it to add the "sector" and "close" variables. However, the new output did not include the data, just the names in the header. Does it have to do with the .pyc file?
import multiprocessing
import datetime
import re
from progressbar import ProgressBar
import csv
import urllib2
from lxml import etree
def mp_worker(s):
url1 = ("https://research.tdameritrade.com/grid/public/research/stocks/fundamentals?symbol=" + s)
url2 = ("https://research.tdameritrade.com/grid/public/research/stocks/summary?symbol=" + s)
url3 = ("https://research.tdameritrade.com/grid/public/research/stocks/industryposition?symbol=" + s)
htmlparser = etree.HTMLParser()
try:
response3 = urllib2.urlopen(url3)
tree3 = etree.parse(response3, htmlparser)
perf = tree3.xpath("""//*[#id="stock-industrypositionmodule"]/div/div/table/tbody[1]/tr[3]/td[1]/text()""")
if len(perf) > 0:
EPS5yr = tree3.xpath("""//*[#id="stock-industrypositionmodule"]/div/div/table/tbody[2]/tr[4]/td[1]/text()""")
else:
response1 = urllib2.urlopen(url1)
tree1 = etree.parse(response1, htmlparser)
EPS5yr = tree1.xpath("""//*[#id="layout-full"]/div[3]/div/div[3]/section/div/div/div[1]/div/dl/dd[1]/div/label/span/text()""")
response2 = urllib2.urlopen(url2)
tree2 = etree.parse(response2, htmlparser)
EPSttm = tree2.xpath("""//*[#id="stock-summarymodule"]/div/div/div[1]/div/div[2]/dl/ul/li[3]/dd/text()""")
sector = tree2.xpath("""//*[#id="layout-header"]/div[1]/div/text()""")
indy = tree2.xpath("""//*[#id="layout-header"]/div[1]/div/a[1]/text()""")
close = tree2.xpath("""//*[#id="stock-quotebar"]/div/div/table/tbody/tr/td[1]/dl/dd/text()""")
except Exception as e:
EPS5yr = 'Error'
EPSttm = 'Error'
perf = 'Error'
indy = 'Error'
close = 'Error'
sector = 'Error'
pass
return s, close, EPS5yr, EPSttm, perf, sector, indy
def mp_handler():
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d_%H%M")
file = ('total_market' + '_' + date +'.csv')
p = multiprocessing.Pool(16)
symbols = {'AABA',
'AAOI',
'AAPL',
'AAWC'}
with open(file, "ab") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
writer.writerow(['Symbol','Price','5 Yr EPS','EPS TTM','52 Wk Perf','Sector','Industry'])
for result in p.imap(mp_worker, symbols):
# (filename, count) tuples from worker
writer.writerow(result)
if __name__=='__main__':
mp_handler()
Though this is an old question I'd like to provide another possible solution. If the module is something you installed (e.g you have a setup.py file and you've pip installed your project) then uninstalling the module could fix it. This turned out to solve my problem when I ran into it.
With the following code I'm trying to grab data from a website every 5 mins, timestamp it, calculate its logn return and append all that to a csv file.
Grabbing the data, time stamping it and appending to csv works, but when I try to figure out how to include the log return I'm kind of stuck.
import time
from time import strftime, gmtime
import numpy as np
import urllib2
from urllib2 import urlopen
from math import log
coiAr = []
dateAr = []
logReAr = []
def mcapGrab():
while True:
try:
sourceCode = urllib2.urlopen('http://coinmarketcap.com').read()
mcapUSD = sourceCode.split('<strong>Total Market Cap: <span id="total-marketcap" data-usd="')[1].split('"')[0]
coiAr.append(float(mcapUSD.replace(',','')))
date = strftime('%d %b %Y %H:%M:%S', gmtime())
dateAr.append(date)
# if len(coiAr) > 0:
# indexLog = 1
# logRe = log(coiAr[indexLog]/coiAr[indexLog-1])
# logReAr.append(logRe)
# indexLog += 1
# else:
# logReAr.append(0)
for eachMcap in coiAr:
saveLine = date+','+str(eachMcap)+'\n'
saveFile = open('mcapdata.csv', 'a')
saveFile.write(saveLine)
saveFile.close()
s = 0
print dateAr[s]+','+str(coiAr[s])
time.sleep(300)
s+= 1
except Exception, e:
print 'Failed to grab market cap', str(e)
mcapGrab()
I've commented out the section where I attempt to calc and append log return but doesn't work.
Any help would be appreciated!
Don't use global lists; just write each entry to the file as you find it. Using the csv module would make this all a bit easier still:
import csv
sourceCode = urllib2.urlopen('http://coinmarketcap.com').read()
mcapUSD = sourceCode.split('<strong>Total Market Cap: <span id="total-marketcap" data-usd="')[1].split('"')[0]
mcap = float(mcapUSD.replace(',','')
# read previous value from the CSV first
with open('mcapdata.csv', 'rb') as infh:
last = None
for row in csv.reader(infh):
last = row[1] # second column
# Now calculate the log value based on the last value
logRe = log(mcap/float(last))
# write new values
with open(mcapdata.csv', 'ab') as outfh:
date = strftime('%d %b %Y %H:%M:%S', gmtime())
csv.writer(outfh).writerow([date, mcap, logRe])
This code will read the mcapdata.csv file, picking out just the last value written to it. You could instead also keep all of the rows in memory and just picking out the last entry in a list of lists.