This is a question for the astronomy-minded folks on here.
I am an amateur astrophotographer developing a Python script to automate my photography of next year's total solar eclipse, so that I may enjoy the eclipse with my own eyes while my DSLR clicks away. Here's the script I've developed so far. It uses digicamcontrol to control the camera.
Right now the script only automates the partial phase of the eclipse (first contact, C1), using the eclipse timings in UTC (and my PC's clock). But a thought occurred to me: what if I can't connect to the internet to look up the exact timing of the eclipse for my location? I'd like to be able to generate those times myself. Is there an efficient way to use astropy for this task? Thanks in advance.
import digiCamControlPython as dccp
import time
from astropy.time import Time

def PartialEclipse(start_time: str, end_time: str):
    camera = dccp.Camera()
    camera.setIso(100)
    camera.setShutterspeed("1/50")
    camera.setFolder(r"C:\Users\My_Name\Pictures\digiCamControl")

    # Set the target capture times in astropy Time format
    partial_eclipse_start = Time(start_time, format='isot')
    partial_eclipse_end = Time(end_time, format='isot')

    # Wait until the capture time (re-check the clock on each pass)
    while Time.now() < partial_eclipse_start:
        time.sleep(1)

    # Start capturing images
    while Time.now() < partial_eclipse_end:
        camera.capture()
        time.sleep(30)  # Capture an image every 30 seconds

PartialEclipse("2024-04-08T17:12:13", "2024-04-08T18:29:24")  # partial eclipse start and T-15s before totality
EDIT: In the event anyone ever looks at this question, I did make some progress on this.
import numpy as np
import astropy.units as u
from astropy.coordinates import solar_system_ephemeris, AltAz, EarthLocation, SkyCoord
from astropy.coordinates import get_body, get_moon, get_sun
from astropy.time import Time

myLocation = EarthLocation(lat=26*u.deg, lon=-80*u.deg, height=0*u.m)

# set the time step (how often to check for a solar eclipse, in seconds)
time_step = 3600  # 1 hour

# set the number of days to check for a solar eclipse
num_days = 365

# set the start and end times to check for a solar eclipse
start_time = Time.now()
end_time = start_time + num_days * u.day

# initialize a list to store the times of a solar eclipse
eclipse_times = []

# loop over the desired time range, checking for a solar eclipse every time_step seconds
with solar_system_ephemeris.set('jpl'):
    for t in np.arange(start_time.unix, end_time.unix, time_step):
        time = Time(t, format='unix')
        moon = get_body('moon', time, myLocation)
        sun = get_body('sun', time, myLocation)
        sun_coord = SkyCoord(sun.ra, sun.dec, sun.distance, frame='icrs')
        moon_coord = SkyCoord(moon.ra, moon.dec, moon.distance, frame='icrs')

        # check if the angular separation between the moon and sun is close to zero
        angular_separation = moon_coord.separation(sun_coord)
        if angular_separation < 0.6 * u.deg:  # elongation where the partial eclipse begins
            eclipse_times.append(time)

# print the time of the next solar eclipse
if len(eclipse_times) > 0:
    print("The next solar eclipse is at: ", eclipse_times[0].iso)
else:
    print("No solar eclipses found in the specified time range.")
I think your approach is really good! If you want to increase the accuracy of your start time prediction without using a lot more computational power, you can use scipy.optimize.root_scalar to refine the start time you found.
In my solution below, I've defined a function called distance_contact() whose root represents the start of the eclipse. This function is zero if the Sun and Moon are barely touching, positive if they are separated, and negative if they are overlapping. Then I define a grid of times with a timestep of 1 hour, similar to your code, and pass it into this function to search for eclipses. It then finds the first time where distance_contact is negative and uses that time and the timestep before it as the search bracket for scipy.optimize.root_scalar.
Also, instead of using 0.6 * u.deg as the separation distance for an eclipse to occur, I've calculated the angular radius of the Sun and Moon for the time argument to distance_contact to make the prediction as accurate as possible.
import numpy as np
import scipy.optimize
import astropy.units as u
import astropy.time
import astropy.constants
import astropy.coordinates

def distance_contact(
        location: astropy.coordinates.EarthLocation,
        time: astropy.time.Time,
        eclipse_type: str,
) -> u.Quantity:

    radius_sun = astropy.constants.R_sun
    radius_moon = 1737.4 * u.km

    coordinate_sun = astropy.coordinates.get_sun(time)
    coordinate_moon = astropy.coordinates.get_moon(time)

    frame_local = astropy.coordinates.AltAz(obstime=time, location=location)
    alt_az_sun = coordinate_sun.transform_to(frame_local)
    alt_az_moon = coordinate_moon.transform_to(frame_local)

    angular_radius_sun = np.arctan2(radius_sun, alt_az_sun.distance).to(u.deg)
    angular_radius_moon = np.arctan2(radius_moon, alt_az_moon.distance).to(u.deg)

    if eclipse_type == 'total':
        separation_max = angular_radius_moon - angular_radius_sun
    elif eclipse_type == 'partial':
        separation_max = angular_radius_moon + angular_radius_sun
    else:
        raise ValueError("Unknown eclipse type")

    return (alt_az_moon.separation(alt_az_sun).deg * u.deg) - separation_max

def calc_time_start(
        location: astropy.coordinates.EarthLocation,
        time_search_start: astropy.time.Time,
        time_search_stop: astropy.time.Time,
        eclipse_type: str = 'partial',
) -> astropy.time.Time:

    astropy.coordinates.solar_system_ephemeris.set("de430")

    # If we're only looking for a partial eclipse, we can accept a coarser search grid
    if eclipse_type == "partial":
        step = 1 * u.hr
    elif eclipse_type == "total":
        step = 1 * u.min
    else:
        raise ValueError("Unknown eclipse type")

    # Define a grid of times to search for eclipses
    time = astropy.time.Time(np.arange(time_search_start, time_search_stop, step=step))

    # Find the times that are during an eclipse
    mask_eclipse = distance_contact(location=location, time=time, eclipse_type=eclipse_type) < 0

    # Find the index of the first time that an eclipse is occurring
    index_start = np.argmax(mask_eclipse)

    # Search around that time to find when the eclipse actually starts
    time_eclipse_start = scipy.optimize.root_scalar(
        f=lambda t: distance_contact(location, astropy.time.Time(t, format="unix"), eclipse_type=eclipse_type).value,
        bracket=[time[index_start - 1].unix, time[index_start].unix],
    ).root
    time_eclipse_start = astropy.time.Time(time_eclipse_start, format="unix")

    return time_eclipse_start

def test_calc_time_start():
    location = astropy.coordinates.EarthLocation(lat=26 * u.deg, lon=-80 * u.deg, height=0 * u.m)
    eclipse_type = 'partial'
    time_start = calc_time_start(
        location=location,
        time_search_start=astropy.time.Time.now(),
        time_search_stop=astropy.time.Time.now() + 0.9 * u.yr,
        eclipse_type=eclipse_type,
    )
    print(time_start.isot)
which outputs:
2023-10-14T15:57:38.068
I bought a cheap ublox7 GPS dongle and stuck it on my raspberry pi 3. When I looked at the output and tried to stick it into a map program I got weird results. Here is some sample output from the device after parsing with a library called "pynmea2".
$GPGLL,3745.81303,N,12214.62049,W,175033.00,A,D*7C
I did some research about how to convert this output to something useful and I found a formula that involved splitting the number up and dividing it by .6.
Doing GPS Conversion – Degrees to Latitude Longitude and vice versa
So I wrote a Python program to capture and convert all of this, but the output is off by about a mile. I am trying to figure out what I am doing wrong: how could I be this close yet still off by about one mile?
from time import sleep
import io
import pynmea2
import serial
import re

degree_sign = u"\N{DEGREE SIGN}"
ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1.0)
sio = io.TextIOWrapper(io.BufferedRWPair(ser, ser))

while True:
    line = sio.readline()
    msg = pynmea2.parse(line)
    msg = str(msg)
    if re.search("GPGLL", msg):
        raw_nums = re.findall(r'\b\d*\.\d*', msg)
        lat_whole = raw_nums[0]
        lat_part1 = lat_whole[0:2]
        lat_part2 = lat_whole[2:4]
        lat_part2 = int(lat_part2)
        lat_part2 = lat_part2 / .6
        lat_part2 = int(lat_part2)
        lat_part2 = str(lat_part2)
        lat_part3 = lat_whole[5:9]
        lat_part3 = float(lat_part3)
        lat_part3 = lat_part3 / .6
        lat_part3 = round(lat_part3, 0)
        lat_part3 = int(lat_part3)
        lat_part3 = str(lat_part3)
        lon_whole = raw_nums[1]
        lon_part1 = lon_whole[0:3]
        lon_part1 = int(lon_part1)
        lon_part1 = -lon_part1
        lon_part1 = str(lon_part1)
        lon_part2 = lon_whole[3:5]
        lon_part2 = int(lon_part2)
        lon_part2 = lon_part2 / .6
        lon_part2 = str(lon_part2)
        lon_part2 = lon_part2[0:2]
        lon_part3 = lon_whole[6:10]
        lon_part3 = float(lon_part3)
        lon_part3 = lon_part3 / .6
        lon_part3 = round(lon_part3, 0)
        lon_part3 = int(lon_part3)
        lon_part3 = str(lon_part3)
        print(lat_part1 + "." + lat_part2 + lat_part3 + ",", lon_part1 + "." + lon_part2 + lon_part3)
        print(lat_part1 + degree_sign + lat_part2 + "'" + lat_part3 + "\"" + "N", lon_part1 + degree_sign + lon_part2 + "'" + lon_part3 + "\"" + "W")
        sleep(1)
Here is the list that regex generated using the pynmea2 output:
['3745.81246', '12214.61512', '224329.00'] assigned to raw_nums.
Output from the script:
37.7513540, -122.2310268
37°75'13540"N -122°23'10268"W
Entering the first line of output into Google Maps brings up a place near me but about a mile away; the second line doesn't work on Google Maps for some reason, but it does work on Apple Maps.
My questions:
I know there must be at least 100 better ways to write this code, do you have suggestions for getting there quicker?
Does the formula make sense? Am I applying it correctly?
Do you see a reason why this should return a result that is close but no cigar?
Do you know why the second line of output would not work as input into google maps?
What accuracy should I expect from a ublox 7 GPS dongle I got from Amazon for $12?
Thanks in advance, I really appreciate it.
Update: I looked up my address on gps coordinates conversion
and the latitude they show for my address is 3745.50084 while my gps is reporting 3745.81246. So it just seems like I am starting with bad data...
If your parsed string from the GPS device is always of the form you specified, you can simply split the string on the commas with split_msg = msg.split(","). Then your lat will be split_msg[1] and your long split_msg[3], with indexes 2 and 4 being the hemisphere letters (N/S and E/W).
The lat is provided as DDmm.mm and long is provided as DDDmm.mm, which you seem to have captured above. So 3745.81246 would be 37 degrees and 45.81246 minutes. You can take the decimal portion of the minutes (i.e. 0.81246) and multiply times 60 to get seconds. So you would get 37 degrees, 45 minutes, and 48.75 seconds. As a sanity check, minutes and seconds should always be less than 60 as either of them being 60 would increment the next value (e.g. 60 minutes in a degree, 60 seconds in a minute).
To convert the minutes to a decimal degree number, simply divide the minutes number by 60 (45.81246/60=0.763541) then add that to your degrees. So 3745.81246 would become 37.763541.
So within the if statement:
import math  # needed for math.floor; place this at the top of your script
split_msg = msg.split(",")
lat, lat_dir, long, long_dir = split_msg[1:5]
lat_d, lat_m = float(lat[:2]), float(lat[2:])
long_d, long_m = float(long[:3]), float(long[3:])
lat_dec = lat_d + lat_m/60
long_dec = long_d + long_m/60
lat_min = math.floor(lat_m)
lat_sec = 60*(lat_m - lat_min)
long_min = math.floor(long_m)
long_sec = 60*(long_m - long_min)
print(f"{lat_dec} {lat_dir}, {long_dec} {long_dir}")
print(f"{lat_d}{degree_sign} {lat_min}' {lat_sec}\" {lat_dir}, {long_d}{degree_sign} {long_min}' {long_sec}\" {long_dir}")
I have not tested the above code, but this is the general way I would approach this problem.
Since you're using the pynmea2 library you can make use of the object properties to ease the subsequent steps.
import pynmea2
line = "$GPGLL,3745.81303,N,12214.62049,W,175033.00,A,D*7C"
nmeaobj = pynmea2.parse(line)
coord = f'{nmeaobj.latitude} {nmeaobj.longitude}'
print(coord)
# 37.763551 -122.243675
The decimal degree difference between the script output and the library is around 0.012, which is similar to the precision length (1.1132 km) cited in the Degree precision versus length table. This would explain why you are seeing a discrepancy of about a mile.
abs(-122.243675 - -122.2310268)
0.012648200000000998
abs(37.7635505 - 37.7513540)
0.01219650000000172
You could use the formula cited in the previous link to convert from decimal degree to DMS components, and this would yield a valid location. But notice that the directions of the coordinate (NS/WE) were left out of the final string formation.
def dd_to_dms(coord):
d = int(coord)
abs_d = abs(coord-d)
m = int(60 * abs_d)
s = 3600 * abs_d - 60 * m
return d,m,s
lat = '''%02d°%02d'%07.4f"''' % dd_to_dms(nmeaobj.latitude)
lon = '''%02d°%02d'%07.4f"''' % dd_to_dms(nmeaobj.longitude)
print(f'{lat} {lon}')
# 37°45'48.7818" -122°14'37.2294"
The second line of output would not work as input to Google Maps because, as already mentioned, minutes and seconds should always be less than 60. Moreover, attaching the hemisphere letters to negative degree values can (depending on the parser) make you "walk" in the opposite direction of the desired location, because either the minus sign or the N/S/E/W letter ends up being ignored; in Google Maps' case, it simply does not understand the coordinate.
37°45'48.7818" -122°14'37.2294" # works
37°45'48.7818"N -122°14'37.2294"W # doesn't work
37°45'48.7818"N 122°14'37.2294"W # works
I am using NI DAQ USB-6210.
I am trying to write a program that generates a counter output with Python.
import nidaqmx
from nidaqmx.constants import AcquisitionType, FrequencyUnits

rate = 1000  # pulse frequency in Hz (example value)

Counter1 = nidaqmx.Task()
counter1 = Counter1.co_channels.add_co_pulse_chan_freq(
    counter="Dev2/ctr0",
    units=FrequencyUnits.HZ,
    freq=rate)
#counter1.co_pulse_term = "Dev2/PFI7"

Counter1.timing.cfg_implicit_timing(
    sample_mode=AcquisitionType.CONTINUOUS,
    samps_per_chan=1000)

Counter1.start()
h = input('ok ? >>')
Counter1.close()
The NI DAQ works, and the counter signal is currently output on PFI4.
When I uncomment the co_pulse_term line, the signal should be output on PFI7 instead,
but I get the error below.
"DaqError: Destination terminal to be routed could not be found on the device."
My question is: how do I select the terminal I want to use?
Please tell me how to solve this problem.
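Not a definitive answer, but a hedged sketch worth trying: NI-DAQmx routing terminals are normally written as fully qualified names with a leading slash, so the commented-out line might need to look like this (an assumption on my part, not verified on a USB-6210):

# assumption: routing terminals use fully qualified names with a leading slash
counter1.co_pulse_term = "/Dev2/PFI7"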
I'm using ephem for the first time, and having trouble understanding the output of observer.sidereal_time().
I've written a couple scripts to determine solar time from hour angle. The first one uses ephem to compute right ascension and a formula from Meeus' Astronomical Algorithms to get Greenwich mean sidereal time, which can be converted to local mean sidereal with the longitude.
import sys
from datetime import datetime, time, timedelta
import ephem

def hour_angle(dt, longit, latit, elev):
    obs = ephem.Observer()
    obs.date = dt.strftime('%Y/%m/%d %H:%M:%S')
    obs.lon = longit
    obs.lat = latit
    obs.elevation = elev

    sun = ephem.Sun()
    sun.compute(obs)

    # get right ascension
    ra = ephem.degrees(sun.g_ra)

    # get sidereal time at greenwich (AA ch12)
    jd = ephem.julian_date(dt)
    t = (jd - 2451545.0) / 36525
    theta = 280.46061837 + 360.98564736629 * (jd - 2451545) \
            + .000387933 * t**2 - t**3 / 38710000

    # hour angle (AA ch13)
    ha = (theta + longit - ra * 180 / ephem.pi) % 360
    return ha

def main():
    if len(sys.argv) != 6:
        print 'Usage: hour_angle.py [YYYY/MM/DD] [HH:MM:SS] [longitude] [latitude] [elev]'
        sys.exit()
    else:
        dt = datetime.strptime(sys.argv[1] + ' ' + sys.argv[2], '%Y/%m/%d %H:%M:%S')
        longit = float(sys.argv[3])
        latit = float(sys.argv[4])
        elev = float(sys.argv[5])

    # get hour angle
    ha = hour_angle(dt, longit, latit, elev)

    # convert hour angle to timedelta from noon
    days = ha / 360
    if days > 0.5:
        days -= 0.5
    td = timedelta(days=days)

    # make solar time
    solar_time = datetime.combine(dt.date(), time(12)) + td
    print solar_time

if __name__ == '__main__':
    main()
This gives the output I would expect when I plug in some data:
> python hour_angle_ephem.py 2012/11/16 20:34:56 -122.2697 37.8044 3.0
2012-11-16 12:40:54.697115
The second script I wrote calculates right ascension the same way, but uses ephem's sidereal_time() to get the local apparent sidereal time.
import sys
from datetime import datetime, time, timedelta
import math
import ephem

def solartime(observer, sun=ephem.Sun()):
    sun.compute(observer)
    # sidereal time == ra (right ascension) is the highest point (noon)
    t = observer.sidereal_time() - sun.ra
    return ephem.hours(t + ephem.hours('12:00')).norm  # .norm for 0..24

def main():
    if len(sys.argv) != 6:
        print 'Usage: hour_angle.py [YYYY/MM/DD] [HH:MM:SS] [longitude] [latitude] [elev]'
        sys.exit()
    else:
        dt = datetime.strptime(sys.argv[1] + ' ' + sys.argv[2], '%Y/%m/%d %H:%M:%S')
        longit = float(sys.argv[3])
        latit = float(sys.argv[4])
        elev = float(sys.argv[5])

    obs = ephem.Observer()
    obs.date = dt.strftime('%Y/%m/%d %H:%M:%S')
    obs.lon = longit
    obs.lat = latit
    obs.elevation = elev

    solar_time = solartime(obs)
    print solar_time

if __name__ == '__main__':
    main()
This does not get me the output I would expect.
python hour_angle_ephem2.py 2012/11/16 20:34:56 -122.2697 37.8044 3.0
9:47:50.83
AFAIK, the only difference between the two scripts is that the first bases the hour angle on local mean sidereal time, while the second bases it on local apparent sidereal time, which accounts for the nutation of the Earth and should be a very small correction. Instead I'm seeing a difference of about three hours. Can anyone explain what is going on?
When you provide PyEphem with a raw floating-point number where it expects an angle, then it trusts that you have converted the angle to radians first — since it always treats floating point angles as radians, to keep things consistent. But in your second script, you are getting longitudes and latitudes that are expressed in degrees and providing them to PyEphem as though they are in radians. You can see the result if you add a print statement or two to see what the .lon and .lat attributes of your Observer look like:
print observer.lon #--> -7005:32:16.0
print observer.lat #--> 2166:01:57.2
I think that what you instead want to do is simply provide your raw longitude and latitude strings to PyEphem so that it interprets them as human-readable degrees instead of machine-readable radians, by removing the float() calls around argv[3] and argv[4] in your second script. You should then find that it returns a value closer to what you are expecting:
$ python tmp11.py 2012/11/16 20:34:56 -122.2697 37.8044 3.0
12:40:55.59
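As a minimal sketch of that fix (my own illustration; the explicit radians conversion is an alternative I'm adding here, not something from the answer above):

import math
import ephem

obs = ephem.Observer()
# strings are parsed by PyEphem as human-readable degrees
obs.lon = '-122.2697'
obs.lat = '37.8044'
# raw floats are assumed to already be radians, so an equivalent alternative is:
# obs.lon = math.radians(-122.2697)
# obs.lat = math.radians(37.8044)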
/proc/stat shows ticks for user, nice, sys, idle, iowait, irq and sirq like this:
cpu 6214713 286 1216407 121074379 260283 253506 197368 0 0 0
How can I calculate the individual utilizations (in %) for user, nice etc with these values? Like the values that shows in 'top' or 'vmstat'.
This code calculates user utilization spread over all cores.
import os
import time
import multiprocessing

def main():
    jiffy = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
    num_cpu = multiprocessing.cpu_count()

    stat_fd = open('/proc/stat')
    stat_buf = stat_fd.readlines()[0].split()
    user, nice, sys, idle, iowait, irq, sirq = ( float(stat_buf[1]), float(stat_buf[2]),
                                                 float(stat_buf[3]), float(stat_buf[4]),
                                                 float(stat_buf[5]), float(stat_buf[6]),
                                                 float(stat_buf[7]) )
    stat_fd.close()

    time.sleep(1)

    stat_fd = open('/proc/stat')
    stat_buf = stat_fd.readlines()[0].split()
    user_n, nice_n, sys_n, idle_n, iowait_n, irq_n, sirq_n = ( float(stat_buf[1]), float(stat_buf[2]),
                                                               float(stat_buf[3]), float(stat_buf[4]),
                                                               float(stat_buf[5]), float(stat_buf[6]),
                                                               float(stat_buf[7]) )
    stat_fd.close()

    print ((user_n - user) * 100 / jiffy) / num_cpu

if __name__ == '__main__':
    main()
From Documentation/filesystems/proc.txt:
(...) These numbers identify the amount of time the CPU has spent performing
different kinds of work. Time units are in USER_HZ (typically hundredths of a second).
So to figure out utilization in terms of percentages you need to:
Find out what USER_HZ is on the machine
Find out how long it's been since the system booted.
The second one is easy: there is a btime line in that same file which you can use for that. For USER_HZ, check out How to get number of mili seconds per jiffy.
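To make that concrete, here is a minimal sketch (my own illustration, not from the original answer) that sidesteps USER_HZ entirely: sample the aggregate cpu line twice and divide each field's delta by the delta of the total, which is essentially what top does.

import time

FIELDS = ("user", "nice", "system", "idle", "iowait", "irq", "softirq")

def read_cpu_ticks():
    # the first line of /proc/stat is the aggregate "cpu" line
    with open("/proc/stat") as f:
        parts = f.readline().split()
    return [int(x) for x in parts[1:1 + len(FIELDS)]]

before = read_cpu_ticks()
time.sleep(1)
after = read_cpu_ticks()

deltas = [b - a for a, b in zip(before, after)]
total = sum(deltas) or 1  # avoid division by zero on a completely idle sample
for name, delta in zip(FIELDS, deltas):
    print("%-8s %5.1f%%" % (name, 100.0 * delta / total))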
How to retrieve the process start time (or uptime) in python in Linux?
I only know that I can call "ps -p my_process_id -f" and then parse the output. But it is not cool.
By using psutil https://github.com/giampaolo/psutil:
>>> import psutil, os, time
>>> p = psutil.Process(os.getpid())
>>> p.create_time()
1293678383.0799999
>>> time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(p.create_time()))
'2010-12-30 04:06:23'
>>>
...plus it's cross platform, not only Linux.
NB: I am one of the authors of this project.
If you are doing it from within the python program you're trying to measure, you could do something like this:
import time

# at the beginning of the script
startTime = time.time()

# ...

def getUptime():
    """
    Returns the number of seconds since the program started.
    """
    # do return startTime if you just want the process start time
    return time.time() - startTime
Otherwise, you have no choice but to parse ps or go into /proc/pid. A nice bashy way of getting the elapsed time is:
ps -eo pid,etime | grep $YOUR_PID | awk '{print $2}'
This will only print the elapsed time in the following format, so it should be quite easy to parse:
days-HH:MM:SS
(if it's been running for less than a day, it's just HH:MM:SS)
The start time is available like this:
ps -eo pid,stime | grep $YOUR_PID | awk '{print $2}'
Unfortunately, if your process didn't start today, this will only give you the date that it started, rather than the time.
The best way of doing this is to get the elapsed time and the current time and just do a bit of math. The following is a python script that takes a PID as an argument and does the above for you, printing out the start date and time of the process:
import sys
import datetime
import time
import subprocess

# call like this: python startTime.py $PID
pid = sys.argv[1]

proc = subprocess.Popen(['ps', '-eo', 'pid,etime'], stdout=subprocess.PIPE)

# get data from stdout
proc.wait()
results = proc.stdout.readlines()

# parse data (should only be one)
for result in results:
    try:
        result.strip()
        if result.split()[0] == pid:
            pidInfo = result.split()[1]
            # stop after the first one we find
            break
    except IndexError:
        pass  # ignore it
else:
    # didn't find one
    print "Process PID", pid, "doesn't seem to exist!"
    sys.exit(0)

pidInfo = [result.split()[1] for result in results
           if result.split()[0] == pid][0]
pidInfo = pidInfo.partition("-")

if pidInfo[1] == '-':
    # there is a day
    days = int(pidInfo[0])
    rest = pidInfo[2].split(":")
    hours = int(rest[0])
    minutes = int(rest[1])
    seconds = int(rest[2])
else:
    days = 0
    rest = pidInfo[0].split(":")
    if len(rest) == 3:
        hours = int(rest[0])
        minutes = int(rest[1])
        seconds = int(rest[2])
    elif len(rest) == 2:
        hours = 0
        minutes = int(rest[0])
        seconds = int(rest[1])
    else:
        hours = 0
        minutes = 0
        seconds = int(rest[0])

# get the start time
secondsSinceStart = days*24*3600 + hours*3600 + minutes*60 + seconds

# unix time (in seconds) of start
startTime = time.time() - secondsSinceStart

# final result
print "Process started on",
print datetime.datetime.fromtimestamp(startTime).strftime("%a %b %d at %I:%M:%S %p")
man proc says that the 22nd item in /proc/my_process_id/stat is:
starttime %lu
The time in jiffies the process started after system boot.
Your problem now is, how to determine the length of a jiffy and how to determine when the system booted.
The answer for the latter comes still from man proc: it's in /proc/stat, on a line of its own like this:
btime 1270710844
That's a measurement in seconds since Epoch.
The answer for the former I'm not sure about. man 7 time says:
The Software Clock, HZ, and Jiffies
The accuracy of many system calls and timestamps is limited by the resolution of the software clock, a clock maintained by the kernel which measures time in jiffies. The size of a jiffy is determined by the value of the kernel constant HZ. The value of HZ varies across kernel versions and hardware platforms. On x86 the situation is as follows: on kernels up to and including 2.4.x, HZ was 100, giving a jiffy value of 0.01 seconds; starting with 2.6.0, HZ was raised to 1000, giving a jiffy of 0.001 seconds; since kernel 2.6.13, the HZ value is a kernel configuration parameter and can be 100, 250 (the default) or 1000, yielding a jiffies value of, respectively, 0.01, 0.004, or 0.001 seconds.
We need to find HZ, but I have no idea on how I'd go about that from Python except for hoping the value is 250 (as Wikipedia claims is the default).
ps obtains it thus:
/* sysinfo.c init_libproc() */
if(linux_version_code > LINUX_VERSION(2, 4, 0)){
    Hertz = find_elf_note(AT_CLKTCK);
    //error handling
}
old_Hertz_hack(); //ugh
This sounds like a job well done by a very small C module for Python :)
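As an aside (my addition, not part of the original answer): the tick value that actually matters for the /proc counters is USER_HZ rather than the kernel's internal HZ, and Python can read it directly via os.sysconf, which is exactly what the code below does:

import os

# USER_HZ (typically 100), the unit used for the tick counts in /proc
print(os.sysconf(os.sysconf_names['SC_CLK_TCK']))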
Here's code based on badp's answer:
import os
from time import time

HZ = os.sysconf(os.sysconf_names['SC_CLK_TCK'])

def proc_age_secs():
    system_stats = open('/proc/stat').readlines()
    process_stats = open('/proc/self/stat').read().split()
    for line in system_stats:
        if line.startswith('btime'):
            boot_timestamp = int(line.split()[1])
    age_from_boot_jiffies = int(process_stats[21])
    age_from_boot_timestamp = age_from_boot_jiffies / HZ
    age_timestamp = boot_timestamp + age_from_boot_timestamp
    return time() - age_timestamp
I'm not sure if it's right, though. I wrote a test program that calls sleep(5), then ran it, and the output is wrong and varies by a couple of seconds from run to run. This is in a VMware Workstation VM:
if __name__ == '__main__':
    from time import sleep
    sleep(5)
    print proc_age_secs()
The output is:
$ time python test.py
6.19169998169
real 0m5.063s
user 0m0.020s
sys 0m0.036s
import os
import re
from datetime import datetime

def proc_starttime(pid=os.getpid()):
    # https://gist.github.com/westhood/1073585
    p = re.compile(r"^btime (\d+)$", re.MULTILINE)
    with open("/proc/stat") as f:
        m = p.search(f.read())
    btime = int(m.groups()[0])

    clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
    with open("/proc/%d/stat" % pid) as f:
        stime = int(f.read().split()[21]) / clk_tck

    return datetime.fromtimestamp(btime + stime)
you can parse /proc/uptime
>>> uptime, idletime = [float(f) for f in open("/proc/uptime").read().split()]
>>> print uptime
29708.1
>>> print idletime
26484.45
for windows machines, you can probably use wmi
import wmi
c = wmi.WMI()
secs_up = int([uptime.SystemUpTime for uptime in c.Win32_PerfFormattedData_PerfOS_System()][0])
hours_up = secs_up / 3600
print hours_up