Multiprocessing function not writing to file or printing - python

I'm working on a Raspberry Pi (3 B+) making a data collection device and I'm
trying to spawn a process to record the data coming in and write it to a file. I have a function for the writing that works fine when I call it directly.
When I call it using the multiprocessing approach, however, nothing seems to happen. I can see in Linux task monitors that the process does in fact get spawned, but no file gets written, and when I pass it a flag to shut down it doesn't respond, so I end up terminating the process with nothing to show for it.
I've been over this every which way and can't see what I'm doing wrong; can anyone else? In case it's relevant, these are functions inside a parent class, and one of them is meant to spawn another as a separate process.
Code I'm using:
from datetime import datetime, timedelta
import csv
from drivers.IMU_SEN0 import IMU_SEN0
import multiprocessing, os

class IMU_data_logger:
    _output_filename = ''
    _csv_headers = []
    _accelerometer_headers = ['Accelerometer X', 'Accelerometer Y', 'Accelerometer Z']
    _gyroscope_headers = ['Gyroscope X', 'Gyroscope Y', 'Gyroscope Z']
    _magnetometer_headers = ['Bearing']
    _log_accelerometer = False
    _log_gyroscope = False
    _log_magnetometer = False
    IMU = None
    _writer = []
    _run_underway = False
    _process = []
    _stop_value = 0

    def __init__(self, output_filename='/home/pi/blah.csv', log_accelerometer=True, log_gyroscope=True, log_magnetometer=True):
        """Data logging device.
        NOTE! Multiple instances of this class should not use the same IMU devices simultaneously!"""
        self._output_filename = output_filename
        self._log_accelerometer = log_accelerometer
        self._log_gyroscope = log_gyroscope
        self._log_magnetometer = log_magnetometer

    def __del__(self):
        # TODO Update this
        if self._run_underway:  # If there's still a run underway, end it first
            self.end_recording()

    def _set_up(self):
        self.IMU = IMU_SEN0(self._log_accelerometer, self._log_gyroscope, self._log_magnetometer)
        self._set_up_headers()

    def _set_up_headers(self):
        """Set up the headers of the CSV file based on the header substrings at top and the input flags on what will be measured"""
        self._csv_headers = []
        if self._log_accelerometer is not None:
            self._csv_headers += self._accelerometer_headers
        if self._log_gyroscope is not None:
            self._csv_headers += self._gyroscope_headers
        if self._log_magnetometer is not None:
            self._csv_headers += self._magnetometer_headers

    def _record_data(self, frequency, stop_value):
        """Record data function, which takes a recording frequency, in hertz, as an input"""
        self._set_up()  # Run setup in the spawned process
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    previous_read_time = datetime.now()  # Update previous read time
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    if self._log_gyroscope:
                        # Read gyro values
                        gyro = self.IMU.read_gyroscope_values()
                        next_row += [gyro['x'], gyro['y'], gyro['z']]
                    if self._log_magnetometer:
                        # Read magnetometer value
                        b = self.IMU.read_magnetometer_bearing()
                        next_row += b
                    self._writer.writerow(next_row)
            # Close the csv when done
            outcsv.close()

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("Logging Accelerometer:", self._log_accelerometer)
        print("Logging Gyroscope:", self._log_gyroscope)
        print("Logging Magnetometer:", self._log_magnetometer)
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording process that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")
====================================================================
ANSWER: For anyone following up here, it turns out the problem was my use of the VS Code debugger which apparently doesn't work with multiprocessing and was somehow preventing the success of the spawned process. Many thanks to Tomasz Swider below for helping me work through issues and, eventually, find my idiocy. The help was very deeply appreciated!!

I can see a few things wrong in your code:
First, stop_value == 0 will not work, since multiprocessing.Value('i', 0) != 0; change that line to
while stop_value.value == 0
Second, you never update previous_read_time, so it will write readings as fast as it can and you will run out of disk quickly.
Third, use time.sleep(): what you are doing is called busy looping, and it wastes CPU cycles needlessly.
Fourth, terminating with self._stop_value = 1 probably will not work; there must be another way to set that value, perhaps self._stop_value.value = 1.
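To address the third point, the recording loop could yield the CPU between samples, e.g. (a minimal sketch, not the exact loop from the question):

while stop_value.value == 0:
    # ... read the sensors and write a row here ...
    time.sleep(Period)  # sleep roughly one sample period instead of spinning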
Well, here is a piece of example code, based on the code you provided, that is working just fine:
import csv
import multiprocessing
import time
from datetime import datetime, timedelta
from random import randint

class IMU(object):
    @staticmethod
    def read_accelerometer_values():
        return dict(x=randint(0, 100), y=randint(0, 100), z=randint(0, 10))

class Foo(object):
    def __init__(self, output_filename):
        self._output_filename = output_filename
        self._csv_headers = ['xxxx', 'y', 'z']
        self._log_accelerometer = True
        self.IMU = IMU()

    def _record_data(self, frequency, stop_value):
        """Record data function, which takes a recording frequency, in hertz, as an input"""
        # self._set_up()  # Run setup functions for the data collection device and store it in the self.IMU variable
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    previous_read_time = datetime.now()
                    self._writer.writerow(next_row)
            # Close the csv when done
            outcsv.close()

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording process that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")

if __name__ == '__main__':
    foo = Foo('/tmp/foometer.csv')
    foo.start_recording(20)
    time.sleep(5)
    print('Ending recording')
    foo.end_recording()

Related

Read multiple DS18B20 temperature sensors faster using Raspberry Pi

My custom sensor dashboard requests new readings every second.
This worked well, until I hooked up 3 DS18B20 temperature sensors (1-wire protocol, so all on 1 pin), which each take 750ms to provide new data.
This is the class I currently use to read the temperature of each sensor:
# ds18b20.py
# written by Roger Woollett
import os
import glob
import time

class DS18B20:
    # much of this code is lifted from the Adafruit web site
    # This class can be used to access one or more DS18B20 temperature sensors
    # It uses OS supplied drivers and one wire support must be enabled
    # To do this add the line
    # dtoverlay=w1-gpio
    # to the end of /boot/config.txt
    #
    # The DS18B20 has three pins, looking at the flat side with the pins pointing
    # down pin 1 is on the left
    # connect pin 1 to GPIO ground
    # connect pin 2 to GPIO 4 *and* GPIO 3.3V via a 4k8 (4800 ohm) pullup resistor
    # connect pin 3 to GPIO 3.3V
    # You can connect more than one sensor to the same set of pins
    # Only one pullup resistor is required

    def __init__(self):
        # Load required kernel modules
        os.system('modprobe w1-gpio')
        os.system('modprobe w1-therm')
        # Find file names for the sensor(s)
        base_dir = '/sys/bus/w1/devices/'
        device_folder = glob.glob(base_dir + '28*')
        self._num_devices = len(device_folder)
        self._device_file = list()
        i = 0
        while i < self._num_devices:
            self._device_file.append(device_folder[i] + '/w1_slave')
            i += 1

    def _read_temp(self, index):
        # Issue one read to one sensor
        # You should not call this directly
        # First check if this index exists
        if index >= len(self._device_file):
            return False
        f = open(self._device_file[index], 'r')
        data = f.read()
        f.close()
        return data

    def tempC(self, index=0):
        # Call this to get the temperature in degrees C
        # detected by a sensor
        data = self._read_temp(index)
        retries = 3  # was 0 in the original, which meant the retry loop below never ran
        # Check for error
        if data == False:
            return None
        while (not "YES" in data) and (retries > 0):
            # Read failed so try again
            time.sleep(0.1)
            # print('Read Failed', retries)
            data = self._read_temp(index)
            retries -= 1
        if (retries == 0) and (not "YES" in data):
            return None
        (discard, sep, reading) = data.partition(' t=')
        temperature = float(reading) / 1000.0
        if temperature == 85.0:
            # 85 C is the power-on reset value of the sensor, so ignore that reading
            # (the original compared the string `reading` to the int 85000, which never matched)
            return None
        return temperature

    def device_count(self):
        # Call this to see how many sensors have been detected
        return self._num_devices
I already tried returning the previous temperature reading if the current one isn't finished yet; however, this didn't reduce the time it took to read a sensor, so I guess the only way is to do things asynchronously.
I could reduce the precision to reduce the time it takes per reading, but ideally I would read all of the sensors simultaneously on separate threads.
How can I best implement this? Or are there other ways to improve the reading speed of multiple DS18B20 sensors?
Thanks for any insights!
You're facing some limitations introduced by the Linux kernel driver. If you were interacting with the OneWire protocol directly, you would only have a single 750ms read cycle for all three sensors, rather than (3 * 750ms). When speaking the 1-wire protocol directly, you can issue a single "convert temperature" command to all devices on the bus, as described here, and then read all the sensors.
The Linux driver explicitly doesn't support this mode of operation:
If none of the devices are parasite powered it would be possible to convert all the devices at the same time and then go back to read individual sensors. That isn’t currently supported. The driver also doesn’t support reduced precision (which would also reduce the conversion time) when reading values.
That means you're stuck with a 750ms per device read cycle. Your best option is probably placing the sensor reading code in a separate thread, e.g.:
import glob
import threading
import time

# Note that we're inheriting from threading.Thread here;
# see https://docs.python.org/3/library/threading.html
# for more information.
class DS18B20(threading.Thread):
    default_base_dir = "/sys/bus/w1/devices/"

    def __init__(self, base_dir=None):
        super().__init__()
        self._base_dir = base_dir if base_dir else self.default_base_dir
        self.daemon = True
        self.discover()

    def discover(self):
        device_folder = glob.glob(self._base_dir + "28*")
        self._num_devices = len(device_folder)
        self._device_file: list[str] = []
        for i in range(self._num_devices):
            self._device_file.append(device_folder[i] + "/w1_slave")
        self._values: list[float | None] = [None] * self._num_devices
        self._times: list[float] = [0.0] * self._num_devices

    def run(self):
        """Thread entrypoint: read sensors in a loop.

        Calling DS18B20.start() will cause this method to run in
        a separate thread.
        """
        while True:
            for dev in range(self._num_devices):
                self._read_temp(dev)
            # Adjust this value as you see fit, noting that you will never
            # read actual sensor values more often than 750ms * self._num_devices.
            time.sleep(1)

    def _read_temp(self, index):
        for i in range(3):
            with open(self._device_file[index], "r") as f:
                data = f.read()
            if "YES" not in data:
                time.sleep(0.1)
                continue
            discard, sep, reading = data.partition(" t=")
            temp = float(reading) / 1000.0
            self._values[index] = temp
            self._times[index] = time.time()
            break
        else:
            print(f"failed to read device {index}")

    def tempC(self, index=0):
        return self._values[index]

    def device_count(self):
        """Return the number of discovered devices"""
        return self._num_devices
Because this is a thread, you need to .start() it first, so your code would look something like:

d = DS18B20()
d.start()

while True:
    for i in range(d.device_count()):
        print(f'dev {i}: {d.tempC(i)}')
    time.sleep(0.5)
You can call the tempC method as often as you want, because it just returns a value from the _values list. The actual update frequency is controlled by the loop in the run method (and by the minimum cycle time imposed by the sensors).
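Incidentally, the run loop also stores a per-sensor timestamp in _times. If you wanted to detect stale readings, a small helper along these lines could be added to the class (a hypothetical addition, not part of the answer above):

    def age(self, index=0):
        # Hypothetical helper: seconds since the last successful read of
        # sensor `index`, using the _times list updated in _read_temp.
        return time.time() - self._times[index]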

Multithread or multiprocess

So, currently, I am using multiprocessing to run these 3 functions together.
As only tokens changes, is it recommended to switch to multi-threading? (If yes, will it really help performance, e.g. a speed-up? I assume memory use will certainly be lower.)
This is my code:
from database_function import *
from kiteconnect import KiteTicker
import pandas as pd
from datetime import datetime, timedelta
import schedule
import time
from multiprocessing import Process

def tick_A():
    # credentials code here
    tokens = [x[0] for x in db_fetchquery("SELECT zerodha FROM script ORDER BY id ASC LIMIT 50")]  # FETCHING FIRST 50 SCRIPTS' TOKENS
    # print(tokens)

    ##### TO MAKE SURE THE TASK STARTS AFTER 8:59 ONLY ###########
    t = datetime.today()
    future = datetime(t.year, t.month, t.day, 8, 59)
    if ((future - t).total_seconds()) < 0:
        future = datetime(t.year, t.month, t.day, t.hour, t.minute, (t.second + 2))
    time.sleep((future - t).total_seconds())
    ##### TO MAKE SURE THE TASK STARTS AFTER 8:59 ONLY ###########

    def on_ticks(ws, ticks):
        global ltp
        ltp = ticks[0]["last_price"]
        for tick in ticks:
            print(f"{tick['instrument_token']}A")
            db_runquery(f'UPDATE SCRIPT SET ltp = {tick["last_price"]} WHERE zerodha = {tick["instrument_token"]}')  # UPDATING LTP IN DATABASE
            # print(f"{tick['last_price']}")

    def on_connect(ws, response):
        # print(f"response from connect :: {response}")
        # Subscribe to a list of instrument_tokens (TOKENS FETCHED ABOVE WILL BE SUBSCRIBED HERE).
        # logging.debug("on connect: {}".format(response))
        ws.subscribe(tokens)
        ws.set_mode(ws.MODE_LTP, tokens)  # SETTING TOKEN TO TICK MODE (LTP / FULL / QUOTE)

    kws.on_ticks = on_ticks
    kws.on_connect = on_connect
    kws.connect(threaded=True)

    ##### TO STOP THE TASK AFTER 15:32 #######
    end_time = datetime(t.year, t.month, t.day, 15, 32)
    while True:
        schedule.run_pending()
        # time.sleep(1)
        if datetime.now() > end_time:
            break
    ##### TO STOP THE TASK AFTER 15:32 #######

def tick_B():
    # everything remains the same, only the tokens value changes
    tokens = [x[0] for x in db_fetchquery("SELECT zerodha FROM script ORDER BY id ASC OFFSET (50) ROWS FETCH NEXT (50) ROWS ONLY")]

def tick_C():
    # everything remains the same, only the tokens value changes
    tokens = [x[0] for x in db_fetchquery("SELECT zerodha FROM script ORDER BY id ASC OFFSET (100) ROWS FETCH NEXT (50) ROWS ONLY")]

if __name__ == '__main__':
    def runInParallel(*fns):
        proc = []
        for fn in fns:
            p = Process(target=fn)
            p.start()
            proc.append(p)
        for p in proc:
            p.join()

    runInParallel(tick_A, tick_B, tick_C)
Most Python implementations do not have true multi-threading, because they use a global interpreter lock (GIL), so only one thread executes Python bytecode at a time.
For I/O-heavy applications it should not make a difference. But if you need CPU-heavy operations done in parallel (and I see that you use pandas, so the answer is probably yes), you will be better off staying with a multi-process app.
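For reference, a threaded version of the runner could look like the sketch below (assuming the tick_A/tick_B/tick_C functions from the question; any CPU-bound pandas work inside them would still serialize on the GIL):

from threading import Thread

def runInThreads(*fns):
    # Same shape as runInParallel, but with threads instead of processes
    threads = [Thread(target=fn) for fn in fns]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

runInThreads(tick_A, tick_B, tick_C)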

Python multiprocessing finishes the work correctly, but the processes are still alive (Linux)

I use Python multiprocessing to compute some sort of scores on DNA sequences from a large file.
For that I wrote and use the script below.
I use a Linux machine with 48 CPUs in a Python 3.8 environment.
The code works fine, finishes the work correctly, and prints the processing time at the end.
Problem: when I use the htop command, I find that all 48 processes are still alive.
I don't know why, and I don't know what to add to my script to avoid this.
import csv
import sys
import datetime
import concurrent.futures
from itertools import combinations
import psutil
import time

nb_cpu = psutil.cpu_count(logical=False)

def fun_job(seq_1, seq_2):  # seq_i : (id, string)
    start = time.time()
    score_dist = compute_score_dist(seq_1[1], seq_2[1])
    end = time.time()
    return seq_1[0], seq_2[0], score_dist, end - start  # id seq1, id seq2, score, time

def help_fun_job(nested_pair):
    return fun_job(nested_pair[0], nested_pair[1])

def compute_using_multi_processing(list_comb_ids, dict_ids_seqs):
    start = time.perf_counter()
    with concurrent.futures.ProcessPoolExecutor(max_workers=nb_cpu) as executor:
        results = executor.map(help_fun_job,
                               [((pair_ids[0], dict_ids_seqs[pair_ids[0]]), (pair_ids[1], dict_ids_seqs[pair_ids[1]]))
                                for pair_ids in list_comb_ids])
        save_results_to_csv(results)
    finish = time.perf_counter()
    processing_time = str(datetime.timedelta(seconds=round(finish - start, 2)))
    print(f'Processing time finished in {processing_time} hh:mm:ss')

def main():
    print("nb_cpu in this machine : ", nb_cpu)
    file_path = sys.argv[1]
    dict_ids_seqs = get_dict_ids_seqs(file_path)
    list_ids = list(dict_ids_seqs)  # This will convert the dict_keys to a list
    list_combined_ids = list(combinations(list_ids, 2))
    compute_using_multi_processing(list_combined_ids, dict_ids_seqs)

if __name__ == '__main__':
    main()
Thank you for your help.
Edit: adding the complete code for fun_job (after @Booboo's answer)

from Bio import Align

def fun_job(seq_1, seq_2):  # seq_i : (id, string)
    start = time.time()
    aligner = Align.PairwiseAligner()
    aligner.mode = 'global'
    score_dist = aligner.score(seq_1[1], seq_2[1])
    end = time.time()
    return seq_1[0], seq_2[0], score_dist, end - start  # id seq1, id seq2, score, time
When the with ... as executor: block exits, there is an implicit call to executor.shutdown(wait=True). This will wait for all pending futures to be done executing "and the resources associated with the executor have been freed", which presumably includes terminating the processes in the pool (if possible?). Why your program terminates (or does it?), or at least why all the futures have completed executing while the processes have not terminated, is a bit of a mystery. But you haven't provided the code for fun_job, so who can say why this is so?
One thing you might try is to switch to using the multiprocessing.pool.Pool class from the multiprocessing module. It supports a terminate method, which is implicitly called when its with block exits, and which explicitly attempts to terminate all processes in the pool:
# import concurrent.futures
import multiprocessing
...  # etc.

def compute_using_multi_processing(list_comb_ids, dict_ids_seqs):
    start = time.perf_counter()
    with multiprocessing.Pool(processes=nb_cpu) as executor:
        results = executor.map(help_fun_job,
                               [((pair_ids[0], dict_ids_seqs[pair_ids[0]]), (pair_ids[1], dict_ids_seqs[pair_ids[1]]))
                                for pair_ids in list_comb_ids])
        save_results_to_csv(results)
    finish = time.perf_counter()
    processing_time = str(datetime.timedelta(seconds=round(finish - start, 2)))
    print(f'Processing time finished in {processing_time} hh:mm:ss')
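If you want the cleanup steps to be explicit rather than hidden in the with block, the equivalent is roughly the following sketch (job_args stands in for the list comprehension above and is not a name from the original code):

pool = multiprocessing.Pool(processes=nb_cpu)
try:
    results = pool.map(help_fun_job, job_args)
finally:
    pool.terminate()  # what the with block calls on exit; forcibly ends the workers
    pool.join()       # wait for the worker processes to actually exit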

How do I get the same queue into different multiprocessing files?

I see a lot of tutorials on how to use queues, but they always show them implemented in the same file. I'm trying to organize my code files well from the beginning because I anticipate the project becoming very large. How do I get the queue that I initialize in my main file to import into the other function files?
Here is my main file:
import multiprocessing
import queue
from data_handler import data_handler
from get_info import get_memory_info
from get_info import get_cpu_info

if __name__ == '__main__':
    q = queue.Queue()

    getDataHandlerProcess = multiprocessing.Process(target=data_handler(q))
    getMemoryInfoProcess = multiprocessing.Process(target=get_memory_info(q))
    getCPUInfoProcess = multiprocessing.Process(target=get_cpu_info(q))

    getDataHandlerProcess.start()
    getMemoryInfoProcess.start()
    getCPUInfoProcess.start()
    print("DEBUG: All tasks successfully started.")
Here is my producer:
import psutil
import struct
import time
from data_frame import build_frame

def get_cpu_info(q):
    while True:
        cpu_string_data = bytes('', 'utf-8')
        cpu_times = psutil.cpu_percent(interval=0.0, percpu=True)
        for item in cpu_times:
            cpu_string_data = cpu_string_data + struct.pack('<d', item)
        cpu_frame = build_frame(cpu_string_data, 0, 0, -1, -1)
        q.put(cpu_frame)
        print(cpu_frame)
        time.sleep(1.000)

def get_memory_info(q):
    while True:
        memory_string_data = bytes('', 'utf-8')
        virtual_memory = psutil.virtual_memory()
        swap_memory = psutil.swap_memory()
        memory_info = list(virtual_memory + swap_memory)
        for item in memory_info:
            memory_string_data = memory_string_data + struct.pack('<d', item)
        memory_frame = build_frame(memory_string_data, 0, 1, -1, -1)
        q.put(memory_frame)
        print(memory_frame)
        time.sleep(1.000)

def get_disk_info(q):
    while True:
        disk_usage = psutil.disk_usage("/")
        disk_io_counters = psutil.disk_io_counters()
        time.sleep(1.000)
        print(disk_usage)
        print(disk_io_counters)

def get_network_info(q):
    while True:
        net_io_counters = psutil.net_io_counters()
        time.sleep(1.000)
        print(net_io_counters)
And here is my consumer:
def data_handler(q):
    while True:
        next_element = q.get()
        print(next_element)
        print('Item received at data handler queue.')
It is not entirely clear to me what you mean by "How do I get the queue that I initialize in my main file to import into the other function files?".
Normally you pass a queue as an argument to a function and use it within the function's scope, regardless of the file structure, or use any other variable-sharing technique that works for any other data type.
Your code has a few errors, however. First, you shouldn't use queue.Queue with multiprocessing; the multiprocessing module has its own version of that class:
q = multiprocessing.Queue()
It is slower than queue.Queue, but it works for sharing data across processes.
Second, the proper way to create process objects is:
getDataHandlerProcess = multiprocessing.Process(target=data_handler, args=(q,))
Otherwise you are actually calling data_handler(q) in the main thread and trying to assign its return value to the target argument of multiprocessing.Process. Your data_handler function never returns, so the program hangs at this point before multiprocessing even begins. Edit: actually, it probably goes into an infinite wait trying to get an element from an empty queue which will never be filled.
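Putting both fixes together, the main file would look something like this (a minimal sketch keeping the question's module and function names):

import multiprocessing

from data_handler import data_handler
from get_info import get_memory_info
from get_info import get_cpu_info

if __name__ == '__main__':
    q = multiprocessing.Queue()  # process-safe, unlike queue.Queue

    getDataHandlerProcess = multiprocessing.Process(target=data_handler, args=(q,))
    getMemoryInfoProcess = multiprocessing.Process(target=get_memory_info, args=(q,))
    getCPUInfoProcess = multiprocessing.Process(target=get_cpu_info, args=(q,))

    getDataHandlerProcess.start()
    getMemoryInfoProcess.start()
    getCPUInfoProcess.start()
    print("DEBUG: All tasks successfully started.")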

Stopping a function based on a value

I am running a Python script on a Raspberry Pi.
Essentially, I would like a camera to take a picture every 5 seconds, but only if I have set a boolean to true, which gets toggled by a physical button.
Initially I set it to true, and then in my while(True) loop I check whether the variable is set to true, and if so, start taking pictures every 5 seconds. The issue is that if I use something like time.sleep(5), it essentially freezes everything, including the check. Combine that with the fact that I am using debouncing for the button, and it becomes impossible to actually toggle the script, since I would have to press the button exactly after the 5-second wait, right at the value check... I've been searching around and I think the likely solution would involve threading, but I can't wrap my head around it. One workaround I thought of would be to look at the system time and take a picture whenever the seconds are a multiple of 5 (all within the main loop), but that seems a bit sketchy.
Script below:
### Imports
from goprocam import GoProCamera, constants
import board
import digitalio
from adafruit_debouncer import Debouncer
import os
import shutil
import time

### GoPro settings
goproCamera = GoProCamera.GoPro()

### Button settings
pin = digitalio.DigitalInOut(board.D12)
pin.direction = digitalio.Direction.INPUT
pin.pull = digitalio.Pull.UP
switch = Debouncer(pin, interval=0.1)

save = False  # this is the variable

while(True):
    switch.update()
    if switch.fell:
        print("Pressed, toggling value")
        save = not save
    if save:
        goproCamera.take_photo()
        goproCamera.downloadLastMedia()
        time.sleep(5)
Here's something to try:
last_pic_time = 0  # initialize before the loop so the first comparison works

while(True):
    switch.update()
    if switch.fell:
        print("Pressed, toggling value")
        save = not save
    if save:
        current_time = time.time()
        if current_time - last_pic_time >= 5:
            goproCamera.take_photo()
            goproCamera.downloadLastMedia()
            last_pic_time = current_time
Depending on exactly what sort of behavior you want, you may have to fiddle with when and how often time.time() is called.
Cheers!
Maybe something like this?

import threading

def set_interval(func, sec):
    def func_wrapper():
        set_interval(func, sec)
        func()
    t = threading.Timer(sec, func_wrapper)
    t.start()
    return t

We call the function above inside the main loop.
Wrap your while loop content in a function:

def take_photo():
    goproCamera.take_photo()
    goproCamera.downloadLastMedia()

Now we create a flag, initially set to False, to avoid creating multiple threads.
Notice that I did this before the while loop; we just need a starting value here.

active = False

while(True):
    switch.update()
    if switch.fell:
        print("Pressed, toggling value")
        save = not save
    if save:  # we need to start taking photos
        if not active:  # not active, so either this is the first call or save has been toggled back to True
            photo_thread = set_interval(take_photo, 5)  # grabbing a handle to the timer so we can cancel it later when save is set to False
            active = True  # marking as active so this branch is skipped until save is False again
    else:
        try:  # photo_thread may not exist yet, so it is wrapped in a try statement here
            photo_thread.cancel()  # if we have a timer, kill it
            active = False  # so the next time the button is pressed we can create a new one
        except NameError:
            pass
Let me know if it works. =)
What I ended up doing:

### Imports
from goprocam import GoProCamera, constants
import board
import digitalio
from adafruit_debouncer import Debouncer
import os
import time
import threading

### GoPro settings
gopro = GoProCamera.GoPro()

### Button settings
pin = digitalio.DigitalInOut(board.D12)
pin.direction = digitalio.Direction.INPUT
pin.pull = digitalio.Pull.UP
switch = Debouncer(pin, interval=0.1)

### Picture save location
dir_path = os.path.dirname(os.path.realpath(__file__))
new_path = dir_path + "/pictures/"

save = False

### Functions
def takePhoto(e):
    while e.is_set():
        gopro.take_photo()
        gopro.downloadLastMedia()
        fname = '100GOPRO-' + gopro.getMedia().split("/")[-1]
        current_file = dir_path + '/' + fname
        if os.path.isfile(current_file):
            os.replace(current_file, new_path + fname)  # move file; would be cleaner to download the file directly to the right folder, but the API doesn't work the way I thought it did
        time.sleep(5)  # note: e.wait(5) would return immediately while the event is set, so sleep instead

### Initial settings
e = threading.Event()
t1 = threading.Thread(target=takePhoto, args=(e,))

print("Starting script")

while(True):
    switch.update()
    if switch.fell:
        # toggle value
        save = not save
    if save:
        e.set()  # should be taking pictures
    else:
        e.clear()  # not taking pictures
    if not t1.is_alive():  # start a thread if one isn't running yet
        if e.is_set():
            t1 = threading.Thread(target=takePhoto, args=(e,))  # a Thread object can only be started once, so create a fresh one on each restart
            t1.start()
