Sleep after yield inside an object method is not working - Python

I want to monitor my servers every few seconds. I have a generator inside an object method which yields the overall metrics; after that it should wait for a while and check again:
def ping_servers(self):
    while True:
        all_server_metrics = {}
        for server in self.servers:
            response = requests.get(server, timeout=self.timeout_secs)
            server_metrics = {
                'response_time': response.elapsed.total_seconds(),
                'response_code': response.status_code,
            }
            all_server_metrics[server] = server_metrics
        yield json.dumps(all_server_metrics)
        time.sleep(self.period_secs)
Here it does not sleep at all. But if I put time.sleep() at the beginning of the while loop, it sleeps for the given period. What could be the reason it is not sleeping?
This is the code where I call the above method:
monitoring = MonitoringService(period_secs=30, timeout_secs=5)
while True:
    ping_results = next(monitoring.ping_servers())
    print(ping_results)
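For what it's worth, calling monitoring.ping_servers() inside the loop creates a brand-new generator on every iteration, so next() only ever runs the body up to the first yield and the time.sleep() after it is never reached. A minimal sketch of reusing a single generator instead (same MonitoringService as above):

monitoring = MonitoringService(period_secs=30, timeout_secs=5)
pinger = monitoring.ping_servers()  # create the generator once
for ping_results in pinger:  # each iteration resumes after the previous yield, so the sleep runs
    print(ping_results)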

Related

Simpy: Callcenter simulation - inactive call timeout

Currently I am working on a more or less complex call center simulation. I am quite new to SimPy and have a problem with timing out calls if no agent can answer them.
In my simulation I generate calls in 4 different queues. Each of them should have its own call spawn rate.
Additionally I have 4 active agents. Each of them works on multiple queues, but nobody on all 4 queues. And each of them has a separate handling time distribution when working in a certain queue.
Additionally, an available agent should not only be able to take the first available call in a queue; I want to add more detailed logic later about which call to pick. Thus, I add spawning calls to a store (later a filter store) and pull them out in the consume_calls function.
As the call center could operate in an understaffed situation (too few agents), I want to model customer patience as well. So I defined MIN_PATIENCE and MAX_PATIENCE and let customers quit the call if no agent can take it within this time range.
My script looks something like this.
Code updated (2023-02-02): Seems to work now
from typing import Callable, Generator, List, Union

import numpy as np
import pandas as pd
import simpy

RANDOM_SEED = 42
NUM_AGENTS = 4  # Number of agents in the callcenter
NUM_QUEUES = 2
MIN_PATIENCE = 2 * 60
MAX_PATIENCE = 5 * 60
SIM_DURATION = 8 * 60 * 60

RNG = np.random.default_rng(RANDOM_SEED)
i = 0

# Parse config files for agent performance and queue arrival times
agents_config = pd.read_csv("queue_agent_mapping.csv")
agents_config["lambda"] = agents_config["lambda"] * 60
agents_config_idx = agents_config.set_index(["agent_id", "queue_id"])

queue_config = pd.read_csv("call_freq.csv")
queue_config["lambda"] = queue_config["lambda"] * 60
queue_config_idx = queue_config.set_index("queue_id")

callcenter_logging = pd.DataFrame(
    {
        "call_id": [],
        "queue_id": [],
        "received_time": [],
        "agent_id": [],
        "start_time": [],
        "end_time": [],
        "status": [],
    }
)
callcenter_logging = callcenter_logging.set_index("call_id")

open_transactions = pd.DataFrame({"call_id": [], "queue_id": [], "received_time": []})
open_transactions = open_transactions.set_index("call_id")

# Get number of agents in simulation from config file
num_agents = agents_config.drop_duplicates(subset="agent_id")

# Get agent-queue mapping from config file
agents_config_grouped = (
    agents_config.groupby("agent_id").agg({"queue_id": lambda x: list(x)}).reset_index()
)
class Callcenter:
    """Representation of the call center.

    Entry point of the simulation as it starts processes to generate calls and their processing.
    """

    # Variable used to generate unique ids
    call_id = 0

    def __init__(self, env: simpy.Environment):
        self.env = env

    def get_next_call_id(self):
        self.call_id += 1
        return self.call_id

    def run_simulation(self, agents: simpy.FilterStore):
        self.agents = agents
        self.queues = [Queue(env, queue_id=qq) for qq in range(4)]
        self.call_generator = [env.process(queue.generate_calls(self)) for queue in self.queues]
        self.call_accept_consumer = [env.process(queue.consume_calls()) for queue in self.queues]
class Agent:
    """Representation of agents and their global attributes."""

    def __init__(self, agent_id, queue_id):
        self.agent_id = agent_id
        self.allowed_queue_ids = queue_id
class Queue:
    """Representation of call center queues.

    Holds methods to generate and consume calls. Calls are stored in a store.
    """

    def __init__(self, env, queue_id):
        self.env = env
        # Defines the store for calls. Later, a FilterStore should be used. This will enable us to
        # draw calls by special attribute to fine tune the routing.
        self.store = simpy.FilterStore(env)
        self.queue_id = queue_id
        # Get the arrival distribution of calls in the queue from the initial config.
        self.lam = self._get_customer_arrival_distribution()

    def generate_calls(self, callcenter_instance: Callcenter) -> Generator:
        """Generate the calls.

        Calls are then put to the queue's store.
        """
        while True:
            yield self.env.timeout(RNG.poisson(self.lam))
            # Initialize and fill the call object.
            new_call_id = callcenter_instance.get_next_call_id()
            call = Call(queue_id=self.queue_id, call_id=new_call_id, env=env)
            call = call.update_history()
            call.add_open_transaction(status="active")
            # Write call to logging table
            callcenter_logging.loc[call.call_id, ["queue_id", "received_time"]] = [
                call.queue_id,
                self.env.now,
            ]
            # Put call to the queue store.
            self.put_call(call)

    def consume_calls(self) -> Generator:
        """Draw call from queue store and let agents work on them.

        If no agent is found within MIN_PATIENCE and MAX_PATIENCE, drop the call.
        """
        while True:
            # Wait for available agent or drop the call as customer ran out of patience.
            agent = yield agents.get(lambda ag: self.queue_id in ag.allowed_queue_ids)
            # call = yield self.get_call(lambda ca: ca.status == "active")
            call = yield self.get_call(lambda ca: ca.status == "active")
            if call.received_at + call.max_waiting <= call.env.now:
                print(
                    f"customer hung up call {call.call_id} after waiting {call.max_waiting / 60} minutes."
                )
                # The call did not receive an agent in time and ran out of patience.
                call.status = "dropped"
                callcenter_logging.loc[call.call_id, ["end_time", "status"]] = [
                    call.env.now,
                    call.status,
                ]
                print(
                    f"no agent for {call.call_id} after waiting {(call.env.now - call.received_at) / 60} minutes"
                )
            else:
                # Do some logging that an agent took the call.
                print(
                    f"Agent {agent.agent_id} takes {call.call_id} in queue {call.queue_id} at {call.env.now}."
                )
                callcenter_logging.loc[call.call_id, ["queue_id", "agent_id", "start_time"]] = [
                    call.queue_id,
                    agent.agent_id,
                    call.env.now,
                ]
                # Get average handling time from config dataframe. Change this to an Agent class
                # attribute, later.
                ag_lambda = agents_config_idx.loc[agent.agent_id, call.queue_id]["lambda"]
                yield call.env.timeout(RNG.poisson(ag_lambda))  # Let the agent work on the call.
                call.status = "finished"
                # Do some logging
                callcenter_logging.loc[call.call_id, ["end_time", "status"]] = [
                    call.env.now,
                    call.status,
                ]
                print(
                    f"Agent {agent.agent_id} finishes {call.call_id} in queue {call.queue_id} at {call.env.now}."
                )
            # Put the agent back to the agents store. -> Why is this needed? Shouldn't the
            # context manager handle this? But it did not work without this line.
            yield agents.put(agent)

    def _get_customer_arrival_distribution(self) -> float:
        """Returns the mean call arrival time in a given queue_id."""
        return queue_config.loc[self.queue_id, "lambda"]

    def get_call(self, filter_func: Callable):
        """Helper function to get calls by complex filters from the queue store."""
        return self.store.get(filter=filter_func)

    def put_call(self, call):
        """Helper function to put calls to the queue store."""
        self.store.put(call)
        print(f"Call {call.call_id} added to queue {call.queue_id} at {call.env.now}")
class Call(Callcenter):
    """Representation of a call with all its attributes."""

    def __init__(self, env: simpy.Environment, call_id: int, queue_id: int):
        self.env = env
        self.call_id = call_id
        self.queue_id = queue_id
        self.agent_id = None
        self.status = "active"
        self.received_at = env.now
        self.max_waiting = RNG.integers(MIN_PATIENCE, MAX_PATIENCE)
        self.history: List[Union[int, str]] = []

    def update_history(self):
        """Helper to update the call history.

        Needed for tracking calls that are transferred from one queue to another.
        Currently this is not in use.
        """
        self.history.append([self.call_id, self.queue_id, self.agent_id, self.status])
        return self

    def add_open_transaction(self, status="active"):
        """Helper to add a call to the open transactions log.

        Needed for rebalancing logics. Currently this is not in use.
        """
        open_transactions.loc[self.call_id, ["queue_id", "received_time", "status"]] = [
            self.queue_id,
            self.env.now,
            status,
        ]
env = simpy.Environment()

agents = simpy.FilterStore(env, capacity=len(num_agents))
agents.items = [Agent(row.agent_id, row.queue_id) for row in agents_config_grouped.itertuples()]

callcenter = Callcenter(env)
callcenter.run_simulation(agents)

env.run(until=SIM_DURATION)
These are the config files.
# call_freq.csv
queue_name,queue_id,lambda
A,0,2
B,1,4
C,2,3.5
D,3,3
# queue_agent_mapping.csv
queue_id,agent_id,lambda
0,abc11,2
0,abc13,5
0,abc14,2
1,abc11,5
1,abc12,3
1,abc14,12
2,abc12,2
2,abc13,3
3,abc14,3
I set the customer patience to be within 3 to 6 minutes. Nonetheless, if I check the waiting time of calls until they are served, I find a lot of them waiting much longer than 3-6 minutes. To make things even more confusing, I also find some calls being dropped as desired.
import plotly.express as px  # px is used below but its import was not shown in the original snippet

callcenter_logging["wait_time"] = callcenter_logging["start_time"] - callcenter_logging["received_time"]
callcenter_logging["serving_wait"] = callcenter_logging["end_time"] - callcenter_logging["start_time"]
callcenter_logging.head(20)

dropped = (
    callcenter_logging.loc[callcenter_logging["status"] == "dropped", ["queue_id", "status", "end_time"]]
)
dropped = (
    dropped.set_index("end_time")
    .groupby(["queue_id"])
    .expanding()["status"]
    .agg({"cnt_dropped": 'count'})
    .reset_index()
)

fig_waiting = px.line(callcenter_logging, "received_time", "wait_time", color="queue_id")
fig_waiting = fig_waiting.update_traces(connectgaps=True)
fig_waiting.show()

fig_dropped = px.line(dropped, x="end_time", y="cnt_dropped", color="queue_id")
fig_dropped.show()
[waiting time plot]
[dropped calls plot]
I guess my calls are not pulled from the queue store at the right times. But I was not able to understand the exact problem with the code.
Is there anybody who has an idea?
Your patience timeout does not factor in how long a call has already been in the queue. So if a call has been in the queue for 10 minutes when the loop starts and no agents are available, the total wait time for the call when the patience timeout fires will be 10 minutes plus env.timeout(RNG.integers(MIN_PATIENCE, MAX_PATIENCE)).
You need the patience timeout to live in the call object and have the call object remove itself from the queue, or set a patience-expired flag, when the patience timeout expires.
I would change your loop to first get an agent, and then do an inner loop to get a call, ignoring calls with the expired flag.
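A minimal sketch of that idea, assuming the Queue and Call classes from the question (the name patience_process is hypothetical):

def patience_process(env, call):
    # Hypothetical per-call patience timer: runs alongside the queue and flags the
    # call as expired if no agent has picked it up before max_waiting elapses.
    yield env.timeout(call.max_waiting)
    if call.status == "active":
        call.status = "expired"

# In Queue.generate_calls, right after self.put_call(call):
#     self.env.process(patience_process(self.env, call))
# In Queue.consume_calls, the existing filter
#     call = yield self.get_call(lambda ca: ca.status == "active")
# then skips expired calls automatically; dropped calls can be logged from
# patience_process instead of the consumer loop.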

two python files - one file gives json object and another file takes the output as input

I'm new to Python, and apologies if my way of questioning is wrong.
I have two Python files. One file takes as input the other file's output. The problem is that I need to pass the input one item at a time. The first file has a for loop that generates JSON objects one by one, and it should pass each JSON object on inside that loop. That means the second file will be running, take an input from the first file, process it, and then pick up the next incoming JSON output from the first file. This process continues until the first file's loop ends.
pythonone.py
import json, time
from faker import Faker

# Create a Faker object to generate fake data for the Producer
fake = Faker()

def myrandomdata(i, j):
    return fake.random_int(min=1, max=j)

json_obj_list = []
random_ins_id = str(myrandomdata(20000, 10000000))
random_inv_item_id = str(myrandomdata(20000, 10000000))
random_inv_org_id = str(myrandomdata(1000, 100000))
random_loc_id = str(myrandomdata(20000, 100000))
qty = myrandomdata(1, 100)
loc_type_id = myrandomdata(0, 4)

def main():
    for i in range(5):
        json_obj_list = {'ID': random_ins_id,
                         'QTY': qty,
                         'EXT_REF': random_loc_id,
                         'INV_ITEM_ID': random_inv_item_id,
                         'ORG_ID': random_inv_org_id,
                         'SERIAL_NUMBER': loc_type_id
                         }
        json_dump = json.dumps(json_obj_list, indent="\t")
        print(json_dump)
        time.sleep(3)
pythontwo.py
def process_my_data():
    res = pythonone.main()
    # I do some processing
Guide me on how I can achieve this. I am stuck on how to make one file wait while the other processes, then pick the second item, then the third, and stop when the for loop in pythonone.py ends.
You should bring in the threading concept:
create a producer method for producing the JSON,
create a consumer method for consuming that JSON.
A condition variable allows one or more threads to wait until they are notified by another thread.
import json, time
from threading import *
from faker import Faker

# Create Faker object to generate fake data for Producer
fake = Faker()

def myrandomdata(i, j):
    return fake.random_int(min=1, max=j)

li = []
random_ins_id = str(myrandomdata(20000, 10000000))
random_inv_item_id = str(myrandomdata(20000, 10000000))
random_inv_org_id = str(myrandomdata(1000, 100000))
random_loc_id = str(myrandomdata(20000, 100000))
qty = myrandomdata(1, 100)
loc_type_id = myrandomdata(0, 4)

def produce():
    for i in range(3):
        condition_object.acquire()
        json_obj_list = {'ID': random_ins_id,
                         'QTY': qty,
                         'EXT_REF': random_loc_id,
                         'INV_ITEM_ID': random_inv_item_id,
                         'ORG_ID': random_inv_org_id,
                         'SERIAL_NUMBER': loc_type_id
                         }
        print("produced json", json_obj_list)
        li.append(json_obj_list)
        condition_object.notify()
        condition_object.wait()

def consume():
    for i in range(3):
        condition_object.acquire()
        json = li.pop()
        print("the json for consuming ", json)
        condition_object.notify()
        condition_object.wait()

condition_object = Condition()
T1 = Thread(target=produce)
T2 = Thread(target=consume)
T1.start()
T2.start()
The producer acquires the condition object, adds the JSON to a common list, notifies the consumer, and waits to be called back,
while the consumer acquires the condition object, gets the JSON, and notifies the producer to continue from its waiting state while the consumer itself goes into the waiting state.
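For reference, the standard library's queue.Queue implements this handshake internally (the consumer blocks until an item is available), so the same producer/consumer flow can be sketched without managing a condition variable by hand; produce_item here is a hypothetical stand-in for the JSON-building code above:

import queue
import threading

q = queue.Queue()  # thread-safe FIFO channel between producer and consumer
SENTINEL = None    # marker telling the consumer that production is finished

def produce_item(i):
    # hypothetical stand-in for building one JSON dict as in the question
    return {'ID': i, 'QTY': i * 10}

def producer():
    for i in range(3):
        q.put(produce_item(i))
    q.put(SENTINEL)

def consumer():
    while True:
        item = q.get()  # blocks until the producer has put something
        if item is SENTINEL:
            break
        print("consuming", item)

t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer)
t1.start(); t2.start()
t1.join(); t2.join()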
Your main needs to return some data so the caller can use it; also use better names.
You're overwriting json_obj_list at each iteration; you need to collect the items in a list.
You're generating the random numbers once; you need to do it at each iteration.
You're not using i in your myrandomdata, and you don't need Faker for such a random int:
from random import randrange

def myrandomdata(i, j):
    return randrange(i, j + 1)

def generate_items():
    json_obj_list = []
    for i in range(5):
        json_obj_list.append({'ID': myrandomdata(20000, 10000000),
                              'QTY': myrandomdata(1, 100),
                              'EXT_REF': myrandomdata(20000, 100000),
                              'INV_ITEM_ID': myrandomdata(20000, 10000000),
                              'ORG_ID': myrandomdata(1000, 100000),
                              'SERIAL_NUMBER': myrandomdata(0, 4)})
    return json_obj_list

def process_my_data():
    res = pythonone.generate_items()
    for item in res:
        print(type(item), item)
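If the items really have to be handed over one at a time rather than as a complete list, generate_items could also be written as a generator; a minimal sketch under that assumption:

def generate_items():
    # Yield one item at a time; the caller's loop drives the production.
    for i in range(5):
        yield {'ID': myrandomdata(20000, 10000000),
               'QTY': myrandomdata(1, 100),
               'EXT_REF': myrandomdata(20000, 100000),
               'INV_ITEM_ID': myrandomdata(20000, 10000000),
               'ORG_ID': myrandomdata(1000, 100000),
               'SERIAL_NUMBER': myrandomdata(0, 4)}

def process_my_data():
    for item in pythonone.generate_items():  # each iteration pulls the next item
        print(type(item), item)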

How to call a function without blocking the execution

I have been working on a small PoC where I am trying to write an I/O-bound application that executes functions without being blocked. Currently I have created something like this:
import time
import concurrent.futures

found_products = []
site_catalog = [
    "https://www.graffitishop.net/Sneakers",
    "https://www.graffitishop.net/T-shirts",
    "https://www.graffitishop.net/Sweatshirts",
    "https://www.graffitishop.net/Shirts"
]

def threading_feeds():
    # Create own thread for each URL as we want to run concurrent
    with concurrent.futures.ThreadPoolExecutor() as executor:
        executor.map(monitor_feed, site_catalog)

def monitor_feed(link: str) -> None:
    old_payload = product_data(...)
    while True:
        new_payload = product_data(...)
        if old_payload != new_payload:
            for links in new_payload:
                if links not in found_products:
                    logger.info(f'Detected new link -> {found_link} | From -> {link}')
                    # Execute filtering function without blocking, how?
                    filtering(link=found_link)
        else:
            logger.info("Nothing new")
            time.sleep(60)
            continue

def filtering(found_link):
    # More code will be added in the future to handle logical code parts
    ...
    # Test
    time.sleep(60)
Problem: Currently the issue is that whenever we reach the line filtering(link=found_link), the call to filtering(...) sleeps for 60 seconds (this is only mock data; in the future I will have a logical code part instead). What happens then is that monitor_feed stops executing and waits until filtering() is finished.
My question: How can I execute filtering(...) and still continue to loop through monitor_feed without being blocked by the call to filtering(...)?
This is your code with small modifications. Mostly the problem was with wrong variable names (because they are very similar).
To make this clear I use the names executor1 and executor2; executor2 has to be created before while True because it has to exist the whole time the threads are used.
If you have def filtering(filtered_link), then you have to use the same name filtered_link in submit(..., filtered_link=...).
import concurrent.futures
import time

found_products = []
site_catalog = [
    "https://www.graffitishop.net/Sneakers",
    "https://www.graffitishop.net/T-shirts",
    "https://www.graffitishop.net/Sweatshirts",
    "https://www.graffitishop.net/Shirts"
]

def threading_feeds():
    print('[threading_feeds] running')
    # Create own thread for each URL as we want to run concurrent
    with concurrent.futures.ThreadPoolExecutor() as executor1:
        executor1.map(monitor_feed, site_catalog)

def monitor_feed(link: str) -> None:
    print('[monitor_feed] start')
    old_payload = ['old']  # product_data(...)
    # executor has to exist all time
    with concurrent.futures.ThreadPoolExecutor() as executor2:
        while True:
            print('[monitor_feed] run loop')
            new_payload = ['new1', 'new2', 'new3']  # product_data(...)
            if old_payload != new_payload:
                for product_link in new_payload:
                    if product_link not in found_products:
                        print(f'Detected new link -> {product_link} | From -> {link}')
                        executor2.submit(filtering, filtered_link=product_link)
                        #executor2.submit(filtering, product_link)
            print("Continue")
            time.sleep(2)

def filtering(filtered_link):
    # More code will be added in the future to handle logical code parts
    #...
    # Test
    print(f'[filtering]: start: {filtered_link}')
    time.sleep(60)
    print(f'[filtering]: end: {filtered_link}')

# --- start --
threading_feeds()
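A possible variation on the same idea (my sketch, not part of the answer above): instead of one executor per monitor_feed thread, a single module-level pool can receive all filtering jobs, and submit() still returns immediately.

import concurrent.futures
import time

# One shared pool for all filtering jobs; created once so every monitor thread
# can hand work to it without owning its own executor.
filter_pool = concurrent.futures.ThreadPoolExecutor(max_workers=8)

def filtering(filtered_link):
    print(f'[filtering] start: {filtered_link}')
    time.sleep(60)
    print(f'[filtering] end: {filtered_link}')

def handle_new_link(product_link):
    # Returns immediately; filtering runs on a pool thread in the background.
    filter_pool.submit(filtering, product_link)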

Multiprocessing function not writing to file or printing

I'm working on a Raspberry Pi (3 B+) making a data collection device, and I'm trying to spawn a process to record the data coming in and write it to a file. I have a function for the writing that works fine when I call it directly.
When I call it using the multiprocess approach however, nothing seems to happen. I can see in task monitors in Linux that the process does in fact get spawned but no file gets written, and when I try to pass a flag to it to shut down it doesn't work, meaning I end up terminating the process and nothing seems to have happened.
I've been over this every which way and can't see what I'm doing wrong; does anyone else? In case it's relevant, these are functions inside a parent class, and one of the functions is meant to spawn another as a thread.
Code I'm using:
from datetime import datetime, timedelta
import csv
from drivers.IMU_SEN0 import IMU_SEN0
import multiprocessing, os

class IMU_data_logger:
    _output_filename = ''
    _csv_headers = []
    _accelerometer_headers = ['Accelerometer X', 'Accelerometer Y', 'Accelerometer Z']
    _gyroscope_headers = ['Gyroscope X', 'Gyroscope Y', 'Gyroscope Z']
    _magnetometer_headers = ['Bearing']
    _log_accelerometer = False
    _log_gyroscope = False
    _log_magnetometer = False
    IMU = None
    _writer = []
    _run_underway = False
    _process = []
    _stop_value = 0

    def __init__(self, output_filename='/home/pi/blah.csv', log_accelerometer=True, log_gyroscope=True, log_magnetometer=True):
        """data logging device
        NOTE! Multiple instances of this class should not use the same IMU devices simultaneously!"""
        self._output_filename = output_filename
        self._log_accelerometer = log_accelerometer
        self._log_gyroscope = log_gyroscope
        self._log_magnetometer = log_magnetometer

    def __del__(self):
        # TODO Update this
        if self._run_underway:  # If there's still a run underway, end it first
            self.end_recording()

    def _set_up(self):
        self.IMU = IMU_SEN0(self._log_accelerometer, self._log_gyroscope, self._log_magnetometer)
        self._set_up_headers()

    def _set_up_headers(self):
        """Set up the headers of the CSV file based on the header substrings at top and the input flags on what will be measured"""
        self._csv_headers = []
        if self._log_accelerometer is not None:
            self._csv_headers += self._accelerometer_headers
        if self._log_gyroscope is not None:
            self._csv_headers += self._gyroscope_headers
        if self._log_magnetometer is not None:
            self._csv_headers += self._magnetometer_headers

    def _record_data(self, frequency, stop_value):
        self._set_up()  # Run setup in thread
        """Record data function, which takes a recording frequency, in hertz, as an input"""
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    previous_read_time = datetime.now()  # Update previous readtime
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    if self._log_gyroscope:
                        # Read gyro values
                        gyro = self.IMU.read_gyroscope_values()
                        next_row += [gyro['x'], gyro['y'], gyro['z']]
                    if self._log_magnetometer:
                        # Read magnetometer value
                        b = self.IMU.read_magnetometer_bearing()
                        next_row += b
                    self._writer.writerow(next_row)
        # Close the csv when done
        outcsv.close()

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("Logging Accelerometer:", self._log_accelerometer)
        print("Logging Gyroscope:", self._log_gyroscope)
        print("Logging Magnetometer:", self._log_magnetometer)
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording multithread that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")
====================================================================
ANSWER: For anyone following up here, it turns out the problem was my use of the VS Code debugger which apparently doesn't work with multiprocessing and was somehow preventing the success of the spawned process. Many thanks to Tomasz Swider below for helping me work through issues and, eventually, find my idiocy. The help was very deeply appreciated!!
I can see a few things wrong in your code:
First, stop_value == 0 will not work, as multiprocessing.Value('i', 0) != 0; change that line to
while stop_value.value == 0
Second, you never update previous_read_time, so it will write the readings as fast as it can and you will run out of disk quickly.
Third, try using time.sleep(); what you are doing is called busy looping and it is bad, it wastes CPU cycles needlessly.
Fourth, terminating with self._stop_value = 1 probably will not work; there must be another way to set that value, maybe self._stop_value.value = 1.
Well, here is a piece of example code based on the code that you have provided that is working just fine:
import csv
import multiprocessing
import time
from datetime import datetime, timedelta
from random import randint

class IMU(object):
    @staticmethod
    def read_accelerometer_values():
        return dict(x=randint(0, 100), y=randint(0, 100), z=randint(0, 10))

class Foo(object):
    def __init__(self, output_filename):
        self._output_filename = output_filename
        self._csv_headers = ['xxxx', 'y', 'z']
        self._log_accelerometer = True
        self.IMU = IMU()

    def _record_data(self, frequency, stop_value):
        # self._set_up()  # Run setup functions for the data collection device and store it in the self.IMU variable
        """Record data function, which takes a recording frequency, in hertz, as an input"""
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    previous_read_time = datetime.now()
                    self._writer.writerow(next_row)
        # Close the csv when done
        outcsv.close()

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording multithread that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")

if __name__ == '__main__':
    foo = Foo('/tmp/foometer.csv')
    foo.start_recording(20)
    time.sleep(5)
    print('Ending recording')
    foo.end_recording()
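Regarding the third point, a minimal sketch of how the busy loop could be paced with time.sleep (this is my addition, not part of the answer above): sleep off whatever is left of the sampling period instead of spinning on datetime comparisons.

import time
from datetime import datetime

def paced_loop(period_secs, stop_value, read_and_write_row):
    # Hypothetical helper: calls read_and_write_row() roughly once per period
    # while yielding the CPU in between.
    while stop_value.value == 0:
        started = datetime.now()
        read_and_write_row()
        elapsed = (datetime.now() - started).total_seconds()
        time.sleep(max(0.0, period_secs - elapsed))  # never sleep a negative amount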

Keep one method constantly running and another method executing every certain period

So currently I have these two methods, where one constantly reads the RF data from another device and the other sends that data every so often.
How could I do this? I need the incoming RF data to be constantly updated and received, while the sendData() method just grabs the data from the global variable whenever it can.
Here's the code so far, but it's not working...
import httplib, urllib
import time, sys
import serial
from multiprocessing import Process

key = 'MY API KEY'
rfWaterLevelVal = 0
ser = serial.Serial('/dev/ttyUSB0', 9600)

def rfWaterLevel():
    global rfWaterLevelVal
    rfDataArray = ser.readline().strip().split()
    print 'incoming: %s' % rfDataArray
    if len(rfDataArray) == 5:
        rfWaterLevelVal = float(rfDataArray[4])
        print 'RFWater Level1: %.3f cm' % (rfWaterLevelVal)
        #rfWaterLevel = 0

def sendData():
    global rfWaterLevelVal
    params = urllib.urlencode({'field1': rfWaterLevelVal, 'key': key})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    conn = httplib.HTTPConnection("api.thingspeak.com:80", timeout=5)
    conn.request("POST", "/update", params, headers)
    #print 'RFWater Level2: %.3f cm' % (rfWaterLevelVal)
    response = conn.getresponse()
    print response.status, response.reason
    data = response.read()
    conn.close()

while True:
    try:
        rfWaterLevel()
        p = Process(target=sendData(), args=())
        p.start()
        p.join()
        #Also tried threading...did not work..
        #t1 = threading.Thread(target=rfWaterLevel())
        #t2 = threading.Thread(target=sendData())
        #t1.start()
        #t1.join()
        #t2.join()
    except KeyboardInterrupt:
        print "caught keyboard interrupt"
        sys.exit()
Please help!
Just to clarify, I need the rfWaterLevel() method to run constantly, as the RF data comes in constantly, and I need sendData() to be called as soon as it's ready to send again (roughly every 5 seconds or so). But it seems that if there is any sort of delay in the incoming RF data, the RF data stops updating itself (on the receiving end), and thus the data being sent does not match what the RF transmitter is sending.
Thanks in advance!
I can't give you a full solution, but I can guide you in the right direction.
Your code has three problems.
Process starts (as the name suggests) a new process and not a new thread. A new process cannot share data with the old process. You should use multithreading instead; have a look at threading as explained here.
You are calling rfWaterLevel() inside the main thread. You need to start the second thread before entering the while loop.
You are creating the second thread again and again inside the while loop. Create it only once and put the while loop inside the function.
Your basic program structure should be like this:
import time
from threading import Thread  # needed for Thread below (not shown in the original snippet)

def thread_function_1():
    while True:
        rfWaterLevel()

def thread_function_2():
    while True:
        sendData()
        time.sleep(5)

# start thread 1
thread1 = Thread(target=thread_function_1)
thread1.start()

# start thread 2
thread2 = Thread(target=thread_function_2)
thread2.start()

# wait for both threads to finish
thread1.join()
thread2.join()
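Since both threads share the rfWaterLevelVal global, it may also be worth guarding it with a lock so the sender never reads a half-updated value; a minimal sketch of that idea (the lock is my addition, not part of the answer above):

import threading

rf_lock = threading.Lock()
rfWaterLevelVal = 0.0

def update_level(new_value):
    global rfWaterLevelVal
    with rf_lock:  # writer holds the lock while updating
        rfWaterLevelVal = new_value

def read_level():
    with rf_lock:  # reader holds the lock while copying the value
        return rfWaterLevelVal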
