Executing different functions in a single for loop in python - python

For instance, I have three functions and a single for loop in Python, and I want to execute these functions sequentially: on the first iteration function 1 should be executed, on the second iteration function 2, and so on.
The three functions are:
from scapy.all import *
from random import randint
import threading
import time
from datetime import datetime
import multiprocessing
from itertools import count
#pktList = []
#pktsInt = 0
#def Packets():
# Generate packet
#for run_no in range(0,1)
p = raw_input('Enter PACKETs to send: ')
pktsInt = int(p)
pkts = IP(src="10.0.0.1",dst="10.0.0.2")/TCP()/"GET /HTTP/1.0\r\n\r\n"/Raw(RandString(size=120))
#print pkts
pkts[TCP].flags = "UFP"
pktList = []
for pktNum in range(0,pktsInt):
pktList.extend(pkts)
pktList[pktNum][TCP].dport = 80
#randint(1,65535) # Pkt has Ran PortNo.
print pktList[pktNum].summary()
#print len(pktList[pktNum])
#wrpcap('tcp-packets.pcap',pktList[pktNum])
#queue.put((run_no, pktsInt, pktsList))
# Send the list of packets send(pktList)
def send_http(queue):
for run_number in range(0,1): # this will run indefinitely, same as while True, must be killed to stop.
start=datetime.now()
print "\nStart Time: ", start
start_time=time.time()
send(pktList)
end = datetime.now()
print "\nEnd Time: ", end
totalTime = time.time()-start_time
totalBytes=(pktsInt*120)/totalTime
#print totalBytes,"Seconds"
queue.put((run_number, totalTime, totalBytes))
# Generate packet
pkts1 = IP(dst="10.0.0.2")/fuzz(UDP()/NTP(version=4))/Raw(RandString(size=120))
#print pkts
pkts1[UDP].flags = "UFP"
pktList1 = []
for pktNum1 in range(0,10):
pktList1.extend(pkts1)
pktList1[pktNum1][UDP].dport = randint(1,65535) # Pkt has Ran PortNo.
print pktList1[pktNum1].summary()
#print len(pktList1[pktNum1])
#wrpcap('udp-packets.pcap',pktList1[pktNum1])
# Send the list of packets send(pktList)
def send_ntp(queue):
for run_number in range(1,2): # this will run indefinitely, same as while True, must be killed to stop.
start1 = datetime.now()
print "\nStart Time: ", start1
start_time1=time.time()
send(pktList1)
end1 = datetime.now()
print "\nEnd Time: ", end1
totalTime = time.time()-start_time1
totalBytes=(10*120)/totalTime
#print totalBytes,"Seconds"
queue.put((run_number, totalTime, totalBytes))
# Generate packet
pkts2 = IP(src="10.0.0.1",dst="10.0.0.2")/TCP()/Raw(RandString(size=120))
#print pkts
pkts2[TCP].flags = "UFP"
pktList2 = []
for pktNum2 in range(0,5):
pktList2.extend(pkts2)
pktList2[pktNum2][TCP].dport = 25 # Pkt has Ran PortNo.
print pktList2[pktNum2].summary()
#print len(pktList2[pktNum2])
#wrpcap('tcp-packets.pcap',pktList[pktNum])
def send_smtp(queue):
# Send the list of packets send(pktList)
for run_number in range(2,3): # this will run indefinitely, same as while True, must be killed to stop.
start2 = datetime.now()
print "\n Start Time: ", start2
start_time2=time.time()
send(pktList2)
totalTime = time.time()-start_time2
end2 = datetime.now()
print "\nEnd Time: ", end2
totalBytes=(5*120)/totalTime
#print totalBytes,"Seconds"
queue.put((run_number, totalTime, totalBytes))
#print pktList[0].summary()
#start_time=time.time()
#send(pktList2)
#print pktList2[0].show()
#print pktList2[0].show2()
q = multiprocessing.Queue()
#t1 = multiprocessing.Process(target=Packets)
t = multiprocessing.Process(target=send_http, args=(q, ))
p = multiprocessing.Process(target=send_ntp, args=(q, ))
r = multiprocessing.Process(target=send_smtp, args=(q, ))
#t1.start()
t.start()
time.sleep(12) # "Some interval of time"
p.start()
time.sleep(16)
r.start()
time.sleep(29)
if t.is_alive():
t.terminate()
if p.is_alive():
p.terminate()
if r.is_alive():
r.terminate()
rates = []
while True: # This loop will pull all items out of the queue and display them.
run = q.get()
if not run: # When we reach the end of the queue, exit
break
run_number, total_time, total_bytes = run
print "Run {run_number} took a total of {total_time}\
at an average rate of {total_bytes:.1f} B/s".format(run_number=run_number,
total_time=total_time,
total_bytes=total_bytes)
rates.append(total_bytes)
print "Average rate of {0:.1f} B/s".format(sum(rates)/float(len(rates)))
and a for-loop
# Make a function iterable, by repeatedly calling it.
def make_iterable(func, *args):
    """Yield the result of calling func(*args), forever.

    The original wrapped the loop in a bare ``except: pass``, which
    silently swallowed every error from func -- including KeyboardInterrupt
    and real bugs -- and simply ended the generator.  Exceptions now
    propagate to the consumer, where they can be seen and handled.
    """
    while 1:
        yield func(*args)
# NOTE(review): `random` and `math` are used below, but only `randint` was
# imported earlier in this excerpt -- confirm the real script imports both.
uni_rand = make_iterable(random.uniform, 0, 1)
# A generator for inter-arrival times.
# -ln(U)/a of a uniform U is exponentially distributed with rate a
# (inverse-transform sampling).
# NOTE(review): `a` (the rate parameter) is never defined in this excerpt -- confirm.
inter_arrival = ( -(1./a)*math.log(u) for u in uni_rand)
# Generate inter-arrival times, then sleep for that long.
inter_arrival_iter = iter(inter_arrival)
# NOTE(review): `count` was imported above as itertools.count (a callable);
# xrange() needs an int, so this raises TypeError unless `count` is rebound
# to an integer elsewhere -- confirm.
for i in xrange(count):
# Convert hours to seconds (Python 2 iterator .next()).
inter_arrival_seconds = inter_arrival_iter.next() * 3600.
print "Sleeping for %f seconds." % inter_arrival_seconds
time.sleep(inter_arrival_seconds)
#func1()
#Sequential Function Calling Here except for the executed one
Now the issue is that I am using multiprocessing with all of the above-mentioned functions. How would I call them now so as to generate the different arrival times?

Just put the functions in an iterable and call them one at a time in a for loop:
for afunc in (func1, func2, func3, func4):
afunc()

You could put the functions in a list:
functions = [func1, func2, func3, func4]
for i in range(20):
function = functions[i%4]
EDIT
What will it do?
>>> def func1():
print('I am the first function!')
>>> def func2():
print('I am the second function!')
>>> def func3():
print('I am the third function!')
>>> def func4():
print('I am the fourth function!')
>>> functions = [func1, func2, func3, func4]
>>> for i in range(10):
function = functions[i%4]
function()
I am the first function!
I am the second function!
I am the third function!
I am the fourth function!
I am the first function!
I am the second function!
I am the third function!
I am the fourth function!
I am the first function!
I am the second function!
EDIT 2
Do you simply want the same thing except with 3 functions?
functions = [func1, func2, func3]
for i in xrange(count):
functions[i%3]()
#do the rest of your stuff here

Related

Stuck in a While True loop... any ideas on how to get out?

Edit #1: Here is the working code, however I sometimes get a UnicodeDecodeError which halts the loop from continuing. Is there anyway to cause a break or pass in the loop when this occurs? I have tried changing the code to Try instead of If statements and its not working...
My issue is in the while True: statement...
def SerialRead(dataReadEvent):
    """Read two complete lines from the serial port, log them, then re-arm a timer."""
    cycle_start = DT.datetime.now()
    dataReadEvent.set()  # tell the GUI thread fresh data is coming
    # Serial Reading
    ser = serial.Serial(port='COM4', baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=None)
    global last_received
    buffer = ''
    amountAppended = 0
    # Accumulate bytes until two complete lines have been captured.
    while True:
        buffer += ser.read(ser.inWaiting()).decode('ascii')
        if '\n' in buffer:
            last_received, buffer = buffer.split('\n')[-2:]
            amountAppended += 1
            if amountAppended == 2:
                amountAppended = 0
                break
        else:
            ser.close()
    global plaintext1
    plaintext1 = last_received.replace(' ', ', ')
    plaintext = plaintext1.replace('=', ', ')
    global listvalue
    listvalue = plaintext.split(", ")
    # Append the parsed line to the session CSV.
    outputfile = open(location, mode='a', newline='')
    outputWriter = csv.writer(outputfile)
    outputWriter.writerow([plaintext])
    outputfile.close()
    # Re-arm so a read happens roughly every `writedelay` seconds end-to-end.
    cycle_end = DT.datetime.now()
    spent = (cycle_end - cycle_start).total_seconds()
    restart = (writedelay - spent)
    threading.Timer(restart, SerialRead, args=(dataReadEvent,)).start()
I am trying to get it so that my serial connection reads the last line of input every 5 seconds. However, I have seeded a while True loop inside a threading command and I cannot get out of the while True loop... it hangs forever once it is engaged.
The While True Loop allows me to get one complete line of serial data from my unit. I need a full proper line and this was the way to do it, but it snags every time. How can I get out of the Loop?
from PyQt4 import QtGui, QtCore
import sys
import masimo
import csv
import time
import datetime as DT
import threading
from threading import Thread
import serial
import os
# All session CSVs land in this fixed data directory.
os.chdir(r"C:\Users\SpO2\Desktop\Data")
# Timestamped CSV filename for this session, e.g. "01 Jan 2024 120000.csv".
time1 = time.strftime("%d %b %Y %H%M%S")
location = r'%s.csv' % time1
# mode='x' creates the file, failing loudly if it somehow already exists.
outputfile = open(location, mode='x', newline='')
outputWriter = csv.writer(outputfile)
outputWriter.writerow(["start"])
outputfile.close()
# Target period (seconds) between serial reads.
writedelay = int(5)
# Most recent complete line read from the serial port (shared with the GUI).
last_received = ''
class ExampleApp(QtGui.QMainWindow, masimo.Ui_MainWindow):
    """Main window that polls every 0.5 s for data produced by the serial thread."""

    def __init__(self, event, parent=None):
        super(self.__class__, self).__init__()
        self.setupUi(self)
        self.dataWasReadEvent = event
        # Timer that periodically refreshes the widgets from the shared globals.
        self.checkThreadTimer = QtCore.QTimer(self)
        self.checkThreadTimer.setInterval(500)  # half a second
        self.checkThreadTimer.timeout.connect(self.readListValues)
        self.checkThreadTimer.start()

    def readListValues(self):
        # Refresh only when the reader thread has signalled new data.
        if not self.dataWasReadEvent.is_set():
            return
        self.SPO2text.setText(str(listvalue[5]))
        self.HRtext.setText(str(listvalue[7]))
        self.PItext.setText(str(listvalue[9]))
        self.timestamptext.setText(str(listvalue[1]))
        self.rawdata.setText(str(plaintext1))
        # Clear the flag so nothing happens on the next timeout until new data arrives.
        self.dataWasReadEvent.clear()
def SerialRead(dataReadEvent):
    """Read serial data, log it to CSV, then re-arm a timer.

    NOTE: the while loop below has no break, so execution never reaches the
    CSV-writing code -- this is exactly the behaviour the question asks about.
    """
    began = DT.datetime.now()
    dataReadEvent.set()
    # Serial Reading
    ser = serial.Serial(port='COM4', baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=2)
    global last_received
    buffer = ''
    while True:
        buffer += ser.read(ser.inWaiting()).decode('ascii')
        if '\n' in buffer:
            last_received, buffer = buffer.split('\n')[-2:]
        else:
            ser.close()
    global plaintext1
    plaintext1 = last_received.replace(' ', ', ')
    plaintext = plaintext1.replace('=', ', ')
    global listvalue
    listvalue = plaintext.split(", ")
    # Writing to csv
    outputfile = open(location, mode='a', newline='')
    outputWriter = csv.writer(outputfile)
    outputWriter.writerow([plaintext])
    outputfile.close()
    # Schedule the next read so the whole cycle takes about `writedelay` seconds.
    finished = DT.datetime.now()
    spent = (finished - began).total_seconds()
    threading.Timer(writedelay - spent, SerialRead, args=(dataReadEvent,)).start()
def main(dataReadEvent):
    """Build the Qt application and run its event loop until the window closes."""
    app = QtGui.QApplication(sys.argv)
    window = ExampleApp(dataReadEvent)
    window.show()
    sys.exit(app.exec_())

if __name__ == '__main__':
    # One event shared by the serial-reader thread and the GUI thread.
    dataReadEvent = threading.Event()
    Thread(target = SerialRead, args=(dataReadEvent,) ).start()
    Thread(target = main, args=(dataReadEvent,) ).start()
The
while True:
function will never end and will never exit, you can use "break" to exit a loop. if this isn't what you want you will have to tell it when the while statement should be active ie:
While amountOfTimesToLoop < 0: do whatever
If you want to check for when things have been appended to your list you can do something like
while True:
buffer += ser.read(ser.inWaiting()).decode('ascii')
if '\n' in buffer:
last_received, buffer = buffer.split('\n')[-2:]
if last_received.length == 2:
break
else:
ser.close()
or if you aren't clearing the list you could something like
amountAppended = 0
while True:
buffer += ser.read(ser.inWaiting()).decode('ascii')
if '\n' in buffer:
last_received, buffer = buffer.split('\n')[-2:]
amountAppended += 1
if amountAppended == 2:
amountAppended = 0
break
else:
ser.close()
Use the break keyword to exit any loop. It will exit whatever loop the break is in.
Your loop begins with
while True:
This will always continue forever look at these two examples and note the differences
a=0
while a<10:
print a
a = a+1
This will print 0 1 2 3 4 ... 9 and then a is incremented to 10. the expression evaluates to false and the loop exits.
Now:
a=1
while a>0:
print a
a = a+1
This will run forever since a always greater than 0
In response to your comment:
Use having two lines in last_received as the test, then.
while last_received_count <2:
....
....
if '\n' in buffer:
last_received, buffer = buffer.split('\n')[-2:]
last_received_count = last_received_count+1
As you want to "run the loop until i have two lines of data appended to 'last_received'", you can use a counter which increments inside the if statement and then break when the counter value is equals to 2. Something like this:
counter = 1
while True:
buffer += ser.read(ser.inWaiting()).decode('ascii')
if '\n' in buffer:
last_received, buffer = buffer.split('\n')[-2:]
if counter == 2:
break
counter += 1

Python saving execution time when multithreading

I am having a problem when multithreading and using queues in python 2.7. I want the code with threads to take about half as long as the one without, but I think I'm doing something wrong. I am using a simple looping technique for the fibonacci sequence to best show the problem.
Here is the code without threads and queues. It printed 19.9190001488 seconds as its execution time.
import time
start_time = time.time()
def fibonacci(priority, num):
    """Return the num-th Fibonacci number, computed iteratively.

    `priority` is unused here; it is kept for interface parity with the
    threaded/queued variants of this function.

    Raises ValueError for num < 1 (the original crashed with
    UnboundLocalError there, since `c` was never assigned).
    """
    if num < 1:
        raise ValueError("num must be >= 1")
    if num == 1 or num == 2:
        return 1
    a = b = 1
    for _ in range(num - 2):
        a, b = a + b, a  # advance the pair (F(k), F(k-1))
    return a
# Sequential baseline: four Fibonacci computations, the two large ones dominating.
print fibonacci(0, 200000)
print fibonacci(1, 100)
print fibonacci(2, 200000)
print fibonacci(3, 2)
# Wall-clock time for the whole sequential run.
print("%s seconds" % (time.time() - start_time))
Here is the code with threads and queues. It printed 21.7269999981 seconds as its execution time.
import time
start_time = time.time()
from Queue import *
from threading import *
# Workloads: two large runs bracketing two trivial ones.
numbers = [200000,100,200000,2]
# Results arrive as (priority, value); PriorityQueue orders the final output.
q = PriorityQueue()
threads = []
def fibonacci(priority, num):
    """Compute the num-th Fibonacci number and put (priority, value) on q."""
    if num == 1 or num == 2:
        q.put((priority, 1))
        return
    a = b = 1
    for _ in range(num - 2):
        a, b = a + b, a  # advance the pair (F(k), F(k-1))
    q.put((priority, a))
    return
for i in range(4):
priority = i
num = numbers[i]
t = Thread(target = fibonacci, args = (priority, num))
threads.append(t)
#print threads
for t in threads:
t.start()
for t in threads:
t.join()
while not q.empty():
ans = q.get()
q.task_done()
print ans[1]
print("%s seconds" % (time.time() - start_time))
What I thought would happen is the multithreaded code takes half as long as the code without threads. Essentially I thought that all the threads work at the same time, so the 2 threads calculating the fibonacci number at 200,000 would finish at the same time, so execution is about twice as fast as the code without threads. Apparently that's not what happened. Am I doing something wrong? I just want to execute all threads at the same time, print in the order that they started and the thread that takes the longest time is pretty much the execution time.
EDIT:
I updated my code to use processes, but now the results aren't being printed. Only an execution time of 0.163000106812 seconds is showing. Here is the new code:
import time
start_time = time.time()
from Queue import *
from multiprocessing import *
numbers = [200000,100,200000,2]
q = PriorityQueue()
processes = []
def fibonacci(priority, num):
if num == 1 or num == 2:
q.put((priority, 1))
return
a = 1
b = 1
for i in range(num-2):
c = a + b
b = a
a = c
q.put((priority, c))
return
for i in range(4):
priority = i
num = numbers[i]
p = Process(target = fibonacci, args = (priority, num))
processes.append(p)
#print processes
for p in processes:
p.start()
for p in processes:
p.join()
while not q.empty():
ans = q.get()
q.task_done()
print ans[1]
print("%s seconds" % (time.time() - start_time))
You've run in one of the basic limiting factors of the CPython implementation, the Global Interpreter Lock or GIL. Effectively this serializes your program, your threads will take turns executing. One thread will own the GIL, while the other threads will wait for the GIL to come free.
One solution would to be use separate processes. Each process would have its own GIL so would execute in parallel. Probably the easiest way to do this is to use Python's multiprocessing module as replacement for the threading module.

Python Apply_async not waiting for other Processes to Finish

I have the following sample code that I am trying to use the multiprocessing module on. The following statement had been working previously under other applications, but one process (which receives a very small amount of data just due to the breakup) finishes first and causes the program to finish. Could someone help me understand why this is not waiting for the others?
def mpProcessor(basePath, jsonData, num_procs = mp.cpu_count()):
    """Fan jsonData out to a pool of workers in roughly equal chunks.

    Returns the shared manager dict, as before.  Fixes:
    * chunkSize could be 0 whenever len(jsonData) < num_procs, making the
      range() step invalid -- it is now clamped to at least 1.
    * the apply_async results were discarded, so a worker that died from an
      exception vanished silently (which is why some chunks seemed to
      "finish first"); .get() now re-raises any worker error.
    """
    manager = mp.Manager()
    map = manager.dict()  # NOTE: shadows the builtin; name kept for interface compatibility
    procs = mp.Pool(processes = num_procs, maxtasksperchild = 1)
    chunkSize = max(1, len(jsonData) // num_procs)  # never a 0 step
    dataChunk = [(i, i + chunkSize) for i in range(0, len(jsonData), chunkSize)]
    pending = []
    count = 1
    for s, e in dataChunk:
        pending.append(procs.apply_async(processJSON, args = (count, basePath, jsonData[s:e])))
        count += 1
    procs.close()
    procs.join()
    for res in pending:
        res.get()  # surfaces any exception raised inside a worker
    return map
def processJSON(proc, basePath, records):
print 'Spawning new process: %d' %os.getpid()
outDict = dict()
print len(records)
for i in range(len(records)):
valid = False
idx = 0
while valid == False:
jsonObject = json.loads(records[i][1])['results'][idx]
if jsonObject['kind'] == 'song':
valid = True
break
else:
idx += 1
tunesTrack = Track()
tunesTrack.setTrackId(jsonObject['trackId'])
print 'Finished processing %d records with process %d' %(len(records), os.getpid())
You seem to be reinventing the wheel.
What you are trying to do could be much more easily achieved by using an initializer with the pool and using map rather than apply_async. As it stands your code snippet is not runnable so I can't be sure what the actual problem is. However, the following should simplify your code and make it easier to debug.
import math
import multiprocessing as mp
def pool_init(basePath_):
global basePath, job_count
basePath = basePath_
job_count = 0
print 'Spawning new process: %d' %os.getpid()
def mpProcessor(basePath, jsonData, num_procs=mp.cpu_count()):
    """Map processJSON over jsonData using a pool seeded via pool_init."""
    workers = mp.Pool(processes=num_procs, initializer=pool_init, initargs=(basePath,))
    # No explicit chunksize: Pool.map works out the optimal one itself.
    return workers.map(processJSON, jsonData)
# change processJSON to work with single records and
# remove proc and basePath args (as not needed)
def processJSON(record):
global job_count
print 'Starting job %d in process: %d' % (job_count, os.getpid())
valid = False
idx = 0
while valid == False:
jsonObject = json.loads(record[1])['results'][idx]
if jsonObject['kind'] == 'song':
valid = True
break
else:
idx += 1
tunesTrack = Track()
tunesTrack.setTrackId(jsonObject['trackId'])
print 'Finished processing job %d with process %d' % (job_count, os.getpid())
job_count += 1

Problems using timer2 on Raspberry Pi

I'm having trouble using timer2 on the Raspberry Pi
Here's the code
**************************************************************
# tmr2_tst_04.py
# https://github.com/ask/timer2
# http://pymotw.com/2/threading/index.html#thread-objects
# ISSUES:
#
# *) Seems to respond only to even seconds
#
# *) Is off by 1 second. i.e. 4000 gives a 5 second interrupt
import timer2
import time # for sleep
import signal,sys
def signal_handler(signal, frame):
print 'You pressed Ctrl+C!'
timer.stop()
sys.exit(0)
#time_to_wait = 4500
#time_to_wait = 4999
# Timer interval in milliseconds.  Observed on the Pi: 4000 ms yields a
# 5-second cycle -- the issue this script demonstrates.
time_to_wait = 4000.0 # gives 5-second cycle time !!!
#time_to_wait = 500.0 # doesn't work
# State shared between the hello() timer callback and the main loop below.
tm = 0
tdiff = 0
tm_old = -1
# NOTE(review): `iter` shadows the builtin of the same name.
iter = 0
to_print = False
def hello():
    """Timer callback: record the interval since the previous tick."""
    global iter, tm, tdiff, tm_old, to_print
    tm = time.time()
    # First tick has no predecessor (tm_old starts at -1), so report 0.
    if tm_old > 0:
        tdiff = tm - tm_old
    else:
        tdiff = 0
    tm_old = tm
    iter += 1
    to_print = True  # the main loop does the printing; keep the callback fast
# Set up to catch ^C
signal.signal(signal.SIGINT, signal_handler)
print 'Press Ctrl+C to exit'
# Set up timer interrupt routine
timer = timer2.Timer()
timer.apply_interval(time_to_wait, hello)
# Main program loop
while iter <= 10000:
if to_print:
buf = "%3d %d %f %6.4f %s" % (iter, time_to_wait, tm, tdiff, "Hello world")
print buf
to_print = False
time.sleep((time_to_wait/1000)/2)
timer.stop()
*************************************************************************
This runs the thread "hello" every 5000 msec on the Raspberry Pi, but every 4000 msec on an UBUNTU machine
Second issue - if I try for a short time interval, say
time_to_wait = 500
it doesn't work at all - just zips through the code with time differences of .1 msec!
It's calling Python's sleep function, which takes an argument in seconds. Ideally, what you have done would convert that to .25 seconds, but all of the numbers involved are integers.
As integers, 500/1000 = 0, 0/2 = 0, and so on. So, it waits the requested 0 seconds before proceeding. Change 1000 to 1000.0, for example, and it should work.

Threaded Python port scanner

I am having issues with a port scanner I'm editing to use threads.
This is the basics for the original code:
for i in range(0, 2000):
s = socket(AF_INET, SOCK_STREAM)
result = s.connect_ex((TargetIP, i))
if(result == 0) :
c = "Port %d: OPEN\n" % (i,)
s.close()
This takes approx 33 minutes to complete. So I thought I'd thread it to make it run a little faster. This is my first threading project so it's nothing too extreme, but I've ran the following code for about an hour and get no exceptions yet no output. Am I just doing the threading wrong or what?
import threading
from socket import *
import time
# Ports-attempted counters (a/b) and last-open-port messages (c/d)
# for the low and high scan halves respectively.
a = 0
b = 0
c = ""
d = ""
def ScanLow():
    """Probe TCP ports 0-999 on TargetIP; record the last open port message."""
    global a
    global c
    for port in range(0, 1000):
        sock = socket(AF_INET, SOCK_STREAM)
        if sock.connect_ex((TargetIP, port)) == 0:
            c = "Port %d: OPEN\n" % (port,)
        sock.close()
        a += 1  # ports attempted so far
def ScanHigh():
    """Probe TCP ports 1001-1999 on TargetIP; record the last open port message.

    NOTE: port 1000 is scanned by neither half (ScanLow stops at 999).
    """
    global b
    global d
    for port in range(1001, 2000):
        sock = socket(AF_INET, SOCK_STREAM)
        if sock.connect_ex((TargetIP, port)) == 0:
            d = "Port %d: OPEN\n" % (port,)
        sock.close()
        b += 1  # ports attempted so far
Target = raw_input("Enter Host To Scan:")
TargetIP = gethostbyname(Target)
print "Start Scan On Host ", TargetIP
Start = time.time()
threading.Thread(target = ScanLow).start()
threading.Thread(target = ScanHigh).start()
# NOTE(review): e is computed once, immediately after the threads start and
# before they have counted anything, and is never updated inside the loop --
# so the loop below can never exit on its own.  This is the bug the answer
# explains; Thread.join() is the right way to wait.
e = a + b
while e < 2000:
f = raw_input()
End = time.time() - Start
print c
print d
print End
g = raw_input()
This is where your code is failing.
threading.Thread(target = ScanLow).start()
threading.Thread(target = ScanHigh).start()
e = a + b
while e < 2000:
f = raw_input()
Immediately after you start your threads, you set the value to e. However, you never update e after that, so the loop never exits.
It also seems like you are doing this to wait until both threads have finished. The join() method is is a better way to do this.
from threading import Thread
# Collect the two scan threads, start them all, then join them all.
threads = []
threads.append(Thread(target = ScanLow))
threads.append(Thread(target = ScanHigh))
for thread in threads:
    thread.start()
# both threads are running  (the original used "//" comments, a SyntaxError in Python)
for thread in threads:
    thread.join()
# both threads have stopped
Edit:
Not related to your question, but a helpful comment. Both of your scan functions are doing the exact same thing. You can replace them with one function that takes the scan range as arguments and start both threads with the one function.
from threading import Thread

def Scan(start, stop):
    """Probe TCP ports [start, stop) on TargetIP; record the last open port message."""
    global a
    global c
    for i in range(start, stop):
        s = socket(AF_INET, SOCK_STREAM)
        result = s.connect_ex((TargetIP, i))
        if(result == 0) :
            c = "Port %d: OPEN\n" % (i,)
        s.close()
        a += 1

threadCount = 2
totalPorts = 2000
chunk = totalPorts / threadCount
threads = []
for start in xrange(0, totalPorts, chunk):
    # BUG FIX: the stop argument must be start + chunk; the original passed the
    # chunk size itself, so the second thread scanned range(1000, 1000) -- an
    # empty range -- and half the ports were never probed.
    threads.append(Thread(target = Scan, args = (start, start + chunk)))
for thread in threads:
    thread.start()
# both threads are running  ("//" comments are a SyntaxError in Python)
for thread in threads:
    thread.join()
# both threads have stopped
And now you can easily adjust the number of threads and ports to scan.
You have an awkward method for monitoring the threads. Using join will indicate when the thread is complete. No reason not to spin off more threads to get the results faster as well:
import threading
import socket
import time
ports = []
def check_port(ip,port):
    """Append `port` to the shared `ports` list if a TCP connect to it succeeds."""
    probe = socket.socket()
    if probe.connect_ex((ip,port)) == 0:
        ports.append(port)
    probe.close()
target = raw_input('Target? ')
s = time.time()
threads = []
for port in range(2000):
t = threading.Thread(target=check_port,args=(target,port))
t.start()
threads.append(t)
for t in threads:
t.join()
print ports
print time.time() - s
Output
[80, 135, 445, 1028]
6.92199993134

Categories