Not all Queue items are printing - Python

Hello, I have a problem with the queue not printing items in the order it's supposed to, so it doesn't check all the passwords. Code below:
import queue
import zipfile
from multiprocessing import Process

class ZipFile:
    def __init__(self):
        self.zip_file = self.return_zip_file()          # just grabbing the zip file here
        self.password_word_list = self.password_file()  # grabbing the password file
        self.q = queue.Queue(maxsize=50)                # making the queue here

    def word_list(self):
        with open(self.password_word_list, "r") as f:
            data = f.readlines()
            for password in data:
                yield password.strip()

    def extract_zip_file(self, zip_file, password):
        try:
            zip_file.extractall(pwd=password.encode())  # extracting zip
            print(f"[+] Password -> {password}")
        except Exception as e:
            print(e)  # for debugging

    def brute_force_zip(self):
        get_word_list = self.word_list()
        count = 0
        get_zip_file = zipfile.ZipFile(self.zip_file)
        for password in get_word_list:
            self.q.put(password)
            if self.q.qsize() == 50:
                while not self.q.empty():
                    thread = Process(target=self.extract_zip_file,
                                     args=(get_zip_file, self.q.get()),
                                     daemon=True)
                    thread.start()
                    count += 1
                    print(f"\rAttempts: {count}")
So basically the self.q.get() calls print everything out of order, and sometimes not all the words are even tried. How can I fix it? Thanks!

Actually, I figured it out: I forgot that multiprocessing runs each attempt in a separate process, so the output arrives in whatever order the processes happen to finish.
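If you also want the attempts reported in order and every word guaranteed to be tried, one option is to let a process pool do the fan-out and collect results in submission order. Below is a minimal Python 3 sketch, not the original code: try_password and words are hypothetical names, and each worker reopens the archive itself since an open ZipFile is not straightforward to share between processes.

import zipfile
from multiprocessing import Pool

def try_password(args):
    # Each worker opens the archive itself and reports whether the password worked.
    zip_path, password = args
    try:
        with zipfile.ZipFile(zip_path) as zf:
            zf.extractall(pwd=password.encode())
        return password, True
    except Exception:
        return password, False

def brute_force(zip_path, words):
    # imap yields results in submission order, so attempts are reported
    # in the order of the word list even though they run concurrently.
    with Pool(processes=4) as pool:
        for count, (password, ok) in enumerate(
                pool.imap(try_password, ((zip_path, w) for w in words)), 1):
            print(f"Attempts: {count}", end="\r")
            if ok:
                print(f"\n[+] Password -> {password}")
                pool.terminate()
                break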

Related

No item in queue but there should be - Python threading

I am working on a project that is a GUI with a separate thread. When the user clicks a button, a function fires that sends data to the queue and launches the other thread. The other thread then gets the data from the queue and adds new data to it, which the GUI then picks up and acts on. But it gets stuck, saying the queue is empty. Why is this and how can I fix it?
def click():
    if self.uN.get() and self.pW.get():
        self.q.put("LOGIN")
        self.q.put(self.uN.get() + "," + self.pW.get())
    else:
        self.q.put("eFi")
    con = Connect(self.q)
    con.setDaemon(True)
    con.start()
    time.sleep(1)
    while True:
        root.update()
        try:
            data = self.q.get(False)
        except queue.Empty:
            pass
        else:
            print(data + "+")
            if data == "Fcon":
                tkMessageBox.showerror("ERROR!", "Failed to connect to the server!")
            elif data == "nCre":
                tkMessageBox.showerror("ERROR!", "A text field is empty!")
            elif data == "Gcon":
                for item in root.winfo_children():
                    item.destroy()
                self.mScreen()
            else:
                print("?")
            print('!')
            break
Here is the other thread's code:
class Connect(Thread):
    def __init__(self, q):
        Thread.__init__(self)
        self.s = socket.socket()
        self.q = q

    def run(self):
        while True:
            try:
                data = self.q.get()
            except Exception as e:
                pass
            else:
                if data == "LOGIN":
                    self.login()
                elif data == "eFi":
                    self.q.put("nCre")
                print("??????")

    def login(self):
        info = self.q.get().split(",")
        self.q.put("Gcon")
        print("GOD")
The problem was caused by having two threads access the same queue: one thread would grab the information before the proper thread was able to access it. To solve the problem I made two queues, one for each thread.
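A minimal sketch of that two-queue arrangement (the names are only illustrative): the GUI writes to a request queue that only the worker reads, and the worker writes to a response queue that only the GUI reads, so neither side can steal the other's messages.

import queue
import threading

requests = queue.Queue()   # GUI -> worker
responses = queue.Queue()  # worker -> GUI

def worker():
    while True:
        msg = requests.get()          # only the worker reads this queue
        if msg == "LOGIN":
            creds = requests.get()    # next item is "user,password"
            # ... talk to the server here ...
            responses.put("Gcon")     # only the GUI reads this queue
        elif msg == "eFi":
            responses.put("nCre")

threading.Thread(target=worker, daemon=True).start()

# In the GUI loop, poll the response queue without blocking:
# try:
#     data = responses.get(False)
# except queue.Empty:
#     pass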

What is the difference in creating an instance of a class inside a thread loop versus outside

I have recently started working on threads and queues in Python. I have seen the following pattern for multithreading and queuing:
VALID_TARGETS = ['10.192.20.2', '10.233.1.3', '10,235,0.3']
queue2 = Queue.Queue(maxsize=10)
for target in VALID_TARGETS:
    queue2.put(target)
for i in range(100):
    try:
        valAuth_obj = ValAuth(queue2, user_id, pwd)
        valAuth_obj.daemon = True
        valAuth_obj.start()
    except Exception, error:
        print "ERROR"
        break
queue2.join()
Where ValAuth is a class inheriting threading.Thread, whose run function is:
def run(self):
    while True:
        target = self.queue.get()
        self.login(target)
        self.queue.task_done()
The program runs as expected, but I noticed that we are instantiating the class valAuth for every thread I want to run. Then I tried the following, where I pulled valAuth() out of the loop:
VALID_TARGETS = ['10.192.20.2', '10.233.1.3', '10,235,0.3']
queue2 = Queue.Queue(maxsize=10)
valAuth_obj = ValAuth(queue2, user_id, pwd)
for target in VALID_TARGETS:
    queue2.put(target)
for i in range(100):
    try:
        valAuth_obj.daemon = True
        valAuth_obj.start()
    except Exception, error:
        print "ERROR"
        break
queue2.join()
The program still gives me the same result, but I can notice some difference in speed. Can someone please explain what the difference is in terms of background operations, and which one we should stick to?
CODE for valAuth class
class valAuth(threading.Thread):
    """ Authenticate all targeted SCM build machines """
    def __init__(self, queue, user_id, password):
        threading.Thread.__init__(self)
        self.queue = queue
        self.user = user_id
        self.password = password

    def run(self):
        while True:
            target = self.queue.get()
            self.auth(target)
            self.queue.task_done()

    def auth(self, target):
        """ Check all SCM build machines are active with login authentication and
        also check that the account is not AD locked.
        """
        print "P1: " + self.user + " target: " + target
        # Company's Authentication Protocol base code ... (cannot show ! SORRY !)
        print "P1: " + self.user + " target: " + target + " Run complete"
Thanks in advance.

python fcntl does not acquire lock

I have written code that writes to a CSV file in parallel in Python.
When my program finishes, I see that some lines are merged instead of being on separate lines. Each line should only contain 3 columns, but instead it looks like the following:
myname myage myvalue
myname myage myvaluemyname
myname myage myvalue
myage
What I understood from reading a few other questions is that I need to lock my file if I want to avoid such scenarios, so I added the fcntl module. But it seems my file is still not being locked, as it produces similar output.
My code:
def getdata(x):
    try:
        # get data from API
        c.writefile(x, x1, x2)
    except Exception, err:
        print err

class credits:
    def __init__(self):
        self.d = dict()
        self.details = dict()
        self.filename = "abc.csv"
        self.fileopen = open(self.filename, "w")

    def acquire(self):
        fcntl.flock(self.fileopen, fcntl.LOCK_EX)

    def release(self):
        fcntl.flock(self.fileopen, fcntl.LOCK_UN)

    def __del__(self):
        self.fileopen.close()

    def writefile(self, x, x1, x2, x3):
        try:
            self.acquire()
            self.fileopen.write(str(x) + "," + str(x1) + "," + str(x2) + "\n")
        except Exception, e:
            raise e
        finally:
            self.release()

if __name__ == '__main__':
    conn = psycopg2.connect()
    curr = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    curr.execute("select * from emp")
    rows = curr.fetchall()
    listdata = []
    for each in rows:
        listdata.append(each[0])
    c = credits()
    p = Pool(processes=5)
    results = p.map(getdata, listdata)
    conn.close()
I had to declare getdata as a top-level function, otherwise it gave me "Can't pickle function".
Why don't you write to a separate file in each process and then merge them? It might be more computationally expensive, but it will ensure safety.
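A rough Python 3 sketch of that idea (file names and columns here are placeholders, not the original code): each worker appends to a file keyed by its own process id, and the parent concatenates the pieces afterwards.

import glob
import os
from multiprocessing import Pool

def getdata(x):
    # Each process appends to its own file, so no two processes
    # ever write through the same file handle.
    with open("abc_%d.csv" % os.getpid(), "a") as f:
        f.write("%s,%s,%s\n" % (x, "age", "value"))  # placeholder columns

def merge(pattern="abc_*.csv", out="abc.csv"):
    # Concatenate the per-process pieces into one CSV and clean up.
    with open(out, "w") as outfile:
        for part in glob.glob(pattern):
            with open(part) as infile:
                outfile.write(infile.read())
            os.remove(part)

if __name__ == '__main__':
    with Pool(processes=5) as p:
        p.map(getdata, ["row1", "row2", "row3"])
    merge()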

Is there a standard approach to returning values from coroutine endpoints

My question:
I would like to know if there is a "best practice" pattern in Python for returning values from coroutine endpoints (aka the "sink" or "consumer"). More generally, how would you approach the following scenario?
My scenario:
I have my (producer) > (filter) > (consumer) coroutine pipeline to process a text-based table and build a list of dictionaries from it. I would like the object that is built in the consumer to be returned to the original caller of the producer.
My approach:
My approach has been to set up a unique finish-processing signal that each coroutine checks for. If it hears the signal, then it passes on the signal to its child and yields the returned value. The consumer just yields its current value.
Alternative approaches:
I considered:
Using a global to hold the desired object to be "returned" to the caller.
A class-based approach with regular subroutines.
Reasons why I should maybe reconsider these for my scenario would also be welcome.
My implementation:
Here is a simplified version of what I have done, with all key components included.
import uuid

FINISH_PROCESSING_SIGNAL = uuid.uuid4()

def coroutine(func):
    def start(*args, **kwargs):
        cr = func(*args, **kwargs)
        cr.next()
        return cr
    return start

# Sink
@coroutine
def list_builder():
    # accepts objects and adds them to a list
    _list = []
    try:
        while True:
            data = (yield)
            if data is FINISH_PROCESSING_SIGNAL:
                yield _list
                break
            _list.append(data)
    except GeneratorExit:
        pass
# Filter
@coroutine
def user_data_filter(target=None):
    if target is None:
        target = list_builder()
    header = "-+-"
    footer = "Transfer Packets"
    username = "User Name"
    fullname = "Full Name"
    note = "Description"
    try:
        while True:
            user = {}
            data = (yield)
            if data is FINISH_PROCESSING_SIGNAL:
                yield target.send(FINISH_PROCESSING_SIGNAL)
                break
            line = data
            if header in line:
                while True:
                    line = (yield)
                    if footer in line:
                        target.send(user)
                        break
                    elif username in line:
                        user["username"] = line.split('|')[1]
                    elif fullname in line:
                        user["fullname"] = line.split('|')[1]
                    elif note in line:
                        user["note"] = line.split('|')[1]
    except GeneratorExit:
        target.close()
# Producer
def process_users_table(table, target=None):
    if target is None:
        target = user_data_filter()
    lines = table.split('\r\n')
    for line in lines:
        target.send(line)
    processed_data = target.send(FINISH_PROCESSING_SIGNAL)
    return processed_data

if __name__ == '__main__':
    test_users_table = \
"""
Item            |Value\r\n
----------------+-----------------------\r\n
User Name       |alice\r\n
Full Name       |Alice Doe\r\n
Description     |\r\n
Transfer Packets|0\r\n
----------------+-----------------------\r\n
User Name       |bob\r\n
Full Name       |Bob Tables\r\n
Description     |\r\n
Transfer Packets|0\r\n
"""
    users = process_users_table(test_users_table)
    print users
Your method of signaling the consumer to terminate is fine and is in harmony with what you would do if using a multiprocessing or threaded Queue. However, generators also have a way to throw Exceptions (rather than sending values) and the purpose of throw is precisely to signal events or changes in state to the generator. Moreover, when an exception is thrown to a generator,
[i]f the generator catches the exception and yields another value, that is the return value of g.throw().
That seems perfectly suited for your use case. Instead of sending a FINISH_PROCESSING_SIGNAL value, you could throw a FINISH_PROCESSING_SIGNAL Exception, and use try..except to yield the final value.
class FINISH_PROCESSING_SIGNAL(Exception): pass

def coroutine(func):
    def start(*args, **kwargs):
        cr = func(*args, **kwargs)
        cr.next()
        return cr
    return start

# Sink
@coroutine
def list_builder():
    # accepts objects and adds them to a list
    _list = []
    try:
        while True:
            data = (yield)
            _list.append(data)
    except FINISH_PROCESSING_SIGNAL:
        yield _list
# Filter
@coroutine
def user_data_filter(target=list_builder()):
    header = "-+-"
    footer = "Transfer Packets"
    username = "User Name"
    fullname = "Full Name"
    note = "Description"
    try:
        while True:
            user = {}
            data = (yield)
            line = data
            if header in line:
                while True:
                    line = (yield)
                    if footer in line:
                        target.send(user)
                        break
                    elif username in line:
                        user["username"] = line.split('|')[1]
                    elif fullname in line:
                        user["fullname"] = line.split('|')[1]
                    elif note in line:
                        user["note"] = line.split('|')[1]
    except FINISH_PROCESSING_SIGNAL as err:
        # Pass along the Exception to the target, and yield its result back
        # to the caller
        yield target.throw(err)

# Producer
def process_users_table(table, target=user_data_filter()):
    lines = table.split('\r\n')
    for line in lines:
        target.send(line)
    processed_data = target.throw(FINISH_PROCESSING_SIGNAL)
    # processed_data = target.close()
    return processed_data
if __name__ == '__main__':
    test_users_table = \
"""
Item            |Value\r\n
----------------+-----------------------\r\n
User Name       |alice\r\n
Full Name       |Alice Doe\r\n
Description     |\r\n
Transfer Packets|0\r\n
----------------+-----------------------\r\n
User Name       |bob\r\n
Full Name       |Bob Tables\r\n
Description     |\r\n
Transfer Packets|0\r\n
"""
    users = process_users_table(test_users_table)
    print users

Handle multiple IO errors

I have several IO operations that I carry out on class init, but they often fail with IOError. What I would like to do is delay a few hundred ms and try again until success or some defined timeout. How can I make sure each individual command succeeds before continuing/ending the loop? I assume there is a better way than an if statement for each item and a counter to check that all commands succeeded.
My current code below often fails with IOError and hangs the rest of the application.
def __init__(self):
    print("Pressure init.")
    self.readCoefficients()

def readCoefficients(self):
    global a0_MSB
    global a0_LSB
    global b1_MSB
    global b1_LSB
    global b2_MSB
    global b2_LSB
    global c12_MSB
    global c12_LSB
    a0_MSB = Pressure.bus.read_byte_data(Pressure.MPL115A2_ADDRESS, Pressure.MPL115A2_REGISTER_A0_COEFF_MSB+0)
    a0_LSB = Pressure.bus.read_byte_data(Pressure.MPL115A2_ADDRESS, Pressure.MPL115A2_REGISTER_A0_COEFF_LSB+0)
    b1_MSB = Pressure.bus.read_byte_data(Pressure.MPL115A2_ADDRESS, Pressure.MPL115A2_REGISTER_B1_COEFF_MSB+0)
    b1_LSB = Pressure.bus.read_byte_data(Pressure.MPL115A2_ADDRESS, Pressure.MPL115A2_REGISTER_B1_COEFF_LSB+0)
    b2_MSB = Pressure.bus.read_byte_data(Pressure.MPL115A2_ADDRESS, Pressure.MPL115A2_REGISTER_B2_COEFF_MSB+0)
    b2_LSB = Pressure.bus.read_byte_data(Pressure.MPL115A2_ADDRESS, Pressure.MPL115A2_REGISTER_B2_COEFF_LSB+0)
    c12_MSB = Pressure.bus.read_byte_data(Pressure.MPL115A2_ADDRESS, Pressure.MPL115A2_REGISTER_C12_COEFF_MSB+0)
    c12_LSB = Pressure.bus.read_byte_data(Pressure.MPL115A2_ADDRESS, Pressure.MPL115A2_REGISTER_C12_COEFF_LSB+0)
Are you wanting to retry each one of those last 8 lines independently, or as a group? If independently, you will want to make a little helper function:
def retry_function(tries, function, *args, **kwargs):
    for attempt in range(tries):
        try:
            return function(*args, **kwargs)
        except IOError as e:
            time.sleep(.005)
    raise e  # will be the last error from inside the loop. be sure tries is at least 1 or this will be undefined!
Then call it like this:
a0_MSB = retry_function(5, Pressure.bus.read_byte_data, Pressure.MPL115A2_ADDRESS,Pressure.MPL115A2_REGISTER_A0_COEFF_MSB+0)
If not independently but as a group, you probably still want this helper function, but you'll have to rewrite it to handle a list of functions/arguments, or pass in another custom function.
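For example, here is a minimal sketch of such a group version (hypothetical, not tested against the hardware): it takes a list of (function, args) pairs, retries only the calls that failed, and preserves the order of the results.

import time

def retry_group(tries, calls, delay=0.005):
    # calls is a list of (function, args) pairs; results keeps their order.
    results = [None] * len(calls)
    pending = list(enumerate(calls))
    for attempt in range(tries):
        still_failing = []
        for index, (function, args) in pending:
            try:
                results[index] = function(*args)
            except IOError:
                still_failing.append((index, (function, args)))
        if not still_failing:
            return results
        pending = still_failing
        time.sleep(delay)
    raise IOError("%d call(s) still failing after %d tries" % (len(pending), tries))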
If it's OK for you that all the values are read one after the other, you can use a simple function.
import time
# ...
def readCoefficients(self):
    global a0_MSB
    global a0_LSB
    global b1_MSB
    global b1_LSB
    global b2_MSB
    global b2_LSB
    global c12_MSB
    global c12_LSB
    max_retries = 15
    a0_MSB = self.readretry(Pressure.MPL115A2_REGISTER_A0_COEFF_MSB+0, max_retries)
    a0_LSB = self.readretry(Pressure.MPL115A2_REGISTER_A0_COEFF_LSB+0, max_retries)
    b1_MSB = self.readretry(Pressure.MPL115A2_REGISTER_B1_COEFF_MSB+0, max_retries)
    b1_LSB = self.readretry(Pressure.MPL115A2_REGISTER_B1_COEFF_LSB+0, max_retries)
    b2_MSB = self.readretry(Pressure.MPL115A2_REGISTER_B2_COEFF_MSB+0, max_retries)
    b2_LSB = self.readretry(Pressure.MPL115A2_REGISTER_B2_COEFF_LSB+0, max_retries)
    c12_MSB = self.readretry(Pressure.MPL115A2_REGISTER_C12_COEFF_MSB+0, max_retries)
    c12_LSB = self.readretry(Pressure.MPL115A2_REGISTER_C12_COEFF_LSB+0, max_retries)

def readretry(self, address, max_retries):
    for i in range(max_retries):
        try:
            return Pressure.bus.read_byte_data(
                Pressure.MPL115A2_ADDRESS,
                address
            )
        except IOError as e:
            # print(e)
            time.sleep(0.1)
    else:
        raise IOError("Reading failed after multiple tries")
Note: You should not use globals, especially in classes.
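For instance, a short sketch of the same method (attribute names are just illustrative) that stores the values on the instance instead of in module globals:

def readCoefficients(self):
    max_retries = 15
    # Keep the raw register values as instance attributes instead of globals.
    self.a0_MSB = self.readretry(Pressure.MPL115A2_REGISTER_A0_COEFF_MSB, max_retries)
    self.a0_LSB = self.readretry(Pressure.MPL115A2_REGISTER_A0_COEFF_LSB, max_retries)
    # ... and so on for the b1, b2 and c12 coefficients.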
Here is another way of doing the retries: the code below tries to read all the addresses, saves the ones that failed, then waits a little and retries the failed addresses until every address has been read properly or the number of allowed retries is exceeded.
def readCoefficients(self):
    (
        a0_MSB, a0_LSB,
        b1_MSB, b1_LSB,
        b2_MSB, b2_LSB,
        c12_MSB, c12_LSB) = self.mio_read(15,
            Pressure.MPL115A2_REGISTER_A0_COEFF_MSB+0,
            Pressure.MPL115A2_REGISTER_A0_COEFF_LSB+0,
            Pressure.MPL115A2_REGISTER_B1_COEFF_MSB+0,
            Pressure.MPL115A2_REGISTER_B1_COEFF_LSB+0,
            Pressure.MPL115A2_REGISTER_B2_COEFF_MSB+0,
            Pressure.MPL115A2_REGISTER_B2_COEFF_LSB+0,
            Pressure.MPL115A2_REGISTER_C12_COEFF_MSB+0,
            Pressure.MPL115A2_REGISTER_C12_COEFF_LSB+0
        )

def mio_read(self, max_retries, *addresses):
    # Create storage for results
    results = [None] * len(addresses)
    # Keep track of the index of a particular address in the list of results
    ios = list(enumerate(addresses))
    for i in range(max_retries):
        failedios = []
        for index, address in ios:
            try:
                results[index] = Pressure.bus.read_byte_data(
                    Pressure.MPL115A2_ADDRESS,
                    address
                )
            except IOError as e:
                # Place address in the queue for the next round
                failedios.append((index, address))
        # If all succeeded
        if len(failedios) == 0:
            return results
        # The sleep could be shortened, since some time was already spent checking the other addresses
        time.sleep(0.1)
        ios = failedios
    else:
        raise IOError(",".join(str(addr) for ind, addr in failedios))
