If the last value is equal to `value` in a while loop - Python

I have a coroutine triggered by a while loop and a DataFrame updated each second:
_lock = asyncio.Lock()

while True:
    df = ...  # DataFrame is updated with a new entry
    asyncio.run(coroutine())
    time.sleep(1)

async def coroutine():
    if cond1:
        await _lock.acquire()
        time.sleep(.5)
        # wait until cond2 is True
        _lock.release()
cond2 concerns a shared resource: say, `df.iloc[-1] == value`. I want `df.iloc[-1]` in cond2 to keep updating while the lock is held, i.e. "wait until the last value is equal to another value".
I tried adding a counter as an index, but while the lock is acquired the loop is stalled.
Below is a minimal reproducible case:
import time

import pandas as pd
import numpy as np
import threading
import asyncio


class Thread1(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        global _download
        _download = {}

    def run(self):
        ind_df = pd.DataFrame(...)  # any DataFrame
        data = self.get_data(ind_df)
        count = 0
        while True:
            _download[count] = next(data).values
            df = pd.DataFrame.from_dict(_download, orient='index', columns=...)  # columns of ind_df
            time.sleep(.5)
            count += 1

    def get_data(self, df):
        i = 0
        while i < df.shape[0]:
            yield df.iloc[i]
            i += 1


class Thread2(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        while True:
            df = pd.DataFrame.from_dict(_download, orient='index', columns=...)  # columns of ind_df
            asyncio.run(self.apply_coroutines(df))
            time.sleep(1)

    async def apply_coroutines(self, df):
        await asyncio.gather(
            self.coroutine1(df),
            self.coroutine2(),
        )

    async def coroutine1(self, df):
        flag = np.random.rand(1)[0]
        if flag > .5:
            print(df.iloc[-1])
            ##########################################
            # WAIT until the last flag == .6 (example)
            ##########################################
        else:
            pass

    async def coroutine2(self):
        print('coroutine 2')
        await asyncio.sleep(.5)
Then we start the threads:
t = Thread1()
t.start()
s = Thread2()
s.start()
I didn't bother joining the threads here; I normally use a global boolean flag to break the while loops and then join the threads.
I am lost with the condition part. Thank you in advance.
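For reference, the usual way to express a "wait until the last value equals X" condition inside a coroutine is to poll with await asyncio.sleep(...) rather than block with time.sleep(...), so the event loop (and the other coroutines) keep running. A minimal sketch, assuming the shared _download dict from the snippet above and a single-column comparison; the function name and poll interval are illustrative:

import asyncio
import pandas as pd

async def wait_for_last_value(value, poll_interval=0.25):
    # Re-read the shared data on every pass so the condition sees new entries.
    while True:
        df = pd.DataFrame.from_dict(_download, orient='index')
        if not df.empty and df.iloc[-1, 0] == value:  # compare one column of the last row
            return
        await asyncio.sleep(poll_interval)  # yields control; coroutine2 keeps running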

Related

How to fix sys:1: RuntimeWarning: coroutine 'Position.new_position' was never awaited?

I am trying to create a program that posts orders on Binance and then waits until the orders close. We post 3 orders, and when only one order is left we do something, so we just need to "start" this function and move on. I am trying to use async, but the code in the function is never executed and I get the warning that coroutine 'Position.new_position' was never awaited. My code:
import requests
import pandas as pd
from binance.client import Client
from binance.helpers import round_step_size
import info
import math
import asyncio
from discord.ext import commands
import discord
import logging

client = Client(api_key=info.api, api_secret=info.secret_api, testnet=False)
DS = commands.Bot(command_prefix='/', intents=discord.Intents.all())
DS.remove_command("help")

def get_qsize(symbol):
    info = client.futures_exchange_info()
    for item in info['symbols']:
        if item['symbol'] == symbol:
            for f in item['filters']:
                if f['filterType'] == 'LOT_SIZE':
                    return f['stepSize']

def get_pricesize(symbol):
    info = client.futures_exchange_info()
    for item in info['symbols']:
        if item['symbol'] == symbol:
            for f in item['filters']:
                if f['filterType'] == 'PRICE_FILTER':
                    return f['tickSize']

for i in info.tokens:
    try:
        client.futures_change_margin_type(symbol=i, marginType='ISOLATED')
    except:
        continue

pos_list = {}

class Position:
    async def new_position(side, symbol, price, stopl, takep):
        await asyncio.sleep(60)
        print('OK')
        if side == "BUY":
            close_side = "SELL"
        else:
            close_side = "BUY"
        # calculating quantity
        b = client.futures_account_balance()
        b = pd.DataFrame.from_dict(b)
        b = b.loc[b['asset'] == 'USDT']
        balance = float(b['balance'].values) * 0.05
        q = balance / price * info.laverage
        # rounding
        q_size = get_qsize(symbol)
        price_size = get_pricesize(symbol)
        price = round_step_size(float(price), float(price_size))
        stopl = round_step_size(float(stopl), float(price_size))
        takep = round_step_size(float(takep), float(price_size))
        q = round_step_size(q, q_size)
        take_q = round_step_size(q / 2, q_size)
        # opening order
        client.futures_change_leverage(symbol=symbol, leverage=info.laverage)
        buyorder = client.futures_create_order(symbol=symbol, side=side, type="MARKET", quantity=q, isIsolated='TRUE')
        stop = client.futures_create_order(symbol=symbol, side="SELL", type="STOP_MARKET", stopPrice=stopl, closePosition="true")
        take = client.futures_create_order(symbol=symbol, side="SELL", type="TAKE_PROFIT_MARKET", stopPrice=takep, quantity=take_q)
        # monitoring
        a = True
        while a == True:
            print('OK')
            await asyncio.sleep(60)
            orders = client.futures_get_open_orders(symbol=symbol)
            if len(orders) == 1:
                try:
                    client.futures_cancel_order(symbol=symbol, orderId=take['orderId'], timestamp='true')
                    a = False
                except:
                    a = False

    def close_position(position):
        # post close order
        close = client.futures_create_order(symbol=position.symbol, side=position.close_side, type="MARKET", quantity=position.q, reduceOnly='true')
        # close orders
        try:
            client.futures_cancel_order(symbol=position.symbol, orderId=position.stop['orderId'], timestamp='true')
        except:
            pass
        try:
            client.futures_cancel_order(symbol=position.symbol, orderId=position.take['orderId'], timestamp='true')
        except:
            pass

@DS.command(aliases=["open"])
async def open_position(ctx, side, symbol, price, stopl, takep):
    global pos_list
    print(price)
    position = Position.new_position(side, symbol.replace("PERP", ""), float(price), float(stopl), float(takep))
    pos_list[symbol] = position
    print(pos_list)

@DS.command(aliases=["close"])
async def close_position(ctx, symbol):
    global pos_list
    symbol = symbol.replace("PERP", "")
    Position.close_position(pos_list[symbol])

DS.run(info.discord_token)
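For reference, the warning means that calling Position.new_position(...) only creates a coroutine object; nothing runs until it is awaited or scheduled. Since discord.py already runs an event loop inside the command handler, one common fix is to schedule the coroutine as a task so the handler can return immediately. A minimal sketch reusing the names from the question (storing the task handle in pos_list is an assumption about how the rest of the program tracks positions):

@DS.command(aliases=["open"])
async def open_position(ctx, side, symbol, price, stopl, takep):
    # Schedule the coroutine on the bot's running event loop instead of
    # leaving the coroutine object unawaited.
    task = asyncio.create_task(
        Position.new_position(side, symbol.replace("PERP", ""),
                              float(price), float(stopl), float(takep))
    )
    pos_list[symbol] = task  # keep a handle so it can be inspected or cancelled later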

Semaphore in Python

I'm trying to implement mutual exclusion using a semaphore in Python. The two processes (proc1, proc2) are supposed to be independent, concurrent processes. They do exactly the same thing: store n in array[n], then increment n.
The purpose of the program is to show that by using a semaphore we can ensure the array is filled properly as [0,1,2,3,4,5,6,7,8,9], without skipping any index. However, my code seems to produce [0,1,0,0,0,0,0,0,0,0]. I haven't used threads in Python before, so I don't know what's going on.
import threading
import time

n = 0
array = [0]*10
sem = threading.Semaphore()

def proc1():
    global n, array
    while True:
        sem.acquire()
        array[n] = n
        n += 1
        sem.release()
        time.sleep(0.25)

def proc2():
    global n, array
    while True:
        sem.acquire()
        array[n] = n
        n += 1
        sem.release()
        time.sleep(0.25)

t = threading.Thread(target=proc1)
t.start()
t2 = threading.Thread(target=proc2)
t2.start()

print(array)
The problem was that the OP tried to print the result before the threads were done; the main thread should have waited for them with join(). (As written, the worker loops also never terminate, so an exit condition is needed, as in the version below.)
import threading
import time

n = 0
array = [0]*10
sem = threading.Semaphore()

def proc(num):
    global n
    while True:
        sem.acquire()
        n = n + 1
        sem.release()
        if n > 9:
            break
        array[n] = n
        print("Thread {}: {}".format(num, array))
        time.sleep(0.25)

t1 = threading.Thread(target=proc, args=[1])
t2 = threading.Thread(target=proc, args=[2])
t1.start()
t2.start()
t1.join()
t2.join()
A different take on a semaphore pattern, handling the "tasks" within the semaphore object itself:
# Imports and a minimal Task container are added here so the snippet is
# self-contained; they were not shown in the original post.
import logging
from dataclasses import dataclass, field
from itertools import count
from threading import Thread
from typing import Callable

logger = logging.getLogger(__name__)

_task_ids = count()

@dataclass
class Task:
    func: Callable
    args: tuple
    task_id: int = field(default_factory=lambda: next(_task_ids))

class Sempahore:
    def __init__(self, max_threads):
        self.active_threads = 0
        self.max_threads = max_threads
        self.tasks = []

    def add_task(self, func, args):
        self.tasks.append(
            Task(
                func=func,
                args=args
            )
        )

    def run_task(self, task: Task):
        _func = task.func
        _args = task.args
        self.active_threads += 1
        _func(*_args)
        self.active_threads -= 1

    def run(self, blocking=False):
        if blocking:
            self._run()
        else:
            t = Thread(target=self._run)
            t.start()

    def _run(self):
        while True:
            if self.active_threads < self.max_threads:
                task = self.tasks.pop()
                logger.info(f'starting task: {task.task_id}')
                t = Thread(
                    target=self.run_task,
                    args=(task,))
                t.start()
            if len(self.tasks) == 0:
                break
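A short usage sketch (the worker function and its arguments are made up for illustration):

import time

def fetch(name):
    # Stand-in for real work; sleeps to simulate I/O.
    time.sleep(0.5)
    print(f'done: {name}')

pool = Sempahore(max_threads=2)
for name in ['a', 'b', 'c', 'd']:
    pool.add_task(fetch, (name,))
pool.run(blocking=True)  # or blocking=False to drain the queue in a background thread

Note that active_threads is updated from the worker threads without a lock, so the concurrency limit is only approximate; that is a property of the class above, not of this example.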

Python _thread: have each thread process the respective number in a sequence

I want the first thread to process the first, fifth, and ninth items; the second to get the second, sixth, and tenth; the third the third, seventh, and eleventh; and the fourth the fourth, eighth, and twelfth. I know it is a repeating pattern of (4*counter + original), but I am lost when it comes to wiring that into the actual _thread processing. Here is what I have so far.
If I am going about it wrong, then just tell me, because I am open to suggestions.
Edit: I am using Python 3.3.
def calc(threadName):
    testRange = 100
    testNumber = 100
    timesToTest = 25
    testCounter = 0
    if threadName == 'ThreadOne':
        testNumber = (testNumber) + 5*(testCounter)
    if threadName == 'ThreadTwo':
        testNumber = (testNumber+1) + 5*(testCounter)
    if threadName == 'ThreadThree':
        testNumber = (testNumber+2) + 5*(testCounter)
    if threadName == 'ThreadFour':
        testNumber = (testNumber+3) + 5*(testCounter)
    while testCounter < timesToTest:
        testCounter += 1
        while testRange >= 0:
            answer = ((testNumber*3) - ((testNumber-1)**2))
            testbool = isprime(answer)
            print('Testing '+str(testNumber)+' on '+str(threadName))
            testNumber += 1
            testRange -= 1
            if testbool:
                list.append((threadName, testNumber, answer))

threadOne = _thread.start_new_thread(calc, ('ThreadOne', ))
threadTwo = _thread.start_new_thread(calc, ('ThreadTwo', ))
threadThree = _thread.start_new_thread(calc, ('ThreadThree', ))
threadFour = _thread.start_new_thread(calc, ('ThreadFour', ))

while 1:
    pass
I tried this:
import threading
import queue

class Worker(threading.Thread):
    global results_list
    print('in main class')

    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name
        self.jobs_queue = queue.Queue()
        self.results_list = list()
        print('in init')

    def isprime(self, n):
        n = abs(int(n))
        print('in isprime')
        if n < 2:
            return False
        if n == 2:
            return True
        if not n & 1:
            return False
        for x in range(3, int(n**0.5)+1, 2):
            if n % x == 0:
                return False
        return True

    def run(self):
        print('in run')
        while True:
            testNumber = self.jobs_queue.get()
            if testNumber == "END":
                return
            # here, do your stuff with 'testNumber'
            # for example, let's multiply it by 2
            answer = ((testNumber**3) - ((testNumber-1)**3))
            testbool = self.isprime(answer)
            if testbool:  # results are appended to a list
                self.results_list.append((self.name, testNumber, answer))

    def calc(self, n):
        print('in calc')
        self.jobs_queue.put(n)
        if not self.is_alive():
            self.start()

    def get_result(self):
        print('in get_result')
        return self.results_list

    def stop(self):
        print('in stop')
        # tell the thread to stop,
        # once jobs in queue are done
        self.jobs_queue.put("END")
        self.join()

print('Anything')
workers = [Worker('thread 1'), Worker('thread 2'), Worker('thread 3'), Worker('thread 4')]
for n in range(100):
    print('here 1')
    w = workers[n % 4]
    w.calc(n)
for w in workers:
    w.stop()
for w in workers:
    x = 1
    # print(results_list)
As far as I understand, you want a pool of 4 worker threads, fairly distributing the same kind of "job" among those 4 threads.
I would do something more like this:
import threading
import queue

class Worker(threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name
        self.jobs_queue = queue.Queue()
        self.results_list = list()

    def run(self):
        while True:
            testNumber = self.jobs_queue.get()
            if testNumber == "END":
                return
            # here, do your stuff with 'testNumber'
            # for example, let's multiply it by 2
            answer = testNumber * 2
            # results are appended to a list
            self.results_list.append((self.name, testNumber, answer))

    def calc(self, n):
        self.jobs_queue.put(n)
        if not self.is_alive():
            self.start()

    def get_result(self):
        return self.results_list

    def stop(self):
        # tell the thread to stop,
        # once jobs in queue are done
        self.jobs_queue.put("END")
        self.join()

workers = [Worker('thread 1'), Worker('thread 2'), Worker('thread 3'), Worker('thread 4')]
for n in range(100):
    w = workers[n % 4]
    w.calc(n)
for w in workers:
    w.stop()
for w in workers:
    print(w.get_result())
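For comparison, if the strict "item n goes to worker n % 4" pinning is not actually required, the same fan-out can be written with the standard library's concurrent.futures.ThreadPoolExecutor; a minimal sketch with a placeholder job function:

from concurrent.futures import ThreadPoolExecutor

def job(n):
    # Placeholder work, mirroring the example above.
    return (n, n * 2)

# Unlike the per-thread queues above, the pool does not pin item n to a
# particular worker; it only guarantees at most 4 jobs run concurrently.
with ThreadPoolExecutor(max_workers=4) as pool:
    results = list(pool.map(job, range(100)))

print(results[:5])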

Python creating a shared variable between threads

I'm working on a project in Python using the "thread" module.
How can I make a global variable (in my case I need it to be True or False) that all the threads in my project (about 4-6) can access?
We can define the variable outside the thread classes and declare it global inside the methods of the classes.
See the trivial example below, which prints A and B alternately. Two variables, flag and val, are shared between the two threads Thread_A and Thread_B. Thread_A prints val=20 and then sets val to 30. Thread_B prints val=30, since val was modified in Thread_A, and then sets val back to 20, which Thread_A uses again. This demonstrates that val is shared between the two threads; flag is shared in the same way.
import threading
import time

c = threading.Condition()
flag = 0    # shared between Thread_A and Thread_B
val = 20

class Thread_A(threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name

    def run(self):
        global flag
        global val  # made global here
        while True:
            c.acquire()
            if flag == 0:
                print("A: val=" + str(val))
                time.sleep(0.1)
                flag = 1
                val = 30
                c.notify_all()
            else:
                c.wait()
            c.release()

class Thread_B(threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name

    def run(self):
        global flag
        global val  # made global here
        while True:
            c.acquire()
            if flag == 1:
                print("B: val=" + str(val))
                time.sleep(0.5)
                flag = 0
                val = 20
                c.notify_all()
            else:
                c.wait()
            c.release()

a = Thread_A("myThread_name_A")
b = Thread_B("myThread_name_B")
b.start()
a.start()
a.join()
b.join()
Output looks like
A: val=20
B: val=30
A: val=20
B: val=30
A: val=20
B: val=30
A: val=20
B: val=30
Each thread prints the value which was modified in another thread.
With no clue as to what you are really trying to do, either go with nio's approach and use locks, or consider condition variables:
From the docs
# Consume one item
cv.acquire()
while not an_item_is_available():
    cv.wait()
get_an_available_item()
cv.release()

# Produce one item
cv.acquire()
make_an_item_available()
cv.notify()
cv.release()
You can use this to let one thread tell another that a condition has been met, without having to think about the locks explicitly. This example uses cv to signal that an item is available.
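A self-contained sketch of the same pattern, filling in the docs' helper functions with a simple shared list (the names are illustrative):

import threading
import time

cv = threading.Condition()
items = []  # shared resource guarded by cv

def consumer():
    with cv:  # acquire/release via context manager
        while not items:  # wait until an item is available
            cv.wait()
        item = items.pop()
    print("consumed", item)

def producer():
    time.sleep(0.5)
    with cv:
        items.append("hello")  # make an item available
        cv.notify()  # wake one waiting consumer

t1 = threading.Thread(target=consumer)
t2 = threading.Thread(target=producer)
t1.start()
t2.start()
t1.join()
t2.join()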
How about using a threading.Event object per this description?
For example in the script below, worker1 and worker2 share an Event, and when worker2 changes its value this is seen by worker1:
import time
from threading import Thread, Event

shared_bool = Event()

def worker1(shared_bool):
    while True:
        if shared_bool.is_set():
            print("value is True, quitting")
            return
        else:
            print("value is False")
            time.sleep(1)

def worker2(shared_bool):
    time.sleep(2.5)
    shared_bool.set()

t1 = Thread(target=worker1, args=(shared_bool, ))
t2 = Thread(target=worker2, args=(shared_bool, ))
t1.start()
t2.start()
t1.join()
t2.join()
Prints out:
value is False
value is False
value is False
value is True, quitting

Cancellable threading.Timer in Python

I am trying to write a method that counts down to a given time and, unless a restart command is given, executes the task. But I don't think the Python threading.Timer class allows the timer to be cancelled.
import threading

def countdown(action):
    def printText():
        print('hello!')

    t = threading.Timer(5.0, printText)
    if action == 'reset':
        t.cancel()
    t.start()
I know the above code is wrong somehow. I would appreciate some guidance here.
You would call the cancel method after you start the timer:
import time
import threading

def hello():
    print("hello, world")
    time.sleep(2)

t = threading.Timer(3.0, hello)
t.start()

var = 'something'
if var == 'something':
    t.cancel()
You might consider using a while-loop on a Thread, instead of using a Timer.
Here is an example appropriated from Nikolaus Gradwohl's answer to another question:
import threading
import time

class TimerClass(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.event = threading.Event()
        self.count = 10

    def run(self):
        while self.count > 0 and not self.event.is_set():
            print(self.count)
            self.count -= 1
            self.event.wait(1)

    def stop(self):
        self.event.set()

tmr = TimerClass()
tmr.start()

time.sleep(3)

tmr.stop()
I'm not sure if I understand correctly. Do you want to write something like in this example?
>>> import threading
>>> t = None
>>>
>>> def sayHello():
...     global t
...     print("Hello!")
...     t = threading.Timer(0.5, sayHello)
...     t.start()
...
>>> sayHello()
Hello!
Hello!
Hello!
Hello!
Hello!
>>> t.cancel()
>>>
The threading.Timer class does have a cancel method, and although it won't cancel the thread, it will stop the timer from actually firing. What actually happens is that the cancel method sets a threading.Event, and the thread actually executing the threading.Timer will check that event after it's done waiting and before it actually executes the callback.
That said, timers are usually implemented without using a separate thread for each one. The best way to do it depends on what your program is actually doing while waiting for this timer, but anything with an event loop, such as GUI and network frameworks, has a way to request a timer that is hooked into the event loop.
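For example, with asyncio (one event-loop framework in the standard library), a loop-integrated timer can be scheduled and cancelled without a dedicated thread; a minimal sketch:

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    # Schedule a callback to fire in 3 seconds; no extra thread is used.
    handle = loop.call_later(3.0, print, "timer fired")
    await asyncio.sleep(1.0)
    handle.cancel()  # cancelled before it fires, so nothing is printed
    await asyncio.sleep(3.0)

asyncio.run(main())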
I'm not sure if it is the best option, but for me it works like this: create each timer with t = timer_mgr(.....), append it to a list with timers.append(t), and then, once all of them are created, you can call:

for tm in timers:  # threading.enumerate():
    print("********", tm.cancel())

My timer_mgr() class is this:
import threading
from datetime import datetime

class timer_mgr():
    def __init__(self, st, t, hFunction, id, name):
        self.is_list = (type(st) is list)
        self.st = st
        self.t = t
        self.id = id
        self.hFunction = hFunction
        self.thread = threading.Timer(t, self.handle_function, [id])
        self.thread.name = name

    def handle_function(self, id):
        if self.is_list:
            print("run_at_time:", datetime.now())
            self.hFunction(id)
            dt = schedule_fixed_times(datetime.now(), self.st)  # author's helper, not shown here
            print("next:", dt)
            self.t = (dt - datetime.now()).total_seconds()
        else:
            self.t = self.st
            print("run_every", self.t, datetime.now())
            self.hFunction(id)
        self.thread = threading.Timer(self.t, self.handle_function, [id])
        self.thread.start()

    def start(self):
        self.thread.start()

    def cancel(self):
        self.thread.cancel()
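A short usage sketch with illustrative values (a scalar st, so handle_function takes the "run every" branch):

def my_func(id):
    print("tick", id)

# Run my_func(1) every 5 seconds under the thread name "timer-1".
tm = timer_mgr(st=5, t=5, hFunction=my_func, id=1, name="timer-1")
tm.start()
# ... later ...
tm.cancel()  # cancels the currently scheduled run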
Inspired by the post above, here is a cancellable and resettable timer in Python. It uses a thread.
Features: start, stop, restart, and a callback function.
Input: timeout, sleep_chunk values, and a callback_function.
The class can be used or inherited in any other program, and arguments can be passed to the callback function.
The timer should also respond in the middle of the interval, not just after the full sleep time has elapsed, so instead of one long sleep it sleeps in small chunks and keeps checking an event object in a loop.
import threading
import time

class TimerThread(threading.Thread):
    def __init__(self, timeout=3, sleep_chunk=0.25, callback=None, *args):
        threading.Thread.__init__(self)
        self.timeout = timeout
        self.sleep_chunk = sleep_chunk
        if callback == None:
            self.callback = None
        else:
            self.callback = callback
        self.callback_args = args
        self.terminate_event = threading.Event()
        self.start_event = threading.Event()
        self.reset_event = threading.Event()
        self.count = self.timeout/self.sleep_chunk

    def run(self):
        while not self.terminate_event.is_set():
            while self.count > 0 and self.start_event.is_set():
                # print(self.count)
                # time.sleep(self.sleep_chunk)
                # if self.reset_event.is_set():
                if self.reset_event.wait(self.sleep_chunk):  # wait for a small chunk of timeout
                    self.reset_event.clear()
                    self.count = self.timeout/self.sleep_chunk  # reset
                self.count -= 1
            if self.count <= 0:
                self.start_event.clear()
                # print('timeout. calling function...')
                self.callback(*self.callback_args)
                self.count = self.timeout/self.sleep_chunk  # reset

    def start_timer(self):
        self.start_event.set()

    def stop_timer(self):
        self.start_event.clear()
        self.count = self.timeout / self.sleep_chunk  # reset

    def restart_timer(self):
        # reset only if timer is running. otherwise start timer afresh
        if self.start_event.is_set():
            self.reset_event.set()
        else:
            self.start_event.set()

    def terminate(self):
        self.terminate_event.set()

#=================================================================
def my_callback_function():
    print('timeout, do this...')

timeout = 6  # sec
sleep_chunk = .25  # sec

tmr = TimerThread(timeout, sleep_chunk, my_callback_function)
tmr.start()

quit = '0'
while True:
    quit = input("Proceed or quit: ")
    if quit == 'q':
        tmr.terminate()
        tmr.join()
        break
    tmr.start_timer()
    if input("Stop ? : ") == 's':
        tmr.stop_timer()
    if input("Restart ? : ") == 'r':
        tmr.restart_timer()
