I have two servers in Python and I want to combine them into one single .py file and run them together:
Server.py:
import logging, time, os, sys
from yowsup.layers import YowLayerEvent, YowParallelLayer
from yowsup.layers.auth import AuthError
from yowsup.layers.network import YowNetworkLayer
from yowsup.stacks.yowstack import YowStackBuilder
from layers.notifications.notification_layer import NotificationsLayer
from router import RouteLayer
class YowsupEchoStack(object):
    def __init__(self, credentials):
        "Creates the stacks of the Yowsup Server."
        self.credentials = credentials
        stack_builder = YowStackBuilder().pushDefaultLayers(True)
        stack_builder.push(YowParallelLayer([RouteLayer, NotificationsLayer]))
        self.stack = stack_builder.build()
        self.stack.setCredentials(credentials)

    def start(self):
        self.stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
        try:
            logging.info("#" * 50)
            logging.info("\tServer started. Phone number: %s" % self.credentials[0])
            logging.info("#" * 50)
            self.stack.loop(timeout=0.5, discrete=0.5)
        except AuthError as e:
            logging.exception("Authentication Error: %s" % e.message)
            if "<xml-not-well-formed>" in str(e):
                os.execl(sys.executable, sys.executable, *sys.argv)
        except Exception as e:
            logging.exception("Unexpected Exception: %s" % e.message)

if __name__ == "__main__":
    import sys
    import config
    logging.basicConfig(stream=sys.stdout, level=config.logging_level, format=config.log_format)
    server = YowsupEchoStack(config.auth)
    while True:
        # In case of disconnect, keeps connecting...
        server.start()
        logging.info("Restarting..")
App.py:
import web
urls = (
    '/', 'index'
)

app = web.application(urls, globals())

class index:
    def GET(self):
        greeting = "Hello World"
        return greeting

if __name__ == "__main__":
    app.run()
I want to run both of them together from a single .py file.
If I try to run them from one file, only one of them starts, and the other one starts only after the first one is done working.
How can I run two servers in Python together?
import thread
def run_app1():
    pass  # something goes here

def run_app2():
    pass  # something goes here

if __name__=='__main__':
    thread.start_new_thread(run_app1, ())
    thread.start_new_thread(run_app2, ())
if you need to pass args to the functions you can do:
thread.start_new_thread(run_app1, (arg1,arg2,....))
if you want more control in your threads you could go:
import threading

def app1():
    pass  # something here

def app2():
    pass  # something here

if __name__=='__main__':
    t1 = threading.Thread(target=app1)
    t2 = threading.Thread(target=app2)
    t1.start()
    t2.start()
if you need to pass args you can go:
t1 = threading.Thread(target=app1, args=(arg1,arg2,arg3.....))
What's the difference between thread and threading? threading is a higher-level module than thread, and in 3.x thread got renamed to _thread. More info here: http://docs.python.org/library/threading.html, but that's for another question, I guess.
So in your case, just make one function that runs the first script and another that runs the second script, and spawn threads to run them.
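For the two servers in the question, a minimal sketch (assuming Server.py and App.py have been merged into the one file, so YowsupEchoStack, app, config and logging are all available there) could look like this:

import threading

def run_yowsup():
    # same loop as the original Server.py main block: reconnect forever
    server = YowsupEchoStack(config.auth)
    while True:
        server.start()
        logging.info("Restarting..")

def run_web():
    # web.py's app.run() blocks, so give it its own thread
    app.run()

if __name__ == '__main__':
    t1 = threading.Thread(target=run_yowsup)
    t2 = threading.Thread(target=run_web)
    t1.start()
    t2.start()
    t1.join()
    t2.join()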
Related
I'm starting a webserver in a new thread. After all tests are run, I want to kill the child thread with the running server inside. The only solution I have found is to interrupt the entire process, with all its threads, by calling os.system('kill %d' % os.getpid()) (see the code below). I'm not sure it's the smartest solution, and I'm not sure all threads will actually be killed. Could I send some kind of "keyboard interrupt" signal to stop the thread before exiting the main thread?
import http.server
import os
import sys
import unittest
import time
import requests
import threading
from addresses import handle_get_addresses, load_addresses
from webserver import HTTPHandler

def run_in_thread(fn):
    def run(*k, **kw):
        t = threading.Thread(target=fn, args=k, kwargs=kw)
        t.start()
        return t
    return run

@run_in_thread
def start_web_server():
    web_host = 'localhost'
    print("starting server...")
    web_port = 8808
    httpd = http.server.HTTPServer((web_host, web_port), HTTPHandler)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass

class TestAddressesApi(unittest.TestCase):
    WEB_SERVER_THREAD: threading.Thread = None

    @classmethod
    def setUpClass(cls):
        cls.WEB_SERVER_THREAD = start_web_server()
        pass

    @classmethod
    def tearDownClass(cls):
        print("shutting down the webserver...")
        # here something like cls.WEB_SERVER_THREAD.terminate()
        # instead of the line below
        os.system('kill %d' % os.getpid())

    def test_get_all_addresses(self):
        pass

    def test_1(self):
        pass

if __name__ == "__main__":
    unittest.main()
Maybe threading.Event is what you wanted.
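A generic sketch of that idea (not tied to HTTPServer; the worker loop just checks the event on every iteration and the main thread sets it when it wants the worker to stop):

import threading
import time

stop_event = threading.Event()

def worker():
    # run until the main thread signals us to stop
    while not stop_event.is_set():
        time.sleep(0.1)  # do one unit of work here

t = threading.Thread(target=worker)
t.start()
# ... run the tests ...
stop_event.set()  # ask the worker to finish
t.join()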
Just found a solution: daemon threads stop executing when the main thread stops working.
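Applied to the test case above, a sketch of that would be to mark the server thread as a daemon inside run_in_thread, so it is torn down automatically when the main (test) thread exits and the os.system kill in tearDownClass is no longer needed:

import threading

def run_in_thread(fn):
    def run(*k, **kw):
        t = threading.Thread(target=fn, args=k, kwargs=kw)
        t.daemon = True  # daemon threads are stopped when the main thread exits
        t.start()
        return t
    return run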
Currently, I have a little Python script running that makes some web requests.
I am absolutely new to Python, so I took a bare-bones script I found, and it uses multiple threads (see the end of the question for the full script):
if __name__ == '__main__':
    threads = []
    for i in range(THREAD_COUNT):
        t = Thread(target=callback)
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
However, this script feels kind of slow, as if it makes the requests one after another rather than at the same time.
So I took another approach and tried to find out more about workers and multi-threading.
It seems "workers" are the way to go, instead of threads?
So I took the following from a tutorial and modified it a little:
import logging
import os
from queue import Queue
from threading import Thread
from time import time
from multi import callback
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class DownloadWorker(Thread):
    def __init__(self, queue):
        Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            # that is my Function in Multi.py (A simple Web Request Function)
            try:
                callback()
            finally:
                self.queue.task_done()

if __name__ == '__main__':
    ts = time()
    queue = Queue()
    for x in range(8):
        worker = DownloadWorker(queue)
        worker.daemon = True
        worker.start()
    # I put that here, because I want to run my "Program" infinite times
    for i in range(500000):
        logger.info('Queueing')
        queue.put(i)
    queue.join()
    logging.info('Took %s', time() - ts)
I am not sure if this is the correct approach. From my understanding, I created 8 workers, and with queue.put(i) I give them jobs (500,000 in this case?), passing them the current counter (which does nothing, but it seems to be required?).
After it is done queueing, the function is executed, as I can see in my console.
However, it still feels as slow as before?
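For reference, the usual worker pattern (as in the queue module docs) has each worker pull a job out of the queue with queue.get(), so one queued item corresponds to one unit of work; the run() above never calls get(), which may not be what was intended. A sketch:

from queue import Queue
from threading import Thread
from multi import callback  # the request function from Multi.py

class DownloadWorker(Thread):
    def __init__(self, queue):
        Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            item = self.queue.get()  # block until a job is available
            try:
                callback()           # process one job (item itself is unused here)
            finally:
                self.queue.task_done()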
(My Original Request File)
from threading import Thread
import requests
import json
import string
import urllib3
import threading
THREAD_COUNT = 5
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def callback():
    counter = 0
    try:
        while True:
            print("Prozess " + str(threading.get_ident()) + " " + str(counter))
            counter = counter + 1
            response = requests.post('ourAPIHere', verify=False, json={"pingme": "hello"})
            json_data = json.loads(response.text)
            if json_data["status"] == "error":
                print("Server Error? Check logs!")
            if json_data["status"] == "success":
                print("OK")
    except KeyboardInterrupt:
        return

if __name__ == '__main__':
    threads = []
    for i in range(THREAD_COUNT):
        t = Thread(target=callback)
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
tl;dr: How can I programmatically execute a Python module (not a function) as a separate process from a different Python module?
On my development laptop, I have a 'server' module containing a bottle server. In this module, the if __name__ == '__main__' clause starts the bottle server.
@bt_app.post("/")
def server_post():
    << Generate response to 'http://server.com/' >>

if __name__ == '__main__':
    serve(bt_app, host='localhost', port=8080)
I also have a 'test_server' module containing pytests. In this module, the if __name__ == '__main__' clause runs pytest and displays the results.
import pytest

def test_something():
    _rtn = some_server_function()
    assert _rtn == desired

if __name__ == '__main__':
    _rtn = pytest.main([__file__])
    print("Pytest returned: ", _rtn)
Currently, I manually run the server module (starting the web server on localhost), then I manually start the pytest module, which issues HTTP requests to the running server module and checks the responses.
Sometimes I forget to start the server module. No big deal but annoying. So I'd like to know if I can programmatically start the server module as a separate process from the pytest module (just as I'm doing manually now) so I don't forget to start it manually.
Thanks
Here is my test case dir tree:
test
├── server.py
└── test_server.py
server.py starts a web server with Flask.
from flask import Flask

app = Flask(__name__)

@app.route('/')
def hello_world():
    return 'Hello, World!'

if __name__ == '__main__':
    app.run()
test_server.py makes a request to test it.
import sys
import requests
import subprocess
import time
p = None # server process
def start_server():
    global p
    sys.path.append('/tmp/test')
    # here you may want to do some check,
    # e.g. whether the server is already started; if so, skip this function
    kwargs = {}  # here u can pass other args needed
    p = subprocess.Popen(['python', 'server.py'], **kwargs)

def test_function():
    response = requests.get('http://localhost:5000/')
    print('This is response body: ', response.text)

if __name__ == '__main__':
    start_server()
    time.sleep(3)  # waiting for the server to start
    test_function()
    p.kill()
Then you can run python test_server.py to start the server and run the test cases.
PS: subprocess.run() needs Python 3.5+; on older versions, use Popen() (as above) instead.
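As an aside, the fixed time.sleep(3) is just a guess at how long startup takes; a more robust wait (a sketch, with made-up URL and retry values) polls the endpoint until the server answers:

import time
import requests

def wait_for_server(url='http://localhost:5000/', attempts=30):
    # poll until the server responds or we give up
    for _ in range(attempts):
        try:
            requests.get(url, timeout=0.5)
            return True
        except requests.exceptions.ConnectionError:
            time.sleep(0.2)
    return False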
import logging
import threading
import time
def thread_function(name):
    logging.info("Thread %s: starting", name)
    time.sleep(2)
    logging.info("Thread %s: finishing", name)

if __name__ == "__main__":
    format = "%(asctime)s: %(message)s"
    logging.basicConfig(format=format, level=logging.INFO,
                        datefmt="%H:%M:%S")

    threads = list()
    for index in range(3):
        logging.info("Main : create and start thread %d.", index)
        x = threading.Thread(target=thread_function, args=(index,))
        threads.append(x)
        x.start()

    for index, thread in enumerate(threads):
        logging.info("Main : before joining thread %d.", index)
        thread.join()
        logging.info("Main : thread %d done", index)
With threading you can run multiple threads at once!
Wim basically answered this question. I looked into the subprocess module. While reading up on it, I stumbled on the os.system function.
In short, subprocess is a highly flexible and full-featured module for running programs; os.system, on the other hand, is much simpler, with far fewer features.
Just running a Python module is simple, so I settled on os.system.
import os

# note: "python -m" expects a module name, not a file path, so run the file directly
server_path = "python ../src/server.py"
os.system(server_path)
Wim, thanks for the pointer. Had it been a full fledged answer I would have upvoted it. Redo it as a full fledged answer and I'll do so.
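For comparison, a minimal subprocess-based sketch of the same thing (same relative path assumed); unlike os.system, Popen returns immediately instead of blocking until the command finishes, and it hands back a handle you can terminate later:

import subprocess

# start the server module as a separate, non-blocking child process
server_proc = subprocess.Popen(["python", "../src/server.py"])
# ... run the tests against the server ...
server_proc.terminate()  # stop the server when done
server_proc.wait()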
Async to the rescue.
import gevent
from gevent import monkey, spawn, sleep
monkey.patch_all()
from gevent.pywsgi import WSGIServer

@bt_app.post("/")
def server_post():
    << Generate response to 'http://server.com/' >>

def test_something():
    _rtn = some_server_function()
    assert _rtn == desired
    print("Pytest returned: ", _rtn)
    sleep(0)

if __name__ == '__main__':
    spawn(test_something)  # runs async
    server = WSGIServer(("0.0.0.0", 8080), bt_app)
    server.serve_forever()
I have a few scripts that I want to run simultaneously; they read a CSV file. I'm trying the following:
import sys
import csv
out = open(r"C:\PYDUMP\PYDUMPINST.csv", "r")  # raw string so backslashes are kept literal
dataf = csv.reader(out)
for row in dataf:
    take = row[0]
    give = row[1]

def example():
    try:
        lfo = int(take)
        if lfo > 0:
            pass  # code
    except Exception, e:
        pass

example()
This is saved as takefile1.py. I have 20 scripts with similar structures that I want to run simultaneously, so I'm using the following (which I have been using for running other batches of scripts trouble-free):
import csv
import sys
from threading import Thread
def firstsend_lot():
    # raw strings so sequences like "\t" are not treated as escape characters
    execfile(r"C:\Users\takefile1.py")
    execfile(r"C:\Users\takefile2.py")

def secondsend_lot():
    execfile(r"C:\Users\takefile3.py")
    execfile(r"C:\Users\takefile4.py")

if __name__ == '__main__':
    Thread(target = firstsend_lot).start()
    Thread(target = secondsend_lot).start()
So I am getting the error "global name 'take' is not defined". Anyone got any suggestions? I'm pretty hopeless at Python, so pretend you are talking to an idiot.
Your function example() does not have access to take. Try adding a line to it:
def example():
    global take
    try:
        lfo = int(take)
        if lfo > 0:
            pass  # code
    except Exception, e:
        pass
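An alternative sketch (an assumption about the intent, not part of the answer above): pass the values into the function explicitly, for example once per CSV row, instead of relying on globals:

def example(take, give):
    try:
        lfo = int(take)
        if lfo > 0:
            pass  # code
    except Exception:
        pass

for row in dataf:  # dataf is the csv.reader from takefile1.py
    example(row[0], row[1])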
I am trying to write a dbus server where I want to run an external shell program (grep here) to do the job.
When I do:
prompt$ server.py
then:
prompt$ client.py # works fine, i.e. runs the grep command in a child process.
prompt$ client.py # ..., but the second invocation produces the following error message:
DBusException: org.freedesktop.DBus.Error.ServiceUnknown: The name org.example.ExampleService was not provided by any .service files
I am stuck. Are you able to help me?
here is server.py (client.py thereafter):
import gtk, glib
import os
import dbus
import dbus.service
import dbus.mainloop.glib
import subprocess
messages_queue = list()
grep_pid = 0

def queue_msg(message):
    global messages_queue
    messages_queue.append(message)
    return

def dequeue_msg():
    global messages_queue, grep_pid
    if grep_pid != 0:
        try:
            pid = os.waitpid(grep_pid, os.P_NOWAIT)
        except:
            return True
        if pid[0] == 0:
            return True
        grep_pid = 0
    if len(messages_queue) == 0:
        return True
    else:
        tekst = messages_queue.pop(0)
        cmd = "grep 'pp'"
        print cmd
        #works fine, when I do return here
        #return True
        grep_pid = os.fork()
        if grep_pid != 0:
            return True
        os.setpgid(0, 0)
        pop = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
        pop.stdin.write(tekst)
        pop.stdin.close()
        pop.wait()
        exit(0)

class DemoException(dbus.DBusException):
    _dbus_error_name = 'org.example.Exception'

class MyServer(dbus.service.Object):
    @dbus.service.method("org.example.ExampleInterface",
                         in_signature='', out_signature='')
    def QueueMsg(self):
        queue_msg("ppppp")

    @dbus.service.method("org.example.ExampleInterface",
                         in_signature='', out_signature='')
    def Exit(self):
        mainloop.quit()

from dbus.mainloop.glib import threads_init

if __name__ == '__main__':
    glib.threads_init()
    threads_init()
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    session_bus = dbus.SessionBus()
    name = dbus.service.BusName("org.example.ExampleService", session_bus)
    object = MyServer(session_bus, '/My')
    glib.timeout_add_seconds(1, dequeue_msg)
    mainloop = glib.MainLoop()
    print "Running example service."
    mainloop.run()
now client.py:
import sys
from traceback import print_exc
import dbus
def main():
    bus = dbus.SessionBus()
    try:
        remote_object = bus.get_object("org.example.ExampleService",
                                       "/My")
    except dbus.DBusException:
        print_exc()
        sys.exit(1)

    iface = dbus.Interface(remote_object, "org.example.ExampleInterface")
    iface.QueueMsg()

    if sys.argv[1:] == ['--exit-service']:
        iface.Exit()

if __name__ == '__main__':
    main()
You usually get this error message when you try to access a service that is no longer available. Check if your server is still running.
You can use d-feet to debug your dbus connections.
The error message about the missing .service file means that you need to create a service file in dbus-1/services.
For example:
# /usr/local/share/dbus-1/services/org.example.ExampleService.service
[D-BUS Service]
Name=org.example.ExampleService
Exec=/home/user1401567/service.py
A lot of tutorials don't include this detail (maybe .service files didn't use to be required?). But, at least on Ubuntu 12.04, dbus services can't be connected to without it.