How to run multiple Julia functions from a Python multiprocessing pool using juliacall

I want to run Julia functions/scripts from within Python. I managed to call Julia scripts via the juliacall library. Now I want to parallelize this, so I created a Python multiprocessing Pool and call the Julia script from each worker. However, this fails with the following message:
python: /buildworker/worker/package_linux64/build/src/debuginfo.cpp:1634: void register_eh_frames(uint8_t*, size_t): Assertion `end_ip != 0' failed.
How can I debug this further?
Here is my minimal working example:
import os
os.environ['PYTHON_JULIAPKG_EXE'] = "/home/user/.juliaup/bin/julia"
os.environ['PYTHON_JULIAPKG_OFFLINE'] = 'yes'
os.environ['PYTHON_JULIAPKG_PROJECT'] = '/home/user/.julia/environments/v1.6/'
from juliacall import Main as jl, convert as jlconvert
from multiprocessing import Pool
from tqdm import tqdm
import ipdb

def init_worker():
    import os
    os.environ['PYTHON_JULIAPKG_EXE'] = "/home/user/.juliaup/bin/julia"
    os.environ['PYTHON_JULIAPKG_OFFLINE'] = 'yes'
    os.environ['PYTHON_JULIAPKG_PROJECT'] = '/home/user/.julia/environments/v1.6/'
    from juliacall import Main as jl, convert as jlconvert
    print('in init_worker()...')
    jl.seval('using Pkg')
    jl.seval('Pkg.status()')
    print('...done')

def compute(jobid):
    print(f'in main({jobid})...')
    jl.seval('include("test_julia_simple.jl")')
    print('...done')
    return

def main():
    njobs = 10
    # start pool with init_worker() as initializer
    with Pool(2, initializer=init_worker) as p, tqdm(total=njobs) as pbar:
        res = []
        for jid in range(njobs):
            res.append(p.apply_async(compute, (jid,)))
        for r in res:
            r.get()
            pbar.update(1)

if __name__ == "__main__":
    main()
And the Julia script test_julia_simple.jl:
for i in 1:10
    println(i)
end
1+2
Additional info:
$ python --version
Python 3.9.7
$ pip freeze | grep julia
juliacall==0.9.10
juliapkg==0.1.9
$ julia --version
The latest version of Julia in the `1.6` channel is 1.6.7+0.x64.linux.gnu. You currently have `1.6.6+0~x64` installed. Run:
juliaup update
to install Julia 1.6.7+0.x64.linux.gnu and update the `1.6` channel to that version.
julia version 1.6.6
I'm not sure if this is related, but the error message is nearly identical to this issue:
https://github.com/JuliaLang/julia/issues/44969
Following a comment, I tried using a thread pool instead, but in that case Python fails with a segmentation fault:
import os
os.environ['PYTHON_JULIAPKG_EXE'] = "/home/user/.juliaup/bin/julia"
os.environ['PYTHON_JULIAPKG_OFFLINE'] = 'yes'
os.environ['PYTHON_JULIAPKG_PROJECT'] = '/home/user/.julia/environments/v1.6/'
from juliacall import Main as jl, convert as jlconvert
import concurrent.futures
from tqdm import tqdm
import ipdb

def compute(jobid):
    print(f'in main({jobid})...')
    print('in init_worker()...')
    jl.seval('using Pkg')
    jl.seval('Pkg.status()')
    print('...done')
    jl.seval('include("test_julia_simple.jl")')
    print('...done')
    return

def main():
    njobs = 10
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        with tqdm(total=njobs) as pbar:
            jobs = {executor.submit(compute, jid): jid for jid in range(njobs)}
            for future in concurrent.futures.as_completed(jobs):
                jid = jobs[future]
                try:
                    data = future.result()
                except Exception as exc:
                    print('%r generated an exception: %s' % (jid, exc))
                else:
                    print('%r finished with result %r' % (jid, data))
                pbar.update(1)

if __name__ == "__main__":
    main()
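For what it's worth, the answer to the first related question below boils down to initializing Julia inside the child process rather than in the parent before forking. Here is a hedged sketch of the same idea applied to juliacall (untested; it assumes importing juliacall only inside each worker, and uses the 'spawn' start method so workers start from a clean interpreter):

from multiprocessing import get_context

def init_worker():
    # First Julia initialization happens here, inside the child process
    global jl
    from juliacall import Main as jl

def compute(jobid):
    return jl.seval('1 + 2')

def main():
    ctx = get_context('spawn')
    with ctx.Pool(2, initializer=init_worker) as p:
        results = [p.apply_async(compute, (jid,)) for jid in range(10)]
        for r in results:
            print(r.get())

if __name__ == '__main__':
    main()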

Related

Error when targeting a Julia function with Python's multiprocessing.Process

I am trying to parallelize code in Python by using a multiprocessing.Process that targets a Julia function.
The function works fine when I call it directly, i.e. when I execute:
if __name__ == "__main__":
    import julia
    julia.Julia(compiled_modules=False)
    julia.Pkg_jl.func_jl(*args)
However, I get an error when I use the same function as the target of a Process.
This is the code:
from multiprocessing import Process
import julia
julia.Julia(compiled_modules=False)

class JuliaProcess(object):
    ...
    def _wrapper(self, *args):
        ret = julia.Pkg_jl.func_jl(args)
        self.queue.put(ret)  # save the result of the function
    def run(self, *args):
        p = Process(target=self._wrapper, args=args)
        self.processes.append(p)  # keep a handle on the process
        p.start()
    ...

if __name__ == "__main__":
    ...
    Jlproc = JuliaProcess()
    Jlproc.run(some_args)
The error occurs when the Process starts, with the following output:
fatal: error thrown and no exception handler available.
ReadOnlyMemoryError()
unknown function (ip: 0x7f9df81cb8f0)
...
If I instead initialize Julia (compiling its modules) in the _wrapper function, i.e.:
from multiprocessing import Process
import julia

class JuliaProcess(object):
    ...
    def _wrapper(self, *args):
        julia.Julia(compiled_modules=False)
        ret = julia.Pkg_jl.func_jl(args)
        self.queue.put(ret)  # save the result of the function
    def run(self, *args):
        p = Process(target=self._wrapper, args=args)
        self.processes.append(p)  # keep a handle on the process
        p.start()
    ...

if __name__ == "__main__":
    ...
    Jlproc = JuliaProcess()
    Jlproc.run(some_args)
I have the following error:
raise JuliaError(u'Exception \'{}\' occurred while calling julia code:\n{}'
julia.core.JuliaError: Exception 'ReadOnlyMemoryError' occurred while calling julia code:
const PyCall = Base.require(Base.PkgId(Base.UUID("438e738f-606a-5dbb-bf0a-cddfbfd45ab0"), "PyCall"))
...
Does anyone know what is happening, and whether it is possible to parallelize Julia functions from Python as I suggest?
I finally solved the error.
The syntax is not the problem; what matters is the process in which Julia is initialized.
In the first code, the error comes from the call
julia.Julia(compiled_modules=False)
being made in the parent process, before the child process is started.
The second code works fine because that expression runs, and Julia precompiles, inside the target process.
Below, I share an example that works fine if you have Julia and PyCall duly installed.
#!/usr/bin/env python3
# coding=utf-8
from multiprocessing import Process, Queue
import julia

class JuliaProcess(object):
    def __init__(self):
        self.processes = []
        self.queue = Queue()

    def _wrapper(self, *args):
        julia.Julia(compiled_modules=False)  # initialize Julia inside the child process
        from julia import LinearAlgebra as LA
        ret = LA.dot(args[0], args[1])
        self.queue.put(ret)  # save the result of the function

    def run(self, *args):
        p = Process(target=self._wrapper, args=args)
        self.processes.append(p)  # keep a handle on the process
        p.start()

    def wait(self):
        self.rets = []
        for p in self.processes:
            ret = self.queue.get()
            self.rets.append(ret)
        for p in self.processes:
            p.join()

if __name__ == "__main__":
    jp = JuliaProcess()
    jp.run([1, 5, 6], [1, 3, 2])
    jp.wait()
    print(jp.rets)

RuntimeError: Python multiprocessing not using fork to start child processes, and forgot to use the proper idiom in the main module? [duplicate]

I am trying my very first formal Python program using threading and multiprocessing on a Windows machine. I am unable to launch the processes, though, with Python giving the following message. The thing is, I am not launching my threads in the main module; the threads are handled in a separate module inside a class.
EDIT: By the way, this code runs fine on Ubuntu, just not on Windows.
RuntimeError:
    Attempt to start a new process before the current process
    has finished its bootstrapping phase.

    This probably means that you are on Windows and you have
    forgotten to use the proper idiom in the main module:

        if __name__ == '__main__':
            freeze_support()
            ...

    The "freeze_support()" line can be omitted if the program
    is not going to be frozen to produce a Windows executable.
My original code is pretty long, but I was able to reproduce the error in an abridged version. It is split into two files: the first is the main module, which does very little other than import the module that handles processes/threads and call a method. The second module is where the meat of the code is.
testMain.py:
import parallelTestModule
extractor = parallelTestModule.ParallelExtractor()
extractor.runInParallel(numProcesses=2, numThreads=4)
parallelTestModule.py:
import multiprocessing
from multiprocessing import Process
import threading

class ThreadRunner(threading.Thread):
    """This class represents a single instance of a running thread."""
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name

    def run(self):
        print(self.name, '\n')

class ProcessRunner:
    """This class represents a single instance of a running process."""
    def runp(self, pid, numThreads):
        mythreads = []
        for tid in range(numThreads):
            name = "Proc-" + str(pid) + "-Thread-" + str(tid)
            th = ThreadRunner(name)
            mythreads.append(th)
        for i in mythreads:
            i.start()
        for i in mythreads:
            i.join()

class ParallelExtractor:
    def runInParallel(self, numProcesses, numThreads):
        myprocs = []
        prunner = ProcessRunner()
        for pid in range(numProcesses):
            pr = Process(target=prunner.runp, args=(pid, numThreads))
            myprocs.append(pr)
        # if __name__ == 'parallelTestModule':    # This didn't work
        # if __name__ == '__main__':              # This obviously doesn't work
        #     multiprocessing.freeze_support()    # added after seeing the error, to no avail
        for i in myprocs:
            i.start()
        for i in myprocs:
            i.join()
On Windows the subprocesses will import (i.e. execute) the main module at start. You need to insert an if __name__ == '__main__': guard in the main module to avoid creating subprocesses recursively.
Modified testMain.py:
import parallelTestModule

if __name__ == '__main__':
    extractor = parallelTestModule.ParallelExtractor()
    extractor.runInParallel(numProcesses=2, numThreads=4)
Try putting your code inside a main function in testMain.py
import parallelTestModule

if __name__ == '__main__':
    extractor = parallelTestModule.ParallelExtractor()
    extractor.runInParallel(numProcesses=2, numThreads=4)
See the docs:
"For an explanation of why (on Windows) the if __name__ == '__main__'
part is necessary, see Programming guidelines."
which say
"Make sure that the main module can be safely imported by a new Python
interpreter without causing unintended side effects (such a starting a
new process)."
... by using if __name__ == '__main__'
Though the earlier answers are correct, there's a small complication it would help to remark on.
In case your main module imports another module in which global variables or class member variables are defined and initialized to (or using) some new objects, you may have to condition that import in the same way:
if __name__ == '__main__':
    import my_module
As @Ofer said, when you are using other libraries or modules, you should import all of them inside the if __name__ == '__main__': block.
So in my case, it ended up like this:

if __name__ == '__main__':
    import librosa
    import os
    import pandas as pd
    run_my_program()
Hello, here is my structure for multiprocessing:
from multiprocessing import Process
import time

start = time.perf_counter()

def do_something(time_for_sleep):
    print(f'Sleeping {time_for_sleep} second...')
    time.sleep(time_for_sleep)
    print('Done Sleeping...')

p1 = Process(target=do_something, args=[1])
p2 = Process(target=do_something, args=[2])

if __name__ == '__main__':
    p1.start()
    p2.start()
    p1.join()
    p2.join()

    finish = time.perf_counter()
    print(f'Finished in {round(finish - start, 2)} second(s)')
You don't have to put the imports inside the if __name__ == '__main__': block; just run what you wish to run inside it.
In YOLOv5 with Python 3.8.5:

if __name__ == '__main__':
    from yolov5 import train
    train.run()
In my case it was a simple bug in the code, using a variable before it was created. Worth checking that out before trying the above solutions. Why I got this particular error message, Lord knows.
The solution below should work for both Python multiprocessing and PyTorch multiprocessing.
As other answers mentioned, the fix is to have if __name__ == '__main__':, but I faced several issues in identifying where to put it, because I am using several scripts and modules. When I called my first function inside main, everything before it started to create multiple processes (not sure why).
Putting it at the very first line (even before the imports) worked, while wrapping only the first function call returned a timeout error. Below is the first file of my code; multiprocessing is used after calling several functions, but putting main at the top seems to be the only fix here.
if __name__ == '__main__':
    from mjrl.utils.gym_env import GymEnv
    from mjrl.policies.gaussian_mlp import MLP
    from mjrl.baselines.quadratic_baseline import QuadraticBaseline
    from mjrl.baselines.mlp_baseline import MLPBaseline
    from mjrl.algos.npg_cg import NPG
    from mjrl.algos.dapg import DAPG
    from mjrl.algos.behavior_cloning import BC
    from mjrl.utils.train_agent import train_agent
    from mjrl.samplers.core import sample_paths
    import os
    import json
    import mjrl.envs
    import mj_envs
    import time as timer
    import pickle
    import argparse
    import numpy as np

    # ===============================================================================
    # Get command line arguments
    # ===============================================================================
    parser = argparse.ArgumentParser(description='Policy gradient algorithms with demonstration data.')
    parser.add_argument('--output', type=str, required=True, help='location to store results')
    parser.add_argument('--config', type=str, required=True, help='path to config file with exp params')
    args = parser.parse_args()
    JOB_DIR = args.output
    if not os.path.exists(JOB_DIR):
        os.mkdir(JOB_DIR)
    with open(args.config, 'r') as f:
        job_data = eval(f.read())
    assert 'algorithm' in job_data.keys()
    assert any([job_data['algorithm'] == a for a in ['NPG', 'BCRL', 'DAPG']])
    job_data['lam_0'] = 0.0 if 'lam_0' not in job_data.keys() else job_data['lam_0']
    job_data['lam_1'] = 0.0 if 'lam_1' not in job_data.keys() else job_data['lam_1']
    EXP_FILE = JOB_DIR + '/job_config.json'
    with open(EXP_FILE, 'w') as f:
        json.dump(job_data, f, indent=4)

    # ===============================================================================
    # Train Loop
    # ===============================================================================
    e = GymEnv(job_data['env'])
    policy = MLP(e.spec, hidden_sizes=job_data['policy_size'], seed=job_data['seed'])
    baseline = MLPBaseline(e.spec, reg_coef=1e-3, batch_size=job_data['vf_batch_size'],
                           epochs=job_data['vf_epochs'], learn_rate=job_data['vf_learn_rate'])

    # Get demonstration data if necessary and behavior clone
    if job_data['algorithm'] != 'NPG':
        print("========================================")
        print("Collecting expert demonstrations")
        print("========================================")
        demo_paths = pickle.load(open(job_data['demo_file'], 'rb'))
        ########################################################################################
        demo_paths = demo_paths[0:3]
        print(job_data['demo_file'], len(demo_paths))
        for d in range(len(demo_paths)):
            feats = demo_paths[d]['features']
            feats = np.vstack(feats)
            demo_paths[d]['observations'] = feats
        ########################################################################################
        bc_agent = BC(demo_paths, policy=policy, epochs=job_data['bc_epochs'], batch_size=job_data['bc_batch_size'],
                      lr=job_data['bc_learn_rate'], loss_type='MSE', set_transforms=False)
        in_shift, in_scale, out_shift, out_scale = bc_agent.compute_transformations()
        bc_agent.set_transformations(in_shift, in_scale, out_shift, out_scale)
        bc_agent.set_variance_with_data(out_scale)

        ts = timer.time()
        print("========================================")
        print("Running BC with expert demonstrations")
        print("========================================")
        bc_agent.train()
        print("========================================")
        print("BC training complete !!!")
        print("time taken = %f" % (timer.time() - ts))
        print("========================================")

        # if job_data['eval_rollouts'] >= 1:
        #     score = e.evaluate_policy(policy, num_episodes=job_data['eval_rollouts'], mean_action=True)
        #     print("Score with behavior cloning = %f" % score[0][0])

    if job_data['algorithm'] != 'DAPG':
        # We throw away the demo data when training from scratch or fine-tuning with RL without explicit augmentation
        demo_paths = None

    # ===============================================================================
    # RL Loop
    # ===============================================================================
    rl_agent = DAPG(e, policy, baseline, demo_paths,
                    normalized_step_size=job_data['rl_step_size'],
                    lam_0=job_data['lam_0'], lam_1=job_data['lam_1'],
                    seed=job_data['seed'], save_logs=True)

    print("========================================")
    print("Starting reinforcement learning phase")
    print("========================================")
    ts = timer.time()
    train_agent(job_name=JOB_DIR,
                agent=rl_agent,
                seed=job_data['seed'],
                niter=job_data['rl_num_iter'],
                gamma=job_data['rl_gamma'],
                gae_lambda=job_data['rl_gae'],
                num_cpu=job_data['num_cpu'],
                sample_mode='trajectories',
                num_traj=job_data['rl_num_traj'],
                num_samples=job_data['rl_num_samples'],
                save_freq=job_data['save_freq'],
                evaluation_rollouts=job_data['eval_rollouts'])
    print("time taken = %f" % (timer.time() - ts))
I ran into the same problem. @Ofer's method is correct, but there are some details to pay attention to. The following is the successfully debugged code I modified, for your reference:
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import numpy as np
    import torchvision

    def imgshow(img):
        img = img / 2 + 0.5
        np_img = img.numpy()
        plt.imshow(np.transpose(np_img, (1, 2, 0)))
        plt.show()

    dataiter = iter(train_loader)
    images, labels = dataiter.next()
    imgshow(torchvision.utils.make_grid(images))
    print(' '.join('%5s' % classes[labels[i]] for i in range(4)))
For the record, I don't have a subroutine; I just have a main program, but I had the same problem as you. This demonstrates that when importing a Python library in the middle of a program, we should wrap the code in:
if __name__ == '__main__':
I tried the tricks mentioned above on the following very simple code, but I still cannot stop the script from re-running on any of my Windows machines, with Python 3.8/3.10. I would very much appreciate it if you could tell me where I am wrong.
print('script reset')

def do_something(inp):
    print('Done!')

if __name__ == '__main__':
    from multiprocessing import Process, get_start_method
    print('main reset')
    print(get_start_method())
    Process(target=do_something, args=[1]).start()
    print('Finished')
output displays:
script reset
main reset
spawn
Finished
script reset
Done!
Update:
As far as I understand, you are not preventing either the script containing __main__ or the .start() call from re-running the module (which doesn't happen on Linux); rather, you are suggesting workarounds so that we don't see the effects of the re-run. One has to keep all imports minimal and put them in each function separately, but it is still slow relative to Linux.
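For context, here is a sketch of the standard mitigation. This is expected spawn behavior rather than a bug: the child re-imports the main script, so anything at module level runs again; keeping the module level down to def and import statements makes the re-import cheap and silent:

def do_something(inp):
    print('Done!')

def main():
    from multiprocessing import Process, get_start_method
    print('main reset')
    print(get_start_method())
    Process(target=do_something, args=[1]).start()
    print('Finished')

if __name__ == '__main__':
    main()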

All threads in my python application appear as "python3" [duplicate]

When I set the name of a Python thread, it doesn't show up in htop or ps; the ps output only shows python as the thread name. Is there any way to set a thread name so that it shows up in system reports like these?
from threading import Thread
import time

def sleeper():
    while True:
        time.sleep(10)
        print("sleeping")

t = Thread(target=sleeper, name="Sleeper01")
t.start()
t.join()
ps -T -p {PID} output
PID SPID TTY TIME CMD
31420 31420 pts/30 00:00:00 python
31420 31421 pts/30 00:00:00 python
First install the prctl module. (On debian/ubuntu just type sudo apt-get install python-prctl)
from threading import Thread
import time
import prctl

def sleeper():
    prctl.set_name("sleeping tiger")
    while True:
        time.sleep(10)
        print("sleeping")

t = Thread(target=sleeper, name="Sleeper01")
t.start()
t.join()
This prints
$ ps -T
PID SPID TTY TIME CMD
22684 22684 pts/29 00:00:00 bash
23302 23302 pts/29 00:00:00 python
23302 23303 pts/29 00:00:00 sleeping tiger
23304 23304 pts/29 00:00:00 ps
Note: python3 users may wish to use pyprctl.
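For reference, a minimal Python 3 sketch of the same sleeper using pyprctl might look like this (an assumption here: pyprctl's set_name mirrors prctl.set_name; install it with pip install pyprctl):

from threading import Thread
import time
import pyprctl

def sleeper():
    pyprctl.set_name("sleeping tiger")  # thread name shown by ps -T / htop
    while True:
        time.sleep(10)
        print("sleeping")

t = Thread(target=sleeper, name="Sleeper01")
t.start()
t.join()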
The prctl module is nice and provides many features, but it depends on the libcap-dev package. libcap2 is most likely installed already, because it is a dependency of many packages (systemd, for example). So if you only need to set the thread name, use libcap2 via ctypes.
See the improved answer by Grief below.
import ctypes
import threading

LIB = 'libcap.so.2'
try:
    libcap = ctypes.CDLL(LIB)
except OSError:
    print(
        'Library {} not found. Unable to set thread name.'.format(LIB)
    )
else:
    def _name_hack(self):
        # PR_SET_NAME = 15
        libcap.prctl(15, self.name.encode())
        threading.Thread._bootstrap_original(self)

    threading.Thread._bootstrap_original = threading.Thread._bootstrap
    threading.Thread._bootstrap = _name_hack
On Python 2, I use the following monkey patch to propagate a Thread's name to the OS, if prctl is installed:
try:
    import prctl
    def set_thread_name(name): prctl.set_name(name)

    def _thread_name_hack(self):
        set_thread_name(self.name)
        threading.Thread.__bootstrap_original__(self)

    threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap
    threading.Thread._Thread__bootstrap = _thread_name_hack
except ImportError:
    log('WARN: prctl module is not installed. You will not be able to see thread names')
    def set_thread_name(name): pass
After this code executes, you can set a thread's name as usual:
threading.Thread(target=some_target, name='Change monitor', ...)
That means that if you have already set names for your threads, you don't need to change anything. I cannot guarantee that this is 100% safe, but it works for me.
I was confused until I found py-spy, a tool that shows the threads of a running Python process.
Install: pip3 install -i https://pypi.doubanio.com/simple/ py-spy
Usage: py-spy dump --pid <process-number>
For example, py-spy dump --pid 1234 shows all the thread stacks, names, and ids of Python process 1234.
An alternative solution (actually a dirty one, since it sets the process name, not the thread name) is to use the setproctitle module from pypi.
You can install it with pip install setproctitle and use it as follows:
import setproctitle
import threading
import time

def a_loop():
    setproctitle.setproctitle(threading.currentThread().name)
    # you could instead declare the name explicitly:
    # setproctitle.setproctitle("A loop")
    while True:
        print("Looping")
        time.sleep(99)

t = threading.Thread(target=a_loop, name="ExampleLoopThread")
t.start()
https://pypi.org/project/namedthreads/ provides a way to patch threading.Thread.start to call pthread_setname_np with the Python Thread.name.
It is compatible with Python 2.7 & 3.4+ (I've tested it with 3.10)
To activate it,
import namedthreads
namedthreads.patch()
Note that thread names in Python are unlimited, but pthreads has a limit of 15 char, so the Python name will be trimmed.
I attempted to follow the answers here to install python-prctl or pyprctl, but none of them could be installed because they need a gcc that we don't have.
After some digging on the net, Python issue 15500 gave a nice solution (https://bugs.python.org/issue15500). Here is what I've got based on it:
import ctypes, os, threading

def set_thread_name_np(the_name):
    the_lib_path = "/lib/libpthread-2.42.so"
    if not os.path.isfile(the_lib_path):
        return None
    try:
        libpthread = ctypes.CDLL(the_lib_path)
    except:
        return None
    if hasattr(libpthread, "pthread_setname_np"):
        pthread_setname_np = libpthread.pthread_setname_np
        pthread_setname_np.argtypes = [ctypes.c_void_p,
                                       ctypes.c_char_p]
        pthread_setname_np.restype = ctypes.c_int
        if isinstance(the_name, str):
            the_name = the_name.encode('ascii', 'replace')
        if type(the_name) is not bytes:
            return None
        the_thread = threading.current_thread()
        ident = getattr(the_thread, "ident", None)
        if ident is not None:
            pthread_setname_np(ident, the_name[:15])
            return True
    return None
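A quick usage sketch (hypothetical worker; note that the libpthread path hard-coded above varies between distros, so on a mismatch the function returns None and the name is simply left unchanged):

import time

def worker():
    # Rename the calling OS thread; shows up in `ps -T -p <pid>` on success
    set_thread_name_np("my-worker")
    time.sleep(60)

threading.Thread(target=worker).start()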

Can't Import SimPy

First of all, I searched for my problem and didn't find any solution. I import SimPy in my Python script with import simpy, and I confirmed via pip that SimPy is installed on my system, but it nevertheless can't be imported. I also attached a picture showing the output of pip list and the result of trying to run the script.
import simpy

def car(env):
    while True:
        print('Start parking at %d' % env.now)
        parking_duration = 5
        yield env.timeout(parking_duration)
        print('Start driving at %d' % env.now)
        trip_duration = 2
        yield env.timeout(trip_duration)

def main():
    env = simpy.Environment()
    env.process(car(env))
    env.run(until=15)

if __name__ == '__main__':
    main()
Terminal output: (screenshot in the original post, not reproduced here)
Is it possible that you installed it for Python 3 but ran it with Python 2?
Try: python3 SimPy.py
You need to rename your script to something else. It shadows the simpy package.
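A quick way to confirm that kind of shadowing is to print where Python actually found the module; if the path points at your own script instead of site-packages, renaming the script is the fix:

import simpy
print(simpy.__file__)  # if this prints the path of your own script, rename the script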

Run multiple servers in python at same time (Threading)

I have two servers in Python, and I want to combine them into one single .py file and run them together:
Server.py:
import logging, time, os, sys
from yowsup.layers import YowLayerEvent, YowParallelLayer
from yowsup.layers.auth import AuthError
from yowsup.layers.network import YowNetworkLayer
from yowsup.stacks.yowstack import YowStackBuilder
from layers.notifications.notification_layer import NotificationsLayer
from router import RouteLayer

class YowsupEchoStack(object):
    def __init__(self, credentials):
        """Creates the stacks of the Yowsup server."""
        self.credentials = credentials
        stack_builder = YowStackBuilder().pushDefaultLayers(True)
        stack_builder.push(YowParallelLayer([RouteLayer, NotificationsLayer]))
        self.stack = stack_builder.build()
        self.stack.setCredentials(credentials)

    def start(self):
        self.stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
        try:
            logging.info("#" * 50)
            logging.info("\tServer started. Phone number: %s" % self.credentials[0])
            logging.info("#" * 50)
            self.stack.loop(timeout=0.5, discrete=0.5)
        except AuthError as e:
            logging.exception("Authentication Error: %s" % e.message)
            if "<xml-not-well-formed>" in str(e):
                os.execl(sys.executable, sys.executable, *sys.argv)
        except Exception as e:
            logging.exception("Unexpected Exception: %s" % e.message)

if __name__ == "__main__":
    import sys
    import config
    logging.basicConfig(stream=sys.stdout, level=config.logging_level, format=config.log_format)
    server = YowsupEchoStack(config.auth)
    while True:
        # In case of disconnect, keeps connecting...
        server.start()
        logging.info("Restarting..")
App.py:
import web

urls = (
    '/', 'index'
)
app = web.application(urls, globals())

class index:
    def GET(self):
        greeting = "Hello World"
        return greeting

if __name__ == "__main__":
    app.run()
I want to run both together from a single .py file.
If I try to run them from one file, one of the two starts, and the other starts only when the first one is done working.
How can I run two servers in Python at the same time?
import thread

def run_app1():
    # something goes here
    pass

def run_app2():
    # something goes here
    pass

if __name__ == '__main__':
    thread.start_new_thread(run_app1, ())
    thread.start_new_thread(run_app2, ())
If you need to pass args to the functions, you can do:

thread.start_new_thread(run_app1, (arg1, arg2, ...))

If you want more control over your threads, you could use the threading module:
import threading

def app1():
    # something here
    pass

def app2():
    # something here
    pass

if __name__ == '__main__':
    t1 = threading.Thread(target=app1)
    t2 = threading.Thread(target=app2)
    t1.start()
    t2.start()
If you need to pass args, you can do:

t1 = threading.Thread(target=app1, args=(arg1, arg2, arg3, ...))
What's the difference between thread and threading? threading is a higher-level module than thread, and in 3.x thread was renamed to _thread. More info here: http://docs.python.org/library/threading.html, but that's for another question, I guess.
So in your case, just make a function that runs the first script and another that runs the second script, and spawn threads to run them, as sketched below.
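Applied to your case, a hedged sketch (reusing YowsupEchoStack and the web.py app from the question; the config module is assumed to be the same one Server.py already imports):

import threading

def run_whatsapp_server():
    server = YowsupEchoStack(config.auth)
    while True:
        # In case of disconnect, keeps connecting...
        server.start()

def run_web_app():
    app.run()

if __name__ == '__main__':
    # daemon=True so the process can exit once the web app stops
    t1 = threading.Thread(target=run_whatsapp_server, daemon=True)
    t2 = threading.Thread(target=run_web_app)
    t1.start()
    t2.start()
    t2.join()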
