Is there any way to use pyswip and Flask?

I have a Python application whose interface is implemented in Flask, and I have a module in the backend that uses the pyswip library. The module works perfectly when I run it separately from the application. From what I've found, it seems that pyswip is not thread safe.
I get this error on consult function:
swipl_fid = PL_open_foreign_frame()
OSError: exception: access violation reading 0x00000028
I could try to use another SWI-Prolog library, but in my application I need to consult an external .pl file.
Is there any way I could make it work?
Here's how I use the pyswip library:
from pyswip_alt import Prolog

class My_Prolog():
    def __init__(self, query):
        self.query = query.split()
        self.query = ', '.join(self.query)
        self.query = '[' + self.query + ']'
        self.documents_path = "my/path"
        self.prolog = Prolog()
        self.prolog.consult("facts.pl")
        self.prolog_results = []
        self.final_result = ''

    def process(self):
        for res in self.prolog.query("complex_phrase(" + self.query + ", F)."):
            result = []
            for atom in res['F']:
                result.append(atom.value)
            self.prolog_results.append(result)

    def run(self):
        self.process()
        # flatten the collected results into one string
        self.final_result = ' '.join(atom for result in self.prolog_results for atom in result)
        return self.final_result
And that's the way I use the class:
nl = My_Prolog(query)
nl_query = nl.run()
and all of this is in a function that is run by the Flask module.

Simply use a lock?

from multiprocessing import Lock
prologlock = Lock()

@app.route(...)
def handle_x():
    with prologlock:
        return MyProlog.handle_x()
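
For the asker's setup the same idea looks roughly like this (a minimal sketch; threading.Lock is sufficient when Flask serves requests from threads within a single process, while multiprocessing.Lock only helps if the lock is shared across processes):

import threading
from flask import Flask, request

app = Flask(__name__)
prolog_lock = threading.Lock()  # one lock shared by all request handlers

@app.route('/query')
def handle_query():
    q = request.args.get('q', '')
    with prolog_lock:
        # My_Prolog is the class from the question above; the lock
        # ensures only one thread at a time touches SWI-Prolog.
        nl = My_Prolog(q)
        return nl.run()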

Related

django celery beat arguments with imported class functions from another library

I'm trying to get a function to work in my Django project with celery beat; it imports a class-based function from a wrapper library. I've been reading that Celery doesn't work with classes too easily. My function login_mb doesn't take an argument, but when I try to register and call this task I get an error: Couldn't apply scheduled task login_mb: login_mb() takes 0 positional arguments but 1 was given
Is this because of self in the wrapper function imported?
What could I do to get this to work with celerybeat?
settings.py
CELERY_BEAT_SCHEDULE = {
    'login_mb': {
        'task': 'backend.tasks.login_mb',
        'schedule': timedelta(minutes=30),
    },
}
tasks.py
from matchbook.apiclient import APIClient
import logging
from celery import shared_task

log = logging.getLogger(__name__)

@shared_task(bind=True)
def login_mb():
    mb = APIClient('abc', '123')
    mb.login()
    mb.keep_alive()
apiclient.py (wrapper library)
from matchbook.baseclient import BaseClient
from matchbook import endpoints

class APIClient(BaseClient):
    def __init__(self, username, password=None):
        super(APIClient, self).__init__(username, password)
        self.login = endpoints.Login(self)
        self.keep_alive = endpoints.KeepAlive(self)
        self.logout = endpoints.Logout(self)
        self.betting = endpoints.Betting(self)
        self.account = endpoints.Account(self)
        self.market_data = endpoints.MarketData(self)
        self.reference_data = endpoints.ReferenceData(self)
        self.reporting = endpoints.Reporting(self)

    def __repr__(self):
        return '<APIClient [%s]>' % self.username

    def __str__(self):
        return 'APIClient'
The error is not related to your wrapper library; there seems to be nothing wrong with your task.
The problem arises because you've defined your task with bind=True. When you do so, Celery automatically injects a parameter into the method containing information about the current task. So you can either remove bind=True, or add a parameter to your task method like so:
@shared_task(bind=True)
def login_mb(self):
    mb = APIClient('abc', '123')
    mb.login()
    mb.keep_alive()
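
The other option mentioned above, dropping bind=True, would look like this:

@shared_task
def login_mb():
    mb = APIClient('abc', '123')
    mb.login()
    mb.keep_alive()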

mitmproxy load script using API (Python)

Good day,
I am trying to integrate mitmproxy into a bigger application.
For that, I need to be able to load those so-called inline scripts in my code and not via the command line. I could not find any helpful information about that in the documentation.
I am using mitmproxy version 0.17 and Python 2.7.
I know there is a newer version available, but that one didn't work with the code examples.
This is the base code I have:
from mitmproxy import controller, proxy
from mitmproxy.proxy.server import ProxyServer

class ProxyMaster(controller.Master):
    def __init__(self, server):
        controller.Master.__init__(self, server)

    def run(self):
        try:
            return controller.Master.run(self)
        except KeyboardInterrupt:
            self.shutdown()

    def handle_request(self, flow):
        flow.reply()

    def handle_response(self, flow):
        flow.reply()

config = proxy.ProxyConfig(port=8080)
server = ProxyServer(config)
m = ProxyMaster(server)
m.run()
How could I run this proxy using inline scripts?
Thanks in advance
I figured out a really ugly workaround myself.
Instead of using controller.Master I had to use flow.FlowMaster, as controller.Master does not seem to be able to handle inline scripts.
For some reason just loading the files did not work: they get triggered immediately, but not by running their matching hooks.
Instead of using the hooks, which are not working, I am loading the matching functions as you can see in handle_response (try/except is missing and threading could be useful):
from mitmproxy import flow, proxy
from mitmproxy.proxy.server import ProxyServer
import imp

class ProxyMaster(flow.FlowMaster):
    def run(self):
        try:
            return flow.FlowMaster.run(self)
        except KeyboardInterrupt:
            self.shutdown()

    def handle_request(self, flow):
        flow.reply()

    def handle_response(self, flow):
        for inline_script in self.scripts:
            script_file = imp.load_source("response", inline_script.filename)
            script_file.response(self, flow)
        flow.reply()

proxy_config = proxy.ProxyConfig(port=8080)
server = ProxyServer(proxy_config)
state = flow.State()
m = ProxyMaster(server, state)
m.load_script("upsidedowninternet.py")
m.load_script("add_header.py")
m.run()
Any ideas about doing it the right way are appreciated.
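One candidate for the "right way", assuming FlowMaster's default handlers in 0.17 are what dispatch the inline-script hooks (an assumption worth verifying against your version's flow.py), is to delegate to the parent handler instead of re-importing the script file on every response:

class HookedProxyMaster(flow.FlowMaster):
    def handle_response(self, f):
        # The inherited handler is assumed to run the "response" hook
        # of every script registered via load_script().
        f = flow.FlowMaster.handle_response(self, f)
        if f:
            f.reply()
        return f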

What's the closest I can get to calling a Python function using a different Python version?

Say I have two files:
# spam.py
import library_Python3_only as l3

def spam(x, y):
    return l3.bar(x).baz(y)
and
# beans.py
import library_Python2_only as l2
...
Now suppose I wish to call spam from within beans. It's not directly possible since both files depend on incompatible Python versions. Of course I can Popen a different python process, but how could I pass in the arguments and retrieve the results without too much stream-parsing pain?
Here is a complete example implementation using subprocess and pickle that I actually tested. Note that you need to use protocol version 2 explicitly for pickling on the Python 3 side (at least for the combo Python 3.5.2 and Python 2.7.3).
# py3bridge.py

import sys
import pickle
import importlib
import io
import traceback
import subprocess

class Py3Wrapper(object):
    def __init__(self, mod_name, func_name):
        self.mod_name = mod_name
        self.func_name = func_name

    def __call__(self, *args, **kwargs):
        p = subprocess.Popen(['python3', '-m', 'py3bridge',
                              self.mod_name, self.func_name],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        stdout, _ = p.communicate(pickle.dumps((args, kwargs)))
        data = pickle.loads(stdout)
        if data['success']:
            return data['result']
        else:
            raise Exception(data['stacktrace'])

def main():
    try:
        target_module = sys.argv[1]
        target_function = sys.argv[2]
        args, kwargs = pickle.load(sys.stdin.buffer)
        mod = importlib.import_module(target_module)
        func = getattr(mod, target_function)
        result = func(*args, **kwargs)
        data = dict(success=True, result=result)
    except Exception:
        st = io.StringIO()
        traceback.print_exc(file=st)
        data = dict(success=False, stacktrace=st.getvalue())
    pickle.dump(data, sys.stdout.buffer, 2)

if __name__ == '__main__':
    main()
The Python 3 module (using the pathlib module for the showcase)
# spam.py
import pathlib

def listdir(p):
    return [str(c) for c in pathlib.Path(p).iterdir()]
The Python 2 module using spam.listdir
# beans.py
import py3bridge

delegate = py3bridge.Py3Wrapper('spam', 'listdir')
py3result = delegate('.')
print py3result
Assuming the caller is Python 3.5+, you have access to a nicer subprocess module. Perhaps you could use subprocess.run and communicate via pickled Python objects sent through stdin and stdout, respectively. There would be some setup to do, but no parsing on your side, or mucking with strings etc.
Here's an example of Python2 code via subprocess.Popen
import pickle
import subprocess

p = subprocess.Popen(python3_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = p.communicate(pickle.dumps(python3_args))
result = pickle.loads(stdout)
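Fleshing out the subprocess.run suggestion, following this answer's assumption that the Python 3 side is the caller (a sketch only; py2_helper.py is a hypothetical Python 2 script that reads pickled arguments from stdin and writes a pickled result to stdout):

import pickle
import subprocess

def call_py2(args):
    # Protocol 2 is the highest pickle protocol Python 2 understands.
    proc = subprocess.run(['python2', 'py2_helper.py'],
                          input=pickle.dumps(args, protocol=2),
                          stdout=subprocess.PIPE,
                          check=True)
    return pickle.loads(proc.stdout)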
You could create a simple script like this:
import sys
import my_wrapped_module
import json

params = sys.argv
script = params.pop(0)
function = params.pop(0)
print(json.dumps(getattr(my_wrapped_module, function)(*params)))
You'll be able to call it like this:
pythonx.x wrapper.py myfunction param1 param2
This is obviously a security hazard though, so be careful.
Also note that if your params are anything other than strings or integers, you'll have some issues, so maybe think about transmitting params as a JSON string and converting them with json.loads() in the wrapper.
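That JSON variant of the wrapper could look like this (a hypothetical sketch; the function now receives one JSON-encoded argument list, e.g. pythonx.x wrapper.py myfunction '[1, "two"]'):

import sys
import json
import my_wrapped_module

function = sys.argv[1]
params = json.loads(sys.argv[2])  # decode the single JSON argument
print(json.dumps(getattr(my_wrapped_module, function)(*params)))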
It's possible to use the multiprocessing.managers module to achieve what you want. It does require a small amount of hacking though.
Given a module that has functions you want to expose then you need to create a Manager that can create proxies for those functions.
The manager process that serves proxies to the py3 functions:
from multiprocessing.managers import BaseManager
import spam

class SpamManager(BaseManager):
    pass

# Register a way of getting the spam module.
# You can use the exposed arg to control what is exposed.
# By default only "public" functions (without a leading underscore) are exposed,
# but it can only ever expose functions or methods.
SpamManager.register("get_spam", callable=(lambda: spam), exposed=["add", "sub"])

# Specifying the address as localhost means the manager is only visible to
# processes on this machine.
manager = SpamManager(address=('localhost', 50000), authkey=b'abc',
                      serializer='xmlrpclib')
server = manager.get_server()
server.serve_forever()
I've redefined spam to contain two functions called add and sub.
# spam.py
def add(x, y):
    return x + y

def sub(x, y):
    return x - y
The client process that uses the py3 functions exposed by the SpamManager:
from __future__ import print_function
from multiprocessing.managers import BaseManager

class SpamManager(BaseManager):
    pass

SpamManager.register("get_spam")

m = SpamManager(address=('localhost', 50000), authkey=b'abc',
                serializer='xmlrpclib')
m.connect()

spam = m.get_spam()
print("1 + 2 =", spam.add(1, 2))  # prints 1 + 2 = 3
print("1 - 2 =", spam.sub(1, 2))  # prints 1 - 2 = -1
spam.__name__  # AttributeError -- spam is a module, but its __name__
               # attribute is not exposed
Once set up, this form gives an easy way of accessing functions and values. It also allows these functions and values to be used in a similar way to how you would use them if they were not proxies. Finally, it allows you to set a password on the server process so that only authorised processes can access the manager. Since the manager is long running, a new process does not have to be started for each function call you make.
One limitation is that I've used the xmlrpclib module rather than pickle to send data back and forth between the server and the client. This is because Python 2 and Python 3 use different protocols for pickle. You could fix this by adding your own client to multiprocessing.managers.listener_client that uses an agreed-upon protocol for pickling objects.

Maintain sessions with zerorpc

How do I maintain different sessions or local state with my zerorpc server?
For example (below), if I have multiple clients, subsequent clients will overwrite the model state. I thought about each client having an ID, and the RPC logic would try to separate the variables that way, but this seems messy, and how would I clear out old states/variables once the clients disconnect?
Server
import zerorpc
import FileLoader

class MyRPC(object):
    def load(self, myFile):
        self.model = FileLoader.load(myFile)

    def getModelName(self):
        return self.model.name

s = zerorpc.Server(MyRPC())
s.bind("tcp://0.0.0.0:4242")
s.run()
Client 1
import zerorpc
c = zerorpc.Client()
c.connect("tcp://127.0.0.1:4242")
c.load("file1")
print c.getModelName()
Client 2
import zerorpc
c = zerorpc.Client()
c.connect("tcp://127.0.0.1:4242")
c.load("file2") # AAAHH! The previously loaded model gets overwritten here!
print c.getModelName()
Not sure about sessions... but if you want to get back different models, maybe you could just have one function that instantiates a new model?
import zerorpc
import FileLoader

models_dict = {}  # Keep track of our models

def get_model(file):
    if file in models_dict:
        return models_dict[file]
    models_dict[file] = MyModel(file)
    return models_dict[file]

class MyModel(object):
    def __init__(self, file):
        if file:
            self.load(file)

    def load(self, myFile):
        self.model = FileLoader.load(myFile)

    def getModelName(self):
        return self.model.name

s = zerorpc.Server(<mypackagename.mymodulename>)  # Supply the name of the current package/module
s.bind("tcp://0.0.0.0:4242")
s.run()
Client:
import zerorpc
c = zerorpc.Client()
c.connect("tcp://127.0.0.1:4242")
print c.get_model("file1")
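
Following the question's own idea of giving each client an ID, a hedged sketch (open_session/close_session are invented names, and you would still need an expiry policy for clients that vanish without closing their session):

import uuid
import zerorpc
import FileLoader

class MyRPC(object):
    def __init__(self):
        self.sessions = {}  # maps session id -> loaded model

    def open_session(self):
        # A client calls this once and passes the id to every later call.
        session_id = uuid.uuid4().hex
        self.sessions[session_id] = None
        return session_id

    def load(self, session_id, myFile):
        self.sessions[session_id] = FileLoader.load(myFile)

    def getModelName(self, session_id):
        return self.sessions[session_id].name

    def close_session(self, session_id):
        # Explicit cleanup instead of trying to detect disconnects.
        self.sessions.pop(session_id, None)

s = zerorpc.Server(MyRPC())
s.bind("tcp://0.0.0.0:4242")
s.run()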

How are Python command line arguments related to methods?

Everyone at Class too big and hard to add new features seems completely unfazed by the question, which somehow connects command line options to methods, but I can find no documentation for this. It's not optparse, or argparse, or sys.argv - the question implies some kind of direct relationship between methods and command line options. What am I missing?
There isn't any set-in-stone link between them. The question you link to appears to be a program that can do one of several different things, with command-line arguments switching between them. These things happen to be implemented in the program using methods.
It is implied by the question that they have used something like argparse to write the glue between these; but the use of methods is just an implementation detail of the particular program.
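For illustration, glue of that sort is often written with argparse subcommands, where set_defaults binds each subcommand to a method (a minimal sketch; the program and method names here are invented):

import argparse

class Program(object):
    def update_database(self, args):
        print("updating database")

    def get_images(self, args):
        print("fetching images")

def main():
    prog = Program()
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest="command")

    # Each subcommand records the bound method it should dispatch to.
    sub = subparsers.add_parser("update-database")
    sub.set_defaults(func=prog.update_database)
    sub = subparsers.add_parser("get-images")
    sub.set_defaults(func=prog.get_images)

    args = parser.parse_args()
    args.func(args)  # run the method chosen on the command line

if __name__ == "__main__":
    main()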
I simply use the class like this, which seems not to be a very good idea, because it is very hard to maintain once you have many commands.
import sys

class myprogram(object):
    def __init__(self):
        self.prepare()

    def prepare(self):
        # some initializations
        self.prepareCommands()

    def prepareCommands(self):
        self.initCommand("--updateDatabase", self.updateDatabase)
        self.initCommand("--getImages", self.getImages)
        # and so on

    def initCommand(self, cmd, func):
        options = sys.argv
        for option in options:
            if option.find(cmd) != -1:
                return func()

    # my commands
    def updateDatabase(self):
        pass  # ...

    def getImages(self):
        pass  # ...

if __name__ == "__main__":
    p = myprogram()
EDIT1:
Here's a cleaner way I just implemented:
myprogram.py:
from config import *  # has settings
from commands import *
from logsys import log
import filesys
import sys

class myprogram(object):
    def __init__(self):
        log(_class=self.__class__.__name__, _func='__init__', _level=0)
        log(_class=self.__class__.__name__, _func='__init__', text="DEBUG LEVEL %s" % settings["debug"], _level=0)
        self.settings = settings
        self.cmds = commands

    def prepare(self):
        log(_class=self.__class__.__name__, _func='prepare', _level=1)
        self.dirs = {}
        for key in settings["dir"].keys():
            self.dirs[key] = settings["dir"][key]
            filesys.checkDir(self.dirs[key])

    def initCommands(self):
        log(_class=self.__class__.__name__, _func='initCommands', _level=1)
        options = sys.argv
        for option in options:
            for cmd in self.cmds.keys():
                if option.find(cmd) != -1:
                    return self.cmds[cmd]()

if __name__ == '__main__':
    p = myprogram()
    p.prepare()
    p.initCommands()
commands.py:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
commands = {}
#csv
import csvsys
commands["--getCSV"] = csvsys.getCSV
#commands["--getCSVSplitted"] = csvsys.getCSVSplitted
# update & insert
import database
commands["--insertProductSpecification"] = database.insertProductSpecification
# download
import download
commands["--downloadProductSites"] = download.downloadProductSites
commands["--downloadImages"] = download.downloadImages
# parse
import parse
commands["--parseProductSites"] = parse.parseProductSites
EDIT2: I have now updated the question you linked to with a more complete example: Class too big and hard to add new features
