So I'm quite new to dealing with nose plugins.
I've been searching a lot but docs regarding nose plugins seem scarce.
I read and tried what's in the following links to try to write a simple nose plugin
and run it with nosetests, without success:
https://nose.readthedocs.org/en/latest/doc_tests/test_init_plugin/init_plugin.html
https://nose.readthedocs.org/en/latest/plugins/writing.html
I don't want to write my own test-runner or run the tests from any other script (via run(argv=argv, suite=suite(), ...)),
like they do in the first link.
I wrote a file myplugin.py with a class like this:
import os
from nose.plugins import Plugin

class MyCustomPlugin(Plugin):
    name = 'myplugin'

    def options(self, parser, env=os.environ):
        parser.add_option('--custom-path', action='store',
                          dest='custom_path', default=None,
                          help='Specify path to widget config file')

    def configure(self, options, conf):
        if options.custom_path:
            self.make_some_configs(options.custom_path)
            self.enabled = True

    def make_some_configs(self, path):
        # do some stuff based on the given path

    def begin(self):
        print 'Maybe print some useful stuff...'
        # do some more stuff
and added a setup.py like this:
try:
    from setuptools import setup, find_packages
except ImportError:
    import distribute_setup
    distribute_setup.use_setuptools()
    from setuptools import setup, find_packages

setup(
    name='mypackage',
    ...
    install_requires=['nose==1.3.0'],
    py_modules=['myplugin'],
    entry_points={
        'nose.plugins.1.3.0': [
            'myplugin = myplugin:MyCustomPlugin'
        ]
    }
)
Both files are in the same directory.
Every time I run nosetests --custom-path [path], I get:
nosetests: error: no such option: --custom-path
From the links mentioned above, I thought that's all that was required to register and enable a custom plugin.
But it seems that, either I'm doing something really wrong, or nose's docs are outdated.
Can someone please point me the correct way to register and enable a plugin, that I can use with nosetests?
Thanks a lot!! :)
You don't want the nose version in entry_points in setup.py. Just use nose.plugins.0.10 as the docs say. The dotted version in the entry point name is not so much a nose version as a plugin API version.
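In other words, keep everything else the same and just change the entry point group name, something like this (an untested sketch, matching the module layout above):
entry_points={
    'nose.plugins.0.10': [
        'myplugin = myplugin:MyCustomPlugin'
    ]
}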
My project structure is as below. I couldn't find enough information in the Twisted documentation.
rolling/
    roll/
        __init__.py
        rollserver.py
    twisted/
        plugins/
            roll_plugin.py
My roll_plugin.py looks like this:
from zope.interface import implementer
from twisted.plugin import IPlugin
from twisted.application import internet
from twisted.application.service import IServiceMaker
from utils.annon_edro import Options, GPMSBridgeService
from roll.rollserver import RollFactory

@implementer(IServiceMaker, IPlugin)
class MyServiceMaker(object):
    tapname = "roll"
    description = 'Open the great door of roll for default'
    options = Options

    def makeService(self, options):
        return internet.TCPServer(8999, RollFactory())

serviceMaker = MyServiceMaker()
When I run twistd --help, I do not see the subcommand there.
This code runs fine with Python 2 but not with Python 3.10.
What am I doing wrong here?
Is there any way to debug twistd code?
The directory path was missing from sys.path; adding it worked for me.
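For reference, a minimal sketch of what that fix means (the path below is a placeholder for your real project root, i.e. the rolling directory that contains roll/ and twisted/plugins/):
import sys
# placeholder path -- point this at the actual project root
sys.path.insert(0, '/path/to/rolling')
Running twistd from that directory, or exporting PYTHONPATH to include it, achieves the same thing.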
Is it allowed to group custom Django commands into separate folders inside the same Django app?
I have a lot of them and wanted to group them logically by purpose. I created folders, but Django can't find them.
Maybe I'm trying to run them wrong. Tried:
python manage.py process_A_related_data
the same plus imported all commands in __init__.py
python manage.py folderA process_A_related_data
python manage.py folderA.process_A_related_data
python manage.py folderA/process_A_related_data
Got following error:
Unknown command: 'folderA/process_A_related_data'
Type 'manage.py help' for usage.
I think you can create a basic custom command which will run other commands from the relevant folders. Here is an approach you can take:
First make a folder structure like this:
management/
commands/
folder_a/
process_A_related_data.py
folder_b/
process_A_related_data.py
process_data.py
Then inside process_data.py, update the command like this:
from django.core import management
from django.core.management.base import BaseCommand
import importlib

class Command(BaseCommand):
    help = 'Folder Process Commands'

    def add_arguments(self, parser):
        parser.add_argument('-u', '--use', type=str, nargs='?',
                            default='folder_a.process_A_related_data')

    def handle(self, *args, **options):
        try:
            folder_file_module = options['use'] if options['use'].startswith('.') else '.' + options['use']
            command = importlib.import_module(folder_file_module, package='your_app.management.commands')
            management.call_command(command.Command())
        except ModuleNotFoundError as e:
            self.stderr.write(f"No relevant folder found: {e.name}")
Here I am using the call_command method to call other management commands.
Then run commands like this:
python manage.py process_data --use folder_a.process_A_related_data
Finally, if you want to run commands like python manage.py folder_a.process_A_related_data, then you probably need to change manage.py, like this:
import re
...
try:
    from django.core.management import execute_from_command_line
except ImportError as exc:
    raise ImportError(
        "Couldn't import Django. Are you sure it's installed and "
        "available on your PYTHONPATH environment variable? Did you "
        "forget to activate a virtual environment?"
    ) from exc

if re.search('folder_[a-z].*', sys.argv[-1]):
    new_arguments = sys.argv[:-1] + ['process_data', '--use', sys.argv[-1]]
    execute_from_command_line(new_arguments)
else:
    execute_from_command_line(sys.argv)
You should be able to partition the code by using mixins (I have not tried this in this context, though)
A standard management command looks like
from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = 'FIXME A helpful comment goes here'

    def add_arguments(self, parser):
        parser.add_argument('name', ...)
        # more argument definitions

    def handle(self, *args, **options):
        # do stuff
Which can probably be replaced by a "stub" in app/management/commands:
from wherever.commands import FooCommandMixin
from django.core.management.base import BaseCommand

class Command(FooCommandMixin, BaseCommand):
    # autogenerated -- do not put any code in here!
    pass
and in wherever/commands
class FooCommandMixin(object):
    help = 'FIXME A helpful comment goes here'

    def add_arguments(self, parser):
        parser.add_argument('name', ...)
        # more argument definitions

    def handle(self, *args, **options):
        # do the work
It would not be hard to write a script to go through a list of file names or paths (using glob.glob) using re.findall to identify appropriate class declarations, and to (re)generate a matching stub for each in the app's management/commands folder.
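For example, such a generator could be sketched roughly like this (untested here; the wherever/commands and app/management/commands paths are the ones from the examples above):
import glob
import os
import re

STUB = '''from wherever.commands import {mixin}
from django.core.management.base import BaseCommand


class Command({mixin}, BaseCommand):
    # autogenerated -- do not put any code in here!
    pass
'''

for path in glob.glob('wherever/commands/*.py'):
    with open(path) as fh:
        source = fh.read()
    # find every mixin class declared in the module
    for mixin in re.findall(r'class\s+(\w+CommandMixin)\b', source):
        # FooCommandMixin -> management command "foo"
        name = mixin[:-len('CommandMixin')].lower()
        stub_path = os.path.join('app/management/commands', name + '.py')
        with open(stub_path, 'w') as fh:
            fh.write(STUB.format(mixin=mixin))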
Also/instead Python's argparse allows for the definition of sub-commands. So you should be able to define a command that works like
./manage.py foo bar --aa --bb something --cc and
./manage.py foo baz --bazzy a b c
where the syntax after foo is determined by the next word (bar or baz or ...). Again I have no experience of using subcommands in this context.
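For what it's worth, a sketch of how that could look (untried in a management command, as noted above, and it may need adjusting depending on the Django version; foo, bar and baz are the names from the example):
# app/management/commands/foo.py
from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = 'foo command with bar/baz sub-commands'

    def add_arguments(self, parser):
        subparsers = parser.add_subparsers(dest='subcommand')

        bar = subparsers.add_parser('bar')
        bar.add_argument('--aa', action='store_true')
        bar.add_argument('--bb')
        bar.add_argument('--cc', action='store_true')

        baz = subparsers.add_parser('baz')
        baz.add_argument('--bazzy', action='store_true')
        baz.add_argument('values', nargs='*')

    def handle(self, *args, **options):
        if options.get('subcommand') == 'bar':
            pass  # do the bar work, using options['aa'], options['bb'], options['cc']
        elif options.get('subcommand') == 'baz':
            pass  # do the baz work, using options['bazzy'] and options['values']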
I found no mention of support for this feature in the release notes. It looks like this is still not supported as of Django 3.0. I would suggest that you use meaningful names for your files that make their purpose clear. You could always come up with a naming convention!
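For example (the names below are purely illustrative), a prefix-based convention keeps related commands next to each other in the help listing:
management/commands/
    billing_process_invoices.py      # python manage.py billing_process_invoices
    billing_send_reminders.py        # python manage.py billing_send_reminders
    reports_generate_monthly.py      # python manage.py reports_generate_monthly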
A workaround could be: create a specific Django "satellite" app for each group of management commands.
In recent versions of Django, the requirements for a Python module to be an app are minimal: you won't need to provide any fake models.py or other specific files as happened in the old days.
While far from perfect from a stylistic point of view, you still gain a few advantages:
no need to hack the framework at all
python manage.py will list the commands grouped by app
you can control the grouping by providing suitable names to the apps
you can use these satellite apps as container for specific unit tests
I always try to avoid fighting against the framework, even when this means compromising and sometimes accepting its occasional design limitations.
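As an illustration (the app and command names here are hypothetical), the layout and settings might look like this:
project/
    billing_commands/                # satellite app for billing commands
        __init__.py
        management/
            __init__.py
            commands/
                __init__.py
                process_invoices.py
    cleanup_commands/                # satellite app for cleanup commands
        ...

# settings.py
INSTALLED_APPS = [
    # ...
    'billing_commands',
    'cleanup_commands',
]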
Given that my library with foobar.py is set up as such:
\foobar.py
\foobar
\__init__.py
\setup.py
Hierarchy of CLI in the console script:
foobar.py
    cli
        foo
            kungfu
            kungpow
        bar
            blacksheep
            haveyouanywool
[code]:
import click

CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])

@click.group()
@click.version_option()
def cli():
    pass

@cli.group(context_settings=CONTEXT_SETTINGS)
def foo():
    pass

@cli.group(context_settings=CONTEXT_SETTINGS)
def bar():
    pass

@foo.command('kungfu')
def kungfu():
    print('bruise lee')

@foo.command('kungpow')
def kungpow():
    print('chosen one')

@bar.command('blacksheep')
def blacksheep():
    print('bah bah blacksheep')

@bar.command('haveyouanywool')
def haveyouanywool():
    print('have you any wool?')
How should I set my entry in setup.py?
There are many examples but they only show a single command for a single entry point, e.g. Entry Points in setup.py
But is it even possible to set up the console script with the way my foobar.py click script is structured?
If not, how should I restructure the commands in foobar.py?
For context, I have this script for the sacremoses library: https://github.com/alvations/sacremoses/blob/cli/sacremoses.py
But I couldn't figure how to configure the setup.py to install the sacremoses.py script properly: https://github.com/alvations/sacremoses/blob/cli/setup.py
To make the entry points work in your example you need:
entry_points='''
[console_scripts]
command_line_name=foobar:cli
''',
What you are missing is an understanding of the meaning of:
command_line_name=foobar:cli
[console_scripts]
There are three things in command_line_name=foobar:cli:
Name of the script from the command line (command_line_name)
Module where the click command handler is located (foobar)
Name of the click command/group in that module (cli)
setup.py
For your github example, I would suggest:
from distutils.core import setup
import setuptools

console_scripts = """
[console_scripts]
sacremoses=sacremoses.cli:cli
"""

setup(
    name='sacremoses',
    packages=['sacremoses'],
    version='0.0.7',
    description='SacreMoses',
    long_description='LGPL MosesTokenizer in Python',
    author='',
    license='',
    package_data={'sacremoses': [
        'data/perluniprops/*.txt',
        'data/nonbreaking_prefixes/nonbreaking_prefix.*'
    ]},
    url='https://github.com/alvations/sacremoses',
    keywords=[],
    classifiers=[],
    install_requires=['six', 'click', 'joblib', 'tqdm'],
    entry_points=console_scripts,
)
Command Handler
In the referenced branch of your github repo, there is NO cli.py file. The [code] from your question needs to be saved in sacremoses/cli.py, and then combined with the suggested changes to your setup.py, everything should work fine.
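In other words, the repo would end up looking roughly like this (cli.py holding the click group and commands):
sacremoses/
    __init__.py
    cli.py                 # the click group/commands from the question
    data/
        perluniprops/
        nonbreaking_prefixes/
setup.py                   # with the entry_points shown above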
I'm trying to configure setuptools and Click module for multiple functions.
Click documentation instructs in Nesting Commands section to use click.group().
How do you write the entry_points for multiple CLick CLI functions?
I was toying with the syntax, and I managed to get something working, but I can't recreate it. It was something like this:
entry_points='''
[console_scripts]
somefunc=yourscript:somefunc
morefunc=yourscript:morefunc
'''
Following the sample given below, I converted the syntax to a dictionary:
entry_points={'console_scripts':
    ['somefunc = yourscript:somefunc',
     'morefunc = yourscript:morefunc'
    ]},
After I reinstalled, calling the script raised this error:
(clickenv) > somefunc
Traceback (most recent call last):
[...]
raise TypeError('Attempted to convert a callback into a '
TypeError: Attempted to convert a callback into a command twice.
The way I made this work the first time was that I installed the script and then gradually changed the code through the various examples. At one point, just as described in the docs, I called the script with $ yourscript somefunc. However, when I recreated the pattern in my project I got that error.
Here I've uninstalled and reinstalled (even though it's advertised as unnecessary, pip install -e .) and removed the second entry point. Here's my testing example. The function morefunc requires a .txt input file.
# yourscript.py
import click

@click.command()
@click.group()
def cli():
    pass

@cli.command()
def somefunc():
    click.echo('Hello World!')

@cli.command()
@click.argument('input', type=click.File('rb'))
@click.option('--saveas', default='HelloWorld.txt', type=click.File('wb'))
def morefunc(input, saveas):
    while True:
        chunk = input.read(1024)
        if not chunk:
            break
        saveas.write(chunk)
# setup.py
from setuptools import setup

setup(
    name='ClickCLITest',
    version='0.1',
    py_modules=['yourscript'],
    install_requires=[
        'Click',
    ],
    entry_points={'console_scripts':
        ['somefunc = yourscript:somefunc']},
)
https://setuptools.readthedocs.io/en/latest/setuptools.html#automatic-script-creation
setup(
    …
    entry_points={
        'console_scripts': [
            'somefunc=yourscript:somefunc',
            'morefunc=yourscript:morefunc',
        ],
    },
    …
)
We use py2app extensively at our facility to produce self contained .app packages for easy internal deployment without dependency issues. Something I noticed recently, and have no idea how it began, is that when building an .app, py2app started including the .git directory of our main library.
commonLib, for instance, is our root python library package, which is a git repo. Under this package are the various subpackages such as database, utility, etc.
commonLib/
|- .git/          # because commonLib is a git repo
|- __init__.py
|- database/
|   |- __init__.py
|- utility/
|   |- __init__.py
# ... etc
In a given project, say Foo, we will do imports like from commonLib import xyz to use our common packages. Building via py2app looks something like: python setup.py py2app
So the recent issue I am seeing is that when building an app for project Foo, I will see it include everything in commonLib/.git/ into the app, which is extra bloat. py2app has an excludes option but that only seems to be for python modules. I can't quite figure out what it would take to exclude the .git subdir, or in fact, what is causing it to be included in the first place.
Has anyone experienced this when using a python package import that is a git repo?
Nothing has changed in our setup.py files for each project, and commonLib has always been a git repo. So the only thing I can think of being a variable is the version of py2app and its deps which have obviously been upgraded over time.
Edit
I'm using the latest py2app 0.6.4 as of right now. Also, my setup.py was first generated from py2applet a while back, but has been hand configured since and copied over as a template for every new project. I am using PyQt4/sip for every single one of these projects, so it also makes me wonder if it's an issue with one of the recipes?
Update
From the first answer, I tried to fix this using various combinations of exclude_package_data settings. Nothing seems to force the .git directory to become excluded. Here is a sample of what my setup.py files generally look like:
from setuptools import setup
from myApp import VERSION

appname = 'MyApp'

APP = ['myApp.py']
DATA_FILES = []
OPTIONS = {
    'includes': 'atexit, sip, PyQt4.QtCore, PyQt4.QtGui',
    'strip': True,
    'iconfile': 'ui/myApp.icns',
    'resources': ['src/myApp.png'],
    'plist': {
        'CFBundleIconFile': 'ui/myApp.icns',
        'CFBundleIdentifier': 'com.company.myApp',
        'CFBundleGetInfoString': appname,
        'CFBundleVersion': VERSION,
        'CFBundleShortVersionString': VERSION
    }
}

setup(
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
I have tried things like:
setup(
    ...
    exclude_package_data={'commonLib': ['.git']},
    # exclude_package_data={'': ['.git']},
    # exclude_package_data={'commonLib/.git/': ['*']},
    # exclude_package_data={'.git': ['*']},
    ...
)
Update #2
I have posted my own answer which does a monkeypatch on distutils. It's ugly and not preferred, but until someone can offer me a better solution, I guess this is what I have.
I am adding an answer to my own question, to document the only thing I have found to work thus far. My approach was to monkeypatch distutils to ignore certain patterns when creating a directory or copying a file. This is really not what I wanted to do, but like I said, it's the only thing that works so far.
## setup.py ##

import re
# file_util has to come first because dir_util uses it
from distutils import file_util, dir_util

def wrapper(fn):
    def wrapped(src, *args, **kwargs):
        if not re.search(r'/\.git/?', src):
            fn(src, *args, **kwargs)
    return wrapped

file_util.copy_file = wrapper(file_util.copy_file)
dir_util.mkpath = wrapper(dir_util.mkpath)

# now import setuptools so it uses the monkeypatched methods
from setuptools import setup
Hopefully someone will comment on this and tell me a higher level approach to avoid doing this. But as of now, I will probably wrap this into a utility method like exclude_data_patterns(re_pattern) to be reused in my projects.
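That helper might look roughly like this (just the same monkeypatch parameterised; exclude_data_patterns is the hypothetical name mentioned above):
import re
# file_util has to come first because dir_util uses it
from distutils import file_util, dir_util

def exclude_data_patterns(re_pattern):
    pattern = re.compile(re_pattern)

    def wrapper(fn):
        def wrapped(src, *args, **kwargs):
            # skip any file or directory whose path matches the pattern
            if not pattern.search(src):
                return fn(src, *args, **kwargs)
        return wrapped

    file_util.copy_file = wrapper(file_util.copy_file)
    dir_util.mkpath = wrapper(dir_util.mkpath)

# call this before importing setuptools, exactly like the patch above
exclude_data_patterns(r'/\.git/?')
from setuptools import setup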
I can see two options for excluding the .git directory.
Build the application from a 'clean' checkout of the code. When deploying a new version, we always build from a fresh svn export based on a tag to ensure we don't pick up spurious changes/files. You could try the equivalent here - although the git equivalent seems somewhat more involved.
Modify the setup.py file to massage the files included in the application. This might be done using the exclude_package_data functionality as described in the docs, or build the list of data_files and pass it to setup.
As for why it has suddenly started happening, knowing the version of py2app you are using might help, as will knowing the contents of your setup.py and perhaps how this was made (by hand or using py2applet).
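For the second option, one way to build the data_files list yourself (a sketch, not from the original answer) is to walk the tree and prune .git as you go:
import os

def collect_data_files(root):
    # returns [(destination_dir, [source files])] in the format setup() expects,
    # skipping anything under a .git directory
    data_files = []
    for dirpath, dirnames, filenames in os.walk(root):
        dirnames[:] = [d for d in dirnames if d != '.git']
        files = [os.path.join(dirpath, f) for f in filenames]
        if files:
            data_files.append((dirpath, files))
    return data_files

DATA_FILES = collect_data_files('resources')  # 'resources' is a placeholder directory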
I have a similar experience with Pyinstaller, so I'm not sure it applies directly.
Pyinstaller creates a "manifest" of all files to be included in the distribution, before running the export process. You could "massage" this manifest, as per Mark's second suggestion, to exclude any files you want, including anything within .git or .git itself.
In the end, I stuck with checking out my code before producing a binary as there was more than just .git being bloat (such as UML documents and raw resource files for Qt). A checkout guaranteed a clean result and I experienced no issues automating that process along with the process of creating the installer for the binary.
There is a good answer to this, but I have a more elaborate answer to solve the problem mentioned here with a white-list approach. To have the monkey patch also work for packages outside site-packages.zip, I also had to monkey patch copy_tree (because it imports copy_file inside its function); this helps in making a standalone application.
In addition, I create a white-list recipe to mark certain packages zip-unsafe. The approach makes it easy to add filters other than white-list.
import pkgutil
from os.path import join, dirname, realpath
from distutils import log
# file_util has to come first because dir_util uses it
from distutils import file_util, dir_util
# noinspection PyUnresolvedReferences
from py2app import util

def keep_only_filter(base_mod, sub_mods):
    prefix = join(realpath(dirname(base_mod.filename)), '')
    all_prefix = [join(prefix, sm) for sm in sub_mods]
    log.info("Set filter for prefix %s" % prefix)

    def wrapped(mod):
        name = getattr(mod, 'filename', None)
        if name is None:
            # ignore anything that does not have file name
            return True
        name = join(realpath(dirname(name)), '')
        if not name.startswith(prefix):
            # ignore those that are not in this prefix
            return True
        for p in all_prefix:
            if name.startswith(p):
                return True
        # log.info('ignoring %s' % name)
        return False

    return wrapped

# define all the filters we need
all_filts = {
    'mypackage': (keep_only_filter, [
        'subpackage1', 'subpackage2',
    ]),
}

def keep_only_wrapper(fn, is_dir=False):
    filts = [(f, k[1]) for (f, k) in all_filts.iteritems()
             if k[0] == keep_only_filter]
    prefixes = {}
    for f, sms in filts:
        pkg = pkgutil.get_loader(f)
        assert pkg, '{f} package not found'.format(f=f)
        p = join(pkg.filename, '')
        sp = [join(p, sm, '') for sm in sms]
        prefixes[p] = sp

    def wrapped(src, *args, **kwargs):
        name = src
        if not is_dir:
            name = dirname(src)
        name = join(realpath(name), '')
        keep = True
        for prefix, sub_prefixes in prefixes.iteritems():
            if name == prefix:
                # let the root pass
                continue
            # if it is a package we have a filter for
            if name.startswith(prefix):
                keep = False
                for sub_prefix in sub_prefixes:
                    if name.startswith(sub_prefix):
                        keep = True
                        break
        if keep:
            return fn(src, *args, **kwargs)
        return []

    return wrapped

file_util.copy_file = keep_only_wrapper(file_util.copy_file)
dir_util.mkpath = keep_only_wrapper(dir_util.mkpath, is_dir=True)
util.copy_tree = keep_only_wrapper(util.copy_tree, is_dir=True)

class ZipUnsafe(object):
    def __init__(self, _module, _filt):
        self.module = _module
        self.filt = _filt

    def check(self, dist, mf):
        m = mf.findNode(self.module)
        if m is None:
            return None
        # Do not put this package in site-packages.zip
        if self.filt:
            return dict(
                packages=[self.module],
                filters=[self.filt[0](m, self.filt[1])],
            )
        return dict(
            packages=[self.module]
        )

# Any package that is zip-unsafe (uses __file__ ,... ) should be added here
# noinspection PyUnresolvedReferences
import py2app.recipes

for module in [
    'sklearn', 'mypackage',
]:
    filt = all_filts.get(module)
    setattr(py2app.recipes, module, ZipUnsafe(module, filt))