Get the current git hash in a Python script - python

I would like to include the current git hash in the output of a Python script (as the version number of the code that generated that output).
How can I access the current git hash in my Python script?

No need to hack around getting data from the git command yourself. GitPython is a very nice way to do this and a lot of other git stuff. It even has "best effort" support for Windows.
After pip install gitpython you can do
import git
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha
Something to consider when using this library; the following is taken from gitpython.readthedocs.io:
Leakage of System Resources
GitPython is not suited for long-running processes (like daemons) as it tends to leak system resources. It was written in a time where destructors (as implemented in the __del__ method) still ran deterministically.
In case you still want to use it in such a context, you will want to search the codebase for __del__ implementations and call these yourself when you see fit.
Another way to assure proper cleanup of resources is to factor out GitPython into a separate process which can be dropped periodically.
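If you only need the hash once, you can also limit the chance of leaking resources by closing the Repo object as soon as you are done. A minimal sketch, assuming a GitPython version recent enough that Repo supports the with statement:
import git

# Close the Repo (via the context manager) right after reading the hash so
# its handles are released promptly; hexsha is a plain string afterwards.
with git.Repo(search_parent_directories=True) as repo:
    sha = repo.head.object.hexsha
print(sha)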

This post contains the command; Greg's answer contains the subprocess command.
import subprocess
def get_git_revision_hash() -> str:
    return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()

def get_git_revision_short_hash() -> str:
    return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()
When running
print(get_git_revision_hash())
print(get_git_revision_short_hash())
you get output:
fd1cd173fc834f62fa7db3034efc5b8e0f3b43fe
fd1cd17
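If the script might run outside a git checkout, or on a machine without git, it can be worth catching the failure instead of letting check_output raise. A small sketch along those lines; the fallback string and function name are my own:
import subprocess

def get_git_revision_hash_or_unknown() -> str:
    # Hypothetical wrapper (name is mine): return the hash, or "unknown" when
    # git is missing or the working directory is not inside a repository.
    try:
        return subprocess.check_output(
            ['git', 'rev-parse', 'HEAD'], stderr=subprocess.DEVNULL
        ).decode('ascii').strip()
    except (subprocess.CalledProcessError, OSError):
        return 'unknown'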

The git describe command is a good way of creating a human-presentable "version number" of the code. From the examples in the documentation:
With something like git.git current tree, I get:
[torvalds#g5 git]$ git describe parent
v1.0.4-14-g2414721
i.e. the current head of my "parent" branch is based on v1.0.4, but since it has a few commits on top of that, describe has added the number of additional commits ("14") and an abbreviated object name for the commit itself ("2414721") at the end.
From within Python, you can do something like the following:
import subprocess
label = subprocess.check_output(["git", "describe"]).strip()
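Note that plain git describe fails if the repository has no annotated tags. A variant I find useful (the flag choice is mine, not part of the original answer) falls back to the abbreviated hash and flags uncommitted changes:
import subprocess

# --tags also considers lightweight tags, --always falls back to the commit
# hash when no tag is reachable, and --dirty appends "-dirty" if the working
# tree has local modifications.
label = subprocess.check_output(
    ["git", "describe", "--tags", "--always", "--dirty"]
).strip().decode()
print(label)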

Here's a more complete version of Greg's answer:
import subprocess
print(subprocess.check_output(["git", "describe", "--always"]).strip().decode())
Or, if the script is being called from outside the repo:
import subprocess, os
print(subprocess.check_output(["git", "describe", "--always"], cwd=os.path.dirname(os.path.abspath(__file__))).strip().decode())
Or, if the script is being called from outside the repo and you like pathlib:
import subprocess
from pathlib import Path
print(subprocess.check_output(["git", "describe", "--always"], cwd=Path(__file__).resolve().parent).strip().decode())

numpy has a nice looking multi-platform routine in its setup.py:
import os
import subprocess
# Return the git revision as a string
def git_version():
    def _minimal_ext_cmd(cmd):
        # construct minimal environment
        env = {}
        for k in ['SYSTEMROOT', 'PATH']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        GIT_REVISION = out.strip().decode('ascii')
    except OSError:
        GIT_REVISION = "Unknown"

    return GIT_REVISION
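numpy then writes the result into a generated module at build time, so the installed package can report its revision even where git is absent. A rough sketch of that idea (file name and contents are my simplification, not numpy's exact code):
def write_version_py(filename='mypackage/_version.py'):
    # Hypothetical helper: freeze the current git revision into the package
    # so it can be imported at runtime without git being available.
    content = "# THIS FILE IS GENERATED AT BUILD TIME\n"
    content += "git_revision = '%s'\n" % git_version()
    with open(filename, 'w') as f:
        f.write(content)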

If subprocess isn't portable and you don't want to install a package to do something this simple, you can also do this.
import pathlib
def get_git_revision(base_path):
    git_dir = pathlib.Path(base_path) / '.git'
    with (git_dir / 'HEAD').open('r') as head:
        ref = head.readline().split(' ')[-1].strip()
    with (git_dir / ref).open('r') as git_hash:
        return git_hash.readline().strip()
I've only tested this on my repos but it seems to work pretty consistently.
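One caveat: this assumes HEAD is a symbolic ref. In a detached-HEAD state (common in CI checkouts) .git/HEAD contains the raw hash itself, so a slightly more defensive sketch could look like this:
import pathlib

def get_git_revision(base_path):
    git_dir = pathlib.Path(base_path) / '.git'
    head = (git_dir / 'HEAD').read_text().strip()
    if head.startswith('ref: '):
        # HEAD points at a branch; follow the ref to get the commit hash.
        return (git_dir / head[len('ref: '):]).read_text().strip()
    # Detached HEAD: the file already contains the commit hash itself.
    return head
This still does not cover refs that git gc has moved into .git/packed-refs; the raven snippet further down shows one way to handle that case.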

This is an improvement on Yuji 'Tomita' Tomita's answer.
import subprocess
def get_git_revision_hash():
    full_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    full_hash = str(full_hash, "utf-8").strip()
    return full_hash

def get_git_revision_short_hash():
    short_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])
    short_hash = str(short_hash, "utf-8").strip()
    return short_hash
print(get_git_revision_hash())
print(get_git_revision_short_hash())

If you want a bit more data than the hash, you can use git log:
import subprocess
def get_git_hash():
    return subprocess.check_output(['git', 'log', '-n', '1', '--pretty=tformat:%H']).strip()

def get_git_short_hash():
    return subprocess.check_output(['git', 'log', '-n', '1', '--pretty=tformat:%h']).strip()

def get_git_short_hash_and_commit_date():
    return subprocess.check_output(['git', 'log', '-n', '1', '--pretty=tformat:%h-%ad', '--date=short']).strip()
For a full list of formatting options, check out git log --help.
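For instance, to get the short hash together with the author and subject of the last commit (the format string here is just an illustration):
import subprocess

def get_git_commit_summary() -> str:
    # %h = short hash, %an = author name, %ad = author date, %s = subject
    return subprocess.check_output(
        ['git', 'log', '-n', '1', '--pretty=tformat:%h %an %ad %s', '--date=short']
    ).decode().strip()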

I ran across this problem and solved it by implementing this function.
https://gist.github.com/NaelsonDouglas/9bc3bfa26deec7827cb87816cad88d59
from pathlib import Path
def get_commit(repo_path):
    git_folder = Path(repo_path, '.git')
    head_name = Path(git_folder, 'HEAD').read_text().split('\n')[0].split(' ')[-1]
    head_ref = Path(git_folder, head_name)
    commit = head_ref.read_text().replace('\n', '')
    return commit
r = get_commit('PATH OF YOUR CLONED REPOSITORY')
print(r)

I had a problem similar to the OP's, but in my case I'm delivering the source code to my client as a zip file and, although I know they will have Python installed, I cannot assume they will have git. Since the OP didn't specify their operating system or whether they have git installed, I think I can contribute here.
To get only the hash of the commit, Naelson Douglas's answer was perfect, but to have the tag name, I'm using the dulwich Python package. It's a simplified git client in Python.
After installing the package with pip install dulwich --global-option="--pure" one can do:
from dulwich import porcelain
def get_git_revision(base_path):
    return porcelain.describe(base_path)
r = get_git_revision("PATH OF YOUR REPOSITORY's ROOT FOLDER")
print(r)
I've just run this code in one repository here and it showed the output v0.1.2-1-gfb41223, similar to what is returned by git describe, meaning that I'm 1 commit after the tag v0.1.2 and the 7-digit hash of the commit is fb41223.
It has some limitations: currently it doesn't have an option to show if a repository is dirty and it always shows a 7-digit hash, but there's no need to have git installed, so one can choose the trade-off.
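If you need the full 40-character hash rather than the describe string, dulwich can give you that too without git installed. A minimal sketch, assuming Repo.head() behaves as in the dulwich versions I have used (it returns the HEAD commit id as bytes):
from dulwich.repo import Repo

def get_git_full_hash(base_path):
    repo = Repo(base_path)
    try:
        return repo.head().decode('ascii')
    finally:
        repo.close()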
Edit: in case of errors in the command pip install due to the option --pure (the issue is explained here), pick one of the two possible solutions:
Install Dulwich package's dependencies first:
pip install urllib3 certifi && pip install dulwich --global-option="--pure"
Install without the option pure: pip install dulwich. This will install some platform dependent files in your system, but it will improve the package's performance.

If you don't have Git available for some reason, but you have the git repo (a .git folder is found), you can fetch the commit hash from .git/refs/heads/[branch].
For example, I've used a following quick-and-dirty Python snippet run at the repository root to get the commit id:
git_head = '.git\\HEAD'

# Open .git\HEAD file:
with open(git_head, 'r') as git_head_file:
    # Contains e.g. ref: refs/heads/master if on "master"
    git_head_data = str(git_head_file.read())

# Open the correct file in .git\refs\heads\[branch]
git_head_ref = '.git\\%s' % git_head_data.split(' ')[1].replace('/', '\\').strip()

# Get the commit hash ([:7] used to get "--short")
with open(git_head_ref, 'r') as git_head_ref_file:
    commit_id = git_head_ref_file.read().strip()[:7]

If you are like me:
Multiplatform, so subprocess may crash one day
Using Python 2.7, so GitPython is not available
Don't want to use numpy just for that
Already using Sentry (old deprecated version: raven)
Then (this will not work in a shell because a shell doesn't detect the current file path; replace BASE_DIR with your current file path):
import os
import raven
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(raven.fetch_git_sha(BASE_DIR))
That's it.
I was looking for another solution because I wanted to migrate to sentry_sdk and leave raven, but maybe some of you want to continue using raven for a while.
Here is the discussion that got me into this Stack Overflow issue.
So using the code of raven without raven is also possible (see the discussion):
from __future__ import absolute_import
import os.path
__all__ = 'fetch_git_sha'
def fetch_git_sha(path, head=None):
    """
    >>> fetch_git_sha(os.path.dirname(__file__))
    """
    if not head:
        head_path = os.path.join(path, '.git', 'HEAD')

        with open(head_path, 'r') as fp:
            head = fp.read().strip()

        if head.startswith('ref: '):
            head = head[5:]
            revision_file = os.path.join(
                path, '.git', *head.split('/')
            )
        else:
            return head
    else:
        revision_file = os.path.join(path, '.git', 'refs', 'heads', head)

    if not os.path.exists(revision_file):
        # Check the .git/packed-refs file since a `git gc` may have run
        # https://git-scm.com/book/en/v2/Git-Internals-Maintenance-and-Data-Recovery
        packed_file = os.path.join(path, '.git', 'packed-refs')
        if os.path.exists(packed_file):
            with open(packed_file) as fh:
                for line in fh:
                    line = line.rstrip()
                    if line and line[:1] not in ('#', '^'):
                        try:
                            revision, ref = line.split(' ', 1)
                        except ValueError:
                            continue
                        if ref == head:
                            return revision

    with open(revision_file) as fh:
        return fh.read().strip()
I named this file versioning.py and I import fetch_git_sha where I need it, passing the file path as an argument.
Hope it will help some of you ;)

Related

Linux package management with Python

I am a complete Python beginner and am trying to write a Python script to automate the setup of an SDK on Linux machines from remote GitHub repositories.
The script starts by performing some basic preliminary operations, especially the check/setup of several packages (git, docker, pip, etc.).
For now, I target Debian (Stretch, Buster), Centos (6, 7) and Ubuntu Server 18.04LTS.
Of course, I want the script to run on the widest range of linux machines.
Today I rely on available package managers (apt-get and yum), roughly requested through subprocess.call() statements.
I customize the related commands using nasty script configuration variables like below:
import platform
distribution = platform.dist()[0]
version = platform.dist()[1]
if distribution == 'debian':
    pkgInstaller = 'dpkg'
    pkgManager = 'apt-get'
    checkIfInstalled = '-s'
    installPackage = 'install'
    yesToAll = '-y'
    dockerPackage = 'docker-ce'
elif distribution == 'centos':
    pkgInstaller = 'rpm'
    pkgManager = 'yum'
    checkIfInstalled = '-q'
    installPackage = 'install'
    yesToAll = '-y'
    dockerPackage = 'docker'
I then simply loop over an array containing the names of packages to be installed, then run the command through subprocess.call():
prerequisites = ['git', dockerPackage, 'doxygen', 'python2-pip']
for pkg in prerequisites:
    pgkInstallation = subprocess.call(['sudo', pkgManager, yesToAll, installPackage, pkg])
While this approach may have the benefit of not depending too much on third-party Python modules, I guess there are smarter ways of doing such a simple operation?
Usually when doing switch statements like this, a dictionary might be a bit more useful. Also, normally I'm not one to try to PEP-8 things, but this is an instance where PEP-8 might really help your readability by not matching up your equals signs for all of your lines of code.
The dict will hold your distro as the key, and your vars as a value wrapped in a tuple
options = {
    'debian': ('dpkg', 'apt-get', '-s', 'install', '-y', 'docker-ce'),
    'centos': ('rpm', 'yum', '-q', 'install', '-y', 'docker'),
}
# unpack this function call here
distribution, version, *_ = platform.dist()
# now get the match
pkg_installer, pkg_manager, check, install_pkg, yes_to_all, docker = options[distribution]
requisites = ['git', docker, 'doxygen', 'python2-pip']
for pkg in requisites:
    pgkInstallation = subprocess.call(['sudo', pkg_manager, yes_to_all, install_pkg, pkg])
The options[distribution] call will raise a KeyError for unsupported distributions, so you can probably catch that and raise something a bit more useful like:
try:
    pkg_installer, pkg_manager, check, install_pkg, yes_to_all, docker = options[distribution]
except KeyError as e:
    raise ValueError(f"Got unsupported OS, expected one of {', '.join(options.keys())}") from e
To make it less verbose, the only var you use out of order is docker, so you can house all of the others in a single var:
try:
    *args, docker = options[distribution]
except KeyError as e:
    raise ValueError(f"Got unsupported OS, expected one of {', '.join(options.keys())}") from e

requisites = ['git', docker, 'doxygen', 'python2-pip']

for pkg in requisites:
    pgkInstallation = subprocess.call(['sudo', *args, pkg])

GitPython with remotes

I'm trying to solve an easy task with git in python. Actually I just want something like this
git clone remote-repo
git pull
edit file
git commit file
git push
rm -rf remote-repo
So just change a file, commit it, and push it right away. But I'm having problems getting my head around the documentation http://gitpython.readthedocs.org/en/stable/tutorial.html
This is what I got so far
import git
import shutil
import os
repo_url = 'git#git.internal.server:users/me/mytest'
repo_dir = '/tmp/test/repo'
work_file_name = 'testFile'
work_file = os.path.join(repo_dir, work_file_name)
if os.path.isdir(repo_dir):
    shutil.rmtree(repo_dir)
repo = git.Repo.clone_from(repo_url, repo_dir)
git = repo.git
git.pull()
new_file_path = os.path.join(repo.working_tree_dir, work_file_name)
f = open(new_file_path, 'w')
f.write("just some test")
f.close()
repo.index.commit("test commit")
git.push()
shutil.rmtree(repo_dir)
It actually creates a commit and pushes it to the server, but it contains no changes, so the file I tried to write is empty. Any idea what I'm missing?
You have to stage the file/change before the commit:
repo.index.add([new_file_path])
As a side note: The following line overrides your import and might lead to errors if you ever expand your script:
git = repo.git
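Putting both corrections together, the relevant part of the script might look like this (a sketch assuming 'origin' is the remote created by clone_from):
repo = git.Repo.clone_from(repo_url, repo_dir)

new_file_path = os.path.join(repo.working_tree_dir, work_file_name)
with open(new_file_path, 'w') as f:
    f.write("just some test")

repo.index.add([new_file_path])   # stage the change first
repo.index.commit("test commit")
repo.remotes.origin.push()        # push via the remote object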

Reference requirements.txt for the install_requires kwarg in setuptools setup.py file

I have a requirements.txt file that I'm using with Travis-CI. It seems silly to duplicate the requirements in both requirements.txt and setup.py, so I was hoping to pass a file handle to the install_requires kwarg in setuptools.setup.
Is this possible? If so, how should I go about doing it?
Here is my requirements.txt file:
guessit>=0.5.2
tvdb_api>=1.8.2
hachoir-metadata>=1.3.3
hachoir-core>=1.3.3
hachoir-parser>=1.3.4
On the face of it, it does seem that requirements.txt and setup.py are silly duplicates, but it's important to understand that while the form is similar, the intended function is very different.
The goal of a package author, when specifying dependencies, is to say "wherever you install this package, these are the other packages you need, in order for this package to work."
In contrast, the deployment author (which may be the same person at a different time) has a different job, in that they say "here's the list of packages that we've gathered together and tested and that I now need to install".
The package author writes for a wide variety of scenarios, because they're putting their work out there to be used in ways they may not know about, and have no way of knowing what packages will be installed alongside their package. In order to be a good neighbor and avoid dependency version conflicts with other packages, they need to specify as wide a range of dependency versions as can possibly work. This is what install_requires in setup.py does.
The deployment author writes for a very different, very specific goal: a single instance of an installed application or service, installed on a particular computer. In order to precisely control a deployment, and be sure that the right packages are tested and deployed, the deployment author must specify the exact version and source-location of every package to be installed, including dependencies and dependency's dependencies. With this spec, a deployment can be repeatably applied to several machines, or tested on a test machine, and the deployment author can be confident that the same packages are deployed every time. This is what a requirements.txt does.
So you can see that, while they both look like a big list of packages and versions, these two things have very different jobs. And it's definitely easy to mix this up and get it wrong! But the right way to think about this is that requirements.txt is an "answer" to the "question" posed by the requirements in all the various setup.py package files. Rather than write it by hand, it's often generated by telling pip to look at all the setup.py files in a set of desired packages, find a set of packages that it thinks fits all the requirements, and then, after they're installed, "freeze" that list of packages into a text file (this is where the pip freeze name comes from).
So the takeaway:
setup.py should declare the loosest possible dependency versions that are still workable. Its job is to say what a particular package can work with.
requirements.txt is a deployment manifest that defines an entire installation job, and shouldn't be thought of as tied to any one package. Its job is to declare an exhaustive list of all the necessary packages to make a deployment work.
Because these two things have such different content and reasons for existing, it's not feasible to simply copy one into the other.
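A tiny illustration of the split (package names and versions invented for the example): setup.py states only what the library can work with, while requirements.txt freezes what a particular deployment actually tested:
# setup.py -- the package author's loose "works with" declaration
setup(
    name='mytool',
    install_requires=[
        'requests>=2.0',
        'click>=7',
    ],
)
and the corresponding deployment pin file:
# requirements.txt -- the deployment author's exact, frozen answer
requests==2.28.1
click==8.1.3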
References:
install_requires vs Requirements files from the Python packaging user guide.
You can flip it around and list the dependencies in setup.py and have a single character — a dot . — in requirements.txt instead.
Alternatively, even if not advised, it is still possible to parse the requirements.txt file (if it doesn't refer any external requirements by URL) with the following hack (tested with pip 9.0.1):
from pip.req import parse_requirements
install_reqs = parse_requirements('requirements.txt', session='hack')
This doesn't filter environment markers though.
In old versions of pip, more specifically older than 6.0, there is a public API that can be used to achieve this. A requirement file can contain comments (#) and can include some other files (--requirement or -r). Thus, if you really want to parse a requirements.txt you can use the pip parser:
from pip.req import parse_requirements
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements(<requirements_path>)
# reqs is a list of requirement
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
reqs = [str(ir.req) for ir in install_reqs]
setup(
    ...
    install_requires=reqs
)
It can't take a file handle. The install_requires argument can only be a string or a list of strings.
You can, of course, read your file in the setup script and pass it as a list of strings to install_requires.
import os
from setuptools import setup
with open('requirements.txt') as f:
    required = f.read().splitlines()

setup(
    ...
    install_requires=required,
    ...
)
Requirements files use an expanded pip format, which is only useful if you need to complement your setup.py with stronger constraints, for example specifying the exact urls some of the dependencies must come from, or the output of pip freeze to freeze the entire package set to known-working versions. If you don't need the extra constraints, use only a setup.py. If you feel like you really need to ship a requirements.txt anyway, you can make it a single line:
.
It will be valid and refer exactly to the contents of the setup.py that is in the same directory.
While not an exact answer to the question, I recommend Donald Stufft's blog post at https://caremad.io/2013/07/setup-vs-requirement/ for a good take on this problem. I've been using it to great success.
In short, requirements.txt is not a setup.py alternative, but a deployment complement. Keep an appropriate abstraction of package dependencies in setup.py, and use one or more requirements.txt files to fetch specific versions of package dependencies for development, testing, or production.
E.g. with packages included in the repo under deps/:
# fetch specific dependencies
--no-index
--find-links deps/
# install package
# NOTE: -e . for editable mode
.
pip executes the package's setup.py and installs the specific versions of dependencies declared in install_requires. There's no duplication and the purpose of both artifacts is preserved.
Using parse_requirements is problematic because the pip API isn't publicly documented and supported. In pip 1.6, that function is actually moving, so existing uses of it are likely to break.
A more reliable way to eliminate duplication between setup.py and requirements.txt is to specify your dependencies in setup.py and then put -e . into your requirements.txt file. Some information from one of the pip developers about why that's a better way to go is available here: https://caremad.io/blog/setup-vs-requirement/
First, I believe parsing requirements.txt to fill the list of dependencies in package metadata is not a good idea. The requirements.txt file and the list of "install dependencies" are two different concepts, they are not interchangeable. It should be the other way around, the list of dependencies in package metadata should be considered as some kind of source of truth, and files such as requirements.txt should be generated from there. For example with a tool such as pip-compile. See the notes at the bottom of this answer.
But everyone has different needs, that lead to different workflows. So with that said... There are 3 possibilities to handle this, depending on where you want your project's package metadata to be written: pyproject.toml, setup.cfg, or setup.py.
Words of caution!
If you insist on having the list of dependencies in package metadata be read from a requirements.txt file then make sure that this requirements.txt file is included in the "source distribution" (sdist) otherwise installation will fail, for obvious reasons.
These techniques will work only for simple requirements.txt files. See Requirements parsing in the documentation page for pkg_resources to get details about what is handled. In short, each line should be a valid PEP 508 requirement. Notations that are really specific to pip are not supported and it will cause a failure.
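For the first caution, one common way (my suggestion, not part of the original answer) to make sure the file ends up in the sdist when building with setuptools is a MANIFEST.in entry:
# MANIFEST.in
include requirements.txt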
pyproject.toml
[project]
# ...
dynamic = ["dependencies"]
[tool.setuptools.dynamic]
# ...
dependencies = { file = ["requirements.txt"] }
setup.cfg
Since setuptools version 62.6 it is possible to write something like this in setup.cfg:
[options]
install_requires = file: requirements.txt
setup.py
It is possible to parse a relatively simple requirements.txt file from a setuptools setup.py script without pip. The setuptools project already contains necessary tools in its top level package pkg_resources.
It could more or less look like this:
#!/usr/bin/env python
import pathlib
import pkg_resources
import setuptools
with pathlib.Path('requirements.txt').open() as requirements_txt:
    install_requires = [
        str(requirement)
        for requirement
        in pkg_resources.parse_requirements(requirements_txt)
    ]

setuptools.setup(
    install_requires=install_requires,
)
Notes:
https://github.com/pypa/setuptools/issues/1951#issuecomment-1431345869
https://caremad.io/posts/2013/07/setup-vs-requirement/
https://setuptools.pypa.io/en/latest/history.html#v62-6-0
See also this other answer: https://stackoverflow.com/a/59971236
Most of the other answers above don't work with the current version of pip's API. Here is the correct* way to do it with the current version of pip (6.0.8 at the time of writing, also worked in 7.1.2. You can check your version with pip -V).
from pip.req import parse_requirements
from pip.download import PipSession
install_reqs = parse_requirements(<requirements_path>, session=PipSession())
reqs = [str(ir.req) for ir in install_reqs]
setup(
    ...
    install_requires=reqs
    ...
)
* Correct, in that it is the way to use parse_requirements with the current pip. It still probably isn't the best way to do it, since, as posters above said, pip doesn't really maintain an API.
Install the current package in Travis. This avoids the use of a requirements.txt file.
For example:
language: python
python:
  - "2.7"
  - "2.6"
install:
  - pip install -q -e .
script:
  - python runtests.py
from pip.req import parse_requirements did not work for me, and I think it's because of the blank lines in my requirements.txt, but this function does work:
def parse_requirements(requirements):
    with open(requirements) as f:
        return [l.strip('\n') for l in f if l.strip('\n') and not l.startswith('#')]

reqs = parse_requirements(<requirements_path>)

setup(
    ...
    install_requires=reqs,
    ...
)
The following interface became deprecated in pip 10:
from pip.req import parse_requirements
from pip.download import PipSession
So I switched it just to simple text parsing:
with open('requirements.txt', 'r') as f:
    install_reqs = [
        s for s in [
            line.split('#', 1)[0].strip(' \t\n') for line in f
        ] if s != ''
    ]
BEWARE OF parse_requirements BEHAVIOUR!
Please note that pip.req.parse_requirements will change underscores to dashes. This was enraging me for a few days before I discovered it. Example demonstrating:
from pip.req import parse_requirements # tested with v.1.4.1
reqs = '''
example_with_underscores
example-with-dashes
'''
with open('requirements.txt', 'w') as f:
    f.write(reqs)
req_deps = parse_requirements('requirements.txt')
result = [str(ir.req) for ir in req_deps if ir.req is not None]
print result
produces
['example-with-underscores', 'example-with-dashes']
If you don't want to force your users to install pip, you can emulate its behavior with this:
import sys
from os import path as p
try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup, find_packages

def read(filename, parent=None):
    parent = (parent or __file__)

    try:
        with open(p.join(p.dirname(parent), filename)) as f:
            return f.read()
    except IOError:
        return ''

def parse_requirements(filename, parent=None):
    parent = (parent or __file__)
    filepath = p.join(p.dirname(parent), filename)
    content = read(filename, parent)

    for line_number, line in enumerate(content.splitlines(), 1):
        candidate = line.strip()

        if candidate.startswith('-r'):
            for item in parse_requirements(candidate[2:].strip(), filepath):
                yield item
        else:
            yield candidate

setup(
    ...
    install_requires=list(parse_requirements('requirements.txt'))
)
I created a reusable function for this. It actually parses an entire directory of requirements files and sets them to extras_require.
Latest always available here: https://gist.github.com/akatrevorjay/293c26fefa24a7b812f5
import glob
import itertools
import os
# This is getting ridiculous
try:
    from pip._internal.req import parse_requirements
    from pip._internal.network.session import PipSession
except ImportError:
    try:
        from pip._internal.req import parse_requirements
        from pip._internal.download import PipSession
    except ImportError:
        from pip.req import parse_requirements
        from pip.download import PipSession


def setup_requirements(
        patterns=[
            'requirements.txt', 'requirements/*.txt', 'requirements/*.pip'
        ],
        combine=True):
    """
    Parse a glob of requirements and return a dictionary of setup() options.
    Create a dictionary that holds your options to setup() and update it using this.
    Pass that as kwargs into setup(), viola

    Any files that are not a standard option name (ie install, tests, setup) are added to extras_require with their
    basename minus ext. An extra key is added to extras_require: 'all', that contains all distinct reqs combined.

    Keep in mind all literally contains `all` packages in your extras.
    This means if you have conflicting packages across your extras, then you're going to have a bad time.
    (don't use all in these cases.)

    If you're running this for a Docker build, set `combine=True`.
    This will set `install_requires` to all distinct reqs combined.

    Example:

    >>> import setuptools
    >>> _conf = dict(
    ...     name='mainline',
    ...     version='0.0.1',
    ...     description='Mainline',
    ...     author='Trevor Joynson <github#trevor.joynson,io>',
    ...     url='https://trevor.joynson.io',
    ...     namespace_packages=['mainline'],
    ...     packages=setuptools.find_packages(),
    ...     zip_safe=False,
    ...     include_package_data=True,
    ... )
    >>> _conf.update(setup_requirements())
    >>> # setuptools.setup(**_conf)

    :param str pattern: Glob pattern to find requirements files
    :param bool combine: Set True to set install_requires to extras_require['all']
    :return dict: Dictionary of parsed setup() options
    """
    session = PipSession()

    # Handle setuptools insanity
    key_map = {
        'requirements': 'install_requires',
        'install': 'install_requires',
        'tests': 'tests_require',
        'setup': 'setup_requires',
    }
    ret = {v: set() for v in key_map.values()}
    extras = ret['extras_require'] = {}
    all_reqs = set()

    files = [glob.glob(pat) for pat in patterns]
    files = itertools.chain(*files)

    for full_fn in files:
        # Parse
        reqs = {
            str(r.req)
            for r in parse_requirements(full_fn, session=session)
            # Must match env marker, eg:
            #   yarl ; python_version >= '3.0'
            if r.match_markers()
        }
        all_reqs.update(reqs)

        # Add in the right section
        fn = os.path.basename(full_fn)
        barefn, _ = os.path.splitext(fn)
        key = key_map.get(barefn)

        if key:
            ret[key].update(reqs)
            extras[key] = reqs
        extras[barefn] = reqs

    if 'all' not in extras:
        extras['all'] = list(all_reqs)

    if combine:
        extras['install'] = ret['install_requires']
        ret['install_requires'] = list(all_reqs)

    def _listify(dikt):
        ret = {}
        for k, v in dikt.items():
            if isinstance(v, set):
                v = list(v)
            elif isinstance(v, dict):
                v = _listify(v)
            ret[k] = v
        return ret

    ret = _listify(ret)
    return ret


__all__ = ['setup_requirements']

if __name__ == '__main__':
    reqs = setup_requirements()
    print(reqs)
Another possible solution...
def gather_requirements(top_path=None):
    """Captures requirements from repo.

    Expected file format is: requirements[-_]<optional-extras>.txt

    For example:
        pip install -e .[foo]
    Would require:
        requirements-foo.txt
    or
        requirements_foo.txt
    """
    from pip.download import PipSession
    from pip.req import parse_requirements
    import re

    session = PipSession()
    top_path = top_path or os.path.realpath(os.getcwd())
    extras = {}

    for filepath in tree(top_path):
        filename = os.path.basename(filepath)
        basename, ext = os.path.splitext(filename)
        if ext == '.txt' and basename.startswith('requirements'):
            if filename == 'requirements.txt':
                extra_name = 'requirements'
            else:
                _, extra_name = re.split(r'[-_]', basename, 1)
            if extra_name:
                reqs = [str(ir.req) for ir in parse_requirements(filepath, session=session)]
                extras.setdefault(extra_name, []).extend(reqs)

    all_reqs = set()
    for key, values in extras.items():
        all_reqs.update(values)
    extras['all'] = list(all_reqs)

    return extras
and then to use...
reqs = gather_requirements()
install_reqs = reqs.pop('requirements', [])
test_reqs = reqs.pop('test', [])
...
setup(
    ...
    install_requires=install_reqs,
    tests_require=test_reqs,
    extras_require=reqs,
    ...
)
Cross posting my answer from this SO question for another simple, pip version proof solution.
try:  # for pip >= 10
    from pip._internal.req import parse_requirements
    from pip._internal.download import PipSession
except ImportError:  # for pip <= 9.0.3
    from pip.req import parse_requirements
    from pip.download import PipSession

requirements = parse_requirements(os.path.join(os.path.dirname(__file__), 'requirements.txt'), session=PipSession())

if __name__ == '__main__':
    setup(
        ...
        install_requires=[str(requirement.req) for requirement in requirements],
        ...
    )
Then just throw all your requirements into a requirements.txt under the project root directory.
Yet another parse_requirements hack that also parses environment markers into extras_require:
from collections import defaultdict
from pip.req import parse_requirements
requirements = []
extras = defaultdict(list)

for r in parse_requirements('requirements.txt', session='hack'):
    if r.markers:
        extras[':' + str(r.markers)].append(str(r.req))
    else:
        requirements.append(str(r.req))

setup(
    ...,
    install_requires=requirements,
    extras_require=extras
)
It should support both sdist and binary dists.
As stated by others, parse_requirements has several shortcomings, so this is not what you should do on public projects, but it may suffice for internal/personal projects.
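For context, the values this produces and passes to setup() look roughly like this (package names invented); keys of the form ':marker' are the setuptools spelling for conditional dependencies:
install_requires = ['requests', 'six']
extras_require = {
    ":python_version < '3.0'": ['futures'],
}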
I did this:
import re
def requirements(filename):
    with open(filename) as f:
        ll = f.read().splitlines()
    d = {}
    for l in ll:
        k, v = re.split(r'==|>=', l)
        d[k] = v
    return d

def packageInfo():
    try:
        from pip._internal.operations import freeze
    except ImportError:
        from pip.operations import freeze

    d = {}
    for kv in freeze.freeze():
        k, v = re.split(r'==|>=', kv)
        d[k] = v
    return d

req = requirements('requirements.txt')
pkginfo = packageInfo()

for k, v in req.items():
    print(f'{k:<16}: {v:<6} -> {pkginfo[k]}')
Here is a complete hack (tested with pip 9.0.1) based on Romain's answer that parses requirements.txt and filters it according to current environment markers:
from pip.req import parse_requirements
requirements = []

for r in parse_requirements('requirements.txt', session='hack'):
    # check markers, such as
    #
    #     rope_py3k ; python_version >= '3.0'
    #
    if r.match_markers():
        requirements.append(str(r.req))

print(requirements)

How do you define the host for Fabric to push to GitHub?

Original Question
I've got some python scripts which have been using Amazon S3 to upload screenshots taken following Selenium tests within the script.
Now we're moving from S3 to use GitHub so I've found GitPython but can't see how you use it to actually commit to the local repo and push to the server.
My script builds a directory structure similar to \images\228M\View_Use_Case\1.png in the workspace, and when uploading to S3 it was a simple process:
for root, dirs, files in os.walk(imagesPath):
    for name in files:
        filename = os.path.join(root, name)
        k = bucket.new_key('{0}/{1}/{2}'.format(revisionNumber, images_process, name))  # returns a new key object
        k.set_contents_from_filename(filename, policy='public-read')  # opens local file buffers to key on S3
        k.set_metadata('Content-Type', 'image/png')
Is there something similar for this or is there something as simple as a bash type git add images command in GitPython that I've completely missed?
Updated with Fabric
So I've installed Fabric on kracekumar's recommendation but I can't find docs on how to define the (GitHub) hosts.
My script is pretty simple, just trying to get the upload to work:
from __future__ import with_statement
from fabric.api import *
from fabric.contrib.console import confirm
import os
def git_server():
    env.hosts = ['github.com']
    env.user = 'git'
    env.passowrd = 'password'

def test():
    process = 'View Employee'
    os.chdir('\Work\BPTRTI\main\employer_toolkit')
    with cd('\Work\BPTRTI\main\employer_toolkit'):
        result = local('ant viewEmployee_git')
    if result.failed and not confirm("Tests failed. Continue anyway?"):
        abort("Aborting at user request.")

def deploy():
    process = "View Employee"
    os.chdir('\Documents and Settings\markw\GitTest')
    with cd('\Documents and Settings\markw\GitTest'):
        local('git add images')
        local('git commit -m "Latest Selenium screenshots for %s"' % (process))
        local('git push -u origin master')

def viewEmployee():
    #test()
    deploy()
It Works \o/ Hurrah.
You should look into Fabric (http://docs.fabfile.org/en/1.4.1/index.html), an automated server deployment tool. I have been using it for quite some time and it works pretty well.
Here is my one of the application which uses it, https://github.com/kracekumar/sachintweets/blob/master/fabfile.py
It looks like you can do this:
index = repo.index
index.add(['images'])
new_commit = index.commit("my commit message")
and then, assuming you have origin as the default remote:
origin = repo.remotes.origin
origin.push()
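push() returns a list of PushInfo objects, so you can also check whether the push was accepted. A small sketch; the attribute names are as I remember them from GitPython, so treat this as an assumption:
origin = repo.remotes.origin
for info in origin.push():
    # info.flags is a bitmask; ERROR is set when the ref was rejected or failed
    if info.flags & info.ERROR:
        print("push failed:", info.summary)
    else:
        print("pushed:", info.summary)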

py2app picking up .git subdir of a package during build

We use py2app extensively at our facility to produce self contained .app packages for easy internal deployment without dependency issues. Something I noticed recently, and have no idea how it began, is that when building an .app, py2app started including the .git directory of our main library.
commonLib, for instance, is our root python library package, which is a git repo. Under this package are the various subpackages such as database, utility, etc.
commonLib/
|- .git/ # because commonLib is a git repo
|- __init__.py
|- database/
|- __init__.py
|- utility/
|- __init__.py
# ... etc
In a given project, say Foo, we will do imports like from commonLib import xyz to use our common packages. Building via py2app looks something like: python setup.py py2app
So the recent issue I am seeing is that when building an app for project Foo, I will see it include everything in commonLib/.git/ into the app, which is extra bloat. py2app has an excludes option but that only seems to be for Python modules. I can't quite figure out what it would take to exclude the .git subdir, or in fact, what is causing it to be included in the first place.
Has anyone experienced this when using a python package import that is a git repo?
Nothing has changed in our setup.py files for each project, and commonLib has always been a git repo. So the only thing I can think of being a variable is the version of py2app and its deps which have obviously been upgraded over time.
Edit
I'm using the latest py2app 0.6.4 as of right now. Also, my setup.py was first generated from py2applet a while back, but has been hand configured since and copied over as a template for every new project. I am using PyQt4/sip for every single one of these projects, so it also makes me wonder if its an issue with one of the recipes?
Update
From the first answer, I tried to fix this using various combinations of exclude_package_data settings. Nothing seems to force the .git directory to become excluded. Here is a sample of what my setup.py files generally look like:
from setuptools import setup
from myApp import VERSION
appname = 'MyApp'
APP = ['myApp.py']
DATA_FILES = []
OPTIONS = {
    'includes': 'atexit, sip, PyQt4.QtCore, PyQt4.QtGui',
    'strip': True,
    'iconfile': 'ui/myApp.icns',
    'resources': ['src/myApp.png'],
    'plist': {
        'CFBundleIconFile': 'ui/myApp.icns',
        'CFBundleIdentifier': 'com.company.myApp',
        'CFBundleGetInfoString': appname,
        'CFBundleVersion': VERSION,
        'CFBundleShortVersionString': VERSION
    }
}

setup(
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
I have tried things like:
setup(
    ...
    exclude_package_data={'commonLib': ['.git']},
    #exclude_package_data={'': ['.git']},
    #exclude_package_data={'commonLib/.git/': ['*']},
    #exclude_package_data={'.git': ['*']},
    ...
)
Update #2
I have posted my own answer which does a monkeypatch on distutils. It's ugly and not preferred, but until someone can offer me a better solution, I guess this is what I have.
I am adding an answer to my own question, to document the only thing I have found to work thus far. My approach was to monkeypatch distutils to ignore certain patterns when creating a directory or copying a file. This is really not what I wanted to do, but like I said, it's the only thing that works so far.
## setup.py ##
import re
# file_util has to come first because dir_util uses it
from distutils import file_util, dir_util
def wrapper(fn):
    def wrapped(src, *args, **kwargs):
        if not re.search(r'/\.git/?', src):
            fn(src, *args, **kwargs)
    return wrapped
file_util.copy_file = wrapper(file_util.copy_file)
dir_util.mkpath = wrapper(dir_util.mkpath)
# now import setuptools so it uses the monkeypatched methods
from setuptools import setup
Hopefully someone will comment on this and tell me a higher level approach to avoid doing this. But as of now, I will probably wrap this into a utility method like exclude_data_patterns(re_pattern) to be reused in my projects.
I can see two options for excluding the .git directory.
Build the application from a 'clean' checkout of the code. When deploying a new version, we always build from a fresh svn export based on a tag to ensure we don't pick up spurious changes/files. You could try the equivalent here - although the git equivalent seems somewhat more involved.
Modify the setup.py file to massage the files included in the application. This might be done using the exclude_package_data functionality as described in the docs, or build the list of data_files and pass it to setup.
As for why it has suddenly started happening, knowing the version of py2app you are using might help, as will knowing the contents of your setup.py and perhaps how this was made (by hand or using py2applet).
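To illustrate the second option (massaging the data_files list), a hypothetical helper that walks a directory and skips anything under .git before handing the result to setup(); the names are mine and I have not verified this against py2app's own resource handling:
import os

def collect_data_files(root):
    # Walk `root`, skip .git directories, and return (target_dir, [files])
    # pairs in the shape setup(data_files=...) expects.
    data_files = []
    for dirpath, dirnames, filenames in os.walk(root):
        dirnames[:] = [d for d in dirnames if d != '.git']
        files = [os.path.join(dirpath, f) for f in filenames]
        if files:
            data_files.append((os.path.relpath(dirpath, root), files))
    return data_files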
I have a similar experience with Pyinstaller, so I'm not sure it applies directly.
Pyinstaller creates a "manifest" of all files to be included in the distribution, before running the export process. You could "massage" this manifest, as per Mark's second suggestion, to exclude any files you want. Including anything within .git or .git itself.
In the end, I stuck with checking out my code before producing a binary as there was more than just .git being bloat (such as UML documents and raw resource files for Qt). A checkout guaranteed a clean result and I experienced no issues automating that process along with the process of creating the installer for the binary.
There is a good answer to this, but I have a more elaborate answer to solve the problem mentioned here with a white-list approach. To have the monkey patch also work for packages outside site-packages.zip, I had to monkeypatch copy_tree as well (because it imports copy_file inside its function); this helps in making a standalone application.
In addition, I create a white-list recipe to mark certain packages zip-unsafe. The approach makes it easy to add filters other than white-list.
import pkgutil
from os.path import join, dirname, realpath
from distutils import log
# file_util has to come first because dir_util uses it
from distutils import file_util, dir_util
# noinspection PyUnresolvedReferences
from py2app import util
def keep_only_filter(base_mod, sub_mods):
    prefix = join(realpath(dirname(base_mod.filename)), '')
    all_prefix = [join(prefix, sm) for sm in sub_mods]
    log.info("Set filter for prefix %s" % prefix)

    def wrapped(mod):
        name = getattr(mod, 'filename', None)
        if name is None:
            # ignore anything that does not have file name
            return True
        name = join(realpath(dirname(name)), '')
        if not name.startswith(prefix):
            # ignore those that are not in this prefix
            return True
        for p in all_prefix:
            if name.startswith(p):
                return True
        # log.info('ignoring %s' % name)
        return False

    return wrapped

# define all the filters we need
all_filts = {
    'mypackage': (keep_only_filter, [
        'subpackage1', 'subpackage2',
    ]),
}

def keep_only_wrapper(fn, is_dir=False):
    filts = [(f, k[1]) for (f, k) in all_filts.iteritems()
             if k[0] == keep_only_filter]
    prefixes = {}
    for f, sms in filts:
        pkg = pkgutil.get_loader(f)
        assert pkg, '{f} package not found'.format(f=f)
        p = join(pkg.filename, '')
        sp = [join(p, sm, '') for sm in sms]
        prefixes[p] = sp

    def wrapped(src, *args, **kwargs):
        name = src
        if not is_dir:
            name = dirname(src)
        name = join(realpath(name), '')
        keep = True
        for prefix, sub_prefixes in prefixes.iteritems():
            if name == prefix:
                # let the root pass
                continue
            # if it is a package we have a filter for
            if name.startswith(prefix):
                keep = False
                for sub_prefix in sub_prefixes:
                    if name.startswith(sub_prefix):
                        keep = True
                        break
        if keep:
            return fn(src, *args, **kwargs)
        return []

    return wrapped

file_util.copy_file = keep_only_wrapper(file_util.copy_file)
dir_util.mkpath = keep_only_wrapper(dir_util.mkpath, is_dir=True)
util.copy_tree = keep_only_wrapper(util.copy_tree, is_dir=True)

class ZipUnsafe(object):
    def __init__(self, _module, _filt):
        self.module = _module
        self.filt = _filt

    def check(self, dist, mf):
        m = mf.findNode(self.module)
        if m is None:
            return None

        # Do not put this package in site-packages.zip
        if self.filt:
            return dict(
                packages=[self.module],
                filters=[self.filt[0](m, self.filt[1])],
            )
        return dict(
            packages=[self.module]
        )

# Any package that is zip-unsafe (uses __file__ ,... ) should be added here
# noinspection PyUnresolvedReferences
import py2app.recipes

for module in [
    'sklearn', 'mypackage',
]:
    filt = all_filts.get(module)
    setattr(py2app.recipes, module, ZipUnsafe(module, filt))
