I have the following folder structure:
.
├── include
│   └── ctset
│       ├── hashtable.h
│       └── set.h
└── src
    └── hashtable
        └── hashtable.c
In hashtable.c I include the header with #include "ctset/hashtable.h", but YCM keeps telling me that it doesn't know the types I defined in the header and use in the source.
My .ycm_extra_conf.py is almost the default one with some adjustments:
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c99',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c',
'-I', '.',
'-I', './src/',
'-I', './include/',
'-I', './ClangCompleter',
'-isystem', '../llvm/include',
'-isystem', '../llvm/tools/clang/include',
'-isystem', './tests/gmock/gtest',
'-isystem', './tests/gmock/gtest/include',
'-isystem', './tests/gmock',
'-isystem', './tests/gmock/include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  if not working_directory:
    return list( flags )
  new_flags = []
  make_next_absolute = False
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  for flag in flags:
    new_flag = flag

    if make_next_absolute:
      make_next_absolute = False
      if not flag.startswith( '/' ):
        new_flag = os.path.join( working_directory, flag )

    for path_flag in path_flags:
      if flag == path_flag:
        make_next_absolute = True
        break

    if flag.startswith( path_flag ):
      path = flag[ len( path_flag ): ]
      new_flag = path_flag + os.path.join( working_directory, path )
      break

    if new_flag:
      new_flags.append( new_flag )
  return new_flags
def IsHeaderFile( filename ):
  extension = os.path.splitext( filename )[ 1 ]
  return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile( replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    return None
  return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None

    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )

    # NOTE: This is just for YouCompleteMe; it's highly likely that your project
    # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
    # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
    try:
      final_flags.remove( '-stdlib=libc++' )
    except ValueError:
      pass
  else:
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )

  return {
    'flags': final_flags,
    'do_cache': True
  }
This assumes the .ycm_extra_conf.py is in the root of your project. There is a typo in MakeRelativePathsInFlagsAbsolute: the if statement directly after the inner for loop needs to be indented another level, since as written its break exits the outer loop instead of the inner one.
So change
    for path_flag in path_flags:
      if flag == path_flag:
        make_next_absolute = True
        break

    if flag.startswith( path_flag ):
      path = flag[ len( path_flag ): ]
      new_flag = path_flag + os.path.join( working_directory, path )
      break
To
    for path_flag in path_flags:
      if flag == path_flag:
        make_next_absolute = True
        break

      if flag.startswith( path_flag ):
        path = flag[ len( path_flag ): ]
        new_flag = path_flag + os.path.join( working_directory, path )
        break
The final .ycm_extra_conf.py should look like this.
# (unlicense header identical to the one at the top of the file above, omitted here)
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c99',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c',
'-I', '.',
'-I', './src/',
'-I', './include/',
'-I', './ClangCompleter',
'-isystem', '../llvm/include',
'-isystem', '../llvm/tools/clang/include',
'-isystem', './tests/gmock/gtest',
'-isystem', './tests/gmock/gtest/include',
'-isystem', './tests/gmock',
'-isystem', './tests/gmock/include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  if not working_directory:
    return list( flags )
  new_flags = []
  make_next_absolute = False
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  for flag in flags:
    new_flag = flag

    if make_next_absolute:
      make_next_absolute = False
      if not flag.startswith( '/' ):
        new_flag = os.path.join( working_directory, flag )

    for path_flag in path_flags:
      if flag == path_flag:
        make_next_absolute = True
        break

      if flag.startswith( path_flag ):
        path = flag[ len( path_flag ): ]
        new_flag = path_flag + os.path.join( working_directory, path )
        break

    if new_flag:
      new_flags.append( new_flag )
  return new_flags
def IsHeaderFile( filename ):
  extension = os.path.splitext( filename )[ 1 ]
  return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile( replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    return None
  return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None

    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )

    # NOTE: This is just for YouCompleteMe; it's highly likely that your project
    # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
    # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
    try:
      final_flags.remove( '-stdlib=libc++' )
    except ValueError:
      pass
  else:
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )

  return {
    'flags': final_flags,
    'do_cache': True
  }
A useful debugging command for YouCompleteMe is :YcmDebugInfo. It shows you the compilation flags, and here you would see that not all of the flags are present.
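As an aside: instead of maintaining the flags list by hand, you can point the script at a compilation database. A minimal sketch, assuming compile_commands.json is generated into ./build (for example by CMake with -DCMAKE_EXPORT_COMPILE_COMMANDS=ON):

import os

# Point YCM at the folder containing compile_commands.json; when this is
# set to an existing folder, the database is used instead of 'flags'.
compilation_database_folder = os.path.join(
  os.path.dirname( os.path.abspath( __file__ ) ), 'build' )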
I'm working on a Bazel rule (using version 5.2.0) that uses SWIG (version 4.0.1) to make a Python library from C++ code, adapted from a rule in the TensorFlow library. The problem I've run into is that, depending on the contents of ctx.file.source.path, the swig invocation might produce a necessary .h file. If it does, the rule below works great. If it doesn't, I get:
ERROR: BUILD:31:11: output 'foo_swig_h.h' was not created
ERROR: BUILD:31:11: SWIGing foo.i. failed: not all outputs were created or valid
If the h_out stuff is removed from _py_swig_gen_impl, the rule below works great when swig doesn't produce the .h file. But, if swig does produce one, bazel seems to ignore it and it isn't available for native.cc_binary to compile, resulting in gcc failing with a 'no such file or directory' error on an #include <foo_swig_cc.h> line in foo_swig_cc.cc.
(The presence or absence of the .h file in the output is determined by whether the .i file at ctx.file.source.path uses SWIG's "directors" feature.)
def _include_dirs(deps):
    return depset(transitive = [dep[CcInfo].compilation_context.includes for dep in deps]).to_list()

def _headers(deps):
    return depset(transitive = [dep[CcInfo].compilation_context.headers for dep in deps]).to_list()
# Bazel rules for building swig files.
def _py_swig_gen_impl(ctx):
    module_name = ctx.attr.module_name
    cc_out = ctx.actions.declare_file(module_name + "_swig_cc.cc")
    h_out = ctx.actions.declare_file(module_name + "_swig_h.h")
    py_out = ctx.actions.declare_file(module_name + ".py")

    args = ["-c++", "-python", "-py3"]
    args += ["-module", module_name]
    args += ["-I" + x for x in _include_dirs(ctx.attr.deps)]
    args += ["-I" + x.dirname for x in ctx.files.swig_includes]
    args += ["-o", cc_out.path]
    args += ["-outdir", py_out.dirname]
    args += ["-oh", h_out.path]
    args.append(ctx.file.source.path)

    outputs = [cc_out, h_out, py_out]
    ctx.actions.run(
        executable = "swig",
        arguments = args,
        mnemonic = "Swig",
        inputs = [ctx.file.source] + _headers(ctx.attr.deps) + ctx.files.swig_includes,
        outputs = outputs,
        progress_message = "SWIGing %{input}.",
    )
    return [DefaultInfo(files = depset(direct = [cc_out, py_out]))]
_py_swig_gen = rule(
    attrs = {
        "source": attr.label(
            mandatory = True,
            allow_single_file = True,
        ),
        "swig_includes": attr.label_list(
            allow_files = [".i"],
        ),
        "deps": attr.label_list(
            allow_files = True,
            providers = [CcInfo],
        ),
        "module_name": attr.string(mandatory = True),
    },
    implementation = _py_swig_gen_impl,
)
def py_wrap_cc(name, source, module_name = None, deps = [], copts = [], **kwargs):
    if module_name == None:
        module_name = name

    python_deps = [
        "@local_config_python//:python_headers",
        "@local_config_python//:python_lib",
    ]

    # First, invoke the _py_wrap_cc rule, which runs swig. This outputs:
    # `module_name.cc`, `module_name.py`, and, sometimes, `module_name.h` files.
    swig_rule_name = "swig_gen_" + name
    _py_swig_gen(
        name = swig_rule_name,
        source = source,
        swig_includes = ["//third_party/swig_rules:swig_includes"],
        deps = deps + python_deps,
        module_name = module_name,
    )

    # Next, we need to compile the `module_name.cc` and `module_name.h` files
    # from the previous rule. The `module_name.py` file already generated
    # expects there to be a `_module_name.so` file, so we name the cc_binary
    # rule this way to make sure that's the resulting file name.
    cc_lib_name = "_" + module_name + ".so"
    native.cc_binary(
        name = cc_lib_name,
        srcs = [":" + swig_rule_name],
        linkopts = ["-dynamic", "-L/usr/local/lib/"],
        linkshared = True,
        deps = deps + python_deps,
    )

    # Finally, package everything up as a python library that can be depended
    # on. Note that this rule uses the user-given `name`.
    native.py_library(
        name = name,
        srcs = [":" + swig_rule_name],
        srcs_version = "PY3",
        data = [":" + cc_lib_name],
        imports = ["./"],
    )
My question, broadly, is how I might best handle this with a single rule. I've tried adding a ctx.actions.write before the ctx.actions.run, thinking that I could generate a dummy .h file that would be overwritten if needed. That gives me:
ERROR: BUILD:41:11: for foo_swig_h.h, previous action: action 'Writing file foo_swig_h.h', attempted action: action 'SWIGing foo.i.'
My next idea is to remove the h_out stuff and then try to capture the h file for the cc_binary rule with some kind of glob invocation.
I've seen two approaches: add an attribute to the rule indicating whether the header is expected, or write a wrapper script that creates it unconditionally.
Adding an attribute means something like "has_h": attr.bool(), and then using that in _py_swig_gen_impl to make the ctx.actions.declare_file(module_name + "_swig_h.h") conditional, as in the sketch below.
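A hypothetical sketch of that approach (has_h is an invented attribute name; callers would set it to match whether the .i file uses directors):

def _py_swig_gen_impl(ctx):
    # Sketch only: gate the header output on a caller-supplied boolean.
    module_name = ctx.attr.module_name
    cc_out = ctx.actions.declare_file(module_name + "_swig_cc.cc")
    py_out = ctx.actions.declare_file(module_name + ".py")
    args = ["-c++", "-python", "-py3", "-module", module_name,
            "-o", cc_out.path, "-outdir", py_out.dirname]
    outputs = [cc_out, py_out]
    if ctx.attr.has_h:
        # Only declare (and request) the header when the .i file uses directors.
        h_out = ctx.actions.declare_file(module_name + "_swig_h.h")
        args += ["-oh", h_out.path]
        outputs.append(h_out)
    args.append(ctx.file.source.path)
    ctx.actions.run(
        executable = "swig",
        arguments = args,
        inputs = [ctx.file.source],
        outputs = outputs,
        mnemonic = "Swig",
    )
    return [DefaultInfo(files = depset(direct = outputs))]

_py_swig_gen = rule(
    implementation = _py_swig_gen_impl,
    attrs = {
        "source": attr.label(mandatory = True, allow_single_file = True),
        "module_name": attr.string(mandatory = True),
        "has_h": attr.bool(default = False),  # hypothetical attribute
    },
)

The downside is that every user of the rule has to know whether their .i file produces a header, which is why the wrapper script below is arguably cleaner.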
The wrapper script option means using something like this for the executable:
#!/bin/bash
set -e
touch the_path_of_the_header
exec swig "$@"
That will unconditionally create the output, and then swig will overwrite it if applicable. If it's not applicable, then passing around an empty header file in the Bazel rules should be harmless.
For posterity, this is what my _py_swig_gen_impl looks like after implementing @Brian's suggestion above:
def _py_swig_gen_impl(ctx):
    module_name = ctx.attr.module_name
    cc_out = ctx.actions.declare_file(module_name + "_swig_cc.cc")
    h_out = ctx.actions.declare_file(module_name + "_swig_h.h")
    py_out = ctx.actions.declare_file(module_name + ".py")

    include_dirs = _include_dirs(ctx.attr.deps)
    headers = _headers(ctx.attr.deps)

    args = ["-c++", "-python", "-py3"]
    args += ["-module", module_name]
    args += ["-I" + x for x in include_dirs]
    args += ["-I" + x.dirname for x in ctx.files.swig_includes]
    args += ["-o", cc_out.path]
    args += ["-outdir", py_out.dirname]
    args += ["-oh", h_out.path]
    args.append(ctx.file.source.path)

    outputs = [cc_out, h_out, py_out]

    # Depending on the contents of `ctx.file.source`, swig may or may not
    # output a .h file needed by subsequent rules. Bazel doesn't like optional
    # outputs, so instead of invoking swig directly we're going to make a
    # lightweight executable script that first `touch`es the .h file that may
    # get generated, and then execute that. This means we may be propagating
    # an empty .h file around as a "dependency" sometimes, but that's okay.
    swig_script_file = ctx.actions.declare_file("swig_exec.sh")
    ctx.actions.write(
        output = swig_script_file,
        is_executable = True,
        content = "#!/bin/bash\n\nset -e\ntouch " + h_out.path + "\nexec swig \"$@\"",
    )
    ctx.actions.run(
        executable = swig_script_file,
        arguments = args,
        mnemonic = "Swig",
        inputs = [ctx.file.source] + headers + ctx.files.swig_includes,
        outputs = outputs,
        progress_message = "SWIGing %{input}.",
    )
    return [
        DefaultInfo(files = depset(direct = outputs)),
    ]
The ctx.actions.write generates the suggested bash script:
#!/bin/bash

set -e
touch %{h_out.path}
exec swig "$@"
This guarantees that the expected h_out will always be output by ctx.actions.run, whether or not swig generates it.
I have a monorepo that contains a set of Python AWS Lambdas, and I'm using Bazel for building and packaging them. I'm now trying to use Bazel to create a zip file that follows the expected AWS Lambda packaging conventions and that I can upload to Lambda. What's the best way to do this with Bazel?
Below are a few different things I've tried thus far:
Attempt 1: py_binary
BUILD.bazel
py_binary(
    name = "main_binary",
    srcs = glob(["*.py"]),
    main = "main.py",
    visibility = ["//appcode/api/transaction_details:__subpackages__"],
    deps = [
        requirement("Faker"),
    ],
)
Problem:
This generates the following:
main_binary (python executable)
main_binary.runfiles
main_binary.runfiles_manifest
Lambda expects the handler to be in the format lambda_function.lambda_handler. Since main_binary is an executable rather than a Python file, it doesn't expose the actual handler method, and the Lambda blows up because it can't find it. I tried updating the handler configuration to simply point at main_binary, but that blows up too, because Lambda expects the two-part module.function form (i.e. lambda_function.lambda_handler).
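For reference, a minimal sketch of the module Lambda expects to import for a handler string of lambda_function.lambda_handler (the file name and return value here are illustrative):

# lambda_function.py
def lambda_handler(event, context):
    # Lambda imports this module and calls this two-argument function.
    return {"statusCode": 200, "body": "ok"}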
Attempt 2: py_library + pkg_zip
BUILD.bazel
py_library(
    name = "main",
    srcs = glob(["*.py"]),
    visibility = ["//appcode/api/transaction_details:__subpackages__"],
    deps = [
        requirement("Faker"),
    ],
)

pkg_zip(
    name = "main_zip",
    srcs = ["//appcode/api/transaction_details/src:main"],
)
Problem:
This generates a zip file with:
main.py
__init__.py
The zip file now includes the main.py but none of its runtime dependencies. Thus the lambda blows up because it can't find Faker.
Other Attempts:
I've also tried using the --build_python_zip flag as well as @bazel_tools//tools/zip:zipper with a generic rule, but they both lead to similar outcomes as the two previous attempts.
We use @bazel_tools//tools/zip:zipper with a custom rule. We also pull serverless in using rules_nodejs and run it through Bazel, which causes the package building to happen prior to running sls deploy.
We use pip_parse from rules_python. I'm not sure whether the _short_path function below will work with pip_install or other mechanisms.
File filtering is supported, although it's awkward. Ideally the zip generation would be handled by a separate binary (i.e., a Python script) which would allow filtering using regular expressions/globs/etc. Bazel doesn't support regular expressions in Starlark, so we use our own thing.
I've included an excerpt:
lambda.bzl
"""
Support for serverless deployments.
"""
def contains(pattern):
    return "contains:" + pattern

def startswith(pattern):
    return "startswith:" + pattern

def endswith(pattern):
    return "endswith:" + pattern

def _is_ignored(path, patterns):
    for p in patterns:
        if p.startswith("contains:"):
            if p[len("contains:"):] in path:
                return True
        elif p.startswith("startswith:"):
            if path.startswith(p[len("startswith:"):]):
                return True
        elif p.startswith("endswith:"):
            if path.endswith(p[len("endswith:"):]):
                return True
        else:
            fail("Invalid pattern: " + p)
    return False

def _short_path(file_):
    # Remove prefixes for external and generated files.
    # E.g.,
    #   ../py_deps_pypi__pydantic/pydantic/__init__.py -> pydantic/__init__.py
    short_path = file_.short_path
    if short_path.startswith("../"):
        second_slash = short_path.index("/", 3)
        short_path = short_path[second_slash + 1:]
    return short_path
def _py_lambda_zip_impl(ctx):
    deps = ctx.attr.target[DefaultInfo].default_runfiles.files
    f = ctx.outputs.output

    args = []
    for dep in deps.to_list():
        short_path = _short_path(dep)

        # Skip ignored patterns
        if _is_ignored(short_path, ctx.attr.ignore):
            continue
        args.append(short_path + "=" + dep.path)

    ctx.actions.run(
        outputs = [f],
        inputs = deps,
        executable = ctx.executable._zipper,
        arguments = ["cC", f.path] + args,
        progress_message = "Creating archive...",
        mnemonic = "archiver",
    )

    out = depset(direct = [f])
    return [
        DefaultInfo(
            files = out,
        ),
        OutputGroupInfo(
            all_files = out,
        ),
    ]
_py_lambda_zip = rule(
    implementation = _py_lambda_zip_impl,
    attrs = {
        "target": attr.label(),
        "ignore": attr.string_list(),
        "_zipper": attr.label(
            default = Label("@bazel_tools//tools/zip:zipper"),
            cfg = "host",
            executable = True,
        ),
        "output": attr.output(),
    },
    executable = False,
    test = False,
)
def py_lambda_zip(name, target, ignore, **kwargs):
    _py_lambda_zip(
        name = name,
        target = target,
        ignore = ignore,
        output = name + ".zip",
        **kwargs
    )
BUILD.bazel
load("#npm_serverless//serverless:index.bzl", "serverless")
load(":lambda.bzl", "contains", "endswith", "py_lambda_zip", "startswith")
py_binary(
    name = "my_lambda_app",
    ...
)
py_lambda_zip(
    name = "lambda_archive",
    ignore = [
        contains("/__pycache__/"),
        endswith(".pyc"),
        endswith(".pyo"),

        # Ignore boto since it's provided by Lambda.
        startswith("boto3/"),
        startswith("botocore/"),

        # With the move to hermetic toolchains, the zip gets a lib/ directory
        # containing the python runtime. We don't need that.
        startswith("lib/"),
    ],
    target = ":my_lambda_app",

    # Only allow building on linux, since we don't want to upload a lambda zip
    # file with e.g. macos compiled binaries.
    target_compatible_with = [
        "@platforms//os:linux",
    ],
)
# The sls command requires that serverless.yml be in its working directory, and that the yaml file
# NOT be a symlink. So this target builds a directory containing a copy of serverless.yml, and also
# symlinks the generated lambda_archive.zip in the same directory.
#
# It also generates a chdir.js script that we instruct node to execute to change to the proper working directory.
genrule(
    name = "sls_files",
    srcs = [
        "lambda_archive.zip",
        "serverless.yml",
    ],
    outs = [
        "sls_files/lambda_archive.zip",
        "sls_files/serverless.yml",
        "sls_files/chdir.js",
    ],
    cmd = """
        mkdir -p $(@D)/sls_files
        cp $(location serverless.yml) $(@D)/sls_files/serverless.yml
        cp -P $(location lambda_archive.zip) $(@D)/sls_files/lambda_archive.zip
        echo "const fs = require('fs');" \
             "const path = require('path');" \
             "process.chdir(path.dirname(fs.realpathSync(__filename)));" > $(@D)/sls_files/chdir.js
    """,
)
# Usage:
# bazel run //:sls -- deploy <more args>
serverless(
    name = "sls",
    args = ["""--node_options=--require=./$(location sls_files/chdir.js)"""],
    data = [
        "sls_files/chdir.js",
        "sls_files/serverless.yml",
        "sls_files/lambda_archive.zip",
    ],
)
serverless.yml
service: my-app

package:
  artifact: lambda_archive.zip

# ... other config ...
Below are the changes I made to the previous answer to generate the lambda zip. Thanks @jvolkman for the original suggestion.
project/BUILD.bazel: Added rule to generate requirements_lock.txt from project/requirements.txt
load("#rules_python//python:pip.bzl", "compile_pip_requirements")
compile_pip_requirements(
name = "requirements",
extra_args = ["--allow-unsafe"],
requirements_in = "requirements.txt",
requirements_txt = "requirements_lock.txt",
)
project/WORKSPACE.bazel: swap pip_install with pip_parse
workspace(name = "mdc-eligibility")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "rules_python",
    sha256 = "9fcf91dbcc31fde6d1edb15f117246d912c33c36f44cf681976bd886538deba6",
    strip_prefix = "rules_python-0.8.0",
    url = "https://github.com/bazelbuild/rules_python/archive/refs/tags/0.8.0.tar.gz",
)

load("@rules_python//python:repositories.bzl", "python_register_toolchains")

python_register_toolchains(
    name = "python3_9",
    python_version = "3.9",
)

load("@rules_python//python:pip.bzl", "pip_parse")
load("@python3_9//:defs.bzl", "interpreter")

pip_parse(
    name = "mndc-eligibility-deps",
    requirements_lock = "//:requirements_lock.txt",
    python_interpreter_target = interpreter,
    quiet = False,
)

load("@mndc-eligibility-deps//:requirements.bzl", "install_deps")

install_deps()
project/build_rules/lambda_packaging/lambda.bzl: Modified the custom rule provided by @jvolkman to include source code in the resulting zip.
def contains(pattern):
    return "contains:" + pattern

def startswith(pattern):
    return "startswith:" + pattern

def endswith(pattern):
    return "endswith:" + pattern

def _is_ignored(path, patterns):
    for p in patterns:
        if p.startswith("contains:"):
            if p[len("contains:"):] in path:
                return True
        elif p.startswith("startswith:"):
            if path.startswith(p[len("startswith:"):]):
                return True
        elif p.startswith("endswith:"):
            if path.endswith(p[len("endswith:"):]):
                return True
        else:
            fail("Invalid pattern: " + p)
    return False

def _short_path(file_):
    # Remove prefixes for external and generated files.
    # E.g.,
    #   ../py_deps_pypi__pydantic/pydantic/__init__.py -> pydantic/__init__.py
    short_path = file_.short_path
    if short_path.startswith("../"):
        second_slash = short_path.index("/", 3)
        short_path = short_path[second_slash + 1:]
    return short_path
# steven chambers
def _py_lambda_zip_impl(ctx):
    deps = ctx.attr.target[DefaultInfo].default_runfiles.files
    f = ctx.outputs.output

    args = []
    for dep in deps.to_list():
        short_path = _short_path(dep)

        # Skip ignored patterns
        if _is_ignored(short_path, ctx.attr.ignore):
            continue
        args.append(short_path + "=" + dep.path)

    # MODIFICATION: Added source files to the map of files to zip
    source_files = ctx.attr.target[DefaultInfo].files
    for source_file in source_files.to_list():
        args.append(source_file.basename + "=" + source_file.path)

    ctx.actions.run(
        outputs = [f],
        inputs = deps,
        executable = ctx.executable._zipper,
        arguments = ["cC", f.path] + args,
        progress_message = "Creating archive...",
        mnemonic = "archiver",
    )

    out = depset(direct = [f])
    return [
        DefaultInfo(
            files = out,
        ),
        OutputGroupInfo(
            all_files = out,
        ),
    ]
_py_lambda_zip = rule(
    implementation = _py_lambda_zip_impl,
    attrs = {
        "target": attr.label(),
        "ignore": attr.string_list(),
        "_zipper": attr.label(
            default = Label("@bazel_tools//tools/zip:zipper"),
            cfg = "host",
            executable = True,
        ),
        "output": attr.output(),
    },
    executable = False,
    test = False,
)
def py_lambda_zip(name, target, ignore, **kwargs):
    _py_lambda_zip(
        name = name,
        target = target,
        ignore = ignore,
        output = name + ".zip",
        **kwargs
    )
project/appcode/api/transaction_details/src/BUILD.bazel: Used custom py_lambda_zip rule to zip up py_library
load("#mndc-eligibility-deps//:requirements.bzl", "requirement")
load("#python3_9//:defs.bzl", "interpreter")
load("//build_rules/lambda_packaging:lambda.bzl", "contains", "endswith", "py_lambda_zip", "startswith")
py_library(
name = "main",
srcs = glob(["*.py"]),
visibility = ["//appcode/api/transaction_details:__subpackages__"],
deps = [
requirement("Faker"),
],
)
py_lambda_zip(
name = "lambda_archive",
ignore = [
contains("/__pycache__/"),
endswith(".pyc"),
endswith(".pyo"),
# Ignore boto since it's provided by Lambda.
startswith("boto3/"),
startswith("botocore/"),
# With the move to hermetic toolchains, the zip gets a lib/ directory containing the
# python runtime. We don't need that.
startswith("lib/"),
],
target = ":main",
)
I am working on a task to find the boot drive among the list of connected SSDs. I am trying to get it by matching the device path ("\\.\PHYSICALDRIVE0") of drives with the logical disk partition using WMI.
Here is my code:
import wmi

w = wmi.WMI()
for physical_disk in w.Win32_DiskDrive():
    # Raw string, since the actual DeviceID contains literal backslashes.
    if physical_disk.DeviceID == r"\\.\PHYSICALDRIVE0":
        for partition in physical_disk.associators("Win32_DiskDriveToDiskPartition"):
            for logical in partition.associators("Win32_LogicalDiskToPartition"):
                if logical.caption == 'C:':
                    print "It's a boot drive"
                    break
I feel that just checking whether we are looking at the C: drive is not quite enough; this can go wrong. Are we assured that 'C:' is always the boot drive? It probably is most of the time, but can we really say it always is?
Can we have a more robust way to ensure the drive we are looking at is the boot drive?
Can we have a more robust way to ensure the drive we are looking at is a boot drive? We could enumerate all disks/partitions/volumes as follows:
import wmi

w = wmi.WMI()

print ( "\r\nnamespace='root/CIMV2' -> Win32_DiskDrive" )
csvlike='{0:>2}`{1:18}`{2:21}`{3:>5}`{4:>5}`{5:1}`{6:1}`{7}'
# print header
print ( csvlike.format( 'dl',
                        'diskDeviceID',
                        'partitionDeviceID',
                        'BootA',
                        'Activ',
                        'd',
                        'p',
                        'diskSerialNumber'))
for w32_disk in w.Win32_DiskDrive():
    auxpart = w32_disk.associators("Win32_DiskDriveToDiskPartition")
    if len( auxpart) == 0:
        print ( csvlike.format( '',w32_disk.DeviceID,'','','',
                                w32_disk.Index,'',w32_disk.SerialNumber))
    else:
        for w32_part in auxpart:
            auxlogic = w32_part.associators("Win32_LogicalDiskToPartition")
            if len(auxlogic) == 0:
                print ( csvlike.format( '', w32_disk.DeviceID,
                                        w32_part.DeviceID,
                                        str(w32_part.Bootable) if w32_part.Bootable else '',
                                        str(w32_part.BootPartition) if w32_part.BootPartition else '',
                                        str(w32_part.DiskIndex), # == w32_disk.Index
                                        str(w32_part.Index),
                                        w32_disk.SerialNumber))
            else:
                for w32_ldsk in auxlogic:
                    print ( csvlike.format( w32_ldsk.caption,
                                            w32_disk.DeviceID,
                                            w32_part.DeviceID,
                                            str(w32_part.Bootable) if w32_part.Bootable else '',
                                            str(w32_part.BootPartition) if w32_part.BootPartition else '',
                                            str(w32_part.DiskIndex), # == w32_disk.Index == party.DiskNumber
                                            str(w32_part.Index), # == party.PartitionNumber -1
                                            w32_disk.SerialNumber))
print ( "\r\nnamespace='root/Microsoft/Windows/Storage' -> MSFT_Disk" )
s = wmi.WMI(namespace='root/Microsoft/Windows/Storage')
csvheader='{0:2}`{1:>5}`{2:>5}`{3:>5}`{4:1}`{5:1}`{6:38}`{7}'
print ( csvheader.format( 'dl', 'Syst', 'Boot', 'Activ', 'd', 'p',
'volumeUniqueId (truncated to UUID)', 'SerialNumber' ) )
for d in s.MSFT_Disk():
dps=d.associators("MSFT_DiskToPartition")
for party in dps:
auxvolumy = party.associators("MSFT_PartitionToVolume")
if len( auxvolumy) == 0:
print ( csvheader.format(
'',
str( party.IsSystem ) if party.IsSystem else '',
str( party.IsBoot ) if party.IsBoot else '',
str( party.IsActive ) if ( d.PartitionStyle==1
and party.IsActive ) else '' ,
str( party.DiskNumber ), # == w32_disk.Index
str( party.PartitionNumber ), # == w32_part.Index +1
'',
d.SerialNumber))
else:
for volumy in auxvolumy:
print ( csvheader.format(
chr( volumy.DriveLetter ) if volumy.DriveLetter else '',
str( party.IsSystem ) if party.IsSystem else '',
str( party.IsBoot ) if party.IsBoot else '',
str( party.IsActive ) if ( d.PartitionStyle==1
and party.IsActive ) else '' ,
str( party.DiskNumber ), # == w32_disk.Index
str( party.PartitionNumber ), # == w32_part.Index +1
volumy.UniqueId[10:][:38], # .replace('\\?\Volume','')
d.SerialNumber))
print ( "\r\nnamespace='root/CIMV2' -> Win32_Volume" )
csvheaderwv='{0:2}`{1:>5}`{2:>5}`{3:>5}`{4}'
# print header
print ( csvheaderwv.format( 'dl',
'Syst',
'Boot',
'PageF',
'volumeDeviceID'))
volumes = w.Win32_Volume()
for volume in volumes:
print( csvheaderwv.format(
volume.DriveLetter if volume.DriveLetter else '',
str( volume.SystemVolume ) if volume.SystemVolume else '',
str( volume.BootVolume ) if volume.BootVolume else '',
str( volume.PagefilePresent ) if volume.PagefilePresent else '',
volume.DeviceID ))
Are we assured that C: is always the boot drive? Pay attention to the read-only environment variables SystemDrive and SystemRoot:
… %SystemRoot% is a built-in variable (along with a small handful of others like %SystemDrive%). That is, it is not actually defined in the environment variable store at HKLM\SYSTEM\CurrentControlSet\Control\Session Manager\Environment in the registry… In addition, %SystemRoot% (and %SystemDrive%) are set during the installation process (when you choose the destination drive)…
Both of the above variables are set during Windows boot, and you can see their origin by running the following from an elevated command prompt (see Boot Configuration Data (BCD) data stores):
bcdedit /enum ACTIVE
Various sources (with basic terminology on active/system/boot partition):
Win32 Provider: Win32_DiskPartition class
Bootable property indicates whether the computer can be booted from this partition.
If TRUE, the disk partition is labeled as bootable. This does not mean that an operating system is loaded on the partition.
BootPartition property. Partition is the active partition. The operating system uses the active partition when booting from a hard disk.
Storage Volume Provider (deprecated?): Win32_Volume class (no direct relationship exists between Win32_Volume and Win32_DiskDrive.)
SystemVolume property indicates whether the volume contains the hardware specific files required to start the operating system.
BootVolume property indicates whether the volume contains the currently running operating system files.
Storage Management API Classes (deprecated?): MSFT_Partition class
IsActive property signifies whether or not the partition is active and can be booted. This property is only relevant for MBR disks.
If TRUE, the partition is active and can be used to start the system. This property is only valid when the disk's PartitionStyle property is MBR (i.e. MSFT_Disk.PartitionStyle==1) and will be NULL for all other partition styles.
IsBoot property. If TRUE, the partition is the current boot partition.
The boot partition is the partition that holds the Windows installation.
IsSystem property. If TRUE, this is a system partition.
The system partition contains the files that your Windows needs to start (the Boot Configuration Data or BCD). The reserved partitions (or the system partitions) do not have a letter assigned to them.
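Putting this together, a minimal sketch that picks out the disk holding the running OS (assuming the root/Microsoft/Windows/Storage namespace is available, i.e. Windows 8 / Server 2012 or later):

import wmi

# The disk whose partition reports IsBoot == True holds the currently
# running operating system, regardless of its drive letter.
s = wmi.WMI(namespace='root/Microsoft/Windows/Storage')
for disk in s.MSFT_Disk():
    for part in disk.associators("MSFT_DiskToPartition"):
        if part.IsBoot:
            print("Boot disk number: %s, serial: %s"
                  % (part.DiskNumber, disk.SerialNumber))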
I know only the very basics of Python. I have this project for my Information Storage and Management subject, and I have to give an explanation of the following code.
I searched every command used in this script but could not find most of them. The code can be found here:
import glob
import json
import os
import re
import string
import sys

from oslo.config import cfg

from nova import context
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import models
from nova import utils

CONF = cfg.CONF
def usage():
    print("""
Usage:
    python %s --config-file /etc/nova/nova.conf

Note: This script intends to clean up the iSCSI multipath faulty devices
hosted by VNX Block Storage.""" % sys.argv[0])
class FaultyDevicesCleaner(object):
    def __init__(self):
        # Get host name of Nova computer node.
        self.host_name = self._get_host_name()

    def _get_host_name(self):
        (out, err) = utils.execute('hostname')
        return out

    def _get_ncpu_emc_target_info_list(self):
        target_info_list = []
        # Find the targets used by VM on the compute node
        bdms = db_api.model_query(context.get_admin_context(),
                                  models.BlockDeviceMapping,
                                  session = db_api.get_session())
        bdms = bdms.filter(models.BlockDeviceMapping.connection_info != None)
        bdms = bdms.join(models.BlockDeviceMapping.instance).filter_by(
            host=string.strip(self.host_name))

        for bdm in bdms:
            conn_info = json.loads(bdm.connection_info)
            if 'data' in conn_info:
                if 'target_iqns' in conn_info['data']:
                    target_iqns = conn_info['data']['target_iqns']
                    target_luns = conn_info['data']['target_luns']
                elif 'target_iqn' in conn_info['data']:
                    target_iqns = [conn_info['data']['target_iqn']]
                    target_luns = [conn_info['data']['target_lun']]
                else:
                    target_iqns = []
                    target_luns = []

                for target_iqn, target_lun in zip(target_iqns, target_luns):
                    if 'com.emc' in target_iqn:
                        target_info = {
                            'target_iqn': target_iqn,
                            'target_lun': target_lun,
                        }
                        target_info_list.append(target_info)

        return target_info_list

    def _get_ncpu_emc_target_info_set(self):
        target_info_set = set()
        for target_info in self._get_ncpu_emc_target_info_list():
            target_iqn = target_info['target_iqn']
            target_lun = target_info['target_lun']
            target_info_key = "%s-%s" % (target_iqn.rsplit('.', 1)[0],
                                         target_lun)
            # target_iqn=iqn.1992-04.com.emc:cx.fnm00130200235.a7
            # target_lun=203
            # target_info_key=iqn.1992-04.com.emc:cx.fnm00130200235-203
            target_info_set.add(target_info_key)
        return target_info_set
    def _get_target_info_key(self, path):
        temp_tuple = path.split('-lun-', 1)
        target_lun = temp_tuple[1]
        target_iqn = temp_tuple[0].split('-iscsi-')[1]
        target_info_key = "%s-%s" % (target_iqn.rsplit('.', 1)[0], target_lun)
        # path=/dev/disk/by-path/ip-192.168.3.52:3260-iscsi-iqn.1992-
        # 04.com.emc:cx.fnm00130200235.a7-lun-203
        # target_info_key=iqn.1992-04.com.emc:cx.fnm00130200235-203
        return target_info_key

    def _get_non_ncpu_target_info_map(self):
        # Group the paths by target_info_key
        ncpu_target_info_set = self._get_ncpu_emc_target_info_set()
        device_paths = self._get_emc_device_paths()
        target_info_map = {}
        for path in device_paths:
            target_info_key = self._get_target_info_key(path)
            if target_info_key in ncpu_target_info_set:
                continue
            if target_info_key not in target_info_map:
                target_info_map[target_info_key] = []
            target_info_map[target_info_key].append(path)
        return target_info_map

    def _all_related_paths_faulty(self, paths):
        for path in paths:
            real_path = os.path.realpath(path)
            out, err = self._run_multipath(['-ll', real_path],
                                           run_as_root=True,
                                           check_exit_code=False)
            if 'active ready' in out:
                # At least one path is still working
                return False
        return True

    def _delete_all_related_paths(self, paths):
        for path in paths:
            real_path = os.path.realpath(path)
            device_name = os.path.basename(real_path)
            device_delete = '/sys/block/%s/device/delete' % device_name
            if os.path.exists(device_delete):
                # Copy '1' from stdin to the device delete control file
                utils.execute('cp', '/dev/stdin', device_delete,
                              process_input='1', run_as_root=True)
            else:
                print "Unable to delete %s" % real_path
    def _cleanup_faulty_paths(self):
        non_ncpu_target_info_map = self._get_non_ncpu_target_info_map()
        for paths in non_ncpu_target_info_map.itervalues():
            if self._all_related_paths_faulty(paths):
                self._delete_all_related_paths(paths)

    def _cleanup_faulty_dm_devices(self):
        out_ll, err_ll = self._run_multipath(['-ll'],
                                             run_as_root=True,
                                             check_exit_code=False)

        # Pattern to split the dm device contents as follows
        # Each section starts with a WWN and ends with a line with
        # " `-" as the prefix
        #
        # 3600601601bd032007c097518e96ae411 dm-2 ,
        # size=1.0G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        # `-+- policy='round-robin 0' prio=0 status=active
        # `- #:#:#:# - #:# active faulty running
        # 36006016020d03200bb93e048f733e411 dm-0 DGC,VRAID
        # size=1.0G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
        # |-+- policy='round-robin 0' prio=130 status=active
        # | |- 3:0:0:2 sdd 8:48 active ready running
        # | `- 5:0:0:2 sdj 8:144 active ready running
        # `-+- policy='round-robin 0' prio=10 status=enabled
        # |- 4:0:0:2 sdg 8:96 active ready running
        # `- 6:0:0:2 sdm 8:192 active ready running
        dm_pat = r'([0-9a-fA-F]{30,})[^\n]+,[^\n]*\n[^,]* `-[^\n]*'
        dm_m = re.compile(dm_pat)
        path_pat = r'- \d+:\d+:\d+:\d+ '
        path_m = re.compile(path_pat)

        for m in dm_m.finditer(out_ll):
            if not path_m.search(m.group(0)):
                # Only #:#:#:# remain in the output, all the paths of the dm
                # device should have been deleted. No need to keep the device
                out_f, err_f = self._run_multipath(['-f', m.group(1)],
                                                   run_as_root=True,
                                                   check_exit_code=False)

    def cleanup(self):
        self._cleanup_faulty_paths()
        # Make sure the following configuration is in /etc/multipath.conf
        # Otherwise, there may be "map in use" failure when deleting
        # dm device
        #
        # defaults {
        #     flush_on_last_del yes
        # }
        #
        self._cleanup_faulty_dm_devices()
    def _get_emc_device_paths(self):
        # Find all the EMC iSCSI devices under /dev/disk/by-path
        # except LUNZ and partition reference
        pattern = '/dev/disk/by-path/ip-*-iscsi-iqn*com.emc*-lun-*'
        device_paths = [path for path in glob.glob(pattern)
                        if ('lun-0' not in path and '-part' not in path)]
        return device_paths

    def _run_multipath(self, multipath_command, **kwargs):
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('multipath',
                                   *multipath_command,
                                   run_as_root=True,
                                   check_exit_code=check_exit_code)
        print ("multipath %(command)s: stdout=%(out)s stderr=%(err)s"
               % {'command': multipath_command, 'out': out, 'err': err})
        return out, err
if __name__ == "__main__":
    if len(sys.argv) != 3 or sys.argv[1] != '--config-file':
        usage()
        exit(1)

    out, err = utils.execute('which', 'multipath', check_exit_code=False)
    if 'multipath' not in out:
        print('Info: Multipath tools not installed. No cleanup need be done.')
        exit(0)

    multipath_flush_on_last_del = False
    multipath_conf_path = "/etc/multipath.conf"
    if os.path.exists(multipath_conf_path):
        flush_on_last_del_yes = re.compile(r'\s*flush_on_last_del.*yes')
        for line in open(multipath_conf_path, "r"):
            if flush_on_last_del_yes.match(line):
                multipath_flush_on_last_del = True
                break
    if not multipath_flush_on_last_del:
        print("Warning: 'flush_on_last_del yes' is not seen in"
              " /etc/multipath.conf."
              " 'map in use' failure may show up during cleanup.")

    CONF(sys.argv[1:])
    # connect_volume and disconnect_volume in nova/virt/libvirt/volume.py
    # need be adjusted to take the same 'external=True' lock for
    # synchronization
    @utils.synchronized('connect_volume', external=True)
    def do_cleanup():
        cleaner = FaultyDevicesCleaner()
        cleaner.cleanup()

    do_cleanup()
https://wiki.python.org/moin/BeginnersGuide/Programmers
http://www.astro.ufl.edu/~warner/prog/python.html
This looks like Python 2 (note the bare print statements and itervalues()), so go for Python 2 tutorials.
Try downloading an IDE; Eric5 is good, by the way.
Try executing this file once.
Learn indentation
and dynamic variable declaration.
Do not jump into the ocean first; try the swimming pool :)
Also try to learn method declaration.
Python is a bit different from Java.
I will give you a hint: it looks like system calls are also made to execute OS commands, so try looking at subprocess and how its output is directed to an output stream and an error stream.
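For instance, a minimal sketch of the kind of thing utils.execute does under the hood (the command here is just an example):

import subprocess

# Run an OS command and capture its output and error streams separately.
proc = subprocess.Popen(["hostname"],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
out, err = proc.communicate()
print("stdout: %r" % out)
print("stderr: %r" % err)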
I'm experimenting with converting a makefile from another build system to waf.
I'm trying to direct waf to the directory containing the necessary DLLs.
However, when running waf configure:
Checking for library libiconv2 : not found
It can't find the required library.
Directory structure:
project/
|   build/
|   inc/
|   |   XGetopt.h
|   |   common.h
|   |   define.h
|   |   libpst.h
|   |   libstrfunc.h
|   |   lzfu.h
|   |   msg.h
|   |   timeconv.h
|   |   vbuf.h
|   libs/
|   |   libiconv2.dll
|   |   regex2.dll
|   src/
|   |   XGetopt.c
|   |   debug.c
|   |   dumpblocks.c
|   |   getidblock.c
|   |   libpst.c
|   |   libstrfunc.c
|   |   lspst.c
|   |   lzfu.c
|   |   readpst.c
|   |   timeconv.c
|   |   vbuf.c
|   |   deltasearch.cpp
|   |   msg.cpp
|   |   nick2ldif.cpp
|   |   pst2dii.cpp
|   |   pst2ldif.cpp
|   |   wscript_build
|   waf-1.7.10
|   wscript
top-level wscript:
#! /usr/bin/env python

VERSION = "0.1"
APPNAME = "readpst"
top = "."          # The topmost directory of the waf project
out = "build/temp" # The build directory of the waf project

import os

from waflib import Build
from waflib import ConfigSet
from waflib import Logs

# Variant memory variables
var_path = out + "/variant.txt" # The variant memory file path
default_variant = "debug"       # The default if no variant is stored
stored_variant = ""
def options(opt):
    '''
    A script hook function that defines additional switch options for the build.
    '''
    opt.load("compiler_cxx")
def configure(cfg):
    '''
    A script hook function that configures the build environment.
    '''
    cfg.load("compiler_cxx")
    cfg.find_program("strip")
    cfg.env.PREFIX = "."
    cfg.env.DEFINES = ["WAF=1"]
    cfg.env.FEATURES = [] # Additional features
    cfg.env.LIBPATH = [os.path.join(os.getcwd(), "libs")]
    print cfg.env.LIBPATH
    cfg.define("VERSION", VERSION)
    base_env = cfg.env

    # Compiler checks
    cfg.check_large_file(mandatory = False)
    cfg.check_inline()

    # Check for the existence and function of specific headers
    cfg.check(header_name = "stdint.h")
    cfg.check(header_name = "stdio.h")
    cfg.check(compiler="cxx", uselib_store="LIBICONV2", mandatory=True, lib="libiconv2")

    # Define the debug build environment
    cfg.setenv("debug", env = base_env.derive())
    cfg.env.CFLAGS = ["-g"]
    cfg.define("DEBUG", 1)
    cfg.write_config_header("/debug/inc/config.h")

    # Define the release build environment
    cfg.setenv("release", env = base_env.derive())
    cfg.env.CFLAGS = ["-O2"]
    cfg.env.FEATURES = ["strip"]
    cfg.define("RELEASE", 1)
    cfg.write_config_header("/release/inc/config.h")
def pre(ctx):
    '''
    A callback for before build task start.
    '''
    print "Starting %sbuild" % (("%s " % ctx.variant) if(ctx.variant) else "")
    if ctx.cmd == "install":
        print "Installing"

def post(ctx):
    '''
    A callback for after build task finish.
    '''
    global var_path
    print "Finished %sbuild" % (("%s " % ctx.variant) if(ctx.variant) else "")
    env = ConfigSet.ConfigSet()
    env.stored_variant = ctx.variant
    env.store(var_path)
def build(bld):
    '''
    A script hook function that specifies the build behaviour.
    '''
    bld.add_pre_fun(pre)
    bld.add_post_fun(post)
    bld.recurse\
    (
        [
            "src"
        ]
    )
    if bld.cmd != "clean":
        bld.logger = Logs.make_logger("test.log", "build") # just to get a clean output
def dist(ctx):
    '''
    A script hook function that specifies the packaging behaviour.
    '''
    ctx.base_name = "_".join([APPNAME, VERSION])
    ctx.algo = "zip"
    file_ex_patterns = \
    [
        out + "/**",
        "**/.waf-1*",
        "**/*~",
        "**/*.pyc",
        "**/*.swp",
        "**/.lock-w*"
    ]
    file_in_patterns = \
    [
        "**/wscript*",
        "**/*.h",
        "**/*.c",
        "**/*.cpp",
        "**/*.txt",
    ]
    ctx.files = ctx.path.ant_glob(incl = file_in_patterns, excl = file_ex_patterns)
def set_variant():
    '''
    A function that facilitates dynamic changing of the Context classes' variant member.
    It retrieves the stored variant if existent, otherwise the default.
    '''
    global default_variant
    global stored_variant
    global var_path
    env = ConfigSet.ConfigSet()
    try:
        env.load(var_path)
    except:
        stored_variant = default_variant
    else:
        if(env.stored_variant):
            stored_variant = env.stored_variant
            print "Resuming %s variant" % stored_variant
        else:
            stored_variant = default_variant

def get_variant():
    '''
    A function that facilitates dynamic changing of the Context classes' variant member.
    It sets the variant, if undefined, and returns it.
    '''
    global stored_variant
    if(not stored_variant):
        set_variant()
    return stored_variant
class release(Build.BuildContext):
    '''
    A class that provides the release build.
    '''
    cmd = "release"
    variant = "release"

class debug(Build.BuildContext):
    '''
    A class that provides the debug build.
    '''
    cmd = "debug"
    variant = "debug"

class default_build(Build.BuildContext):
    '''
    A class that provides the default variant build.
    This is set to debug.
    '''
    variant = "debug"

class default_clean(Build.CleanContext):
    '''
    A class that provides the stored variant build clean.
    '''
    @property
    def variant(self):
        return get_variant()

class default_install(Build.InstallContext):
    '''
    A class that provides the stored variant build install.
    '''
    @property
    def variant(self):
        return get_variant()

class default_uninstall(Build.UninstallContext):
    '''
    A class that provides the stored variant build uninstall.
    '''
    @property
    def variant(self):
        return get_variant()
# Additional features
from waflib import Task, TaskGen

class strip(Task.Task):
    run_str = "${STRIP} ${SRC}"
    color = "BLUE"

@TaskGen.feature("strip")
@TaskGen.after("apply_link")
def add_strip_task(self):
    try:
        link_task = self.link_task
    except:
        return
    tsk = self.create_task("strip", self.link_task.outputs[0])
You are just lacking the use variable setup; this has to be fixed in your child wscripts, i.e.:
bld.program(...,
            libpath = ['/usr/lib', 'subpath'], # this has to be relative to the wscript it appears in! (or the root wscript, I can not recall)
            ...,
            use = ['iconv2', 'regex2'])
See section 9.1.2 of the waf book
Alternatively (and probably the cleaner version):
cfg.check_cc(lib='iconv2', uselib_store="LIBICONV2", mandatory=True)
and then use uselib with
bld.program(...,
            libpath = ['/usr/lib', 'subpath'], # this has to be relative to the wscript it appears in! (or the root wscript, I can not recall)
            ...,
            uselib = ['LIBICONV2', ...])
After some consideration, I realised I required further information. The default error information provided by waf seems to be about waf itself, rather than the wscripts or the project.
To rectify this, loggers need to be added. I added loggers to the configure and build functions.
configure logger:
cfg.logger = Logs.make_logger("configure_%s.log" % datetime.date.today().strftime("%Y_%m_%d"), "configure")
build logger:
bld.logger = Logs.make_logger("build_%s.log" % datetime.date.today().strftime("%Y_%m_%d"), "build")
Doing this led me to the nature of the problems:
['C:\\MinGW64\\bin\\g++.exe', '-Wl,--enable-auto-import', '-Wl,--enable-auto-import', 'test.cpp.1.o', '-o', 'C:\\Users\\Administrator\\Downloads\\libpst-0.6.60\\clean\\build\\temp\\conf_check_5fe204eaa3b3bcb7a9f85e15cebb726e\\testbuild\\testprog.exe', '-Wl,-Bstatic', '-Wl,-Bdynamic', '-LC:\\Users\\Administrator\\Downloads\\libpst-0.6.60\\clean\\libs', '-llibiconv2']
err: c:/mingw64/bin/../lib/gcc/x86_64-w64-mingw32/4.7.2/../../../../x86_64-w64-mingw32/bin/ld.exe: skipping incompatible C:\Users\Administrator\Downloads\libpst-0.6.60\clean\libs/libiconv2.dll when searching for -llibiconv2
The library path has been passed correctly to gcc, but the DLL is 32-bit whereas the gcc installation is 64-bit, and so it is incompatible.
top-level wscript:
#! /usr/bin/env python

VERSION = "0.1"
APPNAME = "readpst"
top = "."          # The topmost directory of the waf project
out = "build/temp" # The build directory of the waf project

import os
import datetime

from waflib import Build
from waflib import ConfigSet
from waflib import Logs

# Variant memory variables
var_path = out + "/variant.txt" # The variant memory file path
default_variant = "debug"       # The default if no variant is stored
stored_variant = ""
def options(opt):
    '''
    A script hook function that defines additional switch options for the build.
    '''
    opt.load("compiler_c compiler_cxx")
def configure(cfg):
    '''
    A script hook function that configures the build environment.
    '''
    cfg.logger = Logs.make_logger("configure_%s.log" % datetime.date.today().strftime("%Y_%m_%d"), "configure")
    cfg.load("compiler_c compiler_cxx")
    cfg.find_program("strip")
    cfg.env.DEFINES = \
    [
        "WAF=1",
        "HAVE_CONFIG_H=1"
    ]
    cfg.env.FEATURES = [] # Additional features
    cfg.env.append_value("LIBPATH", os.path.join(os.getcwd(), "libs"))
    cfg.env.append_value("INCLUDES", os.path.join(os.getcwd(), "inc"))
    cfg.env.append_value("INCLUDES", os.path.join(os.getcwd(), "inc", "glib-2.0"))
    cfg.env.append_value("INCLUDES", os.path.join(os.getcwd(), "inc", "glib-2.0", "glib"))
    cfg.env.append_value("INCLUDES", os.path.join(os.getcwd(), "libs", "regex", "2.7", "regex-2.7-src", "src"))
    cfg.env.append_value("INCLUDES", os.path.join(os.getcwd(), "libs", "libiconv", "1.9.2", "libiconv-1.9.2-src", "include"))
    cfg.define("VERSION", VERSION)
    base_env = cfg.env

    # Compiler checks
    cfg.check_large_file(mandatory = False)
    cfg.check_inline()
    cfg.multicheck\
    (
        {"header_name" : "fcntl.h"},
        {"header_name" : "iostream"},
        {"header_name" : "list"},
        {"header_name" : "set"},
        {"header_name" : "string"},
        {"header_name" : "vector"},
        msg = "Checking for standard headers",
        mandatory = True
    )
cfg.check(header_name = "glib.h", mandatory = False)
cfg.multicheck\
(
{"header_name" : "gsf\\gsf-infile-stdio.h"},
{"header_name" : "gsf\\gsf-infile.h"},
{"header_name" : "gsf\\gsf-input-stdio.h"},
{"header_name" : "gsf\\gsf-outfile-msole.h"},
{"header_name" : "gsf\\gsf-outfile.h"},
{"header_name" : "gsf\\gsf-output-stdio.h"},
{"header_name" : "gsf\\gsf-utils.h"},
msg = "Checking for gsf headers",
mandatory = False
)
    # Checking for headers expected in config.h
    cfg.check(header_name = "ctype.h",     define_name = "HAVE_CTYPE_H",     mandatory = False)
    cfg.check(header_name = "dirent.h",    define_name = "HAVE_DIRENT_H",    mandatory = False)
    cfg.check(header_name = "errno.h",     define_name = "HAVE_ERRNO_H",     mandatory = False)
    cfg.check(header_name = "gd.h",        define_name = "HAVE_GD_H",        mandatory = False)
    cfg.check(header_name = "iconv.h",     define_name = "HAVE_ICON",        mandatory = False)
    cfg.check(header_name = "limits.h",    define_name = "HAVE_LIMITS_H",    mandatory = False)
    cfg.check(header_name = "regex.h",     define_name = "HAVE_REGEX_H",     mandatory = False)
    #cfg.check(header_name = "semaphore.h", define_name = "HAVE_SEMAPHORE_H", mandatory = False)
    cfg.check(header_name = "signal.h",    define_name = "HAVE_SIGNAL_H",    mandatory = False)
    cfg.check(header_name = "string.h",    define_name = "HAVE_STRING_H",    mandatory = False)
    cfg.check(header_name = "sys/shm.h",   define_name = "HAVE_SYS_SHM_H",   mandatory = False)
    cfg.check(header_name = "sys/stat.h",  define_name = "HAVE_SYS_STAT_H",  mandatory = False)
    cfg.check(header_name = "sys/types.h", define_name = "HAVE_SYS_TYPES_H", mandatory = False)
    cfg.check(header_name = "sys/wait.h",  define_name = "HAVE_SYS_WAIT_H",  mandatory = False)
    cfg.check(header_name = "wchar.h",     define_name = "HAVE_WCHAR_H",     mandatory = False)
    cfg.check(header_name = "define.h", mandatory = False)
    cfg.check(header_name = "lzfu.h", mandatory = False)
    cfg.check(header_name = "msg.h", mandatory = False)
    # Check for the existence and function of specific headers
    cfg.check_cxx(lib = "libiconv2", uselib_store = "LIBICONV2", mandatory = False)
    # Define the debug build environment
    cfg.setenv("debug", env = base_env.derive())
    cfg.env.append_value("CFLAGS", "-g")
    cfg.define("DEBUG", 1)
    cfg.write_config_header("/debug/inc/config.h")

    # Define the release build environment
    cfg.setenv("release", env = base_env.derive())
    cfg.env.append_value("CFLAGS", "-O2")
    cfg.env.FEATURES = ["strip"]
    cfg.define("RELEASE", 1)
    cfg.write_config_header("/release/inc/config.h")
def pre(ctx):
    '''
    A callback for before build task start.
    '''
    print "Starting %sbuild" % (("%s " % ctx.variant) if(ctx.variant) else "")
    if ctx.cmd == "install":
        print "Installing"

def post(ctx):
    '''
    A callback for after build task finish.
    '''
    global var_path
    print "Finished %sbuild" % (("%s " % ctx.variant) if(ctx.variant) else "")
    env = ConfigSet.ConfigSet()
    env.stored_variant = ctx.variant
    env.store(var_path)
def build(bld):
    '''
    A script hook function that specifies the build behaviour.
    '''
    if bld.cmd != "clean":
        bld.logger = Logs.make_logger("build_%s.log" % datetime.date.today().strftime("%Y_%m_%d"), "build")
    bld.add_pre_fun(pre)
    bld.add_post_fun(post)
    bld.recurse\
    (
        [
            "src"
        ]
    )
def dist(ctx):
    '''
    A script hook function that specifies the packaging behaviour.
    '''
    ctx.base_name = "_".join([APPNAME, VERSION])
    ctx.algo = "zip"
    file_ex_patterns = \
    [
        out + "/**",
        "**/.waf-1*",
        "**/*~",
        "**/*.pyc",
        "**/*.swp",
        "**/.lock-w*"
    ]
    file_in_patterns = \
    [
        "**/wscript*",
        "**/*.h",
        "**/*.c",
        "**/*.cpp",
        "**/*.txt",
    ]
    ctx.files = ctx.path.ant_glob(incl = file_in_patterns, excl = file_ex_patterns)
def set_variant():
    '''
    A function that facilitates dynamic changing of the Context classes' variant member.
    It retrieves the stored variant if existent, otherwise the default.
    '''
    global default_variant
    global stored_variant
    global var_path
    env = ConfigSet.ConfigSet()
    try:
        env.load(var_path)
    except:
        stored_variant = default_variant
    else:
        if(env.stored_variant):
            stored_variant = env.stored_variant
            print "Resuming %s variant" % stored_variant
        else:
            stored_variant = default_variant

def get_variant():
    '''
    A function that facilitates dynamic changing of the Context classes' variant member.
    It sets the variant, if undefined, and returns it.
    '''
    global stored_variant
    if(not stored_variant):
        set_variant()
    return stored_variant
class release(Build.BuildContext):
    '''
    A class that provides the release build.
    '''
    cmd = "release"
    variant = "release"

class debug(Build.BuildContext):
    '''
    A class that provides the debug build.
    '''
    cmd = "debug"
    variant = "debug"

class default_build(Build.BuildContext):
    '''
    A class that provides the default variant build.
    This is set to debug.
    '''
    variant = "debug"

class default_clean(Build.CleanContext):
    '''
    A class that provides the stored variant build clean.
    '''
    @property
    def variant(self):
        return get_variant()

class default_install(Build.InstallContext):
    '''
    A class that provides the stored variant build install.
    '''
    @property
    def variant(self):
        return get_variant()

class default_uninstall(Build.UninstallContext):
    '''
    A class that provides the stored variant build uninstall.
    '''
    @property
    def variant(self):
        return get_variant()
# Additional features
from waflib import Task, TaskGen

class strip(Task.Task):
    run_str = "${STRIP} ${SRC}"
    color = "BLUE"

@TaskGen.feature("strip")
@TaskGen.after("apply_link")
def add_strip_task(self):
    try:
        link_task = self.link_task
    except:
        return
    tsk = self.create_task("strip", self.link_task.outputs[0])