I am trying to call a Go function from Python. When I call my Python program I am seeing the following error. I am referring to the Go to Python link.
Python Program
from ctypes import *
def call_go_function():
    lib = cdll.LoadLibrary("./awesome.so")
    lib.Add.argtypes = [c_longlong, c_longlong]
    print(lib.Add(12, 99))

call_go_function()
Go Program
package main

import "C"

import (
    "sync"
)

var count int
var mtx sync.Mutex

//export Add
func Add(a, b int) int { return a + b }

func main() {}
From the Python path it looks like this is a 32-bit Python version. You cannot mix 32-bit and 64-bit user-space code.
So I guess you need to either:
Rebuild your Go code as a 32-bit DLL (see GOARCH=386) or
Install and run a 64-bit Python version.
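Not part of the original answer, but you can confirm which case you are in from within the interpreter itself: Python can report its own pointer size, so you know the bitness of the build you are actually running before rebuilding anything.
import platform
import struct

# A 64-bit Python reports 64; a 32-bit build reports 32.
print(struct.calcsize("P") * 8, "bit Python")
print(platform.architecture())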
Maybe it's an environment issue. Try running a simple program:
from ctypes import *
lib = cdll.LoadLibrary("./func.so")
r=lib.fun(10,20)
print(r)
package main

import "C"

//export fun
func fun(x int, y int) int {
    return x + y
}

func main() {}
>go build -o func.so -buildmode=c-shared func.go
>python test.py
30
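One caveat worth adding (it is not part of the original answer): Go's int is 64 bits on amd64, while ctypes defaults to c_int for arguments and results, so it is safer to declare the signature explicitly, as the first question already does:
from ctypes import cdll, c_longlong

lib = cdll.LoadLibrary("./func.so")
lib.fun.argtypes = [c_longlong, c_longlong]  # Go int is 64-bit on amd64
lib.fun.restype = c_longlong
print(lib.fun(10, 20))  # prints 30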
Related
I have the following function signature, which returns a JSON string:
func getData(symbol, day, month, year *C.char) *C.char {
    combine, _ := json.Marshal(combineRecords)
    log.Println(string(combine))
    return C.CString(string(combine))
}
The Go code is then called from Python:
import ctypes
from time import sleep

library = ctypes.cdll.LoadLibrary('./deribit.so')
get_data = library.getData
# Make python convert its values to C representation.
# get_data.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
get_data.restype = ctypes.c_char_p

for i in range(1, 100):
    j = get_data("BTC".encode("utf-8"), "5".encode("utf-8"), "JAN".encode("utf-8"), "23".encode("utf-8"))
    # j = get_data(b"BTC", b"3", b"JAN", b"23")
    print('printing in Python')
    # print(j)
    sleep(1)
It works fine as expected on the Python side, but I fear memory leaks when the function is called in a loop on the Python end.
How do I deal with the memory leaks? Should I return bytes instead of a CString and handle the bytes on the Python end to avoid leaks? I did find this link to deal with it, but I do not know the size of the JSON string returned after marshalling.
You are right, you have to free it using C.free.
https://pkg.go.dev/cmd/cgo
// Go string to C string
// The C string is allocated in the C heap using malloc.
// It is the caller's responsibility to arrange for it to be
// freed, such as by calling C.free (be sure to include stdlib.h
// if C.free is needed).
func C.CString(string) *C.char
The Python should look like:
import ctypes
from time import sleep

library = ctypes.CDLL('./stackoverflow.so')
get_data = library.GetData
free_me = library.FreeMe
free_me.argtypes = [ctypes.POINTER(ctypes.c_char)]
# restype is POINTER(c_char) rather than c_char_p on purpose: with c_char_p,
# ctypes would convert the result to a Python bytes object and the original
# pointer needed by FreeMe would be lost.
get_data.restype = ctypes.POINTER(ctypes.c_char)

for i in range(1, 100):
    j = get_data("", "", "")
    print(ctypes.c_char_p.from_buffer(j).value)
    free_me(j)
    sleep(1)
The Go should look like:
package main

/*
#include <stdlib.h>
*/
import "C"

import (
    "log"
    "unsafe"
)

//export GetData
func GetData(symbol, day, month, year *C.char) *C.char {
    combine := "combine"
    log.Println(string(combine))
    return C.CString(string(combine))
}

//export FreeMe
func FreeMe(data *C.char) {
    C.free(unsafe.Pointer(data))
}

func main() {}
And use this command line to generate the shared library:
python3 --version
Python 3.8.10
go version
go version go1.19.2 linux/amd64
go build -o stackoverflow.so -buildmode=c-shared github.com/sjeandeaux/stackoverflow
python3 stackoverflow.py
2023/01/03 13:54:14 combine
b'combine'
...
The same program can also be run in a Docker container:
FROM ubuntu:18.04
RUN apt-get update -y && apt-get install python -y
COPY stackoverflow.so stackoverflow.so
COPY stackoverflow.py stackoverflow.py
CMD ["python", "stackoverflow.py"]
docker build --tag stackoverflow .
docker run -ti stackoverflow
2023/01/03 15:04:24 combine
b'combine'
...
I am creating a VS Code extension in which I need machine learning tasks to be performed. I have Python files containing the code required by the extension. I don't want to do this with request-response against a Python server; I want to perform the ML tasks on-device (integrated with the vsix).
We have child_process available in JS to run a basic Python file using spawn. It runs fine both in the extension host window and in an external VS Code editor after packaging, as long as the Python code only has basic imports like import sys. But if I try to import other libraries such as numpy or pygments, it works only in the extension host environment, not in another VS Code window after packaging. How can I run typical Python code with the vsix?
Below are both versions of the code, the one that works and the one that does not.
TS file (MLOps.ts):
import { ChildProcessWithoutNullStreams, spawn } from "child_process";
import { join } from "path";
import * as vscode from 'vscode'

export async function pythonOps(): Promise<string> {
    var result = "testt"
    var promise = await new Promise<string>((resolve, reject) => {
        var p = __dirname.split('\\')
        p.pop()
        var path = p.join('\\')
        var pyPath = join(path, 'src', 'py_operations.py')
        var result = "blank result"
        var arg1 = "arg one"
        var arg2 = "arg two"
        var py_process = spawn('python', [pyPath, arg1, arg2])
        py_process.stdout.on('data', (data: any) => {
            vscode.window.showInformationMessage(data.toString())
            result = data.toString()
        })
        // resolve once the script exits so the awaited promise actually settles
        py_process.on('close', () => resolve(result))
    })
    return promise
}
Working Python code (py_operations.py). This code works both in the extension host window and after packaging the extension and installing the vsix on another system.
import sys
print("Some text with: ",sys.argv[0], sys.argv[1], sys.argv[2])
sys.stdout.flush()
Not working Python code. This code works only in the extension host window; it does not work after packaging and installing on another system.
import sys
from pygments.lexers.javascript import TypeScriptLexer

lexer = TypeScriptLexer()
src = "alert('text here')"
lexer_tokens = lexer.get_tokens(src)

l = []
for t in lexer_tokens:
    l.append(t[1])

print("list: ", l)
sys.stdout.flush()
How can I run the second Python code with the packaged vsix?
I am using Python 3.6. I have created a C++ extension using pybind11 (https://github.com/pybind/pybind11). I copied the compiled *.pyd file along with the dependent DLL to site-packages. But when I try to load any function from the external DLL, Python complains that the function is not present. If I want to access the function, I need to write
sys.path.append(r'C:\Users\test\AppData\Local\Programs\Python\Python36\Lib\site-packages\CppProject')
or I need to add the same path to the PYTHONPATH environment variable.
Why is Python not able to load the function even though it is present in the same path as the pyd? I don't want to append to sys.path every time I need to use the module, or rely on the environment variable. Is there any way to avoid this? Is there any way to add this path to sys.path automatically whenever the user imports the module?
Example:
CppExport.dll
#include <cmath>  // for pow

#ifdef CPPEXPORT_EXPORTS
#define CPPEXPORT_API __declspec(dllexport)
#else
#define CPPEXPORT_API __declspec(dllimport)
#endif

extern "C" CPPEXPORT_API double sin_impl(double x);

const double e = 2.7182818284590452353602874713527;

double sin_impl(double x){
    return (1 - pow(e, (-2 * x))) / (2 * pow(e, -x));
}
CppProject.pyd
PYBIND11_MODULE(CppProject, m) {
    m.def("sin_impl", &sin_impl, R"pbdoc(
        Compute a hyperbolic tangent of a single argument expressed in radians.
    )pbdoc");

#ifdef VERSION_INFO
    m.attr("__version__") = VERSION_INFO;
#else
    m.attr("__version__") = "dev";
#endif
}
Setup.py
from setuptools import setup
import distutils
import os  # needed for os.sep / os.path.join below
import sys
from setuptools.dist import Distribution
from distutils.sysconfig import get_python_lib

relative_site_packages = get_python_lib().split(sys.prefix + os.sep)[1]
date_files_relative_path = os.path.join(relative_site_packages, "CppProject")

class BinaryDistribution(Distribution):
    """Distribution which always forces a binary package with platform name"""
    def has_ext_modules(foo):
        return True

setup(
    name='CppProject',
    version='1.0',
    description='CppProject Library',
    packages=['CppProject'],
    package_data={
        'CppProject': ['CppProject.pyd'],
    },
    data_files=[(date_files_relative_path, ["CppExport.dll"])],
    distclass=BinaryDistribution
)
In Python:
from CppProject import sin_impl
Error:
ImportError: cannot import name 'sin_impl'
The full code is present on GitHub.
Sorry for the previous reply, here is some better advice:
You want to distribute your library; to do so you need to create a setup.py and an __init__.py. Once this is done you will be able to install your package using python setup.py install.
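For orientation, a hypothetical layout matching the setup.py below (all names assumed) could be:
MyStack/
    setup.py
    README.rst
    Pkg/
        __init__.py
        ... your extension module and its DLLs ...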
For me the setup.py looks like:
from distutils.core import setup

README_rst = ''
with open('README.rst', mode='r', encoding='utf-8') as fd:
    README_rst = fd.read()

setup(
    name='MyStack',
    version='0.0.1',
    description='Cool short description',
    author='Author',
    author_email='author@mail.com',
    url='repo.com',
    packages=['Pkg'],
    long_description=README_rst,
    include_package_data=True,
    classifiers=[
        # Trove classifiers
        # The full list is here: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 3 - Alpha',
    ]
)
In the __init__.py you will have to find your library and import it. Here is an example of how Qt does it:
def find_qt():
    import os

    path = os.environ['PATH']

    dll_dir = os.path.dirname(__file__) + '\\Qt\\bin'
    if os.path.isfile(dll_dir + '\\Qt5Core.dll'):
        path = dll_dir + ';' + path
        os.environ['PATH'] = path
    else:
        for dll_dir in path.split(';'):
            if os.path.isfile(dll_dir + '\\Qt5Core.dll'):
                break
        else:
            raise ImportError("unable to find Qt5Core.dll on PATH")

    try:
        os.add_dll_directory(dll_dir)
    except AttributeError:
        pass

find_qt()
del find_qt
Hope this helps.
The fact that your code works when you explicitly add the directory to sys.path is the key to understanding what's happening.
Since site-packages is one of the locations searched by the interpreter when importing modules, this statement:
from CppProject import sin_impl
is actually searching for a module named sin_impl inside the CppProject folder.
Instead you should do:
from CppProject.CppProject import sin_impl
which points to the actual module of the same name.
This actually doesn't require the presence of __init__.py inside CppProject to qualify it as a Python package, since Python 3.3+ implements implicit namespace packages.
However, when you are building a complex program with many dependencies, the package constructor (__init__.py) enables you to add initialization to be performed before any regular module is executed.
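As a sketch only (this file is not shown in the question), a CppProject/__init__.py along those lines could re-export the function, so the short import keeps working, and help Windows locate the dependent CppExport.dll:
# CppProject/__init__.py (hypothetical)
import os

# Let Windows find CppExport.dll next to the .pyd (Python 3.8+ only).
try:
    os.add_dll_directory(os.path.dirname(os.path.abspath(__file__)))
except AttributeError:
    pass

# Re-export so `from CppProject import sin_impl` works again.
from .CppProject import sin_impl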
I'm trying to make a setup.py for cgal-bindings. To install this, the user needs to have at least a certain version of CGAL. In addition, CGAL has a few optional targets that should be built if the user has some libraries (like Eigen3). Is there a cross-platform way in Python to check for this?
I can use find_library in ctypes.util to check whether the library exists, but I don't see an easy way to get the version. (This doesn't even always work: some libraries, such as Eigen3, are header-only C++ template libraries.)
Using the install_requires argument of setup() only works for Python libraries, and CGAL is a C/C++ library.
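For illustration (this snippet is not from the question): ctypes.util.find_library can only tell you whether a shared library is present at all, it gives no version, and it finds nothing for a header-only library.
from ctypes.util import find_library

print(find_library("CGAL"))    # a soname string if the library is installed, else None
print(find_library("eigen3"))  # header-only: there is no shared library to find, so None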
Whether a particular extension module should be compiled depending on the availability of some library version can be accomplished by dynamically generating the ext_modules argument of setup() in setup.py.
For the _yaml.so module of ruamel.yaml, which should only be compiled when the libyaml development libraries are installed on the system, I do:
import os
from textwrap import dedent

from setuptools import Extension


def check_extensions():
    """check if the C module can be built by trying to compile a small
    program against the libyaml development library"""

    import tempfile
    import shutil

    import distutils.sysconfig
    import distutils.ccompiler
    from distutils.errors import CompileError, LinkError

    libraries = ['yaml']

    # write a temporary .c file to compile
    c_code = dedent("""
    #include <yaml.h>

    int main(int argc, char* argv[])
    {
        yaml_parser_t parser;
        parser = parser;  /* prevent warning */
        return 0;
    }
    """)
    tmp_dir = tempfile.mkdtemp(prefix='tmp_ruamel_yaml_')
    bin_file_name = os.path.join(tmp_dir, 'test_yaml')
    file_name = bin_file_name + '.c'
    with open(file_name, 'w') as fp:
        fp.write(c_code)

    # and try to compile it
    compiler = distutils.ccompiler.new_compiler()
    assert isinstance(compiler, distutils.ccompiler.CCompiler)
    distutils.sysconfig.customize_compiler(compiler)

    try:
        compiler.link_executable(
            compiler.compile([file_name]),
            bin_file_name,
            libraries=libraries,
        )
    except CompileError:
        print('libyaml compile error')
        ret_val = None
    except LinkError:
        print('libyaml link error')
        ret_val = None
    else:
        ret_val = [
            Extension(
                '_yaml',
                sources=['ext/_yaml.c'],
                libraries=libraries,
            ),
        ]
    shutil.rmtree(tmp_dir)
    return ret_val
This way you require no extra files in the distribution. Even if you cannot make compilation fail based on the version number at compile time, you should be able to run the resulting program from the temporary directory and check its exit value and/or output.
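As a rough sketch of that idea applied to a header-only dependency such as the Eigen3 mentioned in the question (this is not part of the original answer; the include path and the use of Eigen's version macros are assumptions about a typical Linux install), you can let the preprocessor reject versions that are too old, so that the compile step itself fails:
import os
import shutil
import tempfile
from textwrap import dedent

import distutils.ccompiler
import distutils.sysconfig
from distutils.errors import CompileError


def have_eigen3(include_dir='/usr/include/eigen3'):
    """Return True if Eigen3 >= 3.3 headers can be compiled against.

    include_dir is an assumption; adjust it for your system.
    """
    c_code = dedent("""
        #include <Eigen/Core>
        #if EIGEN_WORLD_VERSION < 3 || (EIGEN_WORLD_VERSION == 3 && EIGEN_MAJOR_VERSION < 3)
        #error "Eigen3 older than 3.3"
        #endif
        int main(void) { return 0; }
        """)

    tmp_dir = tempfile.mkdtemp(prefix='tmp_eigen3_check_')
    file_name = os.path.join(tmp_dir, 'test_eigen3.cpp')
    with open(file_name, 'w') as fp:
        fp.write(c_code)

    compiler = distutils.ccompiler.new_compiler()
    distutils.sysconfig.customize_compiler(compiler)
    try:
        # compiling is enough: the #error above makes too-old versions fail here
        compiler.compile([file_name], output_dir=tmp_dir, include_dirs=[include_dir])
        return True
    except CompileError:
        return False
    finally:
        shutil.rmtree(tmp_dir)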
I have written some code in Python that uses libraries that are not available in Go. I have a web server written in Go, and I would like to be able to call a Python program from my Go program and then use the output of the Python program as input in my Go program. Is there any way to do this?
It's actually relatively easy. All you need to do is use the os/exec package. Here is an example:
Go Code:
package main

import (
    "fmt"
    "os/exec"
)

func main() {
    cmd := exec.Command("python", "python.py", "foo", "bar")
    fmt.Println(cmd.Args)
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println(string(out))
}
Python Code:
import sys

for i in range(len(sys.argv)):
    print(str(i) + ": " + sys.argv[i])
Output From Go Code:
[python python.py foo bar]
0: python.py
1: foo
2: bar