I am trying to run the code below, which requires NumPy. I installed it via pip install numpy, but the editor highlights the import with the note "unresolved import 'numpy'", and when I run the script I get the error "No module named 'numpy'". After the first failure I uninstalled and reinstalled NumPy, but the problem persists.
I am using Python 3.7.8 and NumPy 1.20.2.
The code I am trying to run:
#!/usr/bin/env python3
#
# Copyright (c) 2018 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Super Mario Bros level extractor
This script requires py65emu, numpy, and PIL to run. Run with no arguments to see usage.
See http://matthewearl.github.io/2018/06/28/smb-level-extractor/ for a description of how this was written.
To run you'll need to compile https://gist.github.com/1wErt3r/4048722 with x816 to obtain the PRG-ROM and symbol files.
The CHR-ROM should be extracted from a Super Mario Bros ROM, or can be read from an INES ROM file. See
https://wiki.nesdev.com/w/index.php/INES for information on the INES format. In addition you'll need a NES palette
saved in "data/ntscpalette.pal", generated using the tool here: https://bisqwit.iki.fi/utils/nespalette.php
"""
import collections
import pathlib
import re
import numpy as np
from py65emu.cpu import CPU
from py65emu.mmu import MMU
_WORKING_RAM_SIZE = 0x800
Symbol = collections.namedtuple('Symbol', ('name', 'address', 'line_num'))
class SymbolFile:
_LINE_RE = r"(?P<name>[A-Z0-9_]+) *= \$(?P<address>[A-F0-9]*) *; <> \d+, statement #(?P<line_num>\d+)"
def __init__(self, fname):
with open(fname) as f:
self._symbols = [self._parse_symbol(line) for line in f.readlines()]
self._symbols = list(sorted(self._symbols, key=lambda s: s.address))
self._name_to_addr = {s.name: s.address for s in self._symbols}
self._addr_to_name = {s.address: s.name for s in self._symbols}
def _parse_symbol(self, line):
m = re.match(self._LINE_RE, line)
return Symbol(m.group('name'), int(m.group('address'), 16), int(m.group('line_num')))
def __getitem__(self, name):
return self._name_to_addr[name]
def _read_ppu_data(mmu, addr):
while True:
ppu_high_addr = mmu.read(addr)
if ppu_high_addr == 0x0:
break
ppu_low_addr = mmu.read(addr + 1)
assert ppu_high_addr == 0x3f and ppu_low_addr == 0x00
flags_and_length = mmu.read(addr + 2)
assert (flags_and_length & (1<<7)) == 0, "32-byte increment flag set"
assert (flags_and_length & (1<<6)) == 0, "Repeating flag set"
length = flags_and_length & 0b111111
addr += 3
for i in range(length):
yield mmu.read(addr)
addr += 1
def _load_palette(mmu, sym_file, nes_palette):
area_type = mmu.read(sym_file['AREATYPE'])
idx = mmu.read(sym_file['AREAPALETTE'] + area_type)
high_addr = mmu.read(sym_file['VRAM_ADDRTABLE_HIGH'] + idx)
low_addr = mmu.read(sym_file['VRAM_ADDRTABLE_LOW'] + idx)
palette_data = list(_read_ppu_data(mmu, high_addr << 8 | low_addr))
assert len(palette_data) == 32
a = np.array(palette_data[:16]).reshape(4, 4)
a[:, 0] = mmu.read(sym_file['BACKGROUNDCOLORS'] + area_type)
return nes_palette[a]
def _execute_subroutine(cpu, addr):
s_before = cpu.r.s
cpu.JSR(addr)
while cpu.r.s != s_before:
cpu.step()
def _get_metatile_buffer(mmu, sym_file):
return [mmu.read(sym_file['METATILEBUFFER'] + i) for i in range(13)]
def load_tile(chr_rom, idx):
chr_rom_addr = 0x1000 + 16 * idx
d = chr_rom[chr_rom_addr:chr_rom_addr + 16]
a = np.array([[b & (128 >> i) != 0 for i in range(8)] for b in d]).reshape(2, 8, 8)
return a[0] + 2 * a[1]
def _render_metatile(mmu, chr_rom, mtile, palette):
palette_num = mtile >> 6
palette_idx = mtile & 0b111111
high_addr = mmu.read(sym_file['METATILEGRAPHICS_HIGH'] + palette_num)
low_addr = mmu.read(sym_file['METATILEGRAPHICS_LOW'] + palette_num)
addr = (high_addr << 8 | low_addr) + palette_idx * 4
t = np.vstack([np.hstack([load_tile(chr_rom, mmu.read(addr + c * 2 + r)) for c in range(2)])
for r in range(2)])
return palette[palette_num][t]
def load_level(stage, prg_rom, chr_rom, sym_file, nes_palette):
# Initialize the MMU / CPU
mmu = MMU([
(0x0, _WORKING_RAM_SIZE, False, []),
(0x8000, 0x10000, True, list(prg_rom))
])
cpu = CPU(mmu, 0x0)
# Execute some preamble subroutines which set up variables used by the main subroutines.
if isinstance(stage, tuple):
world_num, area_num = stage
mmu.write(sym_file['WORLDNUMBER'], world_num - 1)
mmu.write(sym_file['AREANUMBER'], area_num - 1)
_execute_subroutine(cpu, sym_file['LOADAREAPOINTER'])
else:
area_pointer = stage
mmu.write(sym_file['AREAPOINTER'], area_pointer)
mmu.write(sym_file['HALFWAYPAGE'], 0)
mmu.write(sym_file['ALTENTRANCECONTROL'], 0)
mmu.write(sym_file['PRIMARYHARDMODE'], 0)
mmu.write(sym_file['OPERMODE_TASK'], 0)
_execute_subroutine(cpu, sym_file['INITIALIZEAREA'])
# Extract the palette.
palette = _load_palette(mmu, sym_file, nes_palette)
# Repeatedly extract meta-tile columns, until the level starts repeating.
cols = []
for column_pos in range(1000):
_execute_subroutine(cpu, sym_file['AREAPARSERCORE'])
cols.append(_get_metatile_buffer(mmu, sym_file))
_execute_subroutine(cpu, sym_file['INCREMENTCOLUMNPOS'])
if len(cols) >= 96 and cols[-48:] == cols[-96:-48]:
cols = cols[:-80]
break
level = np.array(cols).T
# Render a dict of metatiles.
mtiles = {mtile: _render_metatile(mmu, chr_rom, mtile, palette)
for mtile in set(level.flatten())}
return level, mtiles
def render_level(level, mtiles):
return np.vstack([np.hstack([mtiles[mtile] for mtile in row]) for row in level])
if __name__ == "__main__":
import sys
import PIL.Image
world_map = {
'{}-{}'.format(world_num, area_num): (world_num, area_num)
for world_num in range(1, 9)
for area_num in range(1, 5)
}
world_map.update({
'bonus': 0xc2,
'cloud1': 0x2b,
'cloud2': 0x34,
'water1': 0x00,
'water2': 0x02,
'warp': 0x2f,
})
if len(sys.argv) < 6:
print("Usage: {} <world> <prg-rom> <sym-file> <chr-rom> <out-file>".format(sys.argv[0]), file=sys.stderr)
print(" <world> is one of {}".format(', '.join(sorted(world_map.keys()))), file=sys.stderr)
print(" <prg-rom> is the binary output from x816")
print(" <sym-file> is the sym file output from x816")
print(" <chr-rom> is a CHR-ROM dump")
print(" <out-file> is the output image name")
sys.exit(-1)
stage = world_map[sys.argv[1]]
with open(sys.argv[2], 'rb') as f:
prg_rom = f.read()
sym_file = SymbolFile(sys.argv[3])
with open(sys.argv[4], 'rb') as f:
chr_rom = f.read()
out_fname = sys.argv[5]
with (pathlib.Path(sys.argv[0]).parent / "data" / "ntscpalette.pal").open("rb") as f:
nes_palette = np.array(list(f.read())).reshape(64, 3)
level, mtiles = load_level(stage, prg_rom, chr_rom, sym_file, nes_palette)
a = render_level(level, mtiles).astype(np.uint8)
im = PIL.Image.fromarray(a)
im.save(out_fname)
How did you create your workspace in Visual Studio? Do you have Python development tools installed with Visual Studio? Did you create a "Python application" as your project template?
If so, your project should have a virtual environment, which you can see in the solution directory. If that is the case, do the following:
Go to "Solution Explorer" Tab >
Find "Python Environments"
Find your active env. For me there was only one called "Python 3.9 (global default)"
Right click and select "Manage Python Packages..."
There it should list all the packages installed and versions. If numpy is not there, just type "numpy" in the search box and click the suggests install option: "run command: pip install numpy".
Make sure you have installed NumPy in the same Python environment that you use to run the program (check whether the PATH variable includes the path to the correct Python environment).
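A quick way to confirm which interpreter actually runs the script, and whether NumPy is visible to it (a minimal diagnostic sketch, independent of Visual Studio):
import sys
print(sys.executable)   # path of the interpreter running this script
try:
    import numpy
    print("numpy", numpy.__version__, "found at", numpy.__file__)
except ModuleNotFoundError:
    print("numpy is not installed for this interpreter")
If NumPy is missing for that interpreter, installing with the same interpreter (python -m pip install numpy, using the python shown by sys.executable) avoids installing into a different environment by accident.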
In Python, I am trying to use the J1939 filtering as mentioned in the Linux kernel docs: https://www.kernel.org/doc/html/latest/networking/j1939.html
The following code fails at the setsockopt() line (setting up filters):
import socket
import struct
def pack_J1939_filters(can_filters):
can_filter_fmt = "=" + "2Q2B2I" * len(can_filters)
filter_data = []
for can_filter in can_filters:
name = can_filter['name']
name_mask = can_filter['name_mask']
addr = can_filter['addr']
addr_mask = can_filter['addr_mask']
pgn = can_filter['pgn']
pgn_mask = can_filter['pgn_mask']
filter_data.append(name)
filter_data.append(name_mask)
filter_data.append(addr)
filter_data.append(addr_mask)
filter_data.append(pgn)
filter_data.append(pgn_mask)
return struct.pack(can_filter_fmt, *filter_data)
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
interface = "vcan0"
src_name = socket.J1939_NO_NAME
src_pgn = socket.J1939_NO_PGN
src_addr = 0x81
src_sck_addr = (interface, src_name, src_pgn, src_addr)
s.bind(src_sck_addr)
filters = [{"name": 0, "name_mask":0, "addr":0, "addr_mask":0, "pgn": 0, "pgn_mask": 0}]
packed_filters = pack_J1939_filters(filters)
# socket.SOL_CAN_J1939 does not seem to exist
SOL_CAN_BASE = 100
CAN_J1939 = 7
SOL_CAN_J1939 = SOL_CAN_BASE + CAN_J1939
s.setsockopt(SOL_CAN_J1939, socket.SO_J1939_FILTER , packed_filters)
s.recvfrom(128)
s.close()
First, the kernel documentation says to use SOL_CAN_J1939 as the first argument. However, socket.SOL_CAN_J1939 does not exist in the socket module. Looking at the code at this location, I was able to work out that this int value should be 107: http://socket-can.996257.n3.nabble.com/RFC-v3-0-6-CAN-add-SAE-J1939-protocol-td7571.html
As for the setsockopt() third argument, I packed the filters to match the j1939_filter structure (26 bytes as described in the code from the previous link). This is similar to what is done in can.interfaces.socketcan.utils for raw CAN.
What am I doing wrong to cause setsockopt() to fail?
The first issue was that the struct.pack format (can_filter_fmt) was wrong. I had assumed that the size of the kernel j1939_filter structure was the sum of its members' sizes. That is wrong, because the compiler adds padding; the padding can be expressed in the struct.pack format as x, as in 2Q2I2B6x. Please see Why isn't sizeof for a struct equal to the sum of sizeof of each member?
The second issue was that can_filter_fmt must be packed not as 2Q2B2I but as 2Q2I2B6x (the addr members belong at the end, after pgn, not in the middle).
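A quick way to check the two layouts (my own sketch; the sizes follow from the format strings themselves, not from the kernel headers):
import struct

print(struct.calcsize("=2Q2B2I"))    # 26 -- the original format, members only
print(struct.calcsize("=2Q2I2B6x"))  # 32 -- kernel member order plus 6 padding bytes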
As for SOL_CAN_J1939, I was correct: it has to be defined in the file because it does not yet exist in the socket module.
The final code is the following:
#!/usr/bin/env python3
import socket
import struct
def pack_J1939_filters(can_filters=None):
if can_filters is None:
# Pass all messages
can_filters = [{}]
can_filter_fmt = "=" + "2Q2I2B6x" * len(can_filters)
filter_data = []
for can_filter in can_filters:
if 'name' in can_filter:
name = can_filter['name']
else:
name = 0
if 'name_mask' in can_filter:
name_mask = can_filter['name_mask']
else:
name_mask = 0
if 'pgn' in can_filter:
pgn = can_filter['pgn']
else:
pgn = 0
if 'pgn_mask' in can_filter:
pgn_mask = can_filter['pgn_mask']
else:
pgn_mask = 0
if 'addr' in can_filter:
addr = can_filter['addr']
else:
addr = 0
if 'addr_mask' in can_filter:
addr_mask = can_filter['addr_mask']
else:
addr_mask = 0
filter_data.append(name)
filter_data.append(name_mask)
filter_data.append(pgn)
filter_data.append(pgn_mask)
filter_data.append(addr)
filter_data.append(addr_mask)
return struct.pack(can_filter_fmt, *filter_data)
def print_msg(data, sck_addr):
print(f"SA:{hex(sck_addr[3])} PGN:{hex(sck_addr[2])}")
for j in range(len(data)):
if j % 8 == 0 and j != 0:
print()
if j % 8 == 0:
print(f"bytes {j} to {j+7}: ", end="")
print(f"{hex(data[j])} ", end="")
print()
print()
def main():
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
# allows to receive broadcast messages
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
interface = "vcan0"
src_name = socket.J1939_NO_NAME
src_pgn = socket.J1939_NO_PGN # always no PGN for source, unless filtering is needed
src_addr = 0x81 # recvfrom() will not return destination specific messages for other addresses
src_sck_addr = (interface, src_name, src_pgn, src_addr)
s.bind(src_sck_addr)
packed_filters = pack_J1939_filters()
SOL_CAN_BASE = 100
CAN_J1939 = 7
SOL_CAN_J1939 = SOL_CAN_BASE + CAN_J1939
s.setsockopt(SOL_CAN_J1939, socket.SO_J1939_FILTER , packed_filters)
(recv_data, recv_sck_addr) = s.recvfrom(128)
print_msg(recv_data, recv_sck_addr)
s.close()
if __name__ == "__main__":
main()
Thank you.
For J1939 to work with SocketCAN you need two things:
kernel 5.4+
can-j1939 kernel module enabled
Testing for can-j1939:
If you install can-utils and all you get after sudo modprobe can-j1939 is a fatal error, or if you start testj1939 from can-utils and get an error that the protocol is not supported, then can-j1939 was not enabled in your kernel and you need to compile it manually.
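On Python 3.9+ a small probe like the following can also tell you whether the running kernel accepts J1939 sockets (my own sketch, not taken from can-utils):
import socket

try:
    s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
except AttributeError:
    print("this Python build has no CAN_J1939 support (needs Python 3.9+)")
except OSError as exc:
    print("can-j1939 is not available:", exc)  # e.g. "Protocol not supported"
else:
    s.close()
    print("can-j1939 protocol is available")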
Here are my instructions for enabling can-j1939 in Debian 10 kernel:
https://github.com/linux-can/can-utils/blob/master/can-j1939-install-kernel-module.md
I have a VBA script in Microsoft Access. The VBA script is part of a large project with multiple people, and so it is not possible to leave the VBA environment.
In a section of my script, I need to do complicated linear algebra on a table quickly. So I move the VBA tables (written as recordsets) into Python to do the linear algebra, and then back into VBA. The matrices in Python are represented as NumPy arrays.
Some of the linear algebra is proprietary, so we compile the proprietary scripts with PyInstaller.
The details of the process are as follows:
The VBA script writes the table to a CSV file, input.csv.
The VBA script runs the Python script from the command line.
The python script loads the csv file input.csv as a numpy matrix, does linear algebra on it, and creates an output csv file output.csv.
VBA waits until python is done, then loads output.csv.
VBA deletes the no-longer-needed input.csv file and output.csv file.
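For reference, the Python side of the steps above boils down to something like this (a minimal sketch; the file names follow the question and the matrix product is only a placeholder for the proprietary linear algebra):
import numpy as np

mat = np.loadtxt("input.csv", delimiter=",")     # step 3: load the table written by VBA
result = mat @ mat.T                             # placeholder for the proprietary linear algebra
np.savetxt("output.csv", result, delimiter=",")  # written back for VBA to pick up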
This process is inefficient.
Is there a way to load VBA matrices into Python (and back) without the CSV clutter? Do these methods work with Python code compiled with PyInstaller?
I have found the following examples on stackoverflow that are relevant. However, they do not address my problem specifically.
Return result from Python to Vba
How to pass Variable from Python to VBA Sub
Solution 1
Either retrieve the COM running instance of Access and get/set the data directly with the python script via the COM API:
VBA:
Private Cache
Public Function GetData()
GetData = Cache
Cache = Empty
End Function
Public Sub SetData(data)
Cache = data
End Sub
Sub Usage()
Dim wshell
Set wshell = VBA.CreateObject("WScript.Shell")
' Make the data available via GetData()
Cache = Array(4, 6, 8, 9)
' Launch the python script compiled with pyinstaller
Debug.Assert 0 = wshell.Run("C:\dev\myapp.exe", 0, True)
' Handle the returned data
Debug.Assert Cache(3) = 2
End Sub
Python (myapp.exe):
import win32com.client
if __name__ == "__main__":
# get the running instance of Access
app = win32com.client.GetObject(Class="Access.Application")
# get some data from Access
data = app.run("GetData")
# return some data to Access
app.run("SetData", [1, 2, 3, 4])
Solution 2
Or create a COM server to expose some functions to Access :
VBA:
Sub Usage()
Dim Py As Object
Set Py = CreateObject("Python.MyModule")
Dim result
result = Py.MyFunction(Array(5, 6, 7, 8))
End Sub
Python (myserver.exe or myserver.py):
import sys, os, win32api, win32com.server.localserver, win32com.server.register
class MyModule(object):
_reg_clsid_ = "{5B4A4174-EE23-4B70-99F9-E57958CFE3DF}"
_reg_desc_ = "My Python COM Server"
_reg_progid_ = "Python.MyModule"
_public_methods_ = ['MyFunction']
def MyFunction(self, data) :
return [(1,2), (3, 4)]
def register(*classes) :
regsz = lambda key, val: win32api.RegSetValue(-2147483647, key, 1, val)
isPy = not sys.argv[0].lower().endswith('.exe')
python_path = isPy and win32com.server.register._find_localserver_exe(1)
server_path = isPy and win32com.server.register._find_localserver_module()
for cls in classes :
if isPy :
file_path = sys.modules[cls.__module__].__file__
class_name = '%s.%s' % (os.path.splitext(os.path.basename(file_path))[0], cls.__name__)
command = '"%s" "%s" %s' % (python_path, server_path, cls._reg_clsid_)
else :
file_path = sys.argv[0]
class_name = '%s.%s' % (cls.__module__, cls.__name__)
command = '"%s" %s' % (file_path, cls._reg_clsid_)
regsz("SOFTWARE\\Classes\\" + cls._reg_progid_ + '\\CLSID', cls._reg_clsid_)
regsz("SOFTWARE\\Classes\\AppID\\" + cls._reg_clsid_, cls._reg_progid_)
regsz("SOFTWARE\\Classes\\CLSID\\" + cls._reg_clsid_, cls._reg_desc_)
regsz("SOFTWARE\\Classes\\CLSID\\" + cls._reg_clsid_ + '\\LocalServer32', command)
regsz("SOFTWARE\\Classes\\CLSID\\" + cls._reg_clsid_ + '\\ProgID', cls._reg_progid_)
regsz("SOFTWARE\\Classes\\CLSID\\" + cls._reg_clsid_ + '\\PythonCOM', class_name)
regsz("SOFTWARE\\Classes\\CLSID\\" + cls._reg_clsid_ + '\\PythonCOMPath', os.path.dirname(file_path))
regsz("SOFTWARE\\Classes\\CLSID\\" + cls._reg_clsid_ + '\\Debugging', "0")
print('Registered ' + cls._reg_progid_)
if __name__ == "__main__":
if len(sys.argv) > 1 :
win32com.server.localserver.serve(set([v for v in sys.argv if v[0] == '{']))
else :
register(MyModule)
Note that you'll have to run the script once without any argument to register the class and to make it available to VBA.CreateObject.
Both solutions work with PyInstaller, and the array received in Python can be converted with numpy.array(data).
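For instance, MyFunction in Solution 2 could convert and return data along these lines (my own sketch, not part of the original answer; with pywin32 the COM array arrives as a tuple):
import numpy as np

def MyFunction(self, data):
    arr = np.array(data, dtype=float)   # Array(5, 6, 7, 8) arrives as a 4-element tuple
    return (arr * 2).tolist()           # a list of numbers marshals back to VBA as an array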
Dependency:
https://pypi.python.org/pypi/pywin32
You can try loading your record set into an array, dim'ed as Double
Dim arr(1 to 100, 1 to 100) as Double
by looping, then pass the pointer to the first element ptr = VarPtr(arr(1, 1)) to Python, where
arr = numpy.ctypeslib.as_array(ptr, (100 * 100,)) ?
But VBA will still own the array memory
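If you go down that route, the NumPy side could look roughly like this (my own sketch, assuming the raw address of arr(1, 1) reaches Python as an integer; VBA Doubles map to ctypes.c_double):
import ctypes
import numpy as np

def view_vba_array(address, rows=100, cols=100):
    # Wrap the VBA-owned buffer without copying; the VBA array must stay
    # alive for as long as this view is used.
    buf = (ctypes.c_double * (rows * cols)).from_address(address)
    return np.ctypeslib.as_array(buf).reshape(rows, cols)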
There is a very simple way of doing this with xlwings. See xlwings.org and make sure to follow the instructions to enable the macro settings, tick the xlwings reference in the VBA editor, and so on.
The code would then look as simple as the following (a slightly silly block of code that just returns the same dataframe back, but you get the picture):
import xlwings as xw
import numpy as np
import pandas as pd
# The @xw.func decorator tells xlwings to create an Excel VBA wrapper for this function.
# It has no effect on how the function behaves in python
@xw.func
@xw.arg('pensioner_data', pd.DataFrame, index=False, header=True)
@xw.ret(expand='table', index=False)
def pensioner_CF(pensioner_data, mortality_table = "PA(90)", male_age_adj = 0, male_improv = 0, female_age_adj = 0, female_improv = 0,
years_improv = 0, arrears_advance = 0, discount_rate = 0, qxy_tables=0):
pensioner_data = pensioner_data.replace(np.nan, '', regex=True)
cashflows_df = pd.DataFrame()
return cashflows_df
I'd be interested to hear if this answers the question. It certainly made my VBA / python experience a lot easier.
This command works fine on my personal computer but keeps giving me this error on my work PC. What could be going on? I can run the Char_Limits.py script directly in Powershell without a problem.
error: compiling 'C:\ProgramData\Anaconda2\lib\site-packages\jinja2\asyncsupport.py' failed
SyntaxError: invalid syntax (asyncsupport.py, line 22)
My setup.py file looks like:
from distutils.core import setup
import py2exe
setup (console=['Char_Limits.py'])
My file looks like:
import xlwings as xw
from win32com.client import constants as c
import win32api
"""
Important Notes: Header row has to be the first row. No columns without a header row. If you need/want a blank column, just place a random placeholder
header value in the first row.
Product_Article_Number column is used to determine the number of rows. It must be populated for every row.
"""
#functions, hooray!
def setRange(columnDict, columnHeader):
column = columnDict[columnHeader]
rngForFormatting = xw.Range((2,column), (bttm, column))
cellReference = xw.Range((2,column)).get_address(False, False)
return rngForFormatting, cellReference
def msg_box(message):
win32api.MessageBox(wb.app.hwnd, message)
#Character limits for fields in Hybris
CharLimits_Fields = {"alerts":500, "certifications":255, "productTitle":300,
"teaserText":450 , "includes":1000, "compliance":255, "disclaimers":9000,
"ecommDescription100":100, "ecommDescription240":240,
"internalKeyword":1000, "metaKeywords":1000, "metaDescription":1000,
"productFeatures":7500, "productLongDescription":1500,"requires":500,
"servicePlan":255, "skuDifferentiatorText":255, "storage":255,
"techDetailsAndRefs":12000, "warranty":1000}
# Fields for which a break tag is problematic.
BreakTagNotAllowed = ["ecommDescription100", "ecommDescription240", "productTitle",
"skuDifferentiatorText"]
app = xw.apps.active
wb = xw.Book(r'C:\Users\XXXX\Documents\Import File.xlsx')
#identifies the blanket range of interest
firstCell = xw.Range('A1')
lstcolumn = firstCell.end("right").column
headers_Row = xw.Range((1,1), (1, lstcolumn)).value
columnDict = {}
for column in range(1, len(headers_Row) + 1):
header = headers_Row[column - 1]
columnDict[header] = column
try:
articleColumn = columnDict["Product_Article_Number"]
except:
articleColumn = columnDict["Family_Article_Number"]
firstCell = xw.Range((1,articleColumn))
bttm = firstCell.end("down").row
wholeRange = xw.Range((1,1),(bttm, lstcolumn))
wholeRangeVal = wholeRange.value
#Sets the font and deletes previous conditional formatting
wholeRange.api.Font.Name = "Arial Unicode MS"
wholeRange.api.FormatConditions.Delete()
for columnHeader in columnDict.keys():
if columnHeader in CharLimits_Fields.keys():
rng, cellRef = setRange(columnDict, columnHeader)
rng.api.FormatConditions.Add(2,3, "=len(" + cellRef + ") >=" + str(CharLimits_Fields[columnHeader]))
rng.api.FormatConditions(1).Interior.ColorIndex = 3
if columnHeader in BreakTagNotAllowed:
rng, cellRef = setRange(columnDict, columnHeader)
rng.api.FormatConditions.Add(2,3, '=OR(ISNUMBER(SEARCH("<br>",' + cellRef + ')), ISNUMBER(SEARCH("<br/>",' + cellRef + ")))")
rng.api.FormatConditions(2).Interior.ColorIndex = 6
searchResults = wholeRange.api.Find("~\"")
if searchResults is not None:
msg_box("There's a double quote in this spreadsheet")
else:
msg_box("There are no double quotes in this spreadsheet")
# app.api.FindFormat.Clear
# app.api.FindFormat.Interior.ColorIndex = 3
# foundRed = wholeRange.api.Find("*", SearchFormat=True)
# if foundRed is None:
# msg_box("There are no values exceeding character limits")
# else:
# msg_box("There are values exceeding character limits")
# app.api.FindFormat.Clear
# app.api.FindFormat.Interior.ColorIndex = 6
# foundYellow = wholeRange.api.Find("*", SearchFormat=True)
# if foundYellow is None:
# msg_box("There are no break tags in this spreadsheet")
# else:
# msg_box("There are break tags in this spreadsheet")
Note:
If you are reading this, I would try Santiago's solution first.
The issue:
Looking at what is likely line 22 in the package on GitHub:
async def concat_async(async_gen):
This makes use of the async keyword, which was added in Python 3.5; however, py2exe only supports up to Python 3.4. Jinja looks to be extending the Python language in some way (perhaps at runtime?) to support this async keyword in earlier versions of Python, and py2exe cannot account for this language extension.
The Fix:
Async support was added in Jinja2 version 2.9 according to the documentation, so I tried installing an earlier version of Jinja2 (version 2.8), which I downloaded here.
I made a backup of my current jinja installation by moving the contents of %PYTHONHOME%\Lib\site-packages\jinja2 to some other place.
Extract the previously downloaded tar.gz file and install the package:
cd .\Downloads\dist\Jinja2-2.8 # or wherever you extracted jinja2.8
python setup.py install
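A note on the install step above: if pip can reach PyPI directly, pip install Jinja2==2.8 should fetch and install the same pinned version without the manual download (I have not verified this against the py2exe build described here).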
As a side note, I also had to increase my recursion limit because py2exe was reaching the default limit.
from distutils.core import setup
import py2exe
import sys
sys.setrecursionlimit(5000)
setup (console=['test.py'])
Warning:
If whatever it is you are using relies on the latest version of jinja2, then this might fail or have unintended side effects when actually running your code. I was compiling a very simple script.
I had the same trouble with Python 3.7. I fixed it by adding the excludes part to my PyInstaller .spec file:
a = Analysis(['pyinst_test.py'],
#...
excludes=['jinja2.asyncsupport','jinja2.asyncfilters'],
#...)
I took that from: https://github.com/pyinstaller/pyinstaller/issues/2393
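If PyInstaller is invoked directly rather than through a .spec file, the same exclusions can be passed on the command line (untested here; --exclude-module is a standard PyInstaller option):
pyinstaller --exclude-module jinja2.asyncsupport --exclude-module jinja2.asyncfilters pyinst_test.py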
I know only the very basics of Python. I have this project for my INFORMATION STORAGE AND MANAGEMENT subject, and I have to give an explanation of the following code.
I searched for every command used in this script but could not find most of them. The code can be found here:
import glob
import json
import os
import re
import string
import sys
from oslo.config import cfg
from nova import context
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import models
from nova import utils
CONF = cfg.CONF
def usage():
print("""
Usage:
python %s --config-file /etc/nova/nova.conf
Note: This script intends to clean up the iSCSI multipath faulty devices
hosted by VNX Block Storage.""" % sys.argv[0])
class FaultyDevicesCleaner(object):
def __init__(self):
# Get host name of Nova computer node.
self.host_name = self._get_host_name()
def _get_host_name(self):
(out, err) = utils.execute('hostname')
return out
def _get_ncpu_emc_target_info_list(self):
target_info_list = []
# Find the targets used by VM on the compute node
bdms = db_api.model_query(context.get_admin_context(),
models.BlockDeviceMapping,
session = db_api.get_session())
bdms = bdms.filter(models.BlockDeviceMapping.connection_info != None)
bdms = bdms.join(models.BlockDeviceMapping.instance).filter_by(
host=string.strip(self.host_name))
for bdm in bdms:
conn_info = json.loads(bdm.connection_info)
if 'data' in conn_info:
if 'target_iqns' in conn_info['data']:
target_iqns = conn_info['data']['target_iqns']
target_luns = conn_info['data']['target_luns']
elif 'target_iqn' in conn_info['data']:
target_iqns = [conn_info['data']['target_iqn']]
target_luns = [conn_info['data']['target_lun']]
else:
target_iqns = []
target_luns = []
for target_iqn, target_lun in zip(target_iqns, target_luns):
if 'com.emc' in target_iqn:
target_info = {
'target_iqn': target_iqn,
'target_lun': target_lun,
}
target_info_list.append(target_info)
return target_info_list
def _get_ncpu_emc_target_info_set(self):
target_info_set = set()
for target_info in self._get_ncpu_emc_target_info_list():
target_iqn = target_info['target_iqn']
target_lun = target_info['target_lun']
target_info_key = "%s-%s" % (target_iqn.rsplit('.', 1)[0],
target_lun)
# target_iqn=iqn.1992-04.com.emc:cx.fnm00130200235.a7
# target_lun=203
# target_info_key=iqn.1992-04.com.emc:cx.fnm00130200235-203
target_info_set.add(target_info_key)
return target_info_set
def _get_target_info_key(self, path):
temp_tuple = path.split('-lun-', 1)
target_lun = temp_tuple[1]
target_iqn = temp_tuple[0].split('-iscsi-')[1]
target_info_key = "%s-%s" % (target_iqn.rsplit('.', 1)[0], target_lun)
# path=/dev/disk/by-path/ip-192.168.3.52:3260-iscsi-iqn.1992-
# 04.com.emc:cx.fnm00130200235.a7-lun-203
# target_info_key=iqn.1992-04.com.emc:cx.fnm00130200235-203
return target_info_key
def _get_non_ncpu_target_info_map(self):
# Group the paths by target_info_key
ncpu_target_info_set = self._get_ncpu_emc_target_info_set()
device_paths = self._get_emc_device_paths()
target_info_map = {}
for path in device_paths:
target_info_key = self._get_target_info_key(path)
if target_info_key in ncpu_target_info_set:
continue
if target_info_key not in target_info_map:
target_info_map[target_info_key] = []
target_info_map[target_info_key].append(path)
return target_info_map
def _all_related_paths_faulty(self, paths):
for path in paths:
real_path = os.path.realpath(path)
out, err = self._run_multipath(['-ll', real_path],
run_as_root=True,
check_exit_code=False)
if 'active ready' in out:
# At least one path is still working
return False
return True
def _delete_all_related_paths(self, paths):
for path in paths:
real_path = os.path.realpath(path)
device_name = os.path.basename(real_path)
device_delete = '/sys/block/%s/device/delete' % device_name
if os.path.exists(device_delete):
# Copy '1' from stdin to the device delete control file
utils.execute('cp', '/dev/stdin', device_delete,
process_input='1', run_as_root=True)
else:
print "Unable to delete %s" % real_path
def _cleanup_faulty_paths(self):
non_ncpu_target_info_map = self._get_non_ncpu_target_info_map()
for paths in non_ncpu_target_info_map.itervalues():
if self._all_related_paths_faulty(paths):
self._delete_all_related_paths(paths)
def _cleanup_faulty_dm_devices(self):
out_ll, err_ll = self._run_multipath(['-ll'],
run_as_root=True,
check_exit_code=False)
# Pattern to split the dm device contents as follows
# Each section starts with a WWN and ends with a line with
# " `-" as the prefix
#
# 3600601601bd032007c097518e96ae411 dm-2 ,
# size=1.0G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
# `-+- policy='round-robin 0' prio=0 status=active
# `- #:#:#:# - #:# active faulty running
# 36006016020d03200bb93e048f733e411 dm-0 DGC,VRAID
# size=1.0G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
# |-+- policy='round-robin 0' prio=130 status=active
# | |- 3:0:0:2 sdd 8:48 active ready running
# | `- 5:0:0:2 sdj 8:144 active ready running
# `-+- policy='round-robin 0' prio=10 status=enabled
# |- 4:0:0:2 sdg 8:96 active ready running
# `- 6:0:0:2 sdm 8:192 active ready running
dm_pat = r'([0-9a-fA-F]{30,})[^\n]+,[^\n]*\n[^,]* `-[^\n]*'
dm_m = re.compile(dm_pat)
path_pat = r'- \d+:\d+:\d+:\d+ '
path_m = re.compile(path_pat)
for m in dm_m.finditer(out_ll):
if not path_m.search(m.group(0)):
# Only #:#:#:# remain in the output, all the paths of the dm
# device should have been deleted. No need to keep the device
out_f, err_f = self._run_multipath(['-f', m.group(1)],
run_as_root=True,
check_exit_code=False)
def cleanup(self):
self._cleanup_faulty_paths()
# Make sure the following configuration is in /etc/multipath.conf
# Otherwise, there may be "map in use" failure when deleting
# dm device
#
# defaults {
# flush_on_last_del yes
# }
#
self._cleanup_faulty_dm_devices()
def _get_emc_device_paths(self):
# Find all the EMC iSCSI devices under /dev/disk/by-path
# except LUNZ and partition reference
pattern = '/dev/disk/by-path/ip-*-iscsi-iqn*com.emc*-lun-*'
device_paths = [path for path in glob.glob(pattern)
if ('lun-0' not in path and '-part' not in path)]
return device_paths
def _run_multipath(self, multipath_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('multipath',
*multipath_command,
run_as_root=True,
check_exit_code=check_exit_code)
print ("multipath %(command)s: stdout=%(out)s stderr=%(err)s"
% {'command': multipath_command, 'out': out, 'err': err})
return out, err
if __name__ == "__main__":
if len(sys.argv) != 3 or sys.argv[1] != '--config-file':
usage()
exit(1)
out, err = utils.execute('which', 'multipath', check_exit_code=False)
if 'multipath' not in out:
print('Info: Multipath tools not installed. No cleanup need be done.')
exit(0)
multipath_flush_on_last_del = False
multipath_conf_path = "/etc/multipath.conf"
if os.path.exists(multipath_conf_path):
flush_on_last_del_yes = re.compile(r'\s*flush_on_last_del.*yes')
for line in open(multipath_conf_path, "r"):
if flush_on_last_del_yes.match(line):
multipath_flush_on_last_del = True
break
if not multipath_flush_on_last_del:
print("Warning: 'flush_on_last_del yes' is not seen in"
" /etc/multipath.conf."
" 'map in use' failure may show up during cleanup.")
CONF(sys.argv[1:])
# connect_volume and disconnect_volume in nova/virt/libvirt/volume.py
# need be adjusted to take the same 'external=True' lock for
# synchronization
#utils.synchronized('connect_volume', external=True)
def do_cleanup():
cleaner = FaultyDevicesCleaner()
cleaner.cleanup()
do_cleanup()
https://wiki.python.org/moin/BeginnersGuide/Programmers
http://www.astro.ufl.edu/~warner/prog/python.html
This looks like Python 2 code (note the print statements and itervalues()), so go for the Python 2 tutorials.
Try downloading an IDE; eric5 is good, by the way.
Try executing this file once.
Learn about indentation and dynamic variable declaration.
Do not jump into the ocean first; try the swimming pool :)
Also try to learn method declaration. Python is a bit different from Java.
I will give you a hint: it looks like system calls are also made to execute OS commands, so try looking at subprocess and how its output is directed to an output stream and an error stream.
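As a concrete illustration of that hint (my own sketch; the script itself goes through Nova's utils.execute wrapper rather than calling subprocess directly):
import subprocess

# Run an OS command and capture its output and error streams separately,
# which is roughly what a call like utils.execute('hostname') does.
proc = subprocess.Popen(['hostname'],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
out, err = proc.communicate()
print(out)   # standard output of the command
print(err)   # standard error of the command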
I am trying to write a Python program to get the list of dependencies available for a package using the Python Yum API.
The following code gets the dependency list, similar to "yum deplist chkconfig-1.3.49.3-2.el6". This results in a list of all the needed packages, regardless of what is already installed on the system.
What I am actually trying to write is a wrapper equivalent to "yum update chkconfig-1.3.49.3-2.el6", which lists only the required dependencies that are not yet installed on the system.
Below is the code I have tried so far. Is there any other way of accessing the Python Yum API to get what I need? The actual function is "customMethod"; "compare" and "listCompare" compare the RPMs in a list and pick the latest among them.
import sys, re
import yum, rpm
from yum import _
sys.path.insert(0, '/usr/share/yum-cli')
import output
class YumFrame(yum.YumBase, output.YumOutput):
def __init__(self):
try:
yum.YumBase.__init__(self)
output.YumOutput.__init__(self)
except Exception, e:
raise e
self.pattern1 = re.compile(r'^([a-zA-Z0-9_\-\+]*)-([a-zA-Z0-9_\.]*)-([a-zA-Z0-9_\.]*)')
def compare(self, pkg1, pkg2):
Info1 = self.pattern1.search(pkg1).groups()
Info2 = self.pattern1.search(pkg2).groups()
n1, v1, r1 = Info1
n2, v2, r2 = Info2
if n1 == n2:
return rpm.labelCompare(('1', v1, r1), ('1', v2, r2))
else:
return 2
def listCompare(self, input):
latest = input[0]
refinedList = []
for index, item in enumerate(input):
result = self.compare(item, latest)
if result == 1:
latest = item
elif result == 2:
refinedList.append(item)
refinedList.append(latest)
return refinedList
def customMethod(self, package):
pkgs = []
completeList = []
ematch, match, unmatch = self.pkgSack.matchPackageNames([package])
for po in ematch + match:
pkgs.append(po)
print "Matched Object: " + str(pkgs)
results = self.findDeps(pkgs)
for value in results.itervalues():
for packageObject in value.itervalues():
actualList = []
for item in packageObject:
completeList.append(item.name + "-" + item.ver + "-" + item.rel)
completeList = self.listCompare(completeList)
completeList = list(set(completeList))
return completeList
if __name__ == "__main__":
yumObj = YumFrame()
print yumObj.customMethod("chkconfig-1.3.49.3-2.el6")
Thanks in Advance,
M Ram
I am not an expert in Python, but with the Yum CLI there is a simple way:
yum erase [package name]
It will show you a dependency list before actual deletion.
Hope it helps