I am trying to write code for dynamic mode decomposition using the modred module in Python. I wrote the CustomVector class and CustomVecHandle class following the modred documentation, like this:
class CustomVector(mr.Vector):
    def __init__(self, grids, data_array):
        self.grids = grids
        self.data_array = data_array
        self.weighted_ip = mr.InnerProductTrapz(*self.grids)

    def __add__(self, other):
        """Return a new object that is the sum of self and other"""
        sum_vec = deepcopy(self)
        sum_vec.data_array = self.data_array + other.data_array
        return sum_vec

    def __mul__(self, scalar):
        """Return a new object that is ``self * scalar``"""
        mult_vec = deepcopy(self)
        mult_vec.data_array = mult_vec.data_array * scalar
        return mult_vec

    def inner_product(self, other):
        return self.weighted_ip(self.data_array, other.data_array)
class CustomVecHandle(mr.VecHandle):
    def __init__(self, vec_path, base_handle=None, scale=None):
        mr.VecHandle.__init__(self, base_handle, scale)
        self.vec_path = vec_path

    def _get(self):
        # read in the values
        print("reading data from {}".format(self.vec_path))
        reader = vtk.vtkPolyDataReader()
        reader.SetFileName(self.vec_path)
        reader.Update()
        grids = dsa.WrapDataObject(reader.GetOutput()).Points
        data_array = dsa.WrapDataObject(reader.GetOutput()).PointData['U']
        return CustomVector(grids, data_array)

    def _put(self, vec):
        print("writing data to {}".format(self.vec_path))
        common_writer.SetFileName(self.vec_path)
        U_common_reader[:] = vec.data_array
        ier1 = common_writer.Write()  # a return value of 1 means success
        grids_common_reader[:] = vec.grids
        ier2 = common_writer.Write()
        if ier1 != 1 or ier2 != 1:
            raise Error()
        return
Another function is defined outside all classes:
def inner_product(v1, v2):
    return v1.inner_product(v2)
I have created a list object as:
vec_handles = [CustomVecHandle(os.path.join(data_root, d, "U_zNormal.vtk")) for d in dirs]
Then I use the following code to compute the modes:
myDMD = mr.DMDHandles(inner_product, max_vecs_per_node=50)
myDMD.compute_decomp(vec_handles)
But when I run the code, I get this error:
Traceback (most recent call last):
File "vtkDMD.py", line 192, in <module>
eigvals,Rlo_eigvecs,Llo_eigvecs,cm_eigvals,cl_eigvecs = myDMD.compute_decomp(vec_handles)
File "C:\Python27\lib\site-packages\modred\dmd.py", line 679, in compute_decomp
self.vec_handles)
File "C:\Python27\lib\site-packages\modred\vectorspace.py", line 495, in compute_symmetric_inner_product_mat
IP_burn = self.inner_product(test_vec, test_vec)
File "vtkDMD.py", line 142, in inner_product
return v1.inner_product(v2)
File "vtkDMD.py", line 138, in inner_product
return self.weighted_ip(self.data_array, other.data_array)
File "C:\Python27\lib\site-packages\modred\vectors.py", line 159, in __call__
return self.inner_product(vec1, vec2)
File "C:\Python27\lib\site-packages\modred\vectors.py", line 168, in inner_product
IP = np.trapz(IP, x=grid)
File "C:\pv54\bin\lib\site-packages\numpy\lib\function_base.py", line 2941, in trapz
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
ValueError: operands could not be broadcast together with shapes (2,)(28330,)
At first I thought it was because of wrong parameters given to the function:
return CustomVector(grids, data_array)
Then I replaced grids with grids.T, i.e. the transpose of grids, but the same error appeared again. I also get the same error when I replace data_array with data_array.T.
So, does anybody have a good idea to solve this error?
I encountered the same issue while computing POD using the same module. The two arrays you are dealing with have shapes (2,) and (28330,). Try converting them to NumPy arrays where both dimensions are specified explicitly. That will solve the problem.
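For illustration, here is a minimal sketch of what that conversion might look like, assuming grids should become a list of 1-D coordinate arrays and data_array a matching array with explicit dimensions (the names come from the question; the exact shapes depend on how your field is laid out on the VTK grid):

import numpy as np

def make_custom_vector(grids, data_array):
    # Hypothetical reshaping; adjust to your actual grid layout.
    points = np.asarray(grids)              # e.g. shape (28330, 3) from VTK
    x = np.unique(points[:, 0])             # 1-D grid along the first axis
    y = np.unique(points[:, 1])             # 1-D grid along the second axis
    data = np.asarray(data_array).reshape(len(x), len(y), -1)
    return CustomVector([x, y], data)

With explicit dimensions on both arrays, np.trapz inside InnerProductTrapz integrates along axes whose lengths match the grids, instead of trying to broadcast a (2,) array against a (28330,) one.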
If anyone has experience building thread-safe generators, is there any way to do this with a function generator? I came across this post: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
and I have tried to base my code on it, but I am getting an error that does not really make sense to me. Does anyone have any insight?
import numpy as np
import keras
import cv2
import os
from depth_functional import predict

class data_generator(keras.utils.Sequence):
    def __init__(self, ImgPath, string_ids, keys, bs, model, bboxes):
        'Initialize'
        self.ImgPath = ImgPath
        self.string_ids = string_ids
        self.keys = keys
        self.bs = bs
        self.model = model
        self.bboxes = bboxes
        # will keep track how many times we have pulled batches of data
        self.count = 0

    def __len__(self):
        # Denotes number of batches per epoch
        return (int(np.floor(len(self.list_IDs))/self.bs))

    def __getitem__(self, index):
        # Generate indexes of the batch
        indexes = self.indexes[index*self.bs:(index+1)*self.bs]
        # find a list of ids you want to generate
        string_ids_temp = [self.string_ids[k] for k in indexes]
        # Generate data
        RGBD, Masks = self.__data_generation(string_ids_temp)
        return RGBD, Masks

    def on_epoch_end(self):
        self.indexes = np.arange(len(self.string_ids))

    def __data_generation(self, string_ids_temp):
        os.chdir('C:\\Users\\' + self.ImgPath)
        RGBD_images = np.zeros((self.bs, 240, 320))
        Masks = np.zeros((self.bs, 240, 320, 19))
        for i in range(len(string_ids_temp)):
            img = cv2.imread(string_ids_temp[i] + '.jpg')
            RGBD, masks = self.ProcessData(img, self.bboxes[self.count], self.keys[self.count])
            RGBD_images[i, :, :, :] = RGBD
            Masks[i, :, :, :] = masks
            if self.count >= len(self.keys):
                self.count = 0

    def ProcessData(self, img, bbox, keys):
        zeros = np.zeros((img.shape[0], img.shape[1], 18))
        mask = np.zeros((240, 320, 19))
        bbox_height = abs(bbox[0] - bbox[2])
        bbox_width = abs(bbox[1] - bbox[3])
        sigma_x = 400*(bbox_width/224)
        sigma_y = 400*(bbox_height/224)
        for part in range(18):
            if keys[part][2] == 2:
                for j in range(img.shape[1]):
                    x = np.linspace(0, img.shape[0]-1, img.shape[0])
                    y = np.full((img.shape[0],), float(j))
                    x_key = np.full((img.shape[0],), keys[part][1])
                    y_key = np.full((img.shape[0],), keys[part][0])
                    zeros[:, j, part] = np.exp(-(np.square(x-x_key)/sigma_x + np.square(y-y_key)/sigma_y))
        zeros = cv2.resize(zeros, (320, 240))
        mask[:, :, 18] = np.amax(mask, axis=-1)
        resized_img = cv2.resize(img, (640, 480))/255.0
        depth = predict(self.model, resized_img)[0]
        depth = depth/np.amax(depth)
        smaller_img = cv2.resize(img, (320, 240))/255.0
        RGBD = np.concatenate((smaller_img, depth), axis=-1)
        return RGBD, mask
I keep getting the following error which I do not really understand...
File "C:\Users\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "C:\Users\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:\Users\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 1297, in fit_generator
steps_name='steps_per_epoch')
File "C:\Users\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training_generator.py", line 144, in model_iteration
shuffle=shuffle)
File "C:\Users\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training_generator.py", line 477, in convert_to_generator_like
num_samples = int(nest.flatten(data)[0].shape[0])
AttributeError: 'data_generator' object has no attribute 'shape'
Edit, following TheLoneDeranger's skeleton:
class Generator():
    def __init__(self, ImgPath, string_ids, keys, bboxes, bs, model):
        self.ImgPath = ImgPath
        self.string_ids = string_ids
        self.keys = keys
        self.bboxes = bboxes
        self.bs = bs
        self.model = model
        # Will count how many data samples we have pulled
        self.count = 0

    def __iter__(self):
        return self

    def __len__(self):
        # returns the total number of training samples
        return len(self.keys)

    def __next__(self):
        RGBD_Images = np.zeros((self.bs, 240, 320, 4))
        Masks = np.zeros((self.bs, 240, 320, 19))
        os.chdir('C:\\Users\\' + self.ImgPath)
        for i in range(self.count*self.bs, (self.count+1)*self.bs):
            img = cv2.imread(self.string_ids[i] + '.jpg')
            zeros = np.zeros((img.shape[0], img.shape[1], 18), dtype='float')
            mask = np.zeros((240, 320, 19), dtype='float')
            bbox_height = abs(self.bboxes[i][0] - self.bboxes[i][2])
            bbox_width = abs(self.bboxes[i][1] - self.bboxes[i][3])
            sigma_x = 400*(bbox_width/224)
            sigma_y = 400*(bbox_height/224)
            for part in range(18):
                if self.keys[i][part][2] == 2:
                    for j in range(img.shape[1]):
                        x = np.linspace(0, img.shape[0]-1, img.shape[0])
                        y = np.full((img.shape[0],), float(j))
                        x_key = np.full((img.shape[0],), self.keys[i][part][1])
                        y_key = np.full((img.shape[0],), self.keys[i][part][0])
                        zeros[:, j, part] = np.exp(-(np.square(x-x_key)/sigma_x + np.square(y-y_key)/sigma_y))
            zeros = cv2.resize(zeros, (320, 240))
            mask[:, :, 18] = np.amax(mask, axis=-1)
            Masks[i % self.bs, :, :, :] = mask
            resized_img = cv2.resize(img, (640, 480))/255.0
            depth = predict(self.model, resized_img)[0]  # 240 x 320 x 1 output
            depth = depth/np.amax(depth)
            smaller_img = cv2.resize(img, (320, 240))/255.0
            RGBD = np.concatenate((smaller_img, depth), axis=2)
            RGBD_Images[i % self.bs, :, :, :] = RGBD
        self.count += 1
        if (self.count+1)*self.bs >= len(self.keys):
            self.count = 0
        return (RGBD_Images, Masks)
I keep this on hand as a basis for quickly making new generators for TF/Keras. There are other ways to do it, but I use this in the majority of circumstances.
class Generator:
    def __init__(self, base_path):
        # initialize however... for example:
        self.base_path = base_path
        self.files = os.listdir(self.base_path)

    def __iter__(self):
        return self

    def __len__(self):
        return len(self.files)  # needs to return total training samples

    def __next__(self):
        # get your images and labels
        return (images, labels)
You can then use them like so:
train_gen = Generator(train_path)
val_gen = Generator(val_path)
model.fit_generator(train_gen,
                    validation_data=val_gen,
                    steps_per_epoch=200,
                    validation_steps=200,
                    epochs=666)
Hope that helps!
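If it helps, here is a hedged sketch of that skeleton filled in for one simple layout; the file naming scheme and the label encoding are assumptions for illustration, not part of the answer above:

import os
import cv2
import numpy as np

class ImageGenerator:
    """Minimal batch generator following the skeleton above (illustrative only)."""
    def __init__(self, base_path, batch_size=32):
        self.base_path = base_path
        self.files = sorted(os.listdir(base_path))  # assumes one image per file
        self.batch_size = batch_size
        self.index = 0

    def __iter__(self):
        return self

    def __len__(self):
        return len(self.files)  # total training samples

    def __next__(self):
        batch = self.files[self.index:self.index + self.batch_size]
        self.index = (self.index + self.batch_size) % len(self.files)
        images = np.stack([cv2.imread(os.path.join(self.base_path, f)) / 255.0
                           for f in batch])
        # hypothetical: label encoded in the filename, e.g. "3_cat.jpg"
        labels = np.array([int(f.split('_')[0]) for f in batch])
        return images, labels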
I'm a beginner with MyHDL.
I am trying to translate the following Verilog code to MyHDL:
module ModuleA(data_in, data_out, clk);
    input data_in;
    output reg data_out;
    input clk;

    always @(posedge clk) begin
        data_out <= data_in;
    end
endmodule

module ModuleB(data_in, data_out, clk);
    input [1:0] data_in;
    output [1:0] data_out;
    input clk;

    ModuleA instance1(data_in[0], data_out[0], clk);
    ModuleA instance2(data_in[1], data_out[1], clk);
endmodule
Currently, I have this code:
import myhdl

@myhdl.block
def ModuleA(data_in, data_out, clk):
    @myhdl.always(clk.posedge)
    def logic():
        data_out.next = data_in
    return myhdl.instances()

@myhdl.block
def ModuleB(data_in, data_out, clk):
    instance1 = ModuleA(data_in(0), data_out(0), clk)
    instance2 = ModuleA(data_in(1), data_out(1), clk)
    return myhdl.instances()

# Create signals
data_in = myhdl.Signal(myhdl.intbv()[2:])
data_out = myhdl.Signal(myhdl.intbv()[2:])
clk = myhdl.Signal(bool())

# Instantiate the DUT
dut = ModuleB(data_in, data_out, clk)

# Convert the DUT to Verilog
dut.convert()
But it doesn't work, because signal slicing produces a read-only shadow signal (cf. MEP-105).
So, what is the right way in MyHDL to get a writable slice of a signal?
Edit:
This is the error I get
$ python demo.py
Traceback (most recent call last):
File "demo.py", line 29, in <module>
dut.convert()
File "/home/killruana/.local/share/virtualenvs/myhdl_sandbox-dYpBu4o5/lib/python3.6/site-packages/myhdl-0.10-py3.6.egg/myhdl/_block.py", line 342, in convert
File "/home/killruana/.local/share/virtualenvs/myhdl_sandbox-dYpBu4o5/lib/python3.6/site-packages/myhdl-0.10-py3.6.egg/myhdl/conversion/_toVerilog.py", line 177, in __call__
File "/home/killruana/.local/share/virtualenvs/myhdl_sandbox-dYpBu4o5/lib/python3.6/site-packages/myhdl-0.10-py3.6.egg/myhdl/conversion/_analyze.py", line 170, in _analyzeGens
File "/usr/lib/python3.6/ast.py", line 253, in visit
return visitor(node)
File "/home/killruana/.local/share/virtualenvs/myhdl_sandbox-dYpBu4o5/lib/python3.6/site-packages/myhdl-0.10-py3.6.egg/myhdl/conversion/_analyze.py", line 1072, in visit_Module
File "/home/killruana/.local/share/virtualenvs/myhdl_sandbox-dYpBu4o5/lib/python3.6/site-packages/myhdl-0.10-py3.6.egg/myhdl/conversion/_misc.py", line 148, in raiseError
myhdl.ConversionError: in file demo.py, line 4:
Signal has multiple drivers: data_out
You can use an intermediate list of Signal(bool()) as placeholders.
@myhdl.block
def ModuleB(data_in, data_out, clk):
    tsig = [myhdl.Signal(bool(0)) for _ in range(len(data_in))]
    instances = []
    for i in range(len(data_in)):
        instances.append(ModuleA(data_in(i), tsig[i], clk))

    @myhdl.always_comb
    def assign():
        for i in range(len(data_out)):
            data_out.next[i] = tsig[i]

    return myhdl.instances()
A quick (probably non-fulfilling) comment: the intbv is treated as a single entity that can't have multiple drivers. Two references that might help shed some light:
http://jandecaluwe.com/hdldesign/counting.html
http://docs.myhdl.org/en/stable/manual/structure.html#converting-between-lists-of-signals-and-bit-vectors
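Building on the second reference, here is a hedged sketch of the same idea using MyHDL's ConcatSignal, which builds a read-only vector shadowing a list of bit signals (illustrative, not canonical; the structure mirrors the answer above):

@myhdl.block
def ModuleB(data_in, data_out, clk):
    # one placeholder bit per slice, as in the answer above
    tsig = [myhdl.Signal(bool(0)) for _ in range(len(data_in))]
    insts = [ModuleA(data_in(i), tsig[i], clk) for i in range(len(data_in))]

    # ConcatSignal shadows the bit list as a vector; its first argument
    # becomes the most significant bit, hence the reversed order
    vec = myhdl.ConcatSignal(*reversed(tsig))

    @myhdl.always_comb
    def assign():
        data_out.next = vec

    return insts, assign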
I am trying to calculate some values for satellites. The data generation takes quite long, so I want to implement it using multiprocessing.
The problem is that I get this error from PyEphem: TypeError: can't pickle ephem.EarthSatellite objects. The PyEphem objects are not used in the functions that I want to parallelize.
This is an example file of my code (minimized).
This is my main file:
main.py
import ephem
import numpy
import math
import multiprocessing as mp
from SampleSats import Sats

GPS_Satellites = []
SFrames = 1
TLE = ["GPS BIIR-3 (PRN 11)",
       "1 25933U 99055A 18090.43292845 -.00000054 00000-0 00000+0 0 9994",
       "2 25933 51.8367 65.0783 0165007 100.2058 316.9161 2.00568927135407"]
# PRN TLE file from CelesTrak
GPS_Satellites.append(Sats(TLE))

Position = ephem.Observer()
Position.date = '2018/3/31 00:00'  # 31st March 2018 at 00:00 UTC
Position.lon, Position.lat = "36.845663", "-37.161123"  # Coordinates for desired Position

# Calculate Satellites
for Frames in range(SFrames):
    print("Generate Signals for Time: ", Position.date)
    for Sats in GPS_Satellites:  # par
        Sats.compute(Position)
        # Calculate angle above horizon
        if ((float(repr(Sats.ephemeris.alt)) * 180 / math.pi) < 5) or (
                (float(repr(Sats.ephemeris.alt)) * 180 / math.pi) > 90):
            Sats.visible = 0
        else:
            Sats.visible = 1
    with mp.Pool() as pool:
        for value, obj in zip(pool.map(Sats.genSignal, GPS_Satellites), GPS_Satellites):
            obj.Signal = value
    Position.date = Position.date + 6*ephem.second  # 1 Subframe is 6 seconds long
This is the Sats class that I wrote:
sats.py:
import ephem
import numpy

class Sats:
    """Save Satellites as Objects"""

    def __init__(self, tle):
        """:param tle: Two Line Element for ephemeris data, also used to get the PRN number from the name"""
        self.ephemeris = ephem.readtle(tle[0], tle[1], tle[2])
        self.visible = 1
        self.subframes = 0
        self.CAseq = [x for x in range(1023)]
        self.Out = []
        self.Signal = numpy.zeros(int(300*20*1023), dtype=numpy.int8)

    def compute(self, pos):
        self.ephemeris.compute(pos)
        self.Out.append(numpy.arange(0, 299, 1))
        self.subframes += 1

    def calcData(self, bit, prn):
        return (self.Out[self.subframes - 1].item(0)[0][bit] + self.CAseq[prn]) % 2

    def genSignal(self):
        if (self.visible == 1):
            for bit in range(300):           # 1 Subframe is 300 bits long
                for x in range(20):          # The PRN sequence reoccurs every ms -> 20 times per bit
                    for prn in range(1023):  # length of the PRN sequence
                        self.Signal[bit*x*prn] = (-1 if (self.calcData(bit, prn)) == 0 else 1)
        else:
            self.Signal = numpy.zeros(300*20*1023)
        return self.Signal
Traceback:
Traceback (most recent call last):
File "C:/Users/PATH_TO_PROJECT/SampleTest.py", line 33, in <module>
for value, obj in zip(pool.map(Sats.genSignal, GPS_Satellites), GPS_Satellites):
File "C:\Program Files\Python36\lib\multiprocessing\pool.py", line 266, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "C:\Program Files\Python36\lib\multiprocessing\pool.py", line 644, in get
raise self._value
File "C:\Program Files\Python36\lib\multiprocessing\pool.py", line 424, in _handle_tasks
put(task)
File "C:\Program Files\Python36\lib\multiprocessing\connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "C:\Program Files\Python36\lib\multiprocessing\reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
TypeError: can't pickle ephem.EarthSatellite objects
The reason is something like this... when you try to pickle a function, it can attempt to pickle globals(), so whatever you have in your global namespace is also pickled (just in case your function has a reference to something in globals() -- yes, that's unexpected, but that's how it is). So, an easy fix is to isolate the function you want to pickle in another file -- in this case, put the multiprocessing stuff in one file and the other code in another file... so there's less in globals() for the pickler to struggle with. Another thing that might help is to use multiprocess instead of multiprocessing -- multiprocess uses the dill serializer instead of pickle, so you have a better chance of serializing objects that will be sent across the workers in the Pool.
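As a small, hedged sketch of the second suggestion (assuming multiprocess is installed via pip install multiprocess; its Pool API mirrors multiprocessing's):

import multiprocess as mp  # drop-in for multiprocessing, uses dill instead of pickle

def run_signals(satellites):
    # dill can serialize lambdas and bound methods that pickle rejects;
    # whether ephem.EarthSatellite itself serializes still depends on dill
    with mp.Pool() as pool:
        return pool.map(lambda sat: sat.genSignal(), satellites)

Keeping a helper like this in its own module also follows the first suggestion: the worker file imports only what it needs, so there is less in globals() for the serializer to struggle with.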
Playing around a lot with sympy lately, I came up against the problem of calculating divergence and gradient for scalar and vector fields. What I want to do for now is solve the heat equation, i.e.
d/dt u(x,t) - a * lap(u(x,t)) = 0, with lap(u) = div(grad(u)),
on a scalar field. Since I could not find lap, div and grad in sympy.physics.mechanics, I tried to implement them myself:
from sympy import *
from sympy.physics.mechanics import *

o = ReferenceFrame('o')
x, y, z = symbols('x y z')

class div(Function):
    @classmethod
    def eval(cls, F):
        return F.diff(x, o).dot(o.x) + F.diff(y, o).dot(o.y) + F.diff(z, o).dot(o.z)

class grad(Function):
    @classmethod
    def eval(cls, F):
        return o.x * F.diff(x) + o.y * F.diff(y) + o.z * F.diff(z)

f = x**2 + y**3 + z*0.5

print grad(f)
print type(grad(f))
print div(grad(f))
Unluckily, this gives
2*x*o.x + 3*y**2*o.y + 0.500000000000000*o.z
Traceback (most recent call last):
File "/home/fortmeier/Desktop/autokernel/autokernel/tools/GenerateCode.py", line 24, in <module>
print div(grad(f))
File "/usr/local/lib/python2.7/dist-packages/sympy/core/cache.py", line 93, in wrapper
r = func(*args, **kw_args)
File "/usr/local/lib/python2.7/dist-packages/sympy/core/function.py", line 368, in __new__
result = super(Function, cls).__new__(cls, *args, **options)
File "/usr/local/lib/python2.7/dist-packages/sympy/core/cache.py", line 93, in wrapper
r = func(*args, **kw_args)
File "/usr/local/lib/python2.7/dist-packages/sympy/core/function.py", line 188, in __new__
args = list(map(sympify, args))
File "/usr/local/lib/python2.7/dist-packages/sympy/core/sympify.py", line 313, in sympify
expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)
File "/usr/local/lib/python2.7/dist-packages/sympy/parsing/sympy_parser.py", line 757, in parse_expr
return eval_expr(code, local_dict, global_dict)
File "/usr/local/lib/python2.7/dist-packages/sympy/parsing/sympy_parser.py", line 691, in eval_expr
code, global_dict, local_dict) # take local objects in preference
File "<string>", line 1, in <module>
AttributeError: 'Symbol' object has no attribute 'x'
[Finished in 0.3s with exit code 1]
I know that I could do something with the galgebra module, but first I'd like to understand what's going on here. The question thus is: what am I missing?
This looks like a bug in SymPy. sympify(ReferenceFrame('o').x) does not work.
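One way to sidestep it, as a sketch (my suggestion, not part of the original diagnosis): the failing sympify call happens in Function.__new__, so plain Python functions, which never sympify their arguments, avoid the problem entirely:

from sympy import symbols
from sympy.physics.mechanics import ReferenceFrame

o = ReferenceFrame('o')
x, y, z = symbols('x y z')

# Plain functions never call sympify on their arguments, so the
# Vector returned by grad() can be passed straight into div().
def grad(F):
    return o.x * F.diff(x) + o.y * F.diff(y) + o.z * F.diff(z)

def div(F):
    return F.diff(x, o).dot(o.x) + F.diff(y, o).dot(o.y) + F.diff(z, o).dot(o.z)

f = x**2 + y**3 + z*0.5
print(div(grad(f)))  # 6*y + 2

You lose the symbolic Function behaviour (unevaluated forms, pretty printing), but for composing lap = div(grad(...)) in the heat equation the plain functions are enough.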
I'm trying to use qmath, a quaternion lib.
This:
from qmath import qmathcore
a = qmathcore.quaternion([1,2,3,4])
print a.conj()
gives me this traceback:
Traceback (most recent call last):
File "*******/q_test.py", line 25, in <module>
print str(a.conj())
File "*******/venv/lib/python2.7/site-packages/qmath/qmathcore.py", line 788, in conj
return self.real() - self.imag()
File "*******/venv/lib/python2.7/site-packages/qmath/qmathcore.py", line 762, in imag
return self - self.real()
File "*******/venv/lib/python2.7/site-packages/qmath/qmathcore.py", line 522, in __sub__
self -= other
File "*******/venv/lib/python2.7/site-packages/qmath/qmathcore.py", line 407, in __isub__
self.other = quaternion(other)
File "*******/venv/lib/python2.7/site-packages/qmath/qmathcore.py", line 81, in __init__
self.q = q.q
AttributeError: quaternion instance has no attribute 'q'
But the docs say this should work:
def conj(self):
    """
    Returns the conjugate of the quaternion

    >>> import qmathcore
    >>> a = qmathcore.quaternion([1,2,3,4])
    >>> a.conj()
    (1.0-2.0i-3.0j-4.0k)
    >>> a = qmathcore.hurwitz([1,2,3,4])
    >>> a.conj()
    (1-2i-3j-4k)
    """
    return self.real() - self.imag()
What is going on here?
qmathcore.py fails its own doctest with a newer (1.9) numpy.
Adding this test to quaternion()
elif isinstance(q, float) or isinstance(q, int):  # accept np.float64
    self.q = 1.0 * np.array([q, 0., 0., 0.])
allows qmath.quaternion([1,2,3,4]).imag() (and conj()) to work.
The quaternion constructor uses a lot of type(q) == xxx tests; isinstance() is a more robust test. It also ends with an else: pass, and thus silently ignores q values that it can't handle.
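A quick illustration of why that matters with NumPy scalars (np.float64 subclasses Python's float, so isinstance catches it while an exact type comparison does not):

import numpy as np

q = np.float64(1.0)
print(type(q) == float)      # False: np.float64 is a distinct type
print(isinstance(q, float))  # True: np.float64 subclasses float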
After correcting some import errors, the qmathcore doctest runs fine.