I get the error module 'keygen' has no attribute 'keygen' - python

I have also installed the appropriate libraries, but the error still shows up. The code is written below:
import keygen as kg
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('Images/111.png')
plt.imshow(img)
#plt.show()
# Now generating the chaotic key
height = img.shape[0]
width = img.shape[1]
key = kg.keygen(0.01,3.951,height*width)
I get the error at the last line.

Here keygen is a function, not a library. The code for that function is:
def keygen(x,r,size):
    key = []
    for i in range(size):
        x = r*x*(1-x)
        key.append(int((x*pow(10,16))%256))
    return key
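For reference, here is the same logistic-map key generation as one self-contained sketch, with the function defined in the script itself rather than imported (the image path is the one from the question):

import matplotlib.image as mpimg

def keygen(x, r, size):
    # Iterate the logistic map x = r*x*(1-x) and turn each value into a byte (0-255)
    key = []
    for _ in range(size):
        x = r * x * (1 - x)
        key.append(int((x * pow(10, 16)) % 256))
    return key

img = mpimg.imread('Images/111.png')
height, width = img.shape[0], img.shape[1]
key = keygen(0.01, 3.951, height * width)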

Related

Trying to Plot FFT for an Image Array

I'm trying to create a signal plot for an array of pictures using the following code:
import numpy as np
import sys
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
imgArr = {}
stnArr = {}
frmArr = {}
sgnArr = {}
for i in range(1,2397):
    imgArr[i] = mpimg.imread("20210209_themis_rank" + str(i) + ".png")
    stnArr[i] = np.mean([imgArr[i]]/np.std(imgArr[i]))
    frmArr[i] = i
    signal = np.fft.fft(imgArr[i])
for i in range(1,2397):
    plt.plot(frmArr,np.abs(signal))
plt.show()
However, I keep on running into the following error. How can I get it to work?
raise ValueError(f"x and y must have same first dimension, but "
ValueError: x and y must have same first dimension, but have shapes (1,) and (600, 600, 4)
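The traceback says that the x and y passed to plt.plot have different lengths: a full 2-D image FFT is being plotted against a single frame index. One possible direction, sketched here under the assumption that the goal is one curve across all frames (using the filename pattern from the question), is to reduce each frame's FFT to a single magnitude value so x (frame number) and y (magnitude) end up the same length; the averaging choice below is purely illustrative:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

frames = []
magnitudes = []
for i in range(1, 2397):
    img = mpimg.imread("20210209_themis_rank" + str(i) + ".png")
    grey = img.mean(axis=-1)                              # collapse RGBA channels to 2-D
    magnitudes.append(np.abs(np.fft.fft2(grey)).mean())   # one scalar per frame
    frames.append(i)

plt.plot(frames, magnitudes)
plt.show()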

scipy and numpy error with as_matrix function

This is a "working example" that does not work. Why does this not run? scipy seems to be the problem.
I get this error:
File "display_map.py", line 35, in
rot_cw = R.from_quat(keyframe["rot_cw"]).as_matrix()
AttributeError: 'Rotation' object has no attribute 'as_matrix'
Please can someone help me change it? I tried downgrading the version of scipy.
import msgpack
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy.linalg import inv
from scipy.spatial.transform import Rotation as R
import open3d as o3d
import sys
if len(sys.argv) < 2:
    print(
        "ERROR: Please provide path to .msg file. Example usage is; python3 visualize_openvslam_map.py path_to.msg"
    )
    exit()
with open(sys.argv[1], "rb") as f:
    unpacked_msg = msgpack.Unpacker(f)
    packed_msg = unpacked_msg.unpack()
    keyframes = packed_msg["keyframes"]
    landmarks = packed_msg["landmarks"]
# FILL IN KEYFRAME POINTS (ODOMETRY) TO ARRAY
keyframe_points = []
keyframe_points_color = []
for keyframe in keyframes.values():
    # get conversion from camera to world
    trans_cw = np.matrix(keyframe["trans_cw"]).T
    rot_cw = R.from_quat(keyframe["rot_cw"]).as_matrix()
    # compute conversion from world to camera
    rot_wc = rot_cw.T
    trans_wc = -rot_wc * trans_cw
    keyframe_points.append((trans_wc[0, 0], trans_wc[1, 0], trans_wc[2, 0]))
keyframe_points = np.array(keyframe_points)
keyframe_points_color = np.repeat(np.array([[0., 1., 0.]]),
                                  keyframe_points.shape[0],
                                  axis=0)
# FILL IN LANDMARK POINTS TO ARRAY
landmark_points = []
landmark_points_color = []
for lm in landmarks.values():
    landmark_points.append(lm["pos_w"])
    landmark_points_color.append([
        abs(lm["pos_w"][1]) * 4,
        abs(lm["pos_w"][1]) * 2,
        abs(lm["pos_w"][1]) * 3
    ])
landmark_points = np.array(landmark_points)
landmark_points_color = np.array(landmark_points_color)
# CONSTRUCT KEYFRAME (ODOMETRY) FOR VISUALIZATION
keyframe_points_pointcloud = o3d.geometry.PointCloud()
keyframe_points_pointcloud.points = o3d.utility.Vector3dVector(keyframe_points)
keyframe_points_pointcloud.colors = o3d.utility.Vector3dVector(
    keyframe_points_color)
# CONSTRUCT LANDMARK POINTCLOUD FOR VISUALIZATION
landmark_points_pointcloud = o3d.geometry.PointCloud()
landmark_points_pointcloud.points = o3d.utility.Vector3dVector(landmark_points)
landmark_points_pointcloud.colors = o3d.utility.Vector3dVector(
    landmark_points_color)
# VISUALIZE MAP
o3d.visualization.draw_geometries([
    keyframe_points_pointcloud, landmark_points_pointcloud,
    o3d.geometry.TriangleMesh.create_coordinate_frame()
])
In scipy.spatial.transform.Rotation, the methods from_dcm and as_dcm were renamed to from_matrix and as_matrix respectively; older scipy releases only provide as_dcm, which is why as_matrix raises an AttributeError there.
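A small compatibility sketch, assuming you need the code to run on both older and newer scipy releases (the quaternion value here is just a placeholder):

from scipy.spatial.transform import Rotation as R

r = R.from_quat([0.0, 0.0, 0.0, 1.0])  # identity rotation as a placeholder
try:
    rot = r.as_matrix()   # scipy >= 1.4
except AttributeError:
    rot = r.as_dcm()      # older scipy, same 3x3 rotation matrix
print(rot)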

Get a three dimensional array as a parameter and return a three dimensional array

I have to write a function to_red that should zero out the green and blue color components and return the result. I wrote the code below for an image (.png) to zero out the green and blue channels and keep red, and it worked. However, as mentioned in the title, the function has to take a 3-D array as a parameter and return a 3-D array. How should my code below be changed for that?
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
def to_red():
    src = plt.imread("C:\src\painting.png")
    red_channel = src[:,:,0]
    red_img = np.zeros(src.shape)
    red_img[:,:,0] = red_channel
    plt.imshow(red_img)
    plt.show()
You can write your function like this:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
def to_red(src):
    # Check if the input dimension is 3
    if not src.ndim == 3:
        # Raise exception or do something
        print("Dimension mismatch")
        return 0
    red_channel = src[:,:,0]
    red_img = np.zeros(src.shape)
    red_img[:,:,0] = red_channel
    return red_img
And then you can call it like this:
source_image = plt.imread("C:\src\painting.png")
red_image = to_red(source_image)
plt.imshow(red_image)
plt.show()
I also added a line to check if the input is actually 3 dimensional.
You can use numpy's powerful indexing capabilities:
def to_red(src):
    ret = src.copy()
    ret[:,:,1:] = 0
    return ret
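A quick check of that slicing approach on a synthetic array (the array and names here are only for illustration):

import numpy as np

rgb = np.random.rand(4, 4, 3)          # dummy 3-D image array
red_only = to_red(rgb)
assert (red_only[:, :, 0] == rgb[:, :, 0]).all()   # red channel kept
assert (red_only[:, :, 1:] == 0).all()             # green and blue zeroed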

Python 3.7: multiprocessing a for loop with shared variables

First, a bit of context:
I'm trying to write a Python script to convert a greyscale image (.tif) to a .jpeg with the so-called ''jet'' colormap. I managed to do it with a for loop, but it takes a while for one image (millions of pixels to process!), so I would like to use multiprocessing.
My problem here is that to convert each grey pixel into a coloured one I have to use two variables (the minimum value of light intensity ''min_img'' and a vector ''dx_cm'' to go from the initial grey scale to a 256-level scale corresponding to the jet colormap).
So to pass ''min_img'' and ''dx_cm'' to the processes I tried to use multiprocessing.Value(), but in return I get the error:
RuntimeError: Synchronized objects should only be shared between processes through inheritance
I have tried many different things from different sources, and no matter which version of my code I use I keep struggling with that error. So I'm sorry if my code isn't clean; I would be very grateful if someone could help me with that.
My non-working code:
import multiprocessing
from PIL import Image
from matplotlib import cm

def fun(gr_list, dx, minp):
    dx_cmp = dx.value
    min_imgp = minp.value
    rgb_res = list()
    for i in range(len(gr_list)):
        rgb_res.extend(cm.jet(round(((gr_list[i]-min_imgp)/dx_cmp)-1))[0:-1])
    return rgb_res

if __name__ == '__main__':
    RGB_list = list()
    n = multiprocessing.cpu_count()
    img = Image.open(r'some_path_to_a.tif')
    Img_grey = list(img.getdata())
    dx_cm = multiprocessing.Value('d', (max(Img_grey)-min(Img_grey))/256)
    min_img = multiprocessing.Value('d', min(Img_grey))
    with multiprocessing.Pool(n) as p:
        RGB_list = list(p.map(fun, (Img_grey, dx_cm, min_img)))
    res = Image.frombytes("RGB", (img.size[0], img.size[1]), bytes([int(0.5 + 255*i) for i in RGB_list]))
    res.save('rgb_file.jpg')
PS: Here is an example of the initial for loop that I would like to parallelize:
from PIL import Image
from matplotlib import cm

if __name__ == '__main__':
    img = Image.open(r'some_path_to_a.tif')
    Img_grey = list(img.getdata())
    dx_cm = (max(Img_grey)-min(Img_grey))/256
    min_img = min(Img_grey)
    Img_rgb = list()
    for i in range(len(Img_grey)):
        Img_rgb.extend(cm.jet(round(((Img_grey[i]-min_img)/dx_cm)-1))[0:-1])
    res = Image.frombytes("RGB", (img.size[0], img.size[1]), bytes([int(0.5 + 255*i) for i in Img_rgb]))
    res.save('rgb_file.jpg')
Your fun method loops over a whole list, but with p.map it will receive a "part", a single item from your list, so it should return only the result of processing that item.
I have changed the working code to run with multiprocessing.
As the fun method returns a list, p.map will return a list of lists (a list of results) that needs to be flattened; this was previously done with the list's extend method.
I tried both process-pool and thread-pool multiprocessing; in my scenario there weren't any performance gains.
Process multiprocessing:
from PIL import Image
from matplotlib import cm
import multiprocessing

def fun(d):
    part, dx_cm, min_img = d
    return cm.jet(round(((part-min_img)/dx_cm)-1))[0:-1]

if __name__ == '__main__':
    img = Image.open(r'a.tif')
    Img_grey = list(img.getdata())

    def Gen(img_data):
        dx_cm = (max(img_data)-min(img_data))/256
        min_img = min(img_data)
        for part in img_data:
            yield part, dx_cm, min_img

    n = multiprocessing.cpu_count()
    with multiprocessing.Pool(n) as p:
        Img_rgb = [item for sublist in p.map(fun, Gen(Img_grey)) for item in sublist]
    res = Image.frombytes("RGB", (img.size[0], img.size[1]), bytes([int(0.5 + 255*i) for i in Img_rgb]))
    res.save('b.jpg')
Thread multiprocessing:
from PIL import Image
from matplotlib import cm
import multiprocessing
from multiprocessing.pool import ThreadPool

if __name__ == '__main__':
    img = Image.open(r'a.tif')
    Img_grey = list(img.getdata())
    dx_cm = (max(Img_grey)-min(Img_grey))/256
    min_img = min(Img_grey)

    def fun(part):
        return cm.jet(round(((part-min_img)/dx_cm)-1))[0:-1]

    n = multiprocessing.cpu_count()
    with ThreadPool(n) as p:
        Img_rgb = [item for sublist in p.map(fun, Img_grey) for item in sublist]
    res = Image.frombytes("RGB", (img.size[0], img.size[1]), bytes([int(0.5 + 255*i) for i in Img_rgb]))
    res.save('b.jpg')
So it seems that the computational burden isn't big enough for multiprocessing to be helpful.
Nevertheless, for those coming across this topic who are interested in the image-processing part of my question, I found another, much quicker way (15 to 20× faster than the previous method) to do the same thing without a for loop:
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
from PIL import Image
cm_jet = cm.get_cmap('jet')
img_src = Image.open(r'path to your grey image')
img_src.mode='I'
Img_grey = list(img_src.getdata())
max_img = max(Img_grey)
min_img = min(Img_grey)
rgb_array=np.uint8(cm_jet(((np.array(img_src)-min_img)/(max_img-min_img)))*255)
ax = plt.subplot(111)
im = ax.imshow(rgb_array, cmap='jet')
divider = make_axes_locatable(ax)
cax_plot = divider.append_axes("right", size="5%", pad=0.05)
cbar=plt.colorbar(im, cax=cax_plot, ticks=[0,63.75,127.5,191.25,255])
dx_plot=(max_img-min_img)/255
cbar.ax.set_yticklabels([str(min_img),str(round(min_img+63.75*dx_plot)),str(round(min_img+127.5*dx_plot)),str(round(min_img+191.25*dx_plot)), str(max_img)])
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
plt.savefig('test_jet.jpg', quality=95, dpi=1000)

Dask delayed + Matplotlib.savefig() -> FAIL

My goal is to produce multiple png files, one from each of multiple numpy arrays loaded from medical images on my HD.
To make things quicker, I'm using dask delayed.
Here's my working code:
import os.path
from glob import glob
import nibabel as nib
import numpy as np
from dask import delayed
def process(data):
    # Need to have the import inside so that multiprocessing works.
    # Apparently doesn't solve the issue anyway..
    import matplotlib.pyplot as plt
    outpath = '/Users/user/outputdir/'
    name = os.path.basename(data.get_filename())
    savename = name[:name.index('.')] + '.png'
    plt.imshow(np.rot90(data.get_data()[15:74, 6:82, 18, 0]),
               extent=[0, 1, 0, 1], aspect=1.28, cmap='gray')
    plt.axis('off')
    out = os.path.join(outpath, savename)
    plt.savefig(out)
    plt.close()
    return out

L = []
for fn in glob("/Users/user/imagefiles/mb*.nii.gz"):
    nifti = delayed(nib.load)(fn)
    outpng = delayed(process)(nifti)
    L.append(outpng)

results = delayed(print)(L)
results.compute()
My problem is that after each run some of the output images are empty (nothing in the png), and which images are empty seems pretty random, even though all input data is valid.
I suspect this is a problem with multiprocessing and matplotlib, as seen in other related threads.
Does anyone have a suggestion on how to get this working with dask?
EDIT: Minimal working example
import os.path
import random
import string
import numpy as np
from dask import delayed
def gendata(fn):
    return

def process(data):
    # Need to have the import inside so that multiprocessing works.
    import matplotlib.pyplot as plt
    outpath = '/Users/user/Pictures/test/'
    name = ''.join(random.choices(string.ascii_lowercase, k=10))
    savename = name + '.png'
    data = np.random.randint(0, 255, size=(100,100,20,2))
    plt.imshow(np.rot90(data[15:74, 6:82, 18, 0]),
               extent=[0, 1, 0, 1], aspect=1.28, cmap='gray')
    plt.axis('off')
    out = os.path.join(outpath, savename)
    plt.savefig(out)
    plt.close()
    return out

L = []
for fn in range(0, 10):
    nifti = delayed(gendata)(fn)
    outpng = delayed(process)(nifti)
    L.append(outpng)

results = delayed(print)(L)
results.compute()
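Since the suspicion above is that pyplot's global figure state gets shared across dask workers, one direction worth trying is to build each image with matplotlib's object-oriented API so nothing global is touched. This is only a sketch under that assumption; process_oo, its default paths, and the 4-D array slicing (taken from the MWE above) are illustrative, not a confirmed fix:

import os
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg

def process_oo(data, outpath='/tmp/test_pngs', savename='frame.png'):
    # Each call builds its own Figure and Agg canvas, so concurrent tasks
    # never touch pyplot's shared "current figure" state.
    fig = Figure()
    FigureCanvasAgg(fig)
    ax = fig.add_subplot(111)
    ax.imshow(np.rot90(data[15:74, 6:82, 18, 0]),
              extent=[0, 1, 0, 1], aspect=1.28, cmap='gray')
    ax.axis('off')
    os.makedirs(outpath, exist_ok=True)
    out = os.path.join(outpath, savename)
    fig.savefig(out)
    return out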
